From bc10a08e36de1ff2e850128b5a4b00d301aabe31 Mon Sep 17 00:00:00 2001
From: lloretgalleg
Date: Tue, 26 Jan 2021 12:12:03 +0000
Subject: [PATCH 01/35] Bug 1417 solved: Timeout contacting EE grpc server

Change-Id: Ie1b67cc20726bf0358f3a96fb0549455a32e7957
Signed-off-by: lloretgalleg
---
 osm_lcm/lcm.cfg          |  2 ++
 osm_lcm/lcm_helm_conn.py | 44 +++++++++++++++++++++++++++++++---------
 2 files changed, 36 insertions(+), 10 deletions(-)

diff --git a/osm_lcm/lcm.cfg b/osm_lcm/lcm.cfg
index 64a7da5..96fb373 100644
--- a/osm_lcm/lcm.cfg
+++ b/osm_lcm/lcm.cfg
@@ -50,6 +50,8 @@ VCA:
     # pubkey: pubkey
     # cacert: cacert
     # apiproxy: apiproxy
+    #eegrpcinittimeout: 600
+    #eegrpctimeout: 30
     # loglevel: DEBUG
     # logfile: /var/log/osm/lcm-vca.log

diff --git a/osm_lcm/lcm_helm_conn.py b/osm_lcm/lcm_helm_conn.py
index 831190b..27c330f 100644
--- a/osm_lcm/lcm_helm_conn.py
+++ b/osm_lcm/lcm_helm_conn.py
@@ -40,7 +40,7 @@ from n2vc.exceptions import N2VCBadArgumentsException, N2VCException, N2VCExecut
 from osm_lcm.lcm_utils import deep_get


-def retryer(max_wait_time=60, delay_time=10):
+def retryer(max_wait_time_var="_initial_retry_time", delay_time_var="_retry_delay"):
     def wrapper(func):
         retry_exceptions = (
             ConnectionRefusedError
         )
@@ -48,6 +48,17 @@

         @functools.wraps(func)
         async def wrapped(*args, **kwargs):
+            # default values for wait time and delay_time
+            delay_time = 10
+            max_wait_time = 300
+
+            # obtain arguments from variable names
+            self = args[0]
+            if self.__dict__.get(max_wait_time_var):
+                max_wait_time = self.__dict__.get(max_wait_time_var)
+            if self.__dict__.get(delay_time_var):
+                delay_time = self.__dict__.get(delay_time_var)
+
             wait_time = max_wait_time
             while wait_time > 0:
                 try:
@@ -67,12 +78,12 @@ class LCMHelmConn(N2VCConnector, LcmBase):
     _KUBECTL_OSM_CLUSTER_NAME = "_system-osm-k8s"

     _EE_SERVICE_PORT = 50050

-    # Time beetween retries
-    _EE_RETRY_DELAY = 10
     # Initial max retry time
-    _MAX_INITIAL_RETRY_TIME = 300
-    # Other retry time
+    _MAX_INITIAL_RETRY_TIME = 600
+    # Max retry time for normal operations
     _MAX_RETRY_TIME = 30
+    # Time between retries; delay applied after a connection error is raised
+    _EE_RETRY_DELAY = 10

     def __init__(self,
                  log: object = None,
@@ -102,13 +113,26 @@ class LCMHelmConn(N2VCConnector, LcmBase):
         )

         self.log.debug("Initialize helm N2VC connector")
+        self.log.debug("initial vca_config: {}".format(vca_config))

         # TODO - Obtain data from configuration
         self._ee_service_port = self._EE_SERVICE_PORT

         self._retry_delay = self._EE_RETRY_DELAY
-        self._max_retry_time = self._MAX_RETRY_TIME
-        self._initial_retry_time = self._MAX_INITIAL_RETRY_TIME
+
+        if self.vca_config and self.vca_config.get("eegrpcinittimeout"):
+            self._initial_retry_time = self.vca_config.get("eegrpcinittimeout")
+            self.log.debug("Initial retry time: {}".format(self._initial_retry_time))
+        else:
+            self._initial_retry_time = self._MAX_INITIAL_RETRY_TIME
+            self.log.debug("Applied default retry time: {}".format(self._initial_retry_time))
+
+        if self.vca_config and self.vca_config.get("eegrpctimeout"):
+            self._max_retry_time = self.vca_config.get("eegrpctimeout")
+            self.log.debug("Retry time: {}".format(self._max_retry_time))
+        else:
+            self._max_retry_time = self._MAX_RETRY_TIME
+            self.log.debug("Applied default retry time: {}".format(self._max_retry_time))

         # initialize helm connector for helmv2 and helmv3
         self._k8sclusterhelm2 = K8sHelmConnector(
@@ -458,7 +482,7 @@ class LCMHelmConn(N2VCConnector, LcmBase):
     ) -> str:
         pass

-    @retryer(max_wait_time=_MAX_INITIAL_RETRY_TIME, delay_time=_EE_RETRY_DELAY)
+    @retryer(max_wait_time_var="_initial_retry_time", delay_time_var="_retry_delay")
     async def _get_ssh_key(self, ip_addr):
         channel = Channel(ip_addr, self._ee_service_port)
         try:
@@ -469,11 +493,11 @@
         finally:
             channel.close()

-    @retryer(max_wait_time=_MAX_INITIAL_RETRY_TIME, delay_time=_EE_RETRY_DELAY)
+    @retryer(max_wait_time_var="_initial_retry_time", delay_time_var="_retry_delay")
     async def _execute_config_primitive(self, ip_addr, params, db_dict=None):
         return await self._execute_primitive_internal(ip_addr, "config", params, db_dict=db_dict)

-    @retryer(max_wait_time=_MAX_RETRY_TIME, delay_time=_EE_RETRY_DELAY)
+    @retryer(max_wait_time_var="_max_retry_time", delay_time_var="_retry_delay")
     async def _execute_primitive(self, ip_addr, primitive_name, params, db_dict=None):
         return await self._execute_primitive_internal(ip_addr, primitive_name, params, db_dict=db_dict)
-- 
GitLab


From a100366a1e9c730909c95fde4e21a51964e5ee44 Mon Sep 17 00:00:00 2001
From: David Garcia
Date: Tue, 16 Feb 2021 21:07:58 +0100
Subject: [PATCH 02/35] Fix minor issue

If initial-config-primitive or config-primitive does not exist,
an error was raised.

Change-Id: If43cef7369a5f6d57c99347ba8a4b7b08790d057
Signed-off-by: David Garcia
(cherry picked from commit 95cc9c5f864ef0d5b78eca278e41a023d7e79281)
---
 osm_lcm/ns.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/osm_lcm/ns.py b/osm_lcm/ns.py
index 8e0beb6..5131f9f 100644
--- a/osm_lcm/ns.py
+++ b/osm_lcm/ns.py
@@ -3447,9 +3447,9 @@ class NsLcm(LcmBase):
         if kdu_name and get_kdu_configuration(db_vnfd, kdu_name):
             kdu_configuration = get_kdu_configuration(db_vnfd, kdu_name)
             actions = set()
-            for primitive in kdu_configuration["initial-config-primitive"]:
+            for primitive in kdu_configuration.get("initial-config-primitive", []):
                 actions.add(primitive["name"])
-            for primitive in kdu_configuration["config-primitive"]:
+            for primitive in kdu_configuration.get("config-primitive", []):
                 actions.add(primitive["name"])

             kdu_action = True if primitive_name in actions else False
-- 
GitLab


From 7dc946716e5fc51340e143442ce45bff8c948525 Mon Sep 17 00:00:00 2001
From: lloretgalleg
Date: Mon, 8 Feb 2021 11:49:50 +0000
Subject: [PATCH 03/35] Fix bug 1432: No support for alternative images for sol006

Change-Id: Icc4a9871c0ae44fc516b6c60fd6e0a641324fcdd
Signed-off-by: lloretgalleg
---
 osm_lcm/ns.py | 19 +++++++++++++++++--
 1 file changed, 17 insertions(+), 2 deletions(-)

diff --git a/osm_lcm/ns.py b/osm_lcm/ns.py
index 5131f9f..e08e2d6 100644
--- a/osm_lcm/ns.py
+++ b/osm_lcm/ns.py
@@ -783,8 +783,23 @@ class NsLcm(LcmBase):
                 ns_flavor = target["flavor"][int(vdur["ns-flavor-id"])]
                 if target_vim not in ns_flavor["vim_info"]:
                     ns_flavor["vim_info"][target_vim] = {}
-                # image
-                ns_image = target["image"][int(vdur["ns-image-id"])]
+
+                # deal with images
+                # in case alternative images are provided, check if any of them
+                # applies to this vim_type and, if so, use that image id instead
+                ns_image_id = int(vdur["ns-image-id"])
+                if vdur.get("alt-image-ids"):
+                    db_vim = get_vim_account(vnfr["vim-account-id"])
+                    vim_type = db_vim["vim_type"]
+                    for alt_image_id in vdur.get("alt-image-ids"):
+                        ns_alt_image = target["image"][int(alt_image_id)]
+                        if vim_type == ns_alt_image.get("vim-type"):
+                            # must use alternative image
+                            self.logger.debug("use alternative image id: {}".format(alt_image_id))
+                            ns_image_id = alt_image_id
+                            vdur["ns-image-id"] = ns_image_id
+                            break
+                ns_image = 
target["image"][int(ns_image_id)] if target_vim not in ns_image["vim_info"]: ns_image["vim_info"][target_vim] = {} -- GitLab From 0cfe5b860e471c179a19367b8773836197f0747b Mon Sep 17 00:00:00 2001 From: garciaale Date: Tue, 16 Feb 2021 15:47:52 -0300 Subject: [PATCH 04/35] Updates LCM test descriptors to SOL006 3.3.1 Change-Id: Ic86d62bafe9fa4fdcaf7b750501d002851022f78 Signed-off-by: garciaale --- osm_lcm/tests/test_db_descriptors.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/osm_lcm/tests/test_db_descriptors.py b/osm_lcm/tests/test_db_descriptors.py index 080d88d..022f53c 100644 --- a/osm_lcm/tests/test_db_descriptors.py +++ b/osm_lcm/tests/test_db_descriptors.py @@ -885,9 +885,11 @@ db_vnfds_text = """ virtual-storage-desc: - id: mgmt-storage - size-of-storage: 10 + block-storage-data: + size-of-storage: 10 - id: data-storage - size-of-storage: 10 + block-storage-data: + size-of-storage: 10 sw-image-desc: - id: hackfest3-mgmt -- GitLab From e5a31bcab1fdb1b168d90307d84fe35c180797ce Mon Sep 17 00:00:00 2001 From: bravof Date: Wed, 17 Feb 2021 19:09:12 -0300 Subject: [PATCH 05/35] fix(configurations): LCM adapted for new configuration container in IM Change-Id: I5511a659e257a42d6bbdb48c495f93078bd12928 Signed-off-by: bravof --- osm_lcm/data_utils/vnfd.py | 30 ++++++---------- osm_lcm/ns.py | 42 +++++++++++----------- osm_lcm/tests/test_db_descriptors.py | 54 ++++++++++++++-------------- 3 files changed, 57 insertions(+), 69 deletions(-) diff --git a/osm_lcm/data_utils/vnfd.py b/osm_lcm/data_utils/vnfd.py index f312e57..16257db 100644 --- a/osm_lcm/data_utils/vnfd.py +++ b/osm_lcm/data_utils/vnfd.py @@ -23,7 +23,6 @@ ## from osm_lcm.data_utils import list_utils -from osm_lcm.lcm_utils import get_iterable def get_lcm_operations_configuration(vnfd): @@ -89,26 +88,17 @@ def get_vdu_profile(vnfd, vdu_profile_id): lambda vdu_profile: vdu_profile["id"] == vdu_profile_id) -def get_vnf_configuration(vnfd): - if "vnf-configuration-id" not in vnfd.get("df")[0]: +def get_configuration(vnfd, entity_id): + lcm_ops_config = vnfd.get("df")[0].get("lcm-operations-configuration") + if not lcm_ops_config: return None - vnf_config_id = vnfd.get("df")[0]["vnf-configuration-id"] - return list_utils.find_in_list( - vnfd.get("vnf-configuration", {}), - lambda vnf_config: vnf_config["id"] == vnf_config_id) - - -def get_vdu_configuration(vnfd, vdu_id): - vdu_profile = get_vdu_profile(vnfd, vdu_id) - return list_utils.find_in_list( - vnfd.get("vdu-configuration", ()), - lambda vdu_configuration: vdu_configuration["id"] == vdu_profile["vdu-configuration-id"]) - - -def get_kdu_configuration(vnfd, kdu_name): - for kdu in get_iterable(vnfd, "kdu"): - if kdu_name == kdu["name"]: - return kdu.get("kdu-configuration") + ops_vnf = lcm_ops_config.get("operate-vnf-op-config") + if not ops_vnf: + return None + day12ops = ops_vnf.get("day1-2", []) + list_utils.find_in_list( + day12ops, + lambda configuration: configuration["id"] == entity_id) def get_virtual_link_profiles(vnfd): diff --git a/osm_lcm/ns.py b/osm_lcm/ns.py index e08e2d6..81fdee5 100644 --- a/osm_lcm/ns.py +++ b/osm_lcm/ns.py @@ -28,9 +28,9 @@ from osm_lcm import ROclient from osm_lcm.ng_ro import NgRoClient, NgRoException from osm_lcm.lcm_utils import LcmException, LcmExceptionNoMgmtIP, LcmBase, deep_get, get_iterable, populate_dict from osm_lcm.data_utils.nsd import get_vnf_profiles -from osm_lcm.data_utils.vnfd import get_vnf_configuration, get_vdu_list, get_vdu_profile, \ +from osm_lcm.data_utils.vnfd import get_vdu_list, 
get_vdu_profile, \ get_ee_sorted_initial_config_primitive_list, get_ee_sorted_terminate_config_primitive_list, \ - get_kdu_list, get_virtual_link_profiles, get_vdu, get_vdu_configuration, get_kdu_configuration, \ + get_kdu_list, get_virtual_link_profiles, get_vdu, get_configuration, \ get_vdu_index, get_scaling_aspect, get_number_of_instances from osm_lcm.data_utils.list_utils import find_in_list from osm_lcm.data_utils.vnfr import get_osm_params @@ -338,7 +338,6 @@ class NsLcm(LcmBase): # remove unused by RO configuration, monitoring, scaling and internal keys vnfd_RO.pop("_id", None) vnfd_RO.pop("_admin", None) - vnfd_RO.pop("vnf-configuration", None) vnfd_RO.pop("monitoring-param", None) vnfd_RO.pop("scaling-group-descriptor", None) vnfd_RO.pop("kdu", None) @@ -742,8 +741,8 @@ class NsLcm(LcmBase): self.logger.debug("NS > ssh_keys > {}".format(ssh_keys_all)) if ssh_keys_all: - vdu_configuration = get_vdu_configuration(vnfd, vdur["vdu-id-ref"]) - vnf_configuration = get_vnf_configuration(vnfd) + vdu_configuration = get_configuration(vnfd, vdur["vdu-id-ref"]) + vnf_configuration = get_configuration(vnfd, vnfd["id"]) if vdu_configuration and vdu_configuration.get("config-access") and \ vdu_configuration.get("config-access").get("ssh-access"): vdur["ssh-keys"] = ssh_keys_all @@ -1795,7 +1794,7 @@ class NsLcm(LcmBase): if db_vnfr.get("additionalParamsForVnf"): deploy_params.update(parse_yaml_strings(db_vnfr["additionalParamsForVnf"].copy())) - descriptor_config = get_vnf_configuration(vnfd) + descriptor_config = get_configuration(vnfd, vnfd["id"]) if descriptor_config: self._deploy_n2vc( logging_text=logging_text + "member_vnf_index={} ".format(member_vnf_index), @@ -1820,7 +1819,7 @@ class NsLcm(LcmBase): # Deploy charms for each VDU that supports one. 
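        # A note on the lookup used above and below: this patch funnels the old
        # get_vnf_configuration/get_vdu_configuration/get_kdu_configuration
        # helpers into the single get_configuration() added in data_utils/vnfd.py.
        # A minimal sketch of the lookup that helper performs over the SOL006
        # layout used by these descriptors (hedged; patch 08 later adds the
        # "return" that this first version accidentally omits):
        #
        #     def get_configuration(vnfd, entity_id):
        #         day12ops = (vnfd["df"][0]
        #                     .get("lcm-operations-configuration", {})
        #                     .get("operate-vnf-op-config", {})
        #                     .get("day1-2", []))
        #         return next((c for c in day12ops if c["id"] == entity_id), None)
        #
        # Callers pass vnfd["id"] for VNF-level config, a VDU id for VDU-level
        # config, or a KDU name for KDU-level config.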
for vdud in get_vdu_list(vnfd): vdu_id = vdud["id"] - descriptor_config = get_vdu_configuration(vnfd, vdu_id) + descriptor_config = get_configuration(vnfd, vdu_id) vdur = find_in_list(db_vnfr["vdur"], lambda vdu: vdu["vdu-id-ref"] == vdu_id) if vdur.get("additionalParams"): @@ -1859,7 +1858,7 @@ class NsLcm(LcmBase): ) for kdud in get_kdu_list(vnfd): kdu_name = kdud["name"] - descriptor_config = kdud.get('kdu-configuration') + descriptor_config = get_configuration(vnfd, kdu_name) if descriptor_config: vdu_id = None vdu_index = 0 @@ -2050,7 +2049,7 @@ class NsLcm(LcmBase): if db_vnfd_list: for vnfd in db_vnfd_list: db_vnfd = self.db.get_one("vnfds", {"_id": vnfd}) - db_vnf_relations = deep_get(db_vnfd, ('vnf-configuration', 'relation')) + db_vnf_relations = get_configuration(db_vnfd, db_vnfd["id"]).get("relation", []) if db_vnf_relations: for r in db_vnf_relations: # check if this VCA is in the relation @@ -3036,16 +3035,13 @@ class NsLcm(LcmBase): config_descriptor = db_nsr.get("ns-configuration") elif vca.get("vdu_id"): db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]] - vdud = next((vdu for vdu in db_vnfd.get("vdu", ()) if vdu["id"] == vca.get("vdu_id")), None) - if vdud: - config_descriptor = vdud.get("vdu-configuration") + config_descriptor = get_configuration(db_vnfd, vca.get("vdu_id")) elif vca.get("kdu_name"): db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]] - kdud = next((kdu for kdu in db_vnfd.get("kdu", ()) if kdu["name"] == vca.get("kdu_name")), None) - if kdud: - config_descriptor = kdud.get("kdu-configuration") + config_descriptor = get_configuration(db_vnfd, vca.get("kdu_name")) else: - config_descriptor = db_vnfds_from_member_index[vca["member-vnf-index"]].get("vnf-configuration") + db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]] + config_descriptor = get_configuration(db_vnfd, db_vnfd["id"]) vca_type = vca.get("type") exec_terminate_primitives = (not operation_params.get("skip_terminate_primitives") and vca.get("needed_terminate")) @@ -3424,11 +3420,11 @@ class NsLcm(LcmBase): # look for primitive config_primitive_desc = descriptor_configuration = None if vdu_id: - descriptor_configuration = get_vdu_configuration(db_vnfd, vdu_id) + descriptor_configuration = get_configuration(db_vnfd, vdu_id) elif kdu_name: - descriptor_configuration = get_kdu_configuration(db_vnfd, kdu_name) + descriptor_configuration = get_configuration(db_vnfd, kdu_name) elif vnf_index: - descriptor_configuration = get_vnf_configuration(db_vnfd) + descriptor_configuration = get_configuration(db_vnfd, db_vnfd["id"]) else: descriptor_configuration = db_nsd.get("ns-configuration") @@ -3459,8 +3455,8 @@ class NsLcm(LcmBase): desc_params = parse_yaml_strings(db_vnfr.get("additionalParamsForVnf")) else: desc_params = parse_yaml_strings(db_nsr.get("additionalParamsForNs")) - if kdu_name and get_kdu_configuration(db_vnfd, kdu_name): - kdu_configuration = get_kdu_configuration(db_vnfd, kdu_name) + if kdu_name and get_configuration(db_vnfd, kdu_name): + kdu_configuration = get_configuration(db_vnfd, kdu_name) actions = set() for primitive in kdu_configuration.get("initial-config-primitive", []): actions.add(primitive["name"]) @@ -3812,7 +3808,9 @@ class NsLcm(LcmBase): "executing pre-scale scaling-config-action '{}'".format(vnf_config_primitive) # look for primitive - for config_primitive in db_vnfd.get("vnf-configuration", {}).get("config-primitive", ()): + for config_primitive in (get_configuration( + db_vnfd, db_vnfd["id"] + ) or {}).get("config-primitive", ()): if 
config_primitive["name"] == vnf_config_primitive: break else: diff --git a/osm_lcm/tests/test_db_descriptors.py b/osm_lcm/tests/test_db_descriptors.py index 022f53c..be420c7 100644 --- a/osm_lcm/tests/test_db_descriptors.py +++ b/osm_lcm/tests/test_db_descriptors.py @@ -955,14 +955,12 @@ db_vnfds_text = """ df: - id: hackfest_default - vnf-configuration-id: vnf-configuration-example vdu-profile: - id: mgmtVM min-number-of-instances: 1 - id: dataVM min-number-of-instances: 1 max-number-of-instances: 10 - vdu-configuration-id: vdu-configuration-example instantiation-level: - id: default vdu-level: @@ -998,32 +996,34 @@ db_vnfds_text = """ vnf-config-primitive-name-ref: touch - trigger: pre-scale-in vnf-config-primitive-name-ref: touch + lcm-operations-configuration: + operate-vnf-op-config: + day1-2: + - id: hackfest3charmed-vnf + initial-config-primitive: + - seq: "1" + name: config + parameter: + - name: ssh-hostname + value: + - name: ssh-username + value: ubuntu + - name: ssh-password + value: osm4u + - seq: "2" + name: touch + parameter: + - name: filename + value: + config-primitive: + - name: touch + parameter: + - data-type: STRING + default-value: + name: filename + juju: + charm: simple - vnf-configuration: - - id: vnf-configuration-example - initial-config-primitive: - - seq: "1" - name: config - parameter: - - name: ssh-hostname - value: - - name: ssh-username - value: ubuntu - - name: ssh-password - value: osm4u - - seq: "2" - name: touch - parameter: - - name: filename - value: - config-primitive: - - name: touch - parameter: - - data-type: STRING - default-value: - name: filename - juju: - charm: simple - _admin: created: 1575031727.5383403 modified: 1575031727.5383403 -- GitLab From a5ae90b046fc9760c542832e4543d4c4790f869a Mon Sep 17 00:00:00 2001 From: garciadeblas Date: Fri, 12 Feb 2021 11:26:46 +0000 Subject: [PATCH 06/35] Fix bug 1442: set SDN target in vim_info for NS VLD Change-Id: Ie4ed89c261604c74a504adbed8482036b7db4951 Signed-off-by: garciadeblas --- osm_lcm/ns.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/osm_lcm/ns.py b/osm_lcm/ns.py index 81fdee5..f5bdff3 100644 --- a/osm_lcm/ns.py +++ b/osm_lcm/ns.py @@ -645,10 +645,13 @@ class NsLcm(LcmBase): } # check if this network needs SDN assist if vld.get("pci-interfaces"): - db_vim = VimAccountDB.get_vim_account_with_id(target_vld["vim_info"][0]["vim_account_id"]) + db_vim = get_vim_account(ns_params["vimAccountId"]) sdnc_id = db_vim["config"].get("sdn-controller") if sdnc_id: - target_vld["vim_info"].append({"sdnc_id": sdnc_id}) + sdn_vld = "nsrs:{}:vld.{}".format(nsr_id, vld["id"]) + target_sdn = "sdn:{}".format(sdnc_id) + target_vld["vim_info"][target_sdn] = { + "sdn": True, "target_vim": target_vim, "vlds": [sdn_vld], "type": vld.get("type")} nsd_vnf_profiles = get_vnf_profiles(nsd) for nsd_vnf_profile in nsd_vnf_profiles: -- GitLab From 9a256dbc33676a12665c8a77e1e22154d34eab4b Mon Sep 17 00:00:00 2001 From: bravof Date: Mon, 22 Feb 2021 18:02:07 -0300 Subject: [PATCH 07/35] fix(configuration): juju related changes to honor descriptor changes introduced in v8 and fixes for new configuration model Change-Id: Ib5131a94b944c1aed985c2a612cf2b5d871673de Signed-off-by: bravof --- osm_lcm/data_utils/vnfd.py | 10 ++++++++++ osm_lcm/ns.py | 17 +++++++++-------- osm_lcm/tests/test_db_descriptors.py | 11 +++++++++-- 3 files changed, 28 insertions(+), 10 deletions(-) diff --git a/osm_lcm/data_utils/vnfd.py b/osm_lcm/data_utils/vnfd.py index 16257db..f816a8d 100644 --- a/osm_lcm/data_utils/vnfd.py 
+++ b/osm_lcm/data_utils/vnfd.py @@ -139,3 +139,13 @@ def get_number_of_instances(vnfd, vdu_id): ), lambda a_vdu: a_vdu["vdu-id"] == vdu_id )["number-of-instances"] + + +def get_juju_ee_ref(vnfd, entity_id): + return list_utils.find_in_list( + get_configuration(vnfd, entity_id).get( + "execution-environment-list", + [] + ), + lambda ee: "juju" in ee + ) diff --git a/osm_lcm/ns.py b/osm_lcm/ns.py index f5bdff3..4077d01 100644 --- a/osm_lcm/ns.py +++ b/osm_lcm/ns.py @@ -31,7 +31,7 @@ from osm_lcm.data_utils.nsd import get_vnf_profiles from osm_lcm.data_utils.vnfd import get_vdu_list, get_vdu_profile, \ get_ee_sorted_initial_config_primitive_list, get_ee_sorted_terminate_config_primitive_list, \ get_kdu_list, get_virtual_link_profiles, get_vdu, get_configuration, \ - get_vdu_index, get_scaling_aspect, get_number_of_instances + get_vdu_index, get_scaling_aspect, get_number_of_instances, get_juju_ee_ref from osm_lcm.data_utils.list_utils import find_in_list from osm_lcm.data_utils.vnfr import get_osm_params from osm_lcm.data_utils.dict_utils import parse_yaml_strings @@ -2237,8 +2237,9 @@ class NsLcm(LcmBase): vnfr_update_dict["kdur.{}.status".format(kdu_index)] = "READY" self.update_db_2("vnfrs", vnfr_data.get("_id"), vnfr_update_dict) - kdu_config = kdud.get("kdu-configuration") - if kdu_config and kdu_config.get("initial-config-primitive") and kdu_config.get("juju") is None: + kdu_config = get_configuration(vnfd, k8s_instance_info["kdu-name"]) + if kdu_config and kdu_config.get("initial-config-primitive") and \ + get_juju_ee_ref(vnfd, k8s_instance_info["kdu-name"]) is None: initial_config_primitive_list = kdu_config.get("initial-config-primitive") initial_config_primitive_list.sort(key=lambda val: int(val["seq"])) @@ -2427,10 +2428,8 @@ class NsLcm(LcmBase): # fill db_nsr._admin.deployed.VCA. 
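        # A note on the branch below: under the v8+ information model every
        # charm is declared as an entry of execution-environment-list, so the
        # old special case for a bare "juju" key in the descriptor goes away.
        # A hedged sketch of how a juju execution environment is then located
        # (mirrors get_juju_ee_ref() introduced in data_utils/vnfd.py above):
        #
        #     ee_list = descriptor_config.get("execution-environment-list", [])
        #     juju_ee = next((ee for ee in ee_list if "juju" in ee), None)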
self.logger.debug(logging_text + "_deploy_n2vc vnfd_id={}, vdu_id={}".format(vnfd_id, vdu_id)) - if descriptor_config.get("juju"): # There is one execution envioronment of type juju - ee_list = [descriptor_config] - elif descriptor_config.get("execution-environment-list"): - ee_list = descriptor_config.get("execution-environment-list") + if "execution-environment-list" in descriptor_config: + ee_list = descriptor_config.get("execution-environment-list", []) else: # other types as script are not supported ee_list = [] @@ -3908,7 +3907,9 @@ class NsLcm(LcmBase): vnfr_params.update(db_vnfr["additionalParamsForVnf"]) # look for primitive - for config_primitive in db_vnfd.get("vnf-configuration", {}).get("config-primitive", ()): + for config_primitive in ( + get_configuration(db_vnfd, db_vnfd["id"]) or {} + ).get("config-primitive", ()): if config_primitive["name"] == vnf_config_primitive: break else: diff --git a/osm_lcm/tests/test_db_descriptors.py b/osm_lcm/tests/test_db_descriptors.py index be420c7..6b4be6a 100644 --- a/osm_lcm/tests/test_db_descriptors.py +++ b/osm_lcm/tests/test_db_descriptors.py @@ -1000,8 +1000,13 @@ db_vnfds_text = """ operate-vnf-op-config: day1-2: - id: hackfest3charmed-vnf + execution-environment-list: + - id: simple-ee + juju: + charm: simple initial-config-primitive: - seq: "1" + execution-environment-ref: simple-ee name: config parameter: - name: ssh-hostname @@ -1011,18 +1016,18 @@ db_vnfds_text = """ - name: ssh-password value: osm4u - seq: "2" + execution-environment-ref: simple-ee name: touch parameter: - name: filename value: config-primitive: - name: touch + execution-environment-ref: simple-ee parameter: - data-type: STRING default-value: name: filename - juju: - charm: simple - _admin: created: 1575031727.5383403 @@ -1047,6 +1052,8 @@ db_vnfds_text = """ - name: mgmt description: KNF with two KDU using helm-charts id: multikdu_knf + df: + - id: "default_df" k8s-cluster: nets: - external-connection-point-ref: mgmt -- GitLab From 132515638cd925a0d070fbf5b08f21ae7ddce05c Mon Sep 17 00:00:00 2001 From: aktas Date: Fri, 12 Feb 2021 22:19:10 +0300 Subject: [PATCH 08/35] Bug 585 Fix for scaling This fix should be merged with this https://osm.etsi.org/gerrit/c/osm/N2VC/+/10364 Change-Id: I43fb4e5c81dbbaed07f01ba1a3ba399f7425b347 Signed-off-by: aktas --- osm_lcm/data_utils/vnfd.py | 4 +- osm_lcm/data_utils/vnfr.py | 9 ++ osm_lcm/ns.py | 227 +++++++++++++++++++++++++++++++++++-- 3 files changed, 226 insertions(+), 14 deletions(-) diff --git a/osm_lcm/data_utils/vnfd.py b/osm_lcm/data_utils/vnfd.py index f816a8d..1b45b53 100644 --- a/osm_lcm/data_utils/vnfd.py +++ b/osm_lcm/data_utils/vnfd.py @@ -96,7 +96,7 @@ def get_configuration(vnfd, entity_id): if not ops_vnf: return None day12ops = ops_vnf.get("day1-2", []) - list_utils.find_in_list( + return list_utils.find_in_list( day12ops, lambda configuration: configuration["id"] == entity_id) @@ -138,7 +138,7 @@ def get_number_of_instances(vnfd, vdu_id): () ), lambda a_vdu: a_vdu["vdu-id"] == vdu_id - )["number-of-instances"] + ).get("number-of-instances", 1) def get_juju_ee_ref(vnfd, entity_id): diff --git a/osm_lcm/data_utils/vnfr.py b/osm_lcm/data_utils/vnfr.py index 042788e..9c0b148 100644 --- a/osm_lcm/data_utils/vnfr.py +++ b/osm_lcm/data_utils/vnfr.py @@ -23,6 +23,7 @@ ## from osm_lcm.data_utils import list_utils +from osm_lcm.lcm_utils import get_iterable def find_VNFR_by_VDU_ID(vnfr, vdu_id): @@ -57,3 +58,11 @@ def get_osm_params(db_vnfr, vdu_id=None, vdu_count_index=0): osm_params["vdu_id"] = vdu_id 
osm_params["count_index"] = vdu_count_index return osm_params + + +def get_vdur_index(db_vnfr, vdu_delta): + vdur_list = get_iterable(db_vnfr, "vdur") + if vdur_list: + return len([x for x in vdur_list if x.get("vdu-id-ref") == vdu_delta["id"]]) + else: + return 0 diff --git a/osm_lcm/ns.py b/osm_lcm/ns.py index 4077d01..e5b1060 100644 --- a/osm_lcm/ns.py +++ b/osm_lcm/ns.py @@ -33,7 +33,7 @@ from osm_lcm.data_utils.vnfd import get_vdu_list, get_vdu_profile, \ get_kdu_list, get_virtual_link_profiles, get_vdu, get_configuration, \ get_vdu_index, get_scaling_aspect, get_number_of_instances, get_juju_ee_ref from osm_lcm.data_utils.list_utils import find_in_list -from osm_lcm.data_utils.vnfr import get_osm_params +from osm_lcm.data_utils.vnfr import get_osm_params, get_vdur_index from osm_lcm.data_utils.dict_utils import parse_yaml_strings from osm_lcm.data_utils.database.vim_account import VimAccountDB from n2vc.k8s_helm_conn import K8sHelmConnector @@ -1163,14 +1163,14 @@ class NsLcm(LcmBase): if vnfr_id: element_type = 'VNF' element_under_configuration = vnfr_id - namespace += ".{}".format(vnfr_id) + namespace += ".{}-{}".format(vnfr_id, vdu_index or 0) if vdu_id: namespace += ".{}-{}".format(vdu_id, vdu_index or 0) element_type = 'VDU' element_under_configuration = "{}-{}".format(vdu_id, vdu_index or 0) osm_config["osm"]["vdu_id"] = vdu_id elif kdu_name: - namespace += ".{}".format(kdu_name) + namespace += ".{}.{}".format(kdu_name, vdu_index or 0) element_type = 'KDU' element_under_configuration = kdu_name osm_config["osm"]["kdu_name"] = kdu_name @@ -2734,7 +2734,7 @@ class NsLcm(LcmBase): return vca["ee_id"] async def destroy_N2VC(self, logging_text, db_nslcmop, vca_deployed, config_descriptor, - vca_index, destroy_ee=True, exec_primitives=True): + vca_index, destroy_ee=True, exec_primitives=True, scaling_in=False): """ Execute the terminate primitives and destroy the execution environment (if destroy_ee=False :param logging_text: @@ -2745,6 +2745,7 @@ class NsLcm(LcmBase): :param destroy_ee: False to do not destroy, because it will be destroyed all of then at once :param exec_primitives: False to do not execute terminate primitives, because the config is not completed or has not executed properly + :param scaling_in: True destroys the application, False destroys the model :return: None or exception """ @@ -2802,7 +2803,7 @@ class NsLcm(LcmBase): await self.prometheus.update(remove_jobs=vca_deployed["prometheus_jobs"]) if destroy_ee: - await self.vca_map[vca_type].delete_execution_environment(vca_deployed["ee_id"]) + await self.vca_map[vca_type].delete_execution_environment(vca_deployed["ee_id"], scaling_in=scaling_in) async def _delete_all_N2VC(self, db_nsr: dict): self._write_all_config_status(db_nsr=db_nsr, status='TERMINATING') @@ -3043,7 +3044,7 @@ class NsLcm(LcmBase): config_descriptor = get_configuration(db_vnfd, vca.get("kdu_name")) else: db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]] - config_descriptor = get_configuration(db_vnfd, db_vnfd["id"]) + config_descriptor = get_configuration(db_vnfd, db_vnfd["id"]) vca_type = vca.get("type") exec_terminate_primitives = (not operation_params.get("skip_terminate_primitives") and vca.get("needed_terminate")) @@ -3608,6 +3609,7 @@ class NsLcm(LcmBase): logging_text = "Task ns={} scale={} ".format(nsr_id, nslcmop_id) stage = ['', '', ''] + tasks_dict_info = {} # ^ stage, step, VIM progress self.logger.debug(logging_text + "Enter") # get all needed from database @@ -3619,6 +3621,7 @@ class NsLcm(LcmBase): scale_process = None 
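        # A note on the flow that follows: with this patch a scale operation
        # drives VCA as well as RO, in three ordered phases: tear down the
        # execution environments of VDUs being removed (SCALE-IN VCA), apply
        # the RO change, then deploy execution environments for the new VDUs
        # (SCALE-UP VCA). Each phase consumes records shaped like this sketch
        # (illustrative values; built from the vdu-delta entries further down):
        #
        #     VCA_scaling_info.append({
        #         "osm_vdu_id": vdu_delta["id"],
        #         "member-vnf-index": vnf_index,
        #         "type": "create",        # "delete" when scaling in
        #         "vdu_index": vdu_index + x,
        #     })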
old_operational_status = "" old_config_status = "" + nsi_id = None try: # wait for any previous tasks in process step = "Waiting for previous operations to terminate" @@ -3663,6 +3666,8 @@ class NsLcm(LcmBase): step = "Getting vnfd from database" db_vnfd = self.db.get_one("vnfds", {"_id": db_vnfr["vnfd-id"]}) + base_folder = db_vnfd["_admin"]["storage"] + step = "Getting scaling-group-descriptor" scaling_descriptor = find_in_list( get_scaling_aspect( @@ -3689,6 +3694,7 @@ class NsLcm(LcmBase): admin_scale_index += 1 db_nsr_update["_admin.scaling-group.{}.name".format(admin_scale_index)] = scaling_group RO_scaling_info = [] + VCA_scaling_info = [] vdu_scaling_info = {"scaling_group_name": scaling_group, "vdu": []} if scaling_type == "SCALE_OUT": if "aspect-delta-details" not in scaling_descriptor: @@ -3705,7 +3711,7 @@ class NsLcm(LcmBase): for delta in deltas: for vdu_delta in delta["vdu-delta"]: vdud = get_vdu(db_vnfd, vdu_delta["id"]) - vdu_index = get_vdu_index(db_vnfr, vdu_delta["id"]) + vdu_index = get_vdur_index(db_vnfr, vdu_delta) cloud_init_text = self._get_vdu_cloud_init_content(vdud, db_vnfd) if cloud_init_text: additional_params = self._get_vdu_additional_params(db_vnfr, vdud["id"]) or {} @@ -3716,11 +3722,11 @@ class NsLcm(LcmBase): if vdu_profile and "max-number-of-instances" in vdu_profile: max_instance_count = vdu_profile.get("max-number-of-instances", 10) - deafult_instance_num = get_number_of_instances(db_vnfd, vdud["id"]) + default_instance_num = get_number_of_instances(db_vnfd, vdud["id"]) nb_scale_op += vdu_delta.get("number-of-instances", 1) - if nb_scale_op + deafult_instance_num > max_instance_count: + if nb_scale_op + default_instance_num > max_instance_count: raise LcmException( "reached the limit of {} (max-instance-count) " "scaling-out operations for the " @@ -3742,6 +3748,14 @@ class NsLcm(LcmBase): vdud["id"] ) ) + VCA_scaling_info.append( + { + "osm_vdu_id": vdu_delta["id"], + "member-vnf-index": vnf_index, + "type": "create", + "vdu_index": vdu_index + x + } + ) RO_scaling_info.append( { "osm_vdu_id": vdu_delta["id"], @@ -3763,21 +3777,32 @@ class NsLcm(LcmBase): deltas = scaling_descriptor.get("aspect-delta-details")["deltas"] for delta in deltas: for vdu_delta in delta["vdu-delta"]: + vdu_index = get_vdur_index(db_vnfr, vdu_delta) min_instance_count = 0 vdu_profile = get_vdu_profile(db_vnfd, vdu_delta["id"]) if vdu_profile and "min-number-of-instances" in vdu_profile: min_instance_count = vdu_profile["min-number-of-instances"] - deafult_instance_num = get_number_of_instances(db_vnfd, vdu_delta["id"]) + default_instance_num = get_number_of_instances(db_vnfd, vdu_delta["id"]) nb_scale_op -= vdu_delta.get("number-of-instances", 1) - if nb_scale_op + deafult_instance_num < min_instance_count: + if nb_scale_op + default_instance_num < min_instance_count: raise LcmException( "reached the limit of {} (min-instance-count) scaling-in operations for the " "scaling-group-descriptor '{}'".format(nb_scale_op, scaling_group) ) RO_scaling_info.append({"osm_vdu_id": vdu_delta["id"], "member-vnf-index": vnf_index, - "type": "delete", "count": vdu_delta.get("number-of-instances", 1)}) + "type": "delete", "count": vdu_delta.get("number-of-instances", 1), + "vdu_index": vdu_index - 1}) + for x in range(vdu_delta.get("number-of-instances", 1)): + VCA_scaling_info.append( + { + "osm_vdu_id": vdu_delta["id"], + "member-vnf-index": vnf_index, + "type": "delete", + "vdu_index": vdu_index - 1 - x + } + ) vdu_scaling_info["vdu-delete"][vdu_delta["id"]] = 
vdu_delta.get("number-of-instances", 1) # update VDU_SCALING_INFO with the VDUs to delete ip_addresses @@ -3879,6 +3904,68 @@ class NsLcm(LcmBase): db_nsr_update["_admin.scaling-group.{}.nb-scale-op".format(admin_scale_index)] = nb_scale_op db_nsr_update["_admin.scaling-group.{}.time".format(admin_scale_index)] = time() + # SCALE-IN VCA - BEGIN + if VCA_scaling_info: + step = db_nslcmop_update["detailed-status"] = \ + "Deleting the execution environments" + scale_process = "VCA" + for vdu_info in VCA_scaling_info: + if vdu_info["type"] == "delete": + member_vnf_index = str(vdu_info["member-vnf-index"]) + self.logger.debug(logging_text + "vdu info: {}".format(vdu_info)) + vdu_id = vdu_info["osm_vdu_id"] + vdu_index = int(vdu_info["vdu_index"]) + stage[1] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format( + member_vnf_index, vdu_id, vdu_index) + stage[2] = step = "Scaling in VCA" + self._write_op_status( + op_id=nslcmop_id, + stage=stage + ) + vca_update = db_nsr["_admin"]["deployed"]["VCA"] + config_update = db_nsr["configurationStatus"] + for vca_index, vca in enumerate(vca_update): + if (vca or vca.get("ee_id")) and vca["member-vnf-index"] == member_vnf_index and \ + vca["vdu_count_index"] == vdu_index: + if vca.get("vdu_id"): + config_descriptor = get_configuration(db_vnfd, vca.get("vdu_id")) + elif vca.get("kdu_name"): + config_descriptor = get_configuration(db_vnfd, vca.get("kdu_name")) + else: + config_descriptor = get_configuration(db_vnfd, db_vnfd["id"]) + operation_params = db_nslcmop.get("operationParams") or {} + exec_terminate_primitives = (not operation_params.get("skip_terminate_primitives") and + vca.get("needed_terminate")) + task = asyncio.ensure_future(asyncio.wait_for( + self.destroy_N2VC(logging_text, db_nslcmop, vca, config_descriptor, + vca_index, destroy_ee=True, + exec_primitives=exec_terminate_primitives, + scaling_in=True), timeout=self.timeout_charm_delete)) + # wait before next removal + await asyncio.sleep(30) + tasks_dict_info[task] = "Terminating VCA {}".format(vca.get("ee_id")) + del vca_update[vca_index] + del config_update[vca_index] + # wait for pending tasks of terminate primitives + if tasks_dict_info: + self.logger.debug(logging_text + + 'Waiting for tasks {}'.format(list(tasks_dict_info.keys()))) + error_list = await self._wait_for_tasks(logging_text, tasks_dict_info, + min(self.timeout_charm_delete, + self.timeout_ns_terminate), + stage, nslcmop_id) + tasks_dict_info.clear() + if error_list: + raise LcmException("; ".join(error_list)) + + db_vca_and_config_update = { + "_admin.deployed.VCA": vca_update, + "configurationStatus": config_update + } + self.update_db_2("nsrs", db_nsr["_id"], db_vca_and_config_update) + scale_process = None + # SCALE-IN VCA - END + # SCALE RO - BEGIN if RO_scaling_info: scale_process = "RO" @@ -3890,6 +3977,117 @@ class NsLcm(LcmBase): scale_process = None if db_nsr_update: self.update_db_2("nsrs", nsr_id, db_nsr_update) + # SCALE RO - END + + # SCALE-UP VCA - BEGIN + if VCA_scaling_info: + step = db_nslcmop_update["detailed-status"] = \ + "Creating new execution environments" + scale_process = "VCA" + for vdu_info in VCA_scaling_info: + if vdu_info["type"] == "create": + member_vnf_index = str(vdu_info["member-vnf-index"]) + self.logger.debug(logging_text + "vdu info: {}".format(vdu_info)) + vnfd_id = db_vnfr["vnfd-ref"] + vdu_index = int(vdu_info["vdu_index"]) + deploy_params = {"OSM": get_osm_params(db_vnfr)} + if db_vnfr.get("additionalParamsForVnf"): + 
deploy_params.update(parse_yaml_strings(db_vnfr["additionalParamsForVnf"].copy())) + descriptor_config = get_configuration(db_vnfd, db_vnfd["id"]) + if descriptor_config: + vdu_id = None + vdu_name = None + kdu_name = None + self._deploy_n2vc( + logging_text=logging_text + "member_vnf_index={} ".format(member_vnf_index), + db_nsr=db_nsr, + db_vnfr=db_vnfr, + nslcmop_id=nslcmop_id, + nsr_id=nsr_id, + nsi_id=nsi_id, + vnfd_id=vnfd_id, + vdu_id=vdu_id, + kdu_name=kdu_name, + member_vnf_index=member_vnf_index, + vdu_index=vdu_index, + vdu_name=vdu_name, + deploy_params=deploy_params, + descriptor_config=descriptor_config, + base_folder=base_folder, + task_instantiation_info=tasks_dict_info, + stage=stage + ) + vdu_id = vdu_info["osm_vdu_id"] + vdur = find_in_list(db_vnfr["vdur"], lambda vdu: vdu["vdu-id-ref"] == vdu_id) + descriptor_config = get_configuration(db_vnfd, vdu_id) + if vdur.get("additionalParams"): + deploy_params_vdu = parse_yaml_strings(vdur["additionalParams"]) + else: + deploy_params_vdu = deploy_params + deploy_params_vdu["OSM"] = get_osm_params(db_vnfr, vdu_id, vdu_count_index=vdu_index) + if descriptor_config: + vdu_name = None + kdu_name = None + stage[1] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format( + member_vnf_index, vdu_id, vdu_index) + stage[2] = step = "Scaling out VCA" + self._write_op_status( + op_id=nslcmop_id, + stage=stage + ) + self._deploy_n2vc( + logging_text=logging_text + "member_vnf_index={}, vdu_id={}, vdu_index={} ".format( + member_vnf_index, vdu_id, vdu_index), + db_nsr=db_nsr, + db_vnfr=db_vnfr, + nslcmop_id=nslcmop_id, + nsr_id=nsr_id, + nsi_id=nsi_id, + vnfd_id=vnfd_id, + vdu_id=vdu_id, + kdu_name=kdu_name, + member_vnf_index=member_vnf_index, + vdu_index=vdu_index, + vdu_name=vdu_name, + deploy_params=deploy_params_vdu, + descriptor_config=descriptor_config, + base_folder=base_folder, + task_instantiation_info=tasks_dict_info, + stage=stage + ) + # TODO: scaling for kdu is not implemented yet. + kdu_name = vdu_info["osm_vdu_id"] + descriptor_config = get_configuration(db_vnfd, kdu_name) + if descriptor_config: + vdu_id = None + vdu_index = vdu_index + vdu_name = None + kdur = next(x for x in db_vnfr["kdur"] if x["kdu-name"] == kdu_name) + deploy_params_kdu = {"OSM": get_osm_params(db_vnfr)} + if kdur.get("additionalParams"): + deploy_params_kdu = parse_yaml_strings(kdur["additionalParams"]) + + self._deploy_n2vc( + logging_text=logging_text, + db_nsr=db_nsr, + db_vnfr=db_vnfr, + nslcmop_id=nslcmop_id, + nsr_id=nsr_id, + nsi_id=nsi_id, + vnfd_id=vnfd_id, + vdu_id=vdu_id, + kdu_name=kdu_name, + member_vnf_index=member_vnf_index, + vdu_index=vdu_index, + vdu_name=vdu_name, + deploy_params=deploy_params_kdu, + descriptor_config=descriptor_config, + base_folder=base_folder, + task_instantiation_info=tasks_dict_info, + stage=stage + ) + # SCALE-UP VCA - END + scale_process = None # POST-SCALE BEGIN # execute primitive service POST-SCALING @@ -3984,6 +4182,11 @@ class NsLcm(LcmBase): self.logger.critical(logging_text + "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True) finally: self._write_ns_status(nsr_id=nsr_id, ns_state=None, current_operation="IDLE", current_operation_id=None) + if tasks_dict_info: + stage[1] = "Waiting for instantiate pending tasks." 
+ self.logger.debug(logging_text + stage[1]) + exc = await self._wait_for_tasks(logging_text, tasks_dict_info, self.timeout_ns_deploy, + stage, nslcmop_id, nsr_id=nsr_id) if exc: db_nslcmop_update["detailed-status"] = error_description_nslcmop = "FAILED {}: {}".format(step, exc) nslcmop_operation_state = "FAILED" -- GitLab From d64e274c9164f8b57d08df5b80d516eb69def066 Mon Sep 17 00:00:00 2001 From: David Garcia Date: Thu, 25 Feb 2021 20:19:18 +0100 Subject: [PATCH 09/35] Fix bug 1412: Generate kdu instance from LCM Change-Id: Ic8fbcd1d78eeef60ec0c943bcd233f8d8ee4f166 Signed-off-by: David Garcia --- osm_lcm/lcm_helm_conn.py | 30 +++++++++++++++++++---------- osm_lcm/ns.py | 13 ++++++++++--- osm_lcm/tests/test_lcm_helm_conn.py | 8 +++++++- osm_lcm/tests/test_ns.py | 4 +++- 4 files changed, 40 insertions(+), 15 deletions(-) diff --git a/osm_lcm/lcm_helm_conn.py b/osm_lcm/lcm_helm_conn.py index 27c330f..978b061 100644 --- a/osm_lcm/lcm_helm_conn.py +++ b/osm_lcm/lcm_helm_conn.py @@ -233,17 +233,27 @@ class LCMHelmConn(N2VCConnector, LcmBase): self.log.debug("install helm chart: {}".format(full_path)) if vca_type == "helm": - helm_id = await self._k8sclusterhelm2.install(system_cluster_uuid, kdu_model=full_path, - namespace=self._KUBECTL_OSM_NAMESPACE, - params=config, - db_dict=db_dict, - timeout=progress_timeout) + helm_id = self._k8sclusterhelm2.generate_kdu_instance_name( + db_dict=db_dict, + kdu_model=full_path, + ) + await self._k8sclusterhelm2.install(system_cluster_uuid, kdu_model=full_path, + kdu_instance=helm_id, + namespace=self._KUBECTL_OSM_NAMESPACE, + params=config, + db_dict=db_dict, + timeout=progress_timeout) else: - helm_id = await self._k8sclusterhelm3.install(system_cluster_uuid, kdu_model=full_path, - namespace=self._KUBECTL_OSM_NAMESPACE, - params=config, - db_dict=db_dict, - timeout=progress_timeout) + helm_id = self._k8sclusterhelm2.generate_kdu_instance_name( + db_dict=db_dict, + kdu_model=full_path, + ) + await self._k8sclusterhelm3.install(system_cluster_uuid, kdu_model=full_path, + kdu_instance=helm_id, + namespace=self._KUBECTL_OSM_NAMESPACE, + params=config, + db_dict=db_dict, + timeout=progress_timeout) ee_id = "{}:{}.{}".format(vca_type, self._KUBECTL_OSM_NAMESPACE, helm_id) return ee_id, None diff --git a/osm_lcm/ns.py b/osm_lcm/ns.py index e5b1060..75db8e5 100644 --- a/osm_lcm/ns.py +++ b/osm_lcm/ns.py @@ -1232,7 +1232,7 @@ class NsLcm(LcmBase): cloud_name=vca_k8s_cloud, credential_name=vca_k8s_cloud_credential, ) - elif vca_type == "helm" or vca_type == "helm-v3": + elif vca_type == "helm" or vca_type == "helm-v3": ee_id, credentials = await self.vca_map[vca_type].create_execution_environment( namespace=namespace, reuse_ee_id=ee_id, @@ -2192,7 +2192,12 @@ class NsLcm(LcmBase): "filter": {"_id": nsr_id}, "path": nsr_db_path} - kdu_instance = await self.k8scluster_map[k8sclustertype].install( + kdu_instance = self.k8scluster_map[k8sclustertype].generate_kdu_instance_name( + db_dict=db_dict_install, + kdu_model=k8s_instance_info["kdu-model"], + ) + self.update_db_2("nsrs", nsr_id, {nsr_db_path + ".kdu-instance": kdu_instance}) + await self.k8scluster_map[k8sclustertype].install( cluster_uuid=k8s_instance_info["k8scluster-uuid"], kdu_model=k8s_instance_info["kdu-model"], atomic=True, @@ -2200,7 +2205,9 @@ class NsLcm(LcmBase): db_dict=db_dict_install, timeout=timeout, kdu_name=k8s_instance_info["kdu-name"], - namespace=k8s_instance_info["namespace"]) + namespace=k8s_instance_info["namespace"], + kdu_instance=kdu_instance, + ) self.update_db_2("nsrs", nsr_id, 
{nsr_db_path + ".kdu-instance": kdu_instance}) # Obtain services to obtain management service ip diff --git a/osm_lcm/tests/test_lcm_helm_conn.py b/osm_lcm/tests/test_lcm_helm_conn.py index db9f47d..47838b3 100644 --- a/osm_lcm/tests/test_lcm_helm_conn.py +++ b/osm_lcm/tests/test_lcm_helm_conn.py @@ -67,7 +67,12 @@ class TestLcmHelmConn(asynctest.TestCase): db_dict = {} artifact_path = "helm_sample_charm" helm_chart_id = "helm_sample_charm_0001" - self.helm_conn._k8sclusterhelm3.install = asynctest.CoroutineMock(return_value=helm_chart_id) + self.helm_conn._k8sclusterhelm3.install = asynctest.CoroutineMock(return_value=None) + self.helm_conn._k8sclusterhelm3.generate_kdu_instance_name = Mock() + self.helm_conn._k8sclusterhelm3.generate_kdu_instance_name.return_value = helm_chart_id + self.helm_conn._k8sclusterhelm2.generate_kdu_instance_name = Mock() + self.helm_conn._k8sclusterhelm2.generate_kdu_instance_name.return_value = helm_chart_id + self.db.get_one.return_value = {"_admin": {"helm-chart-v3": {"id": "myk8s_id"}}} ee_id, _ = await self.helm_conn.create_execution_environment(namespace, db_dict, @@ -77,6 +82,7 @@ class TestLcmHelmConn(asynctest.TestCase): "Check ee_id format: :.") self.helm_conn._k8sclusterhelm3.install.assert_called_once_with("myk8s_id", kdu_model="/helm_sample_charm", + kdu_instance=helm_chart_id, namespace="osm", db_dict=db_dict, params=None, timeout=None) diff --git a/osm_lcm/tests/test_ns.py b/osm_lcm/tests/test_ns.py index 6a609dc..c0dda15 100644 --- a/osm_lcm/tests/test_ns.py +++ b/osm_lcm/tests/test_ns.py @@ -532,7 +532,9 @@ class TestMyNS(asynctest.TestCase): db_vnfds = [db_vnfd] task_register = {} logging_text = "KDU" - self.my_ns.k8sclusterhelm3.install = asynctest.CoroutineMock(return_value="k8s_id") + self.my_ns.k8sclusterhelm3.generate_kdu_instance_name = asynctest.mock.Mock() + self.my_ns.k8sclusterhelm3.generate_kdu_instance_name.return_value = "k8s_id" + self.my_ns.k8sclusterhelm3.install = asynctest.CoroutineMock() self.my_ns.k8sclusterhelm3.synchronize_repos = asynctest.CoroutineMock(return_value=("", "")) self.my_ns.k8sclusterhelm3.get_services = asynctest.CoroutineMock(return_value=([])) await self.my_ns.deploy_kdus(logging_text, nsr_id, nslcmop_id, db_vnfrs, db_vnfds, task_register) -- GitLab From 04694c678958696bbdb0a09b43e679b2548acf0e Mon Sep 17 00:00:00 2001 From: garciaale Date: Tue, 2 Mar 2021 10:49:28 -0300 Subject: [PATCH 10/35] Fixes 1440 with proper iteration of dict Change-Id: If1cdf143aaebb8c5246a8cae6cc6aff239ab0637 Signed-off-by: garciaale --- osm_lcm/ns.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/osm_lcm/ns.py b/osm_lcm/ns.py index 75db8e5..a006f8e 100644 --- a/osm_lcm/ns.py +++ b/osm_lcm/ns.py @@ -595,7 +595,7 @@ class NsLcm(LcmBase): for param in ("vim-network-name", "vim-network-id"): if vld_params.get(param): if isinstance(vld_params[param], dict): - for vim, vim_net in vld_params[param]: + for vim, vim_net in vld_params[param].items(): other_target_vim = "vim:" + vim populate_dict(target_vld["vim_info"], (other_target_vim, param.replace("-", "_")), vim_net) else: # isinstance str -- GitLab From c43253de0f53c4bbd3f4b67c6d57c6efc437cd7a Mon Sep 17 00:00:00 2001 From: David Garcia Date: Thu, 4 Mar 2021 13:12:48 +0100 Subject: [PATCH 11/35] Fix minor issue in LCMHelmConn scaling_in argument has been added to the delete_execution_environment for the juju connector. This patch adds kwargs to the helm connector. The scaling_in is not currently used by helm. 
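With **kwargs in place, a shared call site can pass juju-specific flags
without branching on the connector type; a minimal illustration
(hypothetical caller, assuming only the common N2VCConnector interface):

    # consumed by the juju connector, absorbed by **kwargs in helm:
    await connector.delete_execution_environment(vca["ee_id"], scaling_in=True)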
Change-Id: I4723d6f194cdd64a4b3bbef348457fb6c2a1e47c Signed-off-by: David Garcia --- osm_lcm/lcm_helm_conn.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/osm_lcm/lcm_helm_conn.py b/osm_lcm/lcm_helm_conn.py index 978b061..34e4915 100644 --- a/osm_lcm/lcm_helm_conn.py +++ b/osm_lcm/lcm_helm_conn.py @@ -434,7 +434,13 @@ class LCMHelmConn(N2VCConnector, LcmBase): # nothing to be done pass - async def delete_execution_environment(self, ee_id: str, db_dict: dict = None, total_timeout: float = None): + async def delete_execution_environment( + self, + ee_id: str, + db_dict: dict = None, + total_timeout: float = None, + **kwargs, + ): """ Delete an execution environment :param str ee_id: id of the execution environment to delete, included namespace.helm_id -- GitLab From 6d9a7511c261e9da05766aa32dd635462f762c64 Mon Sep 17 00:00:00 2001 From: David Garcia Date: Fri, 5 Mar 2021 15:01:54 +0100 Subject: [PATCH 12/35] Add missing parameter to generate_kdu_instance_name In the commit c4da25cc that fixes 1412 I forgot to add the kdu_name variable. Change-Id: I12697d66e590660fe27b3a5abd32634b509a4fbd Signed-off-by: David Garcia --- osm_lcm/ns.py | 1 + 1 file changed, 1 insertion(+) diff --git a/osm_lcm/ns.py b/osm_lcm/ns.py index a006f8e..ca85661 100644 --- a/osm_lcm/ns.py +++ b/osm_lcm/ns.py @@ -2195,6 +2195,7 @@ class NsLcm(LcmBase): kdu_instance = self.k8scluster_map[k8sclustertype].generate_kdu_instance_name( db_dict=db_dict_install, kdu_model=k8s_instance_info["kdu-model"], + kdu_name=k8s_instance_info["kdu-name"], ) self.update_db_2("nsrs", nsr_id, {nsr_db_path + ".kdu-instance": kdu_instance}) await self.k8scluster_map[k8sclustertype].install( -- GitLab From 625e025c3062278a5684762f5042df9e4e944ac5 Mon Sep 17 00:00:00 2001 From: limon Date: Mon, 15 Mar 2021 09:39:43 +0100 Subject: [PATCH 13/35] Fix Pre and Post Scale operations Change-Id: If865dec846b0d28e31f272d512cb47ad62b2948a Signed-off-by: limon --- osm_lcm/ns.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/osm_lcm/ns.py b/osm_lcm/ns.py index ca85661..7f73c19 100644 --- a/osm_lcm/ns.py +++ b/osm_lcm/ns.py @@ -3896,7 +3896,7 @@ class NsLcm(LcmBase): vdu_count_index=None, ee_descriptor_id=ee_descriptor_id) result, result_detail = await self._ns_execute_primitive( - ee_id, primitive_name, primitive_params, vca_type) + ee_id, primitive_name, primitive_params, vca_type=vca_type) self.logger.debug(logging_text + "vnf_config_primitive={} Done with result {} {}".format( vnf_config_primitive, result, result_detail)) # Update operationState = COMPLETED | FAILED @@ -4161,7 +4161,7 @@ class NsLcm(LcmBase): vdu_count_index=None, ee_descriptor_id=ee_descriptor_id) result, result_detail = await self._ns_execute_primitive( - ee_id, primitive_name, primitive_params, vca_type) + ee_id, primitive_name, primitive_params, vca_type=vca_type) self.logger.debug(logging_text + "vnf_config_primitive={} Done with result {} {}".format( vnf_config_primitive, result, result_detail)) # Update operationState = COMPLETED | FAILED -- GitLab From 3fde2c74e06b9480af749666ac0fd25af128d9a2 Mon Sep 17 00:00:00 2001 From: ksaikiranr Date: Mon, 15 Mar 2021 10:39:06 +0530 Subject: [PATCH 14/35] Feature-9904: Enhancing NG-UI to enable Juju operational view dashboard Implemented functions to receive vcaStatus update message from kafka and update nsr record in mongo. 
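For context, the handler added in lcm.py below is driven by a
"vca_status_refresh" command on the "ns" topic; a hedged sketch of the
producer side (msg_bus stands for the osm_common message bus used
elsewhere in OSM, with the nslcmop record as the message payload):

    await msg_bus.aiowrite("ns", "vca_status_refresh", nslcmop)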
Addressed review comments Change-Id: I54f526c74fd27328c88721002d41bd85a182f9eb Signed-off-by: Priyadharshini G S Signed-off-by: ksaikiranr --- osm_lcm/lcm.py | 7 +++++++ osm_lcm/ns.py | 19 ++++++++++++++++++- 2 files changed, 25 insertions(+), 1 deletion(-) diff --git a/osm_lcm/lcm.py b/osm_lcm/lcm.py index 6c8be98..659a57c 100644 --- a/osm_lcm/lcm.py +++ b/osm_lcm/lcm.py @@ -339,6 +339,13 @@ class Lcm: task = asyncio.ensure_future(self.ns.terminate(nsr_id, nslcmop_id)) self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "ns_terminate", task) return + elif command == "vca_status_refresh": + nslcmop = params + nslcmop_id = nslcmop["_id"] + nsr_id = nslcmop["nsInstanceId"] + task = asyncio.ensure_future(self.ns.vca_status_refresh(nsr_id, nslcmop_id)) + self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "ns_vca_status_refresh", task) + return elif command == "action": # self.logger.debug("Update NS {}".format(nsr_id)) nslcmop = params diff --git a/osm_lcm/ns.py b/osm_lcm/ns.py index 7f73c19..7a07321 100644 --- a/osm_lcm/ns.py +++ b/osm_lcm/ns.py @@ -208,7 +208,6 @@ class NsLcm(LcmBase): # self.logger.debug('_on_update_n2vc_db(table={}, filter={}, path={}, updated_data={}' # .format(table, filter, path, updated_data)) - try: nsr_id = filter.get('_id') @@ -3372,6 +3371,24 @@ class NsLcm(LcmBase): except Exception as e: return 'FAIL', 'Error executing action {}: {}'.format(primitive, e) + async def vca_status_refresh(self, nsr_id, nslcmop_id): + """ + Updating the vca_status with latest juju information in nsrs record + :param: nsr_id: Id of the nsr + :param: nslcmop_id: Id of the nslcmop + :return: None + """ + + self.logger.debug("Task ns={} action={} Enter".format(nsr_id, nslcmop_id)) + db_nsr = self.db.get_one("nsrs", {"_id": nsr_id}) + + for vca_index, _ in enumerate(db_nsr['_admin']['deployed']['VCA']): + table, filter, path = "nsrs", {"_id": nsr_id}, "_admin.deployed.VCA.{}.".format(vca_index) + await self._on_update_n2vc_db(table, filter, path, {}) + + self.logger.debug("Task ns={} action={} Exit".format(nsr_id, nslcmop_id)) + self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_vca_status_refresh") + async def action(self, nsr_id, nslcmop_id): # Try to lock HA task here task_is_locked_by_me = self.lcm_tasks.lock_HA('ns', 'nslcmops', nslcmop_id) -- GitLab From 5b2c45aad0bbfdd4410ed3912ff6a21cea1216a9 Mon Sep 17 00:00:00 2001 From: ksaikiranr Date: Wed, 27 Jan 2021 22:13:22 +0530 Subject: [PATCH 15/35] Feature-9904: Enhancing NG-UI to enable Juju operational view dashboard Integrated functions implemented in n2vc module for actions list, configs list and executed actions/history of actions Change-Id: I25db0d9fe4535b9908883a6f2175c3e80997e7cf Signed-off-by: ksaikiranr --- osm_lcm/ns.py | 1 + 1 file changed, 1 insertion(+) diff --git a/osm_lcm/ns.py b/osm_lcm/ns.py index 7a07321..a11a2fa 100644 --- a/osm_lcm/ns.py +++ b/osm_lcm/ns.py @@ -222,6 +222,7 @@ class NsLcm(LcmBase): # vcaStatus db_dict = dict() db_dict['vcaStatus'] = status_dict + await self.n2vc.update_vca_status(db_dict['vcaStatus']) # update configurationStatus for this VCA try: -- GitLab From 656b6ddc53d7b6640a7b4fad1a7b80241dce9e70 Mon Sep 17 00:00:00 2001 From: ksaikiranr Date: Fri, 19 Feb 2021 10:25:18 +0530 Subject: [PATCH 16/35] Feature-9904: Enhancing NG-UI to enable Juju operational view dashboard In ns.py file Added _on_update_k8s_db function to update KNF vcaStatus in NSR record Integrated the vca status refresh function for updating KNF status Change-Id: Ieba74e3d6d45766ad4b808665a3c62b046a42b2a Signed-off-by: jayaramans 
Signed-off-by: ksaikiranr --- osm_lcm/ns.py | 51 +++++++++++++++++++++++++++++++++++++++++++++------ 1 file changed, 45 insertions(+), 6 deletions(-) diff --git a/osm_lcm/ns.py b/osm_lcm/ns.py index a11a2fa..fab078f 100644 --- a/osm_lcm/ns.py +++ b/osm_lcm/ns.py @@ -137,7 +137,7 @@ class NsLcm(LcmBase): juju_command=self.vca_config.get("jujupath"), log=self.logger, loop=self.loop, - on_update_db=None, + on_update_db=self._on_update_k8s_db, vca_config=self.vca_config, fs=self.fs, db=self.db @@ -289,6 +289,40 @@ class NsLcm(LcmBase): except Exception as e: self.logger.warn('Error updating NS state for ns={}: {}'.format(nsr_id, e)) + async def _on_update_k8s_db(self, cluster_uuid, kdu_instance, filter=None): + """ + Updating vca status in NSR record + :param cluster_uuid: UUID of a k8s cluster + :param kdu_instance: The unique name of the KDU instance + :param filter: To get nsr_id + :return: none + """ + + # self.logger.debug("_on_update_k8s_db(cluster_uuid={}, kdu_instance={}, filter={}" + # .format(cluster_uuid, kdu_instance, filter)) + + try: + nsr_id = filter.get('_id') + + # get vca status for NS + vca_status = await self.k8sclusterjuju.status_kdu(cluster_uuid, + kdu_instance, + complete_status=True, + yaml_format=False) + # vcaStatus + db_dict = dict() + db_dict['vcaStatus'] = {nsr_id: vca_status} + + await self.k8sclusterjuju.update_vca_status(db_dict['vcaStatus'], kdu_instance) + + # write to database + self.update_db_2("nsrs", nsr_id, db_dict) + + except (asyncio.CancelledError, asyncio.TimeoutError): + raise + except Exception as e: + self.logger.warn('Error updating NS state for ns={}: {}'.format(nsr_id, e)) + @staticmethod def _parse_cloud_init(cloud_init_text, additional_params, vnfd_id, vdu_id): try: @@ -2259,7 +2293,7 @@ class NsLcm(LcmBase): cluster_uuid=k8s_instance_info["k8scluster-uuid"], kdu_instance=kdu_instance, primitive_name=initial_config_primitive["name"], - params=primitive_params_, db_dict={}), + params=primitive_params_, db_dict=db_dict_install), timeout=timeout) except Exception as e: @@ -3382,10 +3416,15 @@ class NsLcm(LcmBase): self.logger.debug("Task ns={} action={} Enter".format(nsr_id, nslcmop_id)) db_nsr = self.db.get_one("nsrs", {"_id": nsr_id}) - - for vca_index, _ in enumerate(db_nsr['_admin']['deployed']['VCA']): - table, filter, path = "nsrs", {"_id": nsr_id}, "_admin.deployed.VCA.{}.".format(vca_index) - await self._on_update_n2vc_db(table, filter, path, {}) + if db_nsr['_admin']['deployed']['K8s']: + for k8s_index, k8s in enumerate(db_nsr['_admin']['deployed']['K8s']): + cluster_uuid, kdu_instance = k8s["k8scluster-uuid"], k8s["kdu-instance"] + await self._on_update_k8s_db(cluster_uuid, kdu_instance, filter={'_id': nsr_id}) + else: + for vca_index, _ in enumerate(db_nsr['_admin']['deployed']['VCA']): + table, filter = "nsrs", {"_id": nsr_id} + path = "_admin.deployed.VCA.{}.".format(vca_index) + await self._on_update_n2vc_db(table, filter, path, {}) self.logger.debug("Task ns={} action={} Exit".format(nsr_id, nslcmop_id)) self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_vca_status_refresh") -- GitLab From 4369058fc74040102bc7d5d41cd92b579e9027dc Mon Sep 17 00:00:00 2001 From: ksaikiranr Date: Wed, 17 Mar 2021 11:41:22 +0530 Subject: [PATCH 17/35] Feature-9904: Enhancing NG-UI to enable Juju operational view dashboard Added unit tests for vca status refresh in lcm Change-Id: Ic2009a321991526f284d88858fd6ee19a2eed960 Signed-off-by: gspri Signed-off-by: ksaikiranr --- osm_lcm/tests/test_db_descriptors.py | 442 +++++++++++++++++++++++++++ 
osm_lcm/tests/test_ns.py | 19 ++ 2 files changed, 461 insertions(+) diff --git a/osm_lcm/tests/test_db_descriptors.py b/osm_lcm/tests/test_db_descriptors.py index 6b4be6a..9decd4e 100644 --- a/osm_lcm/tests/test_db_descriptors.py +++ b/osm_lcm/tests/test_db_descriptors.py @@ -231,6 +231,7 @@ db_nsrs_text = """ - _admin: created: 1566823354.3716335 deployed: + K8s: [] RO: nsd_id: 876573b5-968d-40b9-b52b-91bf5c5844f7 nsr_id: c9fe9908-3180-430d-b633-fca2f68db008 @@ -287,6 +288,446 @@ db_nsrs_text = """ create-time: 1566823354.36234 datacenter: ea958ba5-4e58-4405-bf42-6e3be15d4c3a description: default description + vcaStatus: + 8c707f16-2d9b-49d6-af5e-2ce9985b2adf: + applications: + app-vnf-1fb8538dfc39: + can_upgrade_to: '' + charm: 'local:xenial/simple-1' + charm_profile: '' + charm_version: '' + endpoint_bindings: null + err: null + exposed: false + int_: null + life: '' + meter_statuses: { } + provider_id: null + public_address: '' + relations: { } + series: xenial + status: + data: { } + err: null + info: Ready! + kind: '' + life: '' + since: '2021-02-17T08:39:54.239185095Z' + status: active + unknown_fields: { } + version: '' + subordinate_to: [ ] + units: + app-vnf-1fb8538dfc39/0: + address: null + agent_status: + data: { } + err: null + info: '' + kind: '' + life: '' + since: '2021-02-17T08:52:18.077155028Z' + status: idle + unknown_fields: { } + version: 2.8.1 + charm: '' + leader: true + machine: '0' + opened_ports: null + provider_id: null + public_address: 10.151.40.53 + subordinates: { } + unknown_fields: { } + workload_status: + data: { } + err: null + info: Ready! + kind: '' + life: '' + since: '2021-02-17T08:39:54.239185095Z' + status: active + unknown_fields: { } + version: '' + workload_version: '' + unknown_fields: + charm-verion: '' + workload_version: '' + actions: + generate-ssh-key: >- + Generate a new SSH keypair for this unit. This will replace any + existing previously generated keypair. + get-ssh-public-key: Get the public SSH key for this unit. + reboot: Reboot the VNF virtual machine. + restart: Stop the service on the VNF. + run: Run an arbitrary command + start: Stop the service on the VNF. + stop: Stop the service on the VNF. + touch: Touch a file on the VNF. + upgrade: Upgrade the software on the VNF. + verify-ssh-credentials: >- + Verify that this unit can authenticate with server specified by + ssh-hostname and ssh-username. + configs: + boolean-option: + default: false + description: A short description of the configuration option + source: default + type: boolean + value: false + int-option: + default: 9001 + description: A short description of the configuration option + source: default + type: int + value: 9001 + ssh-hostname: + default: '' + description: The hostname or IP address of the machine to + source: user + type: string + value: 192.168.61.90 + ssh-key-bits: + default: 4096 + description: The number of bits to use for the SSH key. + source: default + type: int + value: 4096 + ssh-key-type: + default: rsa + description: The type of encryption to use for the SSH key. + source: default + type: string + value: rsa + ssh-password: + default: '' + description: The password used to authenticate. + source: user + type: string + value: osm4u + ssh-private-key: + default: '' + description: DEPRECATED. The private ssh key to be used to authenticate. + source: default + type: string + value: '' + ssh-public-key: + default: '' + description: The public key of this unit. 
+ source: default + type: string + value: '' + ssh-username: + default: '' + description: The username to login as. + source: user + type: string + value: ubuntu + string-option: + default: Default Value + description: A short description of the configuration option + source: default + type: string + value: Default Value + app-vnf-943ab4274bb6: + can_upgrade_to: '' + charm: 'local:xenial/simple-0' + charm_profile: '' + charm_version: '' + endpoint_bindings: null + err: null + exposed: false + int_: null + life: '' + meter_statuses: { } + provider_id: null + public_address: '' + relations: { } + series: xenial + status: + data: { } + err: null + info: Ready! + kind: '' + life: '' + since: '2021-02-17T08:39:15.165682713Z' + status: active + unknown_fields: { } + version: '' + subordinate_to: [ ] + units: + app-vnf-943ab4274bb6/0: + address: null + agent_status: + data: { } + err: null + info: '' + kind: '' + life: '' + since: '2021-02-17T08:46:06.473054303Z' + status: idle + unknown_fields: { } + version: 2.8.1 + charm: '' + leader: true + machine: '1' + opened_ports: null + provider_id: null + public_address: 10.151.40.117 + subordinates: { } + unknown_fields: { } + workload_status: + data: { } + err: null + info: Ready! + kind: '' + life: '' + since: '2021-02-17T08:39:15.165682713Z' + status: active + unknown_fields: { } + version: '' + workload_version: '' + unknown_fields: + charm-verion: '' + workload_version: '' + actions: + generate-ssh-key: >- + Generate a new SSH keypair for this unit. This will replace any + existing previously generated keypair. + get-ssh-public-key: Get the public SSH key for this unit. + reboot: Reboot the VNF virtual machine. + restart: Stop the service on the VNF. + run: Run an arbitrary command + start: Stop the service on the VNF. + stop: Stop the service on the VNF. + touch: Touch a file on the VNF. + upgrade: Upgrade the software on the VNF. + verify-ssh-credentials: >- + Verify that this unit can authenticate with server specified by + ssh-hostname and ssh-username. + configs: + boolean-option: + default: false + description: A short description of the configuration option + source: default + type: boolean + value: false + int-option: + default: 9001 + description: A short description of the configuration option + source: default + type: int + value: 9001 + ssh-hostname: + default: '' + description: The hostname or IP address of the machine to + source: user + type: string + value: 192.168.61.72 + ssh-key-bits: + default: 4096 + description: The number of bits to use for the SSH key. + source: default + type: int + value: 4096 + ssh-key-type: + default: rsa + description: The type of encryption to use for the SSH key. + source: default + type: string + value: rsa + ssh-password: + default: '' + description: The password used to authenticate. + source: user + type: string + value: osm4u + ssh-private-key: + default: '' + description: DEPRECATED. The private ssh key to be used to authenticate. + source: default + type: string + value: '' + ssh-public-key: + default: '' + description: The public key of this unit. + source: default + type: string + value: '' + ssh-username: + default: '' + description: The username to login as. 
+ source: user + type: string + value: ubuntu + string-option: + default: Default Value + description: A short description of the configuration option + source: default + type: string + value: Default Value + branches: { } + controller_timestamp: '2021-02-17T09:17:38.006569064Z' + machines: + '0': + agent_status: + data: { } + err: null + info: '' + kind: '' + life: '' + since: '2021-02-17T08:37:46.637167056Z' + status: started + unknown_fields: { } + version: 2.8.1 + constraints: '' + containers: { } + display_name: '' + dns_name: 10.151.40.53 + hardware: arch=amd64 cores=0 mem=0M + has_vote: false + id_: '0' + instance_id: juju-0f027b-0 + instance_status: + data: { } + err: null + info: Running + kind: '' + life: '' + since: '2021-02-17T08:35:58.435458338Z' + status: running + unknown_fields: { } + version: '' + ip_addresses: + - 10.151.40.53 + jobs: + - JobHostUnits + lxd_profiles: { } + modification_status: + data: { } + err: null + info: '' + kind: '' + life: '' + since: '2021-02-17T08:35:34.663795891Z' + status: idle + unknown_fields: { } + version: '' + network_interfaces: + eth0: + dns_nameservers: null + gateway: 10.151.40.1 + ip_addresses: + - 10.151.40.53 + is_up: true + mac_address: '00:16:3e:99:bf:c7' + space: null + unknown_fields: { } + primary_controller_machine: null + series: xenial + unknown_fields: { } + + wants_vote: false + '1': + agent_status: + data: { } + err: null + info: '' + kind: '' + life: '' + since: '2021-02-17T08:37:00.893313184Z' + status: started + unknown_fields: { } + version: 2.8.1 + constraints: '' + containers: { } + display_name: '' + dns_name: 10.151.40.117 + hardware: arch=amd64 cores=0 mem=0M + has_vote: false + id_: '1' + instance_id: juju-0f027b-1 + instance_status: + data: { } + err: null + info: Running + kind: '' + life: '' + since: '2021-02-17T08:36:23.354547217Z' + status: running + unknown_fields: { } + version: '' + ip_addresses: + - 10.151.40.117 + jobs: + - JobHostUnits + lxd_profiles: { } + modification_status: + data: { } + err: null + info: '' + kind: '' + life: '' + since: '2021-02-17T08:35:34.768829507Z' + status: idle + unknown_fields: { } + version: '' + network_interfaces: + eth0: + dns_nameservers: null + gateway: 10.151.40.1 + ip_addresses: + - 10.151.40.117 + is_up: true + mac_address: '00:16:3e:99:fe:1c' + space: null + unknown_fields: { } + primary_controller_machine: null + series: xenial + unknown_fields: { } + wants_vote: false + model: + available_version: '' + cloud_tag: cloud-localhost + migration: null + name: 7c707f16-2d9b-49d6-af5e-2ce9985b2adf + region: localhost + unknown_fields: + meter-status: + color: '' + message: '' + model-status: + data: { } + info: '' + kind: '' + + life: '' + since: '2021-02-17T08:35:31.856691457Z' + status: available + version: '' + sla: unsupported + type: iaas + version: 2.8.1 + offers: { } + relations: [ ] + remote_applications: { } + unknown_fields: { } + executedActions: + - id: '6' + action: touch + status: completed + Code: '0' + output: '' + - id: '4' + action: touch + status: completed + Code: '0' + output: '' + - id: '2' + action: verify-ssh-credentials + status: completed + Code: '0' + output: ALF-1-mgmtvm-1 + verified: 'True' detailed-status: 'ERROR executing proxy charm initial primitives for member_vnf_index=1 vdu_id=None: charm error executing primitive verify-ssh-credentials for member_vnf_index=1 vdu_id=None: ''timeout after 600 seconds''' @@ -423,6 +864,7 @@ db_nsrs_text = """ create-time: 1575034636.9990137 datacenter: ea958ba5-4e58-4405-bf42-6e3be15d4c3a 
description: default description + vcaStatus: {} detailed-status: done id: 0bcb701c-ee4d-41ab-8ee6-f4156f7f114d instantiate_params: diff --git a/osm_lcm/tests/test_ns.py b/osm_lcm/tests/test_ns.py index c0dda15..28b8fbe 100644 --- a/osm_lcm/tests/test_ns.py +++ b/osm_lcm/tests/test_ns.py @@ -342,6 +342,25 @@ class TestMyNS(asynctest.TestCase): self.assertEqual(return_value, expected_value) # print("scale_result: {}".format(self.db.get_one("nslcmops", {"_id": nslcmop_id}).get("detailed-status"))) + async def test_vca_status_refresh(self): + nsr_id = descriptors.test_ids["TEST-A"]["ns"] + nslcmop_id = descriptors.test_ids["TEST-A"]["instantiate"] + await self.my_ns.vca_status_refresh(nsr_id, nslcmop_id) + expected_value = dict() + return_value = dict() + vnf_descriptors = self.db.get_list("vnfds") + for i, _ in enumerate(vnf_descriptors): + for j, value in enumerate(vnf_descriptors[i]["df"]): + if "lcm-operations-configuration" in vnf_descriptors[i]["df"][j]: + if "day1-2" in value["lcm-operations-configuration"]["operate-vnf-op-config"]: + for k, v in enumerate(value["lcm-operations-configuration"]["operate-vnf-op-config"]["day1-2"]): + if "juju" in v["execution-environment-list"][k]: + expected_value = self.db.get_list("nsrs")[i]["vcaStatus"] + await self.my_ns._on_update_n2vc_db("nsrs", {"_id": nsr_id}, + "_admin.deployed.VCA.0", {}) + return_value = self.db.get_list("nsrs")[i]["vcaStatus"] + self.assertEqual(return_value, expected_value) + # Test _retry_or_skip_suboperation() # Expected result: # - if a suboperation's 'operationState' is marked as 'COMPLETED', SUBOPERATION_STATUS_SKIP is expected -- GitLab From b1c9f37d645d82c14f1ee85e8b9f8362cc978c94 Mon Sep 17 00:00:00 2001 From: ksaikiranr Date: Mon, 15 Mar 2021 11:07:29 +0530 Subject: [PATCH 18/35] Bug 1422 fixed: NSR record contains stale vcaStatus after successful completion of day 2
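In outline, the fix repoints each day-2 status notification at the deployed VCA entry inside the NSR, instead of at the nslcmop document, so that _on_update_n2vc_db() refreshes vcaStatus once the primitive completes. A minimal sketch of the corrected wiring, reusing only names that appear in the diff below (error handling elided):

    # Pick the VCA that serves this member-vnf-index; its position in the
    # deployed VCA list becomes part of the database path. The callback
    # derives nsr_id from "filter" and rewrites vcaStatus in "nsrs", so
    # "path" must address _admin.deployed.VCA.<index> rather than the old
    # "admin.VCA" path under "nslcmops".
    for vca_index, vca_deployed in enumerate(db_nsr["_admin"]["deployed"]["VCA"]):
        if vca_deployed.get("member-vnf-index") == vnf_index:
            db_dict = {
                "collection": "nsrs",
                "filter": {"_id": nsr_id},
                "path": "_admin.deployed.VCA.{}.".format(vca_index),
            }
            break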
Addressed review comments Change-Id: I2f5585ee48d4641136c6ac146bb10b3cf928b006 Signed-off-by: Priyadharshini G S Signed-off-by: ksaikiranr --- osm_lcm/ns.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/osm_lcm/ns.py b/osm_lcm/ns.py index fab078f..5b0b7b0 100644 --- a/osm_lcm/ns.py +++ b/osm_lcm/ns.py @@ -3607,16 +3607,19 @@ class NsLcm(LcmBase): ee_id, vca_type = self._look_for_deployed_vca(nsr_deployed["VCA"], member_vnf_index=vnf_index, vdu_id=vdu_id, vdu_count_index=vdu_count_index, ee_descriptor_id=ee_descriptor_id) - db_nslcmop_notif = {"collection": "nslcmops", - "filter": {"_id": nslcmop_id}, - "path": "admin.VCA"} + for vca_index, vca_deployed in enumerate(db_nsr['_admin']['deployed']['VCA']): + if vca_deployed.get("member-vnf-index") == vnf_index: + db_dict = {"collection": "nsrs", + "filter": {"_id": nsr_id}, + "path": "_admin.deployed.VCA.{}.".format(vca_index)} + break nslcmop_operation_state, detailed_status = await self._ns_execute_primitive( ee_id, primitive=primitive_name, primitive_params=self._map_primitive_params(config_primitive_desc, primitive_params, desc_params), timeout=timeout_ns_action, vca_type=vca_type, - db_dict=db_nslcmop_notif) + db_dict=db_dict) db_nslcmop_update["detailed-status"] = detailed_status error_description_nslcmop = detailed_status if nslcmop_operation_state == "FAILED" else "" -- GitLab From acf83f8f25e8c900e32826be970804d9c82b00b3 Mon Sep 17 00:00:00 2001 From: beierlm Date: Wed, 17 Mar 2021 08:11:23 -0400 Subject: [PATCH 19/35] Feature 10296 Pip Standardization Creates standard template for tox.ini Introduces 'safety' for listing upstream CVEs Pins all versions of all upstream modules Removes Makefile step and places all build logic into tox.ini Change-Id: I15c8ec22ad911d5bfa67df5a85175642a59da971 Signed-off-by: beierlm --- .gitignore | 34 +++++ Dockerfile | 42 +++--- MANIFEST.in | 2 +- devops-stages/stage-build.sh | 5 +- devops-stages/stage-test.sh | 5 +- Makefile => requirements-dev.in | 16 +- requirements-dev.txt | 140 ++++++++++++++++++ requirements-dist.in | 17 +++ requirements-dist.txt | 23 +++ test-requirements.txt => requirements-test.in | 5 +- requirements-test.txt | 28 ++++ ...ython3-osm-lcm.postinst => requirements.in | 20 +-- requirements.txt | 80 +++++++--- setup.py | 21 +-- stdeb.cfg | 1 - tox.ini | 133 +++++++++++++---- 16 files changed, 449 insertions(+), 123 deletions(-) create mode 100644 .gitignore rename Makefile => requirements-dev.in (52%) create mode 100644 requirements-dev.txt create mode 100644 requirements-dist.in create mode 100644 requirements-dist.txt rename test-requirements.txt => requirements-test.in (96%) create mode 100644 requirements-test.txt rename debian/python3-osm-lcm.postinst => requirements.in (60%) mode change 100755 => 100644 diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..e6dc4cb --- /dev/null +++ b/.gitignore @@ -0,0 +1,34 @@ +# Copyright ETSI Contributors and others. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +__pycache__ +*.pyc +.tox/ +dist/ +.cache/ +.local/ +.eggs +osm_lcm.egg-info/ +.coverage +cover +coverage.xml +.tox +nosetests.xml +.cache +.vscode/ +.project +.settings +.pydevproject +pyvenv.cfg +venv/ diff --git a/Dockerfile b/Dockerfile index 79c1a50..3fad212 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,5 +1,3 @@ -# Copyright 2018 Telefonica S.A. -# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at @@ -12,27 +10,27 @@ # implied. # See the License for the specific language governing permissions and # limitations under the License. - -# This Dockerfile is intented for devops and deb package generation +######################################################################################## +# This Dockerfile is intended for devops testing and deb package generation +# +# To run stage 2 locally: +# +# docker build -t stage2 . +# docker run -ti -v `pwd`:/work -w /work --entrypoint /bin/bash stage2 +# devops-stages/stage-test.sh +# devops-stages/stage-build.sh # -# Use Dockerfile.local for running osm/LCM in a docker container from source - FROM ubuntu:18.04 -RUN apt-get update && \ - DEBIAN_FRONTEND=noninteractive apt-get --yes install git tox make debhelper wget \ - python-all python3 python3-pip python3-all apt-utils && \ - DEBIAN_FRONTEND=noninteractive pip3 install -U setuptools setuptools-version-command stdeb - -# TODO delete if not needed: -# libcurl4-gnutls-dev libgnutls-dev python-dev python3-dev python-setuptools - - -# Uncomment this block to generate automatically a debian package and show info -# # Set the working directory to /app -# WORKDIR /app -# # Copy the current directory contents into the container at /app -# ADD . /app -# CMD /app/devops-stages/stage-build.sh && find -name "*.deb" -exec dpkg -I {} ";" - +RUN DEBIAN_FRONTEND=noninteractive apt-get update && \ + DEBIAN_FRONTEND=noninteractive apt-get -y install \ + debhelper \ + git \ + python3 \ + python3-all \ + python3-dev \ + python3-setuptools + +RUN python3 -m easy_install pip==21.0.1 RUN pip3 install tox==3.22.0 diff --git a/MANIFEST.in b/MANIFEST.in index 84ee0e9..46ee438 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -14,6 +14,6 @@ # limitations under the License. include README.rst -recursive-include osm_lcm *.py *.xml *.sh lcm.cfg +recursive-include osm_lcm *.py *.xml *.sh lcm.cfg *.txt recursive-include devops-stages * diff --git a/devops-stages/stage-build.sh b/devops-stages/stage-build.sh index 2db54db..47c2efc 100755 --- a/devops-stages/stage-build.sh +++ b/devops-stages/stage-build.sh @@ -14,7 +14,6 @@ # under the License. ## -# For the moment it is not needed any post-intall action, so tox instead of Makefile is used - -make clean package +rm -rf dist deb_dist osm*.tar.gz *.egg-info .eggs +tox -e dist \ No newline at end of file diff --git a/devops-stages/stage-test.sh b/devops-stages/stage-test.sh index 36e445d..141fa55 100755 --- a/devops-stages/stage-test.sh +++ b/devops-stages/stage-test.sh @@ -13,5 +13,6 @@ # License for the specific language governing permissions and limitations # under the License. ## -rm -f nosetests.xml -tox # flake8 unittest + +OUTPUT=$(TOX_PARALLEL_NO_SPINNER=1 tox --parallel=auto) +printf "$OUTPUT" diff --git a/Makefile b/requirements-dev.in similarity index 52% rename from Makefile rename to requirements-dev.in index cdbb183..823c52c 100644 --- a/Makefile +++ b/requirements-dev.in @@ -1,4 +1,4 @@ -# Copyright 2018 Telefonica S.A.
+# Copyright ETSI Contributors and Others. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,15 +13,5 @@ # See the License for the specific language governing permissions and # limitations under the License. -all: clean package - -clean: - rm -rf dist deb_dist osm_lcm-*.tar.gz osm_lcm.egg-info .eggs - -package: - python3 setup.py --command-packages=stdeb.command sdist_dsc - cp debian/python3-osm-lcm.postinst deb_dist/osm-lcm*/debian - # cd deb_dist/osm-lcm*/debian && echo "osm-common python3-osm-common" > py3dist-overrides - # cd deb_dist/osm-lcm*/debian && echo "pip3 python3-pip" >> py3dist-overrides - cd deb_dist/osm-lcm*/ && dpkg-buildpackage -rfakeroot -uc -us - +-e git+https://osm.etsi.org/gerrit/osm/common.git@master#egg=osm-common +-e git+https://osm.etsi.org/gerrit/osm/N2VC.git@master#egg=n2vc diff --git a/requirements-dev.txt b/requirements-dev.txt new file mode 100644 index 0000000..459afe0 --- /dev/null +++ b/requirements-dev.txt @@ -0,0 +1,140 @@ +-e git+https://osm.etsi.org/gerrit/osm/N2VC.git@master#egg=n2vc + # via -r requirements-dev.in +-e git+https://osm.etsi.org/gerrit/osm/common.git@master#egg=osm-common + # via -r requirements-dev.in +aiokafka==0.7.0 + # via osm-common +bcrypt==3.2.0 + # via paramiko +cachetools==4.2.1 + # via google-auth +certifi==2020.12.5 + # via + # kubernetes + # requests +cffi==1.14.5 + # via + # bcrypt + # cryptography + # pynacl +chardet==4.0.0 + # via requests +cryptography==3.4.7 + # via paramiko +dataclasses==0.6 + # via osm-common +google-auth==1.28.0 + # via kubernetes +idna==2.10 + # via requests +juju==2.8.4 + # via n2vc +jujubundlelib==0.5.6 + # via theblues +kafka-python==2.0.2 + # via aiokafka +kubernetes==10.0.1 + # via n2vc +macaroonbakery==1.3.1 + # via + # juju + # theblues +mypy-extensions==0.4.3 + # via typing-inspect +oauthlib==3.1.0 + # via requests-oauthlib +paramiko==2.7.2 + # via juju +protobuf==3.15.6 + # via macaroonbakery +pyasn1-modules==0.2.8 + # via google-auth +pyasn1==0.4.8 + # via + # juju + # n2vc + # pyasn1-modules + # rsa +pycparser==2.20 + # via cffi +pycrypto==2.6.1 + # via osm-common +pymacaroons==0.13.0 + # via macaroonbakery +pymongo==3.11.3 + # via osm-common +pynacl==1.4.0 + # via + # macaroonbakery + # paramiko + # pymacaroons +pyrfc3339==1.1 + # via + # juju + # macaroonbakery +python-dateutil==2.8.1 + # via kubernetes +pytz==2021.1 + # via pyrfc3339 +pyyaml==5.4.1 + # via + # juju + # jujubundlelib + # kubernetes + # osm-common +requests-oauthlib==1.3.0 + # via kubernetes +requests==2.25.1 + # via + # kubernetes + # macaroonbakery + # requests-oauthlib + # theblues +rsa==4.7.2 + # via google-auth +six==1.15.0 + # via + # bcrypt + # google-auth + # kubernetes + # macaroonbakery + # protobuf + # pymacaroons + # pynacl + # python-dateutil + # websocket-client +theblues==0.5.2 + # via juju +toposort==1.6 + # via juju +typing-extensions==3.7.4.3 + # via typing-inspect +typing-inspect==0.6.0 + # via juju +urllib3==1.26.4 + # via + # kubernetes + # requests +websocket-client==0.58.0 + # via kubernetes +websockets==7.0 + # via juju + +# The following packages are considered to be unsafe in a requirements file: +# setuptools +####################################################################################### +# Copyright ETSI Contributors and Others. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. +####################################################################################### diff --git a/requirements-dist.in b/requirements-dist.in new file mode 100644 index 0000000..11f0a2a --- /dev/null +++ b/requirements-dist.in @@ -0,0 +1,17 @@ +# Copyright ETSI Contributors and Others. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +stdeb +setuptools-version-command \ No newline at end of file diff --git a/requirements-dist.txt b/requirements-dist.txt new file mode 100644 index 0000000..7393626 --- /dev/null +++ b/requirements-dist.txt @@ -0,0 +1,23 @@ +setuptools-version-command==2.2 + # via -r requirements-dist.in +stdeb==0.10.0 + # via -r requirements-dist.in + +# The following packages are considered to be unsafe in a requirements file: +# setuptools +####################################################################################### +# Copyright ETSI Contributors and Others. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. +####################################################################################### diff --git a/test-requirements.txt b/requirements-test.in similarity index 96% rename from test-requirements.txt rename to requirements-test.in index c2aa856..15fb5ee 100644 --- a/test-requirements.txt +++ b/requirements-test.in @@ -13,6 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -flake8<3.0 asynctest - +coverage +mock +nose2 \ No newline at end of file diff --git a/requirements-test.txt b/requirements-test.txt new file mode 100644 index 0000000..3b87659 --- /dev/null +++ b/requirements-test.txt @@ -0,0 +1,28 @@ +asynctest==0.13.0 + # via -r requirements-test.in +coverage==5.5 + # via + # -r requirements-test.in + # nose2 +mock==4.0.3 + # via -r requirements-test.in +nose2==0.10.0 + # via -r requirements-test.in +six==1.15.0 + # via nose2 +####################################################################################### +# Copyright ETSI Contributors and Others. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. +####################################################################################### diff --git a/debian/python3-osm-lcm.postinst b/requirements.in old mode 100755 new mode 100644 similarity index 60% rename from debian/python3-osm-lcm.postinst rename to requirements.in index 8a7082c..6307ee1 --- a/debian/python3-osm-lcm.postinst +++ b/requirements.in @@ -1,5 +1,3 @@ -#!/bin/bash - ## # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain @@ -12,17 +10,11 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. -# -# For those usages not covered by the Apache License, Version 2.0 please -# contact with: OSM_TECH@list.etsi.org ## -echo "POST INSTALL OSM-LCM" -echo "Installing python dependencies grpcio-tools grpclib via pip..." -echo -python3 -m pip install -U pip -python3 -m pip install grpcio-tools grpclib - -# Creation of log folder -# mkdir -p /var/log/osm - +aiohttp +grpcio-tools +grpclib +idna==2.10 +jinja2 +pyyaml diff --git a/requirements.txt b/requirements.txt index 32a09a9..9212e2f 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,22 +1,64 @@ -## -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at +aiohttp==3.7.4.post0 + # via -r requirements.in +async-timeout==3.0.1 + # via aiohttp +attrs==20.3.0 + # via aiohttp +chardet==4.0.0 + # via aiohttp +grpcio-tools==1.36.1 + # via -r requirements.in +grpcio==1.36.1 + # via grpcio-tools +grpclib==0.4.1 + # via -r requirements.in +h2==4.0.0 + # via grpclib +hpack==4.0.0 + # via h2 +hyperframe==6.0.0 + # via h2 +idna==2.10 + # via + # -r requirements.in + # yarl +jinja2==2.11.3 + # via -r requirements.in +markupsafe==1.1.1 + # via jinja2 +multidict==5.1.0 + # via + # aiohttp + # grpclib + # yarl +protobuf==3.15.6 + # via grpcio-tools +pyyaml==5.4.1 + # via -r requirements.in +six==1.15.0 + # via + # grpcio + # protobuf +typing-extensions==3.7.4.3 + # via aiohttp +yarl==1.6.3 + # via aiohttp + +# The following packages are considered to be unsafe in a requirements file: +# setuptools +####################################################################################### +# Copyright ETSI Contributors and Others. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. -## - -pyyaml -aiohttp>=2.3.10 -jinja2 -grpcio-tools -grpclib -git+https://osm.etsi.org/gerrit/osm/common.git#egg=osm-common -git+https://osm.etsi.org/gerrit/osm/N2VC.git#egg=n2vc - +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. +####################################################################################### diff --git a/setup.py b/setup.py index a2d8a1b..c1ae182 100644 --- a/setup.py +++ b/setup.py @@ -34,28 +34,13 @@ setup( # version=VERSION, # python_requires='>3.5.0', author='ETSI OSM', - author_email='alfonso.tiernosepulveda@telefonica.com', - maintainer='Alfonso Tierno', - maintainer_email='alfonso.tiernosepulveda@telefonica.com', + author_email='osmsupport@etsi.org', + maintainer='ETSI OSM', + maintainer_email='osmsupport@etsi.org', url='https://osm.etsi.org/gitweb/?p=osm/LCM.git;a=summary', license='Apache 2.0', packages=[_name], include_package_data=True, - # data_files=[('/etc/osm/', ['osm_lcm/lcm.cfg']), - # ('/etc/systemd/system/', ['osm_lcm/osm-lcm.service']), - # ], - install_requires=[ - # 'pymongo', - 'PyYAML', - 'aiohttp>=2.3.10', - 'osm-common @ git+https://osm.etsi.org/gerrit/osm/common.git#egg=osm-common', - 'n2vc @ git+https://osm.etsi.org/gerrit/osm/N2VC.git#egg=n2vc', - 'jinja2', - 'grpcio-tools', - 'grpclib', - # TODO this is version installed by 'apt python3-aiohttp' on Ubuntu Sserver 14.04 - # version installed by pip 3.3.2 is not compatible. Code should be migrated to this version and use pip3 - ], setup_requires=['setuptools-version-command'], ) diff --git a/stdeb.cfg b/stdeb.cfg index 2794a1f..ba313de 100644 --- a/stdeb.cfg +++ b/stdeb.cfg @@ -17,4 +17,3 @@ ## [DEFAULT] X-Python3-Version : >= 3.6 -Depends3: python3-osm-common, python3-n2vc, python3-yaml, python3-aiohttp, python3-jinja2, python3-pip diff --git a/tox.ini b/tox.ini index 2b8b682..630fbca 100644 --- a/tox.ini +++ b/tox.ini @@ -1,4 +1,5 @@ -# Copyright 2018 Telefonica S.A. +####################################################################################### +# Copyright ETSI Contributors and Others. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,42 +13,118 @@ # implied. # See the License for the specific language governing permissions and # limitations under the License. 
+####################################################################################### [tox] -envlist = cover, flake8 +envlist = black, cover, flake8, pylint, safety + +[tox:jenkins] +toxworkdir = /tmp/.tox [testenv] usedevelop = True basepython = python3 -install_command = python3 -m pip install -r requirements.txt -U {opts} {packages} +setenv = VIRTUAL_ENV={envdir} + PYTHONDONTWRITEBYTECODE = 1 +deps = -r{toxinidir}/requirements.txt + +####################################################################################### +[testenv:black] +deps = black +skip_install = true +commands = + - black --check --diff osm_lcm/ + +####################################################################################### [testenv:cover] -basepython = python3 -deps = - nose2 - nose2-cov - coverage - -rrequirements.txt - -rtest-requirements.txt +deps = {[testenv]deps} + -r{toxinidir}/requirements-dev.txt + -r{toxinidir}/requirements-test.txt commands = - coverage erase - nose2 -C --coverage osm_lcm --plugin nose2.plugins.junitxml -s osm_lcm/tests - coverage report --omit='*tests*' - coverage html -d ./cover --omit='*tests*' - coverage xml -o coverage.xml --omit='*tests*' + sh -c 'rm -f nosetests.xml' + coverage erase + nose2 -C --coverage osm_lcm + coverage report --omit='*tests*' + coverage html -d ./cover --omit='*tests*' + coverage xml -o coverage.xml --omit=*tests* +whitelist_externals = sh + +####################################################################################### [testenv:flake8] -basepython = python3 -deps = - flake8 - -rrequirements.txt -commands = flake8 osm_lcm --max-line-length 120 \ - --exclude .svn,CVS,.gz,.git,__pycache__,.tox,local,temp,frontend_grpc.py,frontend_pb2.py \ - --ignore W291,W293,E226,W504 - -[testenv:build] -basepython = python3 -deps = stdeb - setuptools-version-command -commands = python3 setup.py --command-packages=stdeb.command bdist_deb +deps = flake8 +commands = + flake8 osm_lcm/ setup.py + + +####################################################################################### +[testenv:pylint] +deps = {[testenv]deps} + -r{toxinidir}/requirements-dev.txt + -r{toxinidir}/requirements-test.txt + pylint +commands = + - pylint -E osm_lcm + + +####################################################################################### +[testenv:safety] +setenv = + LC_ALL=C.UTF-8 + LANG=C.UTF-8 +deps = {[testenv]deps} + safety +commands = + - safety check --full-report + + +####################################################################################### +[testenv:pip-compile] +deps = pip-tools==5.5.0 +commands = + - sh -c 'for file in requirements*.in ; do pip-compile -rU --no-header $file ;\ + out=`echo $file | sed "s/.in/.txt/"` ; \ + head -16 tox.ini >> $out ;\ + done' +whitelist_externals = sh + + +####################################################################################### +[testenv:dist] +deps = {[testenv]deps} + -r{toxinidir}/requirements-dist.txt + +# In the commands, we copy the requirements.txt to be presented as a source file (.py) +# so it gets included in the .deb package for others to consume +commands = + sh -c 'cp requirements.txt osm_lcm/requirements.txt' + python3 setup.py --command-packages=stdeb.command sdist_dsc + sh -c 'cd deb_dist/osm-lcm*/ && dpkg-buildpackage -rfakeroot -uc -us' + sh -c 'rm osm_lcm/requirements.txt' +whitelist_externals = sh +####################################################################################### +[flake8] +ignore = + W291, + W293, + W503, + E123, + E125, + E226, + E241, +# Temporary 
until code can be reformatted + E121, + E122, + E203, + E303, + E501 + W504 +exclude = + .git, + __pycache__, + .tox, +max-line-length = 120 +show-source = True +builtins = _ -- GitLab From dbcdf5ce78a34db9ae0aa7a24121d14e9cc4aa22 Mon Sep 17 00:00:00 2001 From: beierlm Date: Mon, 19 Apr 2021 15:31:57 -0400 Subject: [PATCH 20/35] Adding GitLab Scanner Adds gitlab-ci yaml file to enable security scans in GitLab mirror Change-Id: Ie660d19b4e9a834abe38589e5c32447531345d63 Signed-off-by: beierlm --- .gitlab-ci.yml | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) create mode 100644 .gitlab-ci.yml diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml new file mode 100644 index 0000000..eb9750e --- /dev/null +++ b/.gitlab-ci.yml @@ -0,0 +1,17 @@ +# Copyright Contributors to ETSI OSM +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +include: + - project: 'devops/cicd' + file: '/templates/osm-base.yml' -- GitLab From 1900848f8ab8e046f5184da5a2763e22ab7ba648 Mon Sep 17 00:00:00 2001 From: lloretgalleg Date: Mon, 19 Apr 2021 11:40:18 +0000 Subject: [PATCH 21/35] Fixed bug 1476: VLD params (ip-profile) inside a NS not used Change-Id: I3f330610659a1b627dd74bfd9e1dd4d722ebf7d2 Signed-off-by: lloretgalleg --- osm_lcm/ns.py | 23 ++++++++++++++++++----- 1 file changed, 18 insertions(+), 5 deletions(-) diff --git a/osm_lcm/ns.py b/osm_lcm/ns.py index 5b0b7b0..1bcf4c7 100644 --- a/osm_lcm/ns.py +++ b/osm_lcm/ns.py @@ -698,12 +698,25 @@ class NsLcm(LcmBase): # check at nsd descriptor, if there is an ip-profile vld_params = {} - virtual_link_profiles = get_virtual_link_profiles(nsd) + nsd_vlp = find_in_list( + get_virtual_link_profiles(nsd), + lambda a_link_profile: a_link_profile["virtual-link-desc-id"] == vld["id"]) + if nsd_vlp and nsd_vlp.get("virtual-link-protocol-data") and \ + nsd_vlp["virtual-link-protocol-data"].get("l3-protocol-data"): + ip_profile_source_data = nsd_vlp["virtual-link-protocol-data"]["l3-protocol-data"] + ip_profile_dest_data = {} + if "ip-version" in ip_profile_source_data: + ip_profile_dest_data["ip-version"] = ip_profile_source_data["ip-version"] + if "cidr" in ip_profile_source_data: + ip_profile_dest_data["subnet-address"] = ip_profile_source_data["cidr"] + if "gateway-ip" in ip_profile_source_data: + ip_profile_dest_data["gateway-address"] = ip_profile_source_data["gateway-ip"] + if "dhcp-enabled" in ip_profile_source_data: + ip_profile_dest_data["dhcp-params"] = { + "enabled": ip_profile_source_data["dhcp-enabled"] + } + vld_params["ip-profile"] = ip_profile_dest_data - for vlp in virtual_link_profiles: - ip_profile = find_in_list(nsd["ip-profiles"], - lambda profile: profile["name"] == vlp["ip-profile-ref"]) - vld_params["ip-profile"] = ip_profile["ip-profile-params"] # update vld_params with instantiation params vld_instantiation_params = find_in_list(get_iterable(ns_params, "vld"), lambda a_vld: a_vld["name"] in (vld["name"], vld["id"])) -- GitLab From d2753c3f33a69ebc708d46f7e1561d891e362afc Mon Sep 17 00:00:00 2001 From: beierlm Date: Thu, 22 Apr 2021 12:58:58 -0400 Subject: 
[PATCH 22/35] Including upstream requirements Now that internal OSM software does not specify python modules in setup.py, we need to include the requirements.txt from the module in the requirements-dev.in to generate the full list properly. Change-Id: Ieebd667829444217d1d38e05261d6f50b9f0d180 Signed-off-by: beierlm --- requirements-dev.in | 7 ++- requirements-dev.txt | 133 +++++++++++++++++++++++++++++++------------ 2 files changed, 101 insertions(+), 39 deletions(-) diff --git a/requirements-dev.in b/requirements-dev.in index 823c52c..ec714a2 100644 --- a/requirements-dev.in +++ b/requirements-dev.in @@ -13,5 +13,8 @@ # See the License for the specific language governing permissions and # limitations under the License. --e git+https://osm.etsi.org/gerrit/osm/common.git@master#egg=osm-common --e git+https://osm.etsi.org/gerrit/osm/N2VC.git@master#egg=n2vc +git+https://osm.etsi.org/gerrit/osm/common.git@master#egg=osm-common +-r https://osm.etsi.org/gitweb/?p=osm/common.git;a=blob_plain;f=requirements.txt;hb=master + +git+https://osm.etsi.org/gerrit/osm/N2VC.git@master#egg=n2vc +-r https://osm.etsi.org/gitweb/?p=osm/N2VC.git;a=blob_plain;f=requirements.txt;hb=master diff --git a/requirements-dev.txt b/requirements-dev.txt index 459afe0..e9e1d38 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -1,99 +1,145 @@ --e git+https://osm.etsi.org/gerrit/osm/N2VC.git@master#egg=n2vc - # via -r requirements-dev.in --e git+https://osm.etsi.org/gerrit/osm/common.git@master#egg=osm-common - # via -r requirements-dev.in aiokafka==0.7.0 - # via osm-common + # via -r https://osm.etsi.org/gitweb/?p=osm/common.git;a=blob_plain;f=requirements.txt;hb=master bcrypt==3.2.0 - # via paramiko + # via + # -r https://osm.etsi.org/gitweb/?p=osm/N2VC.git;a=blob_plain;f=requirements.txt;hb=master + # paramiko cachetools==4.2.1 - # via google-auth + # via + # -r https://osm.etsi.org/gitweb/?p=osm/N2VC.git;a=blob_plain;f=requirements.txt;hb=master + # google-auth certifi==2020.12.5 # via + # -r https://osm.etsi.org/gitweb/?p=osm/N2VC.git;a=blob_plain;f=requirements.txt;hb=master # kubernetes # requests cffi==1.14.5 # via + # -r https://osm.etsi.org/gitweb/?p=osm/N2VC.git;a=blob_plain;f=requirements.txt;hb=master # bcrypt # cryptography # pynacl chardet==4.0.0 - # via requests + # via + # -r https://osm.etsi.org/gitweb/?p=osm/N2VC.git;a=blob_plain;f=requirements.txt;hb=master + # requests cryptography==3.4.7 - # via paramiko + # via + # -r https://osm.etsi.org/gitweb/?p=osm/N2VC.git;a=blob_plain;f=requirements.txt;hb=master + # paramiko dataclasses==0.6 - # via osm-common + # via -r https://osm.etsi.org/gitweb/?p=osm/common.git;a=blob_plain;f=requirements.txt;hb=master google-auth==1.28.0 - # via kubernetes + # via + # -r https://osm.etsi.org/gitweb/?p=osm/N2VC.git;a=blob_plain;f=requirements.txt;hb=master + # kubernetes idna==2.10 - # via requests + # via + # -r https://osm.etsi.org/gitweb/?p=osm/N2VC.git;a=blob_plain;f=requirements.txt;hb=master + # requests juju==2.8.4 - # via n2vc + # via -r https://osm.etsi.org/gitweb/?p=osm/N2VC.git;a=blob_plain;f=requirements.txt;hb=master jujubundlelib==0.5.6 - # via theblues + # via + # -r https://osm.etsi.org/gitweb/?p=osm/N2VC.git;a=blob_plain;f=requirements.txt;hb=master + # theblues kafka-python==2.0.2 - # via aiokafka + # via + # -r https://osm.etsi.org/gitweb/?p=osm/common.git;a=blob_plain;f=requirements.txt;hb=master + # aiokafka kubernetes==10.0.1 - # via n2vc + # via -r https://osm.etsi.org/gitweb/?p=osm/N2VC.git;a=blob_plain;f=requirements.txt;hb=master 
macaroonbakery==1.3.1 # via + # -r https://osm.etsi.org/gitweb/?p=osm/N2VC.git;a=blob_plain;f=requirements.txt;hb=master # juju # theblues mypy-extensions==0.4.3 - # via typing-inspect + # via + # -r https://osm.etsi.org/gitweb/?p=osm/N2VC.git;a=blob_plain;f=requirements.txt;hb=master + # typing-inspect +git+https://osm.etsi.org/gerrit/osm/N2VC.git@master#egg=n2vc + # via -r requirements-dev.in oauthlib==3.1.0 - # via requests-oauthlib + # via + # -r https://osm.etsi.org/gitweb/?p=osm/N2VC.git;a=blob_plain;f=requirements.txt;hb=master + # requests-oauthlib +git+https://osm.etsi.org/gerrit/osm/common.git@master#egg=osm-common + # via -r requirements-dev.in paramiko==2.7.2 - # via juju + # via + # -r https://osm.etsi.org/gitweb/?p=osm/N2VC.git;a=blob_plain;f=requirements.txt;hb=master + # juju protobuf==3.15.6 - # via macaroonbakery + # via + # -r https://osm.etsi.org/gitweb/?p=osm/N2VC.git;a=blob_plain;f=requirements.txt;hb=master + # macaroonbakery pyasn1-modules==0.2.8 - # via google-auth + # via + # -r https://osm.etsi.org/gitweb/?p=osm/N2VC.git;a=blob_plain;f=requirements.txt;hb=master + # google-auth pyasn1==0.4.8 # via + # -r https://osm.etsi.org/gitweb/?p=osm/N2VC.git;a=blob_plain;f=requirements.txt;hb=master # juju - # n2vc # pyasn1-modules # rsa pycparser==2.20 - # via cffi + # via + # -r https://osm.etsi.org/gitweb/?p=osm/N2VC.git;a=blob_plain;f=requirements.txt;hb=master + # cffi pycrypto==2.6.1 - # via osm-common + # via -r https://osm.etsi.org/gitweb/?p=osm/common.git;a=blob_plain;f=requirements.txt;hb=master pymacaroons==0.13.0 - # via macaroonbakery + # via + # -r https://osm.etsi.org/gitweb/?p=osm/N2VC.git;a=blob_plain;f=requirements.txt;hb=master + # macaroonbakery pymongo==3.11.3 - # via osm-common + # via -r https://osm.etsi.org/gitweb/?p=osm/common.git;a=blob_plain;f=requirements.txt;hb=master pynacl==1.4.0 # via + # -r https://osm.etsi.org/gitweb/?p=osm/N2VC.git;a=blob_plain;f=requirements.txt;hb=master # macaroonbakery # paramiko # pymacaroons pyrfc3339==1.1 # via + # -r https://osm.etsi.org/gitweb/?p=osm/N2VC.git;a=blob_plain;f=requirements.txt;hb=master # juju # macaroonbakery python-dateutil==2.8.1 - # via kubernetes + # via + # -r https://osm.etsi.org/gitweb/?p=osm/N2VC.git;a=blob_plain;f=requirements.txt;hb=master + # kubernetes pytz==2021.1 - # via pyrfc3339 + # via + # -r https://osm.etsi.org/gitweb/?p=osm/N2VC.git;a=blob_plain;f=requirements.txt;hb=master + # pyrfc3339 pyyaml==5.4.1 # via + # -r https://osm.etsi.org/gitweb/?p=osm/N2VC.git;a=blob_plain;f=requirements.txt;hb=master + # -r https://osm.etsi.org/gitweb/?p=osm/common.git;a=blob_plain;f=requirements.txt;hb=master # juju # jujubundlelib # kubernetes - # osm-common requests-oauthlib==1.3.0 - # via kubernetes + # via + # -r https://osm.etsi.org/gitweb/?p=osm/N2VC.git;a=blob_plain;f=requirements.txt;hb=master + # kubernetes requests==2.25.1 # via + # -r https://osm.etsi.org/gitweb/?p=osm/N2VC.git;a=blob_plain;f=requirements.txt;hb=master # kubernetes # macaroonbakery # requests-oauthlib # theblues rsa==4.7.2 - # via google-auth + # via + # -r https://osm.etsi.org/gitweb/?p=osm/N2VC.git;a=blob_plain;f=requirements.txt;hb=master + # google-auth six==1.15.0 # via + # -r https://osm.etsi.org/gitweb/?p=osm/N2VC.git;a=blob_plain;f=requirements.txt;hb=master # bcrypt # google-auth # kubernetes @@ -104,21 +150,34 @@ six==1.15.0 # python-dateutil # websocket-client theblues==0.5.2 - # via juju + # via + # -r https://osm.etsi.org/gitweb/?p=osm/N2VC.git;a=blob_plain;f=requirements.txt;hb=master + # juju 
toposort==1.6 - # via juju + # via + # -r https://osm.etsi.org/gitweb/?p=osm/N2VC.git;a=blob_plain;f=requirements.txt;hb=master + # juju typing-extensions==3.7.4.3 - # via typing-inspect + # via + # -r https://osm.etsi.org/gitweb/?p=osm/N2VC.git;a=blob_plain;f=requirements.txt;hb=master + # typing-inspect typing-inspect==0.6.0 - # via juju + # via + # -r https://osm.etsi.org/gitweb/?p=osm/N2VC.git;a=blob_plain;f=requirements.txt;hb=master + # juju urllib3==1.26.4 # via + # -r https://osm.etsi.org/gitweb/?p=osm/N2VC.git;a=blob_plain;f=requirements.txt;hb=master # kubernetes # requests websocket-client==0.58.0 - # via kubernetes + # via + # -r https://osm.etsi.org/gitweb/?p=osm/N2VC.git;a=blob_plain;f=requirements.txt;hb=master + # kubernetes websockets==7.0 - # via juju + # via + # -r https://osm.etsi.org/gitweb/?p=osm/N2VC.git;a=blob_plain;f=requirements.txt;hb=master + # juju # The following packages are considered to be unsafe in a requirements file: # setuptools -- GitLab From c1fe90adf8ed0d671342c617fed7184629c7003e Mon Sep 17 00:00:00 2001 From: David Garcia Date: Wed, 31 Mar 2021 19:12:02 +0200 Subject: [PATCH 23/35] Feature 10239: Distributed VCA - Handle VCATopic: https://osm.etsi.org/gerrit/#/c/osm/NBI/+/10574/ - Pass vca_id to calls in N2VC, so N2VC can know to which VCA it needs to talk Depends on the following patch: https://osm.etsi.org/gerrit/#/c/osm/N2VC/+/10616/ Change-Id: I080c1aab94f70de83f2d33def74ccd03450dbdd6 Signed-off-by: David Garcia --- osm_lcm/lcm.py | 16 ++- osm_lcm/lcm_helm_conn.py | 6 +- osm_lcm/lcm_utils.py | 4 +- osm_lcm/ns.py | 243 ++++++++++++++++++++++++---------- osm_lcm/tests/test_vim_sdn.py | 160 ++++++++++++++++++++++ osm_lcm/vim_sdn.py | 150 ++++++++++++++++++++- requirements-dev.txt | 13 +- 7 files changed, 512 insertions(+), 80 deletions(-) create mode 100644 osm_lcm/tests/test_vim_sdn.py diff --git a/osm_lcm/lcm.py b/osm_lcm/lcm.py index 659a57c..ebfca7e 100644 --- a/osm_lcm/lcm.py +++ b/osm_lcm/lcm.py @@ -104,7 +104,7 @@ class Lcm: self.config["ro_config"]["uri"] = self.config["ro_config"]["uri"][index+1] self.loop = loop or asyncio.get_event_loop() - self.ns = self.netslice = self.vim = self.wim = self.sdn = self.k8scluster = self.k8srepo = None + self.ns = self.netslice = self.vim = self.wim = self.sdn = self.k8scluster = self.vca = self.k8srepo = None # logging log_format_simple = "%(asctime)s %(levelname)s %(name)s %(filename)s:%(lineno)s %(message)s" @@ -309,6 +309,17 @@ class Lcm: task = asyncio.ensure_future(self.k8scluster.delete(params, order_id)) self.lcm_tasks.register("k8scluster", k8scluster_id, order_id, "k8scluster_delete", task) return + elif topic == "vca": + if command == "create" or command == "created": + vca_id = params.get("_id") + task = asyncio.ensure_future(self.vca.create(params, order_id)) + self.lcm_tasks.register("vca", vca_id, order_id, "vca_create", task) + return + elif command == "delete" or command == "deleted": + vca_id = params.get("_id") + task = asyncio.ensure_future(self.vca.delete(params, order_id)) + self.lcm_tasks.register("vca", vca_id, order_id, "vca_delete", task) + return elif topic == "k8srepo": if command == "create" or command == "created": k8srepo_id = params.get("_id") @@ -486,7 +497,7 @@ class Lcm: self.first_start = True while self.consecutive_errors < 10: try: - topics = ("ns", "vim_account", "wim_account", "sdn", "nsi", "k8scluster", "k8srepo", "pla") + topics = ("ns", "vim_account", "wim_account", "sdn", "nsi", "k8scluster", "vca", "k8srepo", "pla") topics_admin = ("admin", ) await 
asyncio.gather( self.msg.aioread(topics, self.loop, self.kafka_read_callback, from_beginning=True), @@ -522,6 +533,7 @@ class Lcm: self.wim = vim_sdn.WimLcm(self.msg, self.lcm_tasks, self.config, self.loop) self.sdn = vim_sdn.SdnLcm(self.msg, self.lcm_tasks, self.config, self.loop) self.k8scluster = vim_sdn.K8sClusterLcm(self.msg, self.lcm_tasks, self.config, self.loop) + self.vca = vim_sdn.VcaLcm(self.msg, self.lcm_tasks, self.config, self.loop) self.k8srepo = vim_sdn.K8sRepoLcm(self.msg, self.lcm_tasks, self.config, self.loop) # configure tsdb prometheus diff --git a/osm_lcm/lcm_helm_conn.py b/osm_lcm/lcm_helm_conn.py index 34e4915..a13824d 100644 --- a/osm_lcm/lcm_helm_conn.py +++ b/osm_lcm/lcm_helm_conn.py @@ -88,8 +88,6 @@ class LCMHelmConn(N2VCConnector, LcmBase): def __init__(self, log: object = None, loop: object = None, - url: str = None, - username: str = None, vca_config: dict = None, on_update_db=None, ): """ @@ -104,14 +102,12 @@ class LCMHelmConn(N2VCConnector, LcmBase): self, log=log, loop=loop, - url=url, - username=username, - vca_config=vca_config, on_update_db=on_update_db, db=self.db, fs=self.fs ) + self.vca_config = vca_config self.log.debug("Initialize helm N2VC connector") self.log.debug("initial vca_config: {}".format(vca_config)) diff --git a/osm_lcm/lcm_utils.py b/osm_lcm/lcm_utils.py index a05e5ac..a1569c1 100644 --- a/osm_lcm/lcm_utils.py +++ b/osm_lcm/lcm_utils.py @@ -146,7 +146,7 @@ class TaskRegistry(LcmBase): # NS/NSI: "services" VIM/WIM/SDN: "accounts" topic_service_list = ['ns', 'nsi'] - topic_account_list = ['vim', 'wim', 'sdn', 'k8scluster', 'k8srepo'] + topic_account_list = ['vim', 'wim', 'sdn', 'k8scluster', 'vca', 'k8srepo'] # Map topic to InstanceID topic2instid_dict = { @@ -161,6 +161,7 @@ class TaskRegistry(LcmBase): 'wim': 'wim_accounts', 'sdn': 'sdns', 'k8scluster': 'k8sclusters', + 'vca': 'vca', 'k8srepo': 'k8srepos'} def __init__(self, worker_id=None, logger=None): @@ -171,6 +172,7 @@ class TaskRegistry(LcmBase): "wim_account": {}, "sdn": {}, "k8scluster": {}, + "vca": {}, "k8srepo": {}, } self.worker_id = worker_id diff --git a/osm_lcm/ns.py b/osm_lcm/ns.py index 1bcf4c7..0f5c6ab 100644 --- a/osm_lcm/ns.py +++ b/osm_lcm/ns.py @@ -97,9 +97,6 @@ class NsLcm(LcmBase): self.n2vc = N2VCJujuConnector( log=self.logger, loop=self.loop, - url='{}:{}'.format(self.vca_config['host'], self.vca_config['port']), - username=self.vca_config.get('user', None), - vca_config=self.vca_config, on_update_db=self._on_update_n2vc_db, fs=self.fs, db=self.db @@ -108,8 +105,6 @@ class NsLcm(LcmBase): self.conn_helm_ee = LCMHelmConn( log=self.logger, loop=self.loop, - url=None, - username=None, vca_config=self.vca_config, on_update_db=self._on_update_n2vc_db ) @@ -138,7 +133,6 @@ class NsLcm(LcmBase): log=self.logger, loop=self.loop, on_update_db=self._on_update_k8s_db, - vca_config=self.vca_config, fs=self.fs, db=self.db ) @@ -200,7 +194,7 @@ class NsLcm(LcmBase): except Exception as e: self.logger.warn('Cannot write database RO deployment for ns={} -> {}'.format(nsrs_id, e)) - async def _on_update_n2vc_db(self, table, filter, path, updated_data): + async def _on_update_n2vc_db(self, table, filter, path, updated_data, vca_id=None): # remove last dot from path (if exists) if path.endswith('.'): @@ -217,12 +211,12 @@ class NsLcm(LcmBase): current_ns_status = nsr.get('nsState') # get vca status for NS - status_dict = await self.n2vc.get_status(namespace='.' + nsr_id, yaml_format=False) + status_dict = await self.n2vc.get_status(namespace='.' 
+ nsr_id, yaml_format=False, vca_id=vca_id) # vcaStatus db_dict = dict() db_dict['vcaStatus'] = status_dict - await self.n2vc.update_vca_status(db_dict['vcaStatus']) + await self.n2vc.update_vca_status(db_dict['vcaStatus'], vca_id=vca_id) # update configurationStatus for this VCA try: @@ -289,7 +283,7 @@ class NsLcm(LcmBase): except Exception as e: self.logger.warn('Error updating NS state for ns={}: {}'.format(nsr_id, e)) - async def _on_update_k8s_db(self, cluster_uuid, kdu_instance, filter=None): + async def _on_update_k8s_db(self, cluster_uuid, kdu_instance, filter=None, vca_id=None): """ Updating vca status in NSR record :param cluster_uuid: UUID of a k8s cluster @@ -305,15 +299,22 @@ class NsLcm(LcmBase): nsr_id = filter.get('_id') # get vca status for NS - vca_status = await self.k8sclusterjuju.status_kdu(cluster_uuid, - kdu_instance, - complete_status=True, - yaml_format=False) + vca_status = await self.k8sclusterjuju.status_kdu( + cluster_uuid, + kdu_instance, + complete_status=True, + yaml_format=False, + vca_id=vca_id, + ) # vcaStatus db_dict = dict() db_dict['vcaStatus'] = {nsr_id: vca_status} - await self.k8sclusterjuju.update_vca_status(db_dict['vcaStatus'], kdu_instance) + await self.k8sclusterjuju.update_vca_status( + db_dict['vcaStatus'], + kdu_instance, + vca_id=vca_id, + ) # write to database self.update_db_2("nsrs", nsr_id, db_dict) @@ -1179,6 +1180,12 @@ class NsLcm(LcmBase): raise LcmException("Configuration aborted because dependent charm/s timeout") + def get_vca_id(self, db_vnfr: dict, db_nsr: dict): + return ( + deep_get(db_vnfr, ("vca-id",)) or + deep_get(db_nsr, ("instantiate_params", "vcaId")) + ) + async def instantiate_N2VC(self, logging_text, vca_index, nsi_id, db_nsr, db_vnfr, vdu_id, kdu_name, vdu_index, config_descriptor, deploy_params, base_folder, nslcmop_id, stage, vca_type, vca_name, ee_config_descriptor): @@ -1248,12 +1255,7 @@ class NsLcm(LcmBase): # find old ee_id if exists ee_id = vca_deployed.get("ee_id") - vim_account_id = ( - deep_get(db_vnfr, ("vim-account-id",)) or - deep_get(deploy_params, ("OSM", "vim_account_id")) - ) - vca_cloud, vca_cloud_credential = self.get_vca_cloud_and_credentials(vim_account_id) - vca_k8s_cloud, vca_k8s_cloud_credential = self.get_vca_k8s_cloud_and_credentials(vim_account_id) + vca_id = self.get_vca_id(db_vnfr, db_nsr) # create or register execution environment in VCA if vca_type in ("lxc_proxy_charm", "k8s_proxy_charm", "helm", "helm-v3"): @@ -1276,8 +1278,7 @@ class NsLcm(LcmBase): namespace=namespace, artifact_path=artifact_path, db_dict=db_dict, - cloud_name=vca_k8s_cloud, - credential_name=vca_k8s_cloud_credential, + vca_id=vca_id, ) elif vca_type == "helm" or vca_type == "helm-v3": ee_id, credentials = await self.vca_map[vca_type].create_execution_environment( @@ -1293,8 +1294,7 @@ class NsLcm(LcmBase): namespace=namespace, reuse_ee_id=ee_id, db_dict=db_dict, - cloud_name=vca_cloud, - credential_name=vca_cloud_credential, + vca_id=vca_id, ) elif vca_type == "native_charm": @@ -1333,8 +1333,7 @@ class NsLcm(LcmBase): credentials=credentials, namespace=namespace, db_dict=db_dict, - cloud_name=vca_cloud, - credential_name=vca_cloud_credential, + vca_id=vca_id, ) # for compatibility with MON/POL modules, the need model and application name at database @@ -1388,6 +1387,7 @@ class NsLcm(LcmBase): db_dict=db_dict, config=config, num_units=num_units, + vca_id=vca_id, ) # write in db flag of configuration_sw already installed @@ -1395,7 +1395,7 @@ class NsLcm(LcmBase): # add relations for this VCA (wait for other 
peers related with this VCA) await self._add_vca_relations(logging_text=logging_text, nsr_id=nsr_id, - vca_index=vca_index, vca_type=vca_type) + vca_index=vca_index, vca_id=vca_id, vca_type=vca_type) # if SSH access is required, then get execution environment SSH public # if native charm we have waited already to VM be UP @@ -1408,7 +1408,11 @@ class NsLcm(LcmBase): # Needed to inject a ssh key user = deep_get(config_descriptor, ("config-access", "ssh-access", "default-user")) step = "Install configuration Software, getting public ssh key" - pub_key = await self.vca_map[vca_type].get_ee_ssh_public__key(ee_id=ee_id, db_dict=db_dict) + pub_key = await self.vca_map[vca_type].get_ee_ssh_public__key( + ee_id=ee_id, + db_dict=db_dict, + vca_id=vca_id + ) step = "Insert public key into VM user={} ssh_key={}".format(user, pub_key) else: @@ -1476,7 +1480,8 @@ class NsLcm(LcmBase): ee_id=ee_id, primitive_name=initial_config_primitive["name"], params_dict=primitive_params_, - db_dict=db_dict + db_dict=db_dict, + vca_id=vca_id, ) # Once some primitive has been exec, check and write at db if it needs to exec terminated primitives if check_if_terminated_needed: @@ -2063,8 +2068,15 @@ class NsLcm(LcmBase): self.logger.debug(logging_text + "Exit") self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_instantiate") - async def _add_vca_relations(self, logging_text, nsr_id, vca_index: int, - timeout: int = 3600, vca_type: str = None) -> bool: + async def _add_vca_relations( + self, + logging_text, + nsr_id, + vca_index: int, + timeout: int = 3600, + vca_type: str = None, + vca_id: str = None, + ) -> bool: # steps: # 1. find all relations for this VCA @@ -2147,7 +2159,9 @@ class NsLcm(LcmBase): ee_id_1=from_vca_ee_id, ee_id_2=to_vca_ee_id, endpoint_1=from_vca_endpoint, - endpoint_2=to_vca_endpoint) + endpoint_2=to_vca_endpoint, + vca_id=vca_id, + ) # remove entry from relations list ns_relations.remove(r) else: @@ -2193,7 +2207,9 @@ class NsLcm(LcmBase): ee_id_1=from_vca_ee_id, ee_id_2=to_vca_ee_id, endpoint_1=from_vca_endpoint, - endpoint_2=to_vca_endpoint) + endpoint_2=to_vca_endpoint, + vca_id=vca_id, + ) # remove entry from relations list vnf_relations.remove(r) else: @@ -2230,7 +2246,8 @@ class NsLcm(LcmBase): return False async def _install_kdu(self, nsr_id: str, nsr_db_path: str, vnfr_data: dict, kdu_index: int, kdud: dict, - vnfd: dict, k8s_instance_info: dict, k8params: dict = None, timeout: int = 600): + vnfd: dict, k8s_instance_info: dict, k8params: dict = None, timeout: int = 600, + vca_id: str = None): try: k8sclustertype = k8s_instance_info["k8scluster-type"] @@ -2255,6 +2272,7 @@ class NsLcm(LcmBase): kdu_name=k8s_instance_info["kdu-name"], namespace=k8s_instance_info["namespace"], kdu_instance=kdu_instance, + vca_id=vca_id, ) self.update_db_2("nsrs", nsr_id, {nsr_db_path + ".kdu-instance": kdu_instance}) @@ -2306,8 +2324,11 @@ class NsLcm(LcmBase): cluster_uuid=k8s_instance_info["k8scluster-uuid"], kdu_instance=kdu_instance, primitive_name=initial_config_primitive["name"], - params=primitive_params_, db_dict=db_dict_install), - timeout=timeout) + params=primitive_params_, db_dict=db_dict_install, + vca_id=vca_id, + ), + timeout=timeout + ) except Exception as e: # Prepare update db with error and raise exception @@ -2378,6 +2399,7 @@ class NsLcm(LcmBase): updated_v3_cluster_list = [] for vnfr_data in db_vnfrs.values(): + vca_id = self.get_vca_id(vnfr_data, {}) for kdu_index, kdur in enumerate(get_iterable(vnfr_data, "kdur")): # Step 0: Prepare and set parameters desc_params = 
parse_yaml_strings(kdur.get("additionalParams")) @@ -2455,7 +2477,7 @@ class NsLcm(LcmBase): vnfd_with_id = find_in_list(db_vnfds, lambda vnf: vnf["_id"] == vnfd_id) task = asyncio.ensure_future( self._install_kdu(nsr_id, db_path, vnfr_data, kdu_index, kdud, vnfd_with_id, - k8s_instance_info, k8params=desc_params, timeout=600)) + k8s_instance_info, k8params=desc_params, timeout=600, vca_id=vca_id)) self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "instantiate_KDU-{}".format(index), task) task_instantiation_info[task] = "Deploying KDU {}".format(kdur["kdu-name"]) @@ -2788,8 +2810,18 @@ class NsLcm(LcmBase): if vca["member-vnf-index"] == vnf_index and vca["vdu_id"] == vdu_id: return vca["ee_id"] - async def destroy_N2VC(self, logging_text, db_nslcmop, vca_deployed, config_descriptor, - vca_index, destroy_ee=True, exec_primitives=True, scaling_in=False): + async def destroy_N2VC( + self, + logging_text, + db_nslcmop, + vca_deployed, + config_descriptor, + vca_index, + destroy_ee=True, + exec_primitives=True, + scaling_in=False, + vca_id: str = None, + ): """ Execute the terminate primitives and destroy the execution environment (if destroy_ee=False :param logging_text: @@ -2840,9 +2872,12 @@ class NsLcm(LcmBase): mapped_primitive_params) # Sub-operations: Call _ns_execute_primitive() instead of action() try: - result, result_detail = await self._ns_execute_primitive(vca_deployed["ee_id"], primitive, - mapped_primitive_params, - vca_type=vca_type) + result, result_detail = await self._ns_execute_primitive( + vca_deployed["ee_id"], primitive, + mapped_primitive_params, + vca_type=vca_type, + vca_id=vca_id, + ) except LcmException: # this happens when VCA is not deployed. In this case it is not needed to terminate continue @@ -2858,13 +2893,21 @@ class NsLcm(LcmBase): await self.prometheus.update(remove_jobs=vca_deployed["prometheus_jobs"]) if destroy_ee: - await self.vca_map[vca_type].delete_execution_environment(vca_deployed["ee_id"], scaling_in=scaling_in) + await self.vca_map[vca_type].delete_execution_environment( + vca_deployed["ee_id"], + scaling_in=scaling_in, + vca_id=vca_id, + ) - async def _delete_all_N2VC(self, db_nsr: dict): + async def _delete_all_N2VC(self, db_nsr: dict, vca_id: str = None): self._write_all_config_status(db_nsr=db_nsr, status='TERMINATING') namespace = "." + db_nsr["_id"] try: - await self.n2vc.delete_namespace(namespace=namespace, total_timeout=self.timeout_charm_delete) + await self.n2vc.delete_namespace( + namespace=namespace, + total_timeout=self.timeout_charm_delete, + vca_id=vca_id, + ) except N2VCNotFound: # already deleted. Skip pass self._write_all_config_status(db_nsr=db_nsr, status='DELETED') @@ -3064,6 +3107,7 @@ class NsLcm(LcmBase): stage[1] = "Getting vnf descriptors from db." 
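+        # Index the vnfrs by member-vnf-index so that, while iterating the
+        # deployed VCAs below, the vca_id that applies to each execution
+        # environment can be resolved from its vnfr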
db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id}) + db_vnfrs_dict = {db_vnfr["member-vnf-index-ref"]: db_vnfr for db_vnfr in db_vnfrs_list} db_vnfds_from_id = {} db_vnfds_from_member_index = {} # Loop over VNFRs @@ -3086,6 +3130,8 @@ class NsLcm(LcmBase): for vca_index, vca in enumerate(get_iterable(nsr_deployed, "VCA")): config_descriptor = None + + vca_id = self.get_vca_id(db_vnfrs_dict[vca["member-vnf-index"]], db_nsr) if not vca or not vca.get("ee_id"): continue if not vca.get("member-vnf-index"): @@ -3109,8 +3155,17 @@ class NsLcm(LcmBase): # self.logger.debug(logging_text + "vca_index: {}, ee_id: {}, vca_type: {} destroy_ee: {}".format( # vca_index, vca.get("ee_id"), vca_type, destroy_ee)) task = asyncio.ensure_future( - self.destroy_N2VC(logging_text, db_nslcmop, vca, config_descriptor, vca_index, - destroy_ee, exec_terminate_primitives)) + self.destroy_N2VC( + logging_text, + db_nslcmop, + vca, + config_descriptor, + vca_index, + destroy_ee, + exec_terminate_primitives, + vca_id=vca_id, + ) + ) tasks_dict_info[task] = "Terminating VCA {}".format(vca.get("ee_id")) # wait for pending tasks of terminate primitives @@ -3129,8 +3184,13 @@ class NsLcm(LcmBase): if nsr_deployed.get("VCA"): stage[1] = "Deleting all execution environments." self.logger.debug(logging_text + stage[1]) - task_delete_ee = asyncio.ensure_future(asyncio.wait_for(self._delete_all_N2VC(db_nsr=db_nsr), - timeout=self.timeout_charm_delete)) + vca_id = self.get_vca_id({}, db_nsr) + task_delete_ee = asyncio.ensure_future( + asyncio.wait_for( + self._delete_all_N2VC(db_nsr=db_nsr, vca_id=vca_id), + timeout=self.timeout_charm_delete + ) + ) # task_delete_ee = asyncio.ensure_future(self.n2vc.delete_namespace(namespace="." + nsr_id)) tasks_dict_info[task_delete_ee] = "Terminating all VCA" @@ -3143,10 +3203,15 @@ class NsLcm(LcmBase): continue kdu_instance = kdu.get("kdu-instance") if kdu.get("k8scluster-type") in self.k8scluster_map: + # TODO: Uninstall kdu instances taking into account they could be deployed in different VIMs + vca_id = self.get_vca_id({}, db_nsr) task_delete_kdu_instance = asyncio.ensure_future( self.k8scluster_map[kdu["k8scluster-type"]].uninstall( cluster_uuid=kdu.get("k8scluster-uuid"), - kdu_instance=kdu_instance)) + kdu_instance=kdu_instance, + vca_id=vca_id, + ) + ) else: self.logger.error(logging_text + "Unknown k8s deployment type {}". 
format(kdu.get("k8scluster-type"))) @@ -3378,8 +3443,18 @@ class NsLcm(LcmBase): .format(member_vnf_index, vdu_id, kdu_name, vdu_count_index)) return ee_id, vca_type - async def _ns_execute_primitive(self, ee_id, primitive, primitive_params, retries=0, retries_interval=30, - timeout=None, vca_type=None, db_dict=None) -> (str, str): + async def _ns_execute_primitive( + self, + ee_id, + primitive, + primitive_params, + retries=0, + retries_interval=30, + timeout=None, + vca_type=None, + db_dict=None, + vca_id: str = None, + ) -> (str, str): try: if primitive == "config": primitive_params = {"params": primitive_params} @@ -3395,7 +3470,9 @@ class NsLcm(LcmBase): params_dict=primitive_params, progress_timeout=self.timeout_progress_primitive, total_timeout=self.timeout_primitive, - db_dict=db_dict), + db_dict=db_dict, + vca_id=vca_id, + ), timeout=timeout or self.timeout_primitive) # execution was OK break @@ -3429,10 +3506,11 @@ class NsLcm(LcmBase): self.logger.debug("Task ns={} action={} Enter".format(nsr_id, nslcmop_id)) db_nsr = self.db.get_one("nsrs", {"_id": nsr_id}) + vca_id = self.get_vca_id({}, db_nsr) if db_nsr['_admin']['deployed']['K8s']: for k8s_index, k8s in enumerate(db_nsr['_admin']['deployed']['K8s']): cluster_uuid, kdu_instance = k8s["k8scluster-uuid"], k8s["kdu-instance"] - await self._on_update_k8s_db(cluster_uuid, kdu_instance, filter={'_id': nsr_id}) + await self._on_update_k8s_db(cluster_uuid, kdu_instance, filter={'_id': nsr_id}, vca_id=vca_id) else: for vca_index, _ in enumerate(db_nsr['_admin']['deployed']['VCA']): table, filter = "nsrs", {"_id": nsr_id} @@ -3492,6 +3570,7 @@ class NsLcm(LcmBase): step = "Getting nsd from database" db_nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]}) + vca_id = self.get_vca_id(db_vnfr, db_nsr) # for backward compatibility if nsr_deployed and isinstance(nsr_deployed.get("VCA"), dict): nsr_deployed["VCA"] = list(nsr_deployed["VCA"].values()) @@ -3596,8 +3675,11 @@ class NsLcm(LcmBase): detailed_status = await asyncio.wait_for( self.k8scluster_map[kdu["k8scluster-type"]].status_kdu( cluster_uuid=kdu.get("k8scluster-uuid"), - kdu_instance=kdu.get("kdu-instance")), - timeout=timeout_ns_action) + kdu_instance=kdu.get("kdu-instance"), + vca_id=vca_id, + ), + timeout=timeout_ns_action + ) else: kdu_instance = kdu.get("kdu-instance") or "{}-{}".format(kdu["kdu-name"], nsr_id) params = self._map_primitive_params(config_primitive_desc, primitive_params, desc_params) @@ -3608,8 +3690,11 @@ class NsLcm(LcmBase): kdu_instance=kdu_instance, primitive_name=primitive_name, params=params, db_dict=db_dict, - timeout=timeout_ns_action), - timeout=timeout_ns_action) + timeout=timeout_ns_action, + vca_id=vca_id, + ), + timeout=timeout_ns_action + ) if detailed_status: nslcmop_operation_state = 'COMPLETED' @@ -3632,7 +3717,9 @@ class NsLcm(LcmBase): primitive_params=self._map_primitive_params(config_primitive_desc, primitive_params, desc_params), timeout=timeout_ns_action, vca_type=vca_type, - db_dict=db_dict) + db_dict=db_dict, + vca_id=vca_id, + ) db_nslcmop_update["detailed-status"] = detailed_status error_description_nslcmop = detailed_status if nslcmop_operation_state == "FAILED" else "" @@ -3744,6 +3831,8 @@ class NsLcm(LcmBase): step = "Getting vnfr from database" db_vnfr = self.db.get_one("vnfrs", {"member-vnf-index-ref": vnf_index, "nsr-id-ref": nsr_id}) + vca_id = self.get_vca_id(db_vnfr, db_nsr) + step = "Getting vnfd from database" db_vnfd = self.db.get_one("vnfds", {"_id": db_vnfr["vnfd-id"]}) @@ -3969,7 +4058,11 @@ class NsLcm(LcmBase): 
vdu_count_index=None, ee_descriptor_id=ee_descriptor_id) result, result_detail = await self._ns_execute_primitive( - ee_id, primitive_name, primitive_params, vca_type=vca_type) + ee_id, primitive_name, + primitive_params, + vca_type=vca_type, + vca_id=vca_id, + ) self.logger.debug(logging_text + "vnf_config_primitive={} Done with result {} {}".format( vnf_config_primitive, result, result_detail)) # Update operationState = COMPLETED | FAILED @@ -4017,11 +4110,22 @@ class NsLcm(LcmBase): operation_params = db_nslcmop.get("operationParams") or {} exec_terminate_primitives = (not operation_params.get("skip_terminate_primitives") and vca.get("needed_terminate")) - task = asyncio.ensure_future(asyncio.wait_for( - self.destroy_N2VC(logging_text, db_nslcmop, vca, config_descriptor, - vca_index, destroy_ee=True, - exec_primitives=exec_terminate_primitives, - scaling_in=True), timeout=self.timeout_charm_delete)) + task = asyncio.ensure_future( + asyncio.wait_for( + self.destroy_N2VC( + logging_text, + db_nslcmop, + vca, + config_descriptor, + vca_index, + destroy_ee=True, + exec_primitives=exec_terminate_primitives, + scaling_in=True, + vca_id=vca_id, + ), + timeout=self.timeout_charm_delete + ) + ) # wait before next removal await asyncio.sleep(30) tasks_dict_info[task] = "Terminating VCA {}".format(vca.get("ee_id")) @@ -4234,7 +4338,12 @@ class NsLcm(LcmBase): vdu_count_index=None, ee_descriptor_id=ee_descriptor_id) result, result_detail = await self._ns_execute_primitive( - ee_id, primitive_name, primitive_params, vca_type=vca_type) + ee_id, + primitive_name, + primitive_params, + vca_type=vca_type, + vca_id=vca_id, + ) self.logger.debug(logging_text + "vnf_config_primitive={} Done with result {} {}".format( vnf_config_primitive, result, result_detail)) # Update operationState = COMPLETED | FAILED diff --git a/osm_lcm/tests/test_vim_sdn.py b/osm_lcm/tests/test_vim_sdn.py new file mode 100644 index 0000000..f6b75e0 --- /dev/null +++ b/osm_lcm/tests/test_vim_sdn.py @@ -0,0 +1,160 @@ +# Copyright 2021 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
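+
+# Unit tests for the new VcaLcm class in osm_lcm.vim_sdn. The Database and
+# Filesystem singletons are patched out in setUp so that no real MongoDB or
+# storage backend is touched. AsyncMock below makes MagicMock awaitable,
+# since unittest.mock ships a native AsyncMock only from Python 3.8 onwards.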
+ +import asyncio +from unittest import TestCase +from unittest.mock import Mock, patch, MagicMock + + +from osm_common import msgbase +from osm_common.dbbase import DbException +from osm_lcm.vim_sdn import VcaLcm + + +class AsyncMock(MagicMock): + async def __call__(self, *args, **kwargs): + return super(AsyncMock, self).__call__(*args, **kwargs) + + +class TestVcaLcm(TestCase): + @patch("osm_lcm.lcm_utils.Database") + @patch("osm_lcm.lcm_utils.Filesystem") + def setUp(self, mock_filesystem, mock_database): + self.loop = asyncio.get_event_loop() + self.msg = Mock(msgbase.MsgBase()) + self.lcm_tasks = Mock() + self.config = {"database": {"driver": "mongo"}} + self.vca_lcm = VcaLcm(self.msg, self.lcm_tasks, self.config, self.loop) + self.vca_lcm.db = Mock() + self.vca_lcm.fs = Mock() + + def test_vca_lcm_create(self): + vca_content = {"op_id": "order-id", "_id": "id"} + db_vca = { + "_id": "vca-id", + "secret": "secret", + "cacert": "cacert", + "schema_version": "1.11", + } + order_id = "order-id" + self.lcm_tasks.lock_HA.return_value = True + self.vca_lcm.db.get_one.return_value = db_vca + self.vca_lcm.n2vc.validate_vca = AsyncMock() + self.vca_lcm.update_db_2 = Mock() + + self.loop.run_until_complete(self.vca_lcm.create(vca_content, order_id)) + + self.lcm_tasks.lock_HA.assert_called_with("vca", "create", "order-id") + self.vca_lcm.db.encrypt_decrypt_fields.assert_called_with( + db_vca, + "decrypt", + ["secret", "cacert"], + schema_version="1.11", + salt="vca-id", + ) + self.vca_lcm.update_db_2.assert_called_with( + "vca", + "id", + { + "_admin.operationalState": "ENABLED", + "_admin.detailed-status": "Connectivity: ok", + }, + ) + self.lcm_tasks.unlock_HA.assert_called_with( + "vca", + "create", + "order-id", + operationState="COMPLETED", + detailed_status="VCA validated", + ) + self.lcm_tasks.remove.assert_called_with("vca", "id", "order-id") + + def test_vca_lcm_create_exception(self): + vca_content = {"op_id": "order-id", "_id": "id"} + db_vca = { + "_id": "vca-id", + "secret": "secret", + "cacert": "cacert", + "schema_version": "1.11", + } + order_id = "order-id" + self.lcm_tasks.lock_HA.return_value = True + self.vca_lcm.db.get_one.return_value = db_vca + self.vca_lcm.n2vc.validate_vca = AsyncMock() + self.vca_lcm.n2vc.validate_vca.side_effect = Exception("failed") + self.vca_lcm.update_db_2 = Mock() + self.vca_lcm.update_db_2.side_effect = DbException("failed") + self.loop.run_until_complete(self.vca_lcm.create(vca_content, order_id)) + + self.lcm_tasks.lock_HA.assert_called_with("vca", "create", "order-id") + self.vca_lcm.db.encrypt_decrypt_fields.assert_called_with( + db_vca, + "decrypt", + ["secret", "cacert"], + schema_version="1.11", + salt="vca-id", + ) + self.vca_lcm.update_db_2.assert_called_with( + "vca", + "id", + { + "_admin.operationalState": "ERROR", + "_admin.detailed-status": "Failed with exception: failed", + }, + ) + self.lcm_tasks.unlock_HA.assert_not_called() + self.lcm_tasks.remove.assert_called_with("vca", "id", "order-id") + + def test_vca_lcm_delete(self): + vca_content = {"op_id": "order-id", "_id": "id"} + order_id = "order-id" + self.lcm_tasks.lock_HA.return_value = True + self.vca_lcm.update_db_2 = Mock() + + self.loop.run_until_complete(self.vca_lcm.delete(vca_content, order_id)) + + self.lcm_tasks.lock_HA.assert_called_with("vca", "delete", "order-id") + self.vca_lcm.db.del_one.assert_called_with("vca", {"_id": "id"}) + self.vca_lcm.update_db_2.assert_called_with("vca", "id", None) + self.lcm_tasks.unlock_HA.assert_called_with( + "vca", + "delete", + 
"order-id", + operationState="COMPLETED", + detailed_status="deleted", + ) + self.lcm_tasks.remove.assert_called_with("vca", "id", "order-id") + + def test_vca_lcm_delete_exception(self): + vca_content = {"op_id": "order-id", "_id": "id"} + order_id = "order-id" + self.lcm_tasks.lock_HA.return_value = True + self.vca_lcm.update_db_2 = Mock() + self.vca_lcm.db.del_one.side_effect = Exception("failed deleting") + self.vca_lcm.update_db_2.side_effect = DbException("failed") + + self.loop.run_until_complete(self.vca_lcm.delete(vca_content, order_id)) + + self.lcm_tasks.lock_HA.assert_called_with("vca", "delete", "order-id") + self.vca_lcm.db.del_one.assert_called_with("vca", {"_id": "id"}) + self.vca_lcm.update_db_2.assert_called_with( + "vca", + "id", + { + "_admin.operationalState": "ERROR", + "_admin.detailed-status": "Failed with exception: failed deleting", + }, + ) + self.lcm_tasks.unlock_HA.not_called() + self.lcm_tasks.remove.assert_called_with("vca", "id", "order-id") diff --git a/osm_lcm/vim_sdn.py b/osm_lcm/vim_sdn.py index 13b95c4..a1623ba 100644 --- a/osm_lcm/vim_sdn.py +++ b/osm_lcm/vim_sdn.py @@ -25,6 +25,7 @@ from osm_lcm.lcm_utils import LcmException, LcmBase, deep_get from n2vc.k8s_helm_conn import K8sHelmConnector from n2vc.k8s_helm3_conn import K8sHelm3Connector from n2vc.k8s_juju_conn import K8sJujuConnector +from n2vc.n2vc_juju_conn import N2VCJujuConnector from n2vc.exceptions import K8sException, N2VCException from osm_common.dbbase import DbException from copy import deepcopy @@ -937,7 +938,6 @@ class K8sClusterLcm(LcmBase): log=self.logger, loop=self.loop, on_update_db=None, - vca_config=self.vca_config, db=self.db, fs=self.fs ) @@ -975,8 +975,13 @@ class K8sClusterLcm(LcmBase): for task_name in ("helm-chart", "juju-bundle", "helm-chart-v3"): if init_target and task_name not in init_target: continue - task = asyncio.ensure_future(self.k8s_map[task_name].init_env(k8s_credentials, - reuse_cluster_uuid=k8scluster_id)) + task = asyncio.ensure_future( + self.k8s_map[task_name].init_env( + k8s_credentials, + reuse_cluster_uuid=k8scluster_id, + vca_id=db_k8scluster.get("vca_id"), + ) + ) pending_tasks.append(task) task2name[task] = task_name @@ -1089,7 +1094,11 @@ class K8sClusterLcm(LcmBase): if k8s_jb_id: # delete in reverse order of creation step = "Removing juju-bundle '{}'".format(k8s_jb_id) uninstall_sw = deep_get(db_k8scluster, ("_admin", "juju-bundle", "created")) or False - cluster_removed = await self.juju_k8scluster.reset(cluster_uuid=k8s_jb_id, uninstall_sw=uninstall_sw) + cluster_removed = await self.juju_k8scluster.reset( + cluster_uuid=k8s_jb_id, + uninstall_sw=uninstall_sw, + vca_id=db_k8scluster.get("vca_id"), + ) db_k8scluster_update["_admin.juju-bundle.id"] = None db_k8scluster_update["_admin.juju-bundle.operationalState"] = "DISABLED" @@ -1153,6 +1162,139 @@ class K8sClusterLcm(LcmBase): self.lcm_tasks.remove("k8scluster", k8scluster_id, order_id) +class VcaLcm(LcmBase): + timeout_create = 30 + + def __init__(self, msg, lcm_tasks, config, loop): + """ + Init, Connect to database, filesystem storage, and messaging + :param config: two level dictionary with configuration. 
Top level should contain 'database', 'storage', + :return: None + """ + + self.logger = logging.getLogger("lcm.vca") + self.loop = loop + self.lcm_tasks = lcm_tasks + + super().__init__(msg, self.logger) + + # create N2VC connector + self.n2vc = N2VCJujuConnector( + log=self.logger, + loop=self.loop, + fs=self.fs, + db=self.db + ) + + def _get_vca_by_id(self, vca_id: str) -> dict: + db_vca = self.db.get_one("vca", {"_id": vca_id}) + self.db.encrypt_decrypt_fields( + db_vca, + "decrypt", + ["secret", "cacert"], + schema_version=db_vca["schema_version"], salt=db_vca["_id"] + ) + return db_vca + + async def create(self, vca_content, order_id): + op_id = vca_content.pop("op_id", None) + if not self.lcm_tasks.lock_HA("vca", "create", op_id): + return + + vca_id = vca_content["_id"] + self.logger.debug("Task vca_create={} {}".format(vca_id, "Enter")) + + db_vca = None + db_vca_update = {} + + try: + self.logger.debug("Task vca_create={} {}".format(vca_id, "Getting vca from db")) + db_vca = self._get_vca_by_id(vca_id) + + task = asyncio.ensure_future( + asyncio.wait_for( + self.n2vc.validate_vca(db_vca["_id"]), + timeout=self.timeout_create, + ) + ) + + await asyncio.wait([task], return_when=asyncio.FIRST_COMPLETED) + if task.exception(): + raise task.exception() + self.logger.debug("Task vca_create={} {}".format(vca_id, "vca registered and validated successfully")) + db_vca_update["_admin.operationalState"] = "ENABLED" + db_vca_update["_admin.detailed-status"] = "Connectivity: ok" + operation_details = "VCA validated" + operation_state = "COMPLETED" + + self.logger.debug("Task vca_create={} {}".format(vca_id, "Done. Result: {}".format(operation_state))) + + except Exception as e: + error_msg = "Failed with exception: {}".format(e) + self.logger.error("Task vca_create={} {}".format(vca_id, error_msg)) + db_vca_update["_admin.operationalState"] = "ERROR" + db_vca_update["_admin.detailed-status"] = error_msg + operation_state = "FAILED" + operation_details = error_msg + finally: + try: + self.update_db_2("vca", vca_id, db_vca_update) + + # Register the operation and unlock + self.lcm_tasks.unlock_HA( + "vca", + "create", + op_id, + operationState=operation_state, + detailed_status=operation_details + ) + except DbException as e: + self.logger.error("Task vca_create={} {}".format(vca_id, "Cannot update database: {}".format(e))) + self.lcm_tasks.remove("vca", vca_id, order_id) + + async def delete(self, vca_content, order_id): + + # HA tasks and backward compatibility: + # If "vim_content" does not include "op_id", we a running a legacy NBI version. + # In such a case, HA is not supported by NBI, "op_id" is None, and lock_HA() will do nothing. + # Register "delete" task here for related future HA operations + op_id = vca_content.pop("op_id", None) + if not self.lcm_tasks.lock_HA("vca", "delete", op_id): + return + + db_vca_update = {} + vca_id = vca_content["_id"] + + try: + self.logger.debug("Task vca_delete={} {}".format(vca_id, "Deleting vca from db")) + self.db.del_one("vca", {"_id": vca_id}) + db_vca_update = None + operation_details = "deleted" + operation_state = "COMPLETED" + + self.logger.debug("Task vca_delete={} {}".format(vca_id, "Done. 
Result: {}".format(operation_state))) + except Exception as e: + error_msg = "Failed with exception: {}".format(e) + self.logger.error("Task vca_delete={} {}".format(vca_id, error_msg)) + db_vca_update["_admin.operationalState"] = "ERROR" + db_vca_update["_admin.detailed-status"] = error_msg + operation_state = "FAILED" + operation_details = error_msg + finally: + try: + self.update_db_2("vca", vca_id, db_vca_update) + self.lcm_tasks.unlock_HA( + "vca", + "delete", + op_id, + operationState=operation_state, + detailed_status=operation_details, + ) + except DbException as e: + self.logger.error("Task vca_delete={} {}".format(vca_id, "Cannot update database: {}".format(e))) + self.lcm_tasks.remove("vca", vca_id, order_id) + + class K8sRepoLcm(LcmBase): def __init__(self, msg, lcm_tasks, config, loop): diff --git a/requirements-dev.txt b/requirements-dev.txt index e9e1d38..a3f9edc 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -1,5 +1,9 @@ aiokafka==0.7.0 # via -r https://osm.etsi.org/gitweb/?p=osm/common.git;a=blob_plain;f=requirements.txt;hb=master +async-timeout==3.0.1 + # via + # -r https://osm.etsi.org/gitweb/?p=osm/N2VC.git;a=blob_plain;f=requirements.txt;hb=master + # retrying-async bcrypt==3.2.0 # via # -r https://osm.etsi.org/gitweb/?p=osm/N2VC.git;a=blob_plain;f=requirements.txt;hb=master @@ -54,6 +58,8 @@ macaroonbakery==1.3.1 # -r https://osm.etsi.org/gitweb/?p=osm/N2VC.git;a=blob_plain;f=requirements.txt;hb=master # juju # theblues +motor==1.3.1 + # via -r https://osm.etsi.org/gitweb/?p=osm/N2VC.git;a=blob_plain;f=requirements.txt;hb=master mypy-extensions==0.4.3 # via # -r https://osm.etsi.org/gitweb/?p=osm/N2VC.git;a=blob_plain;f=requirements.txt;hb=master @@ -95,7 +101,10 @@ pymacaroons==0.13.0 # -r https://osm.etsi.org/gitweb/?p=osm/N2VC.git;a=blob_plain;f=requirements.txt;hb=master # macaroonbakery pymongo==3.11.3 - # via -r https://osm.etsi.org/gitweb/?p=osm/common.git;a=blob_plain;f=requirements.txt;hb=master + # via + # -r https://osm.etsi.org/gitweb/?p=osm/N2VC.git;a=blob_plain;f=requirements.txt;hb=master + # -r https://osm.etsi.org/gitweb/?p=osm/common.git;a=blob_plain;f=requirements.txt;hb=master + # motor pynacl==1.4.0 # via # -r https://osm.etsi.org/gitweb/?p=osm/N2VC.git;a=blob_plain;f=requirements.txt;hb=master @@ -133,6 +142,8 @@ requests==2.25.1 # macaroonbakery # requests-oauthlib # theblues +retrying-async==1.2.0 + # via -r https://osm.etsi.org/gitweb/?p=osm/N2VC.git;a=blob_plain;f=requirements.txt;hb=master rsa==4.7.2 # via # -r https://osm.etsi.org/gitweb/?p=osm/N2VC.git;a=blob_plain;f=requirements.txt;hb=master -- GitLab From 02ae5404cf6e5fed364fdc22505c9d0f11fa5398 Mon Sep 17 00:00:00 2001 From: David Garcia Date: Tue, 4 May 2021 13:15:57 +0200 Subject: [PATCH 24/35] Fix bug 1525 This patch adds **kwargs in the LCMHelmConn methods that are not used. The feature 10239 added the vca_id parameter for the LCM and N2VC communication for Juju related execution environments. The current patch will avoid having to add extra parameters that are not needed nor used in the LCMHelmmConn. 
Change-Id: I673a36c7c13891a53de1d7dc547f13c81616d373 Signed-off-by: David Garcia --- osm_lcm/lcm_helm_conn.py | 37 +++++++++++++++++++++---------------- 1 file changed, 21 insertions(+), 16 deletions(-) diff --git a/osm_lcm/lcm_helm_conn.py b/osm_lcm/lcm_helm_conn.py index a13824d..845a413 100644 --- a/osm_lcm/lcm_helm_conn.py +++ b/osm_lcm/lcm_helm_conn.py @@ -265,20 +265,11 @@ class LCMHelmConn(N2VCConnector, LcmBase): # nothing to do pass - async def install_configuration_sw(self, - ee_id: str, - artifact_path: str, - db_dict: dict, - progress_timeout: float = None, - total_timeout: float = None, - config: dict = None, - num_units: int = 1, - vca_type: str = None - ): + async def install_configuration_sw(self, *args, **kwargs): # nothing to do pass - async def add_relation(self, ee_id_1: str, ee_id_2: str, endpoint_1: str, endpoint_2: str): + async def add_relation(self, *args, **kwargs): # nothing to do pass @@ -286,12 +277,18 @@ class LCMHelmConn(N2VCConnector, LcmBase): # nothing to to pass - async def get_status(self, namespace: str, yaml_format: bool = True): + async def get_status(self, *args, **kwargs): # not used for this connector pass - async def get_ee_ssh_public__key(self, ee_id: str, db_dict: dict, progress_timeout: float = None, - total_timeout: float = None) -> str: + async def get_ee_ssh_public__key( + self, + ee_id: str, + db_dict: dict, + progress_timeout: float = None, + total_timeout: float = None, + **kwargs, + ) -> str: """ Obtains ssh-public key from ee executing GetSShKey method from the ee. @@ -327,8 +324,16 @@ class LCMHelmConn(N2VCConnector, LcmBase): self.log.error("Error obtaining ee ssh_key: {}".format(e), exc_info=True) raise N2VCException("Error obtaining ee ssh_ke: {}".format(e)) - async def exec_primitive(self, ee_id: str, primitive_name: str, params_dict: dict, db_dict: dict = None, - progress_timeout: float = None, total_timeout: float = None) -> str: + async def exec_primitive( + self, + ee_id: str, + primitive_name: str, + params_dict: dict, + db_dict: dict = None, + progress_timeout: float = None, + total_timeout: float = None, + **kwargs, + ) -> str: """ Execute a primitive in the execution environment -- GitLab From c458bf58d07be523b7198fe484c31b8fc96478b3 Mon Sep 17 00:00:00 2001 From: aktas Date: Fri, 26 Feb 2021 15:41:45 +0300 Subject: [PATCH 25/35] Fix 1462 asyncio.sleep is not needed with the changes on https://osm.etsi.org/gerrit/c/osm/N2VC/+/10399. Change-Id: Ie7179140011c0c13d4957b8b7824125ac0cac4dd Signed-off-by: aktas --- osm_lcm/ns.py | 33 --------------------------------- 1 file changed, 33 deletions(-) diff --git a/osm_lcm/ns.py b/osm_lcm/ns.py index 0f5c6ab..dc69449 100644 --- a/osm_lcm/ns.py +++ b/osm_lcm/ns.py @@ -4126,8 +4126,6 @@ class NsLcm(LcmBase): timeout=self.timeout_charm_delete ) ) - # wait before next removal - await asyncio.sleep(30) tasks_dict_info[task] = "Terminating VCA {}".format(vca.get("ee_id")) del vca_update[vca_index] del config_update[vca_index] @@ -4240,37 +4238,6 @@ class NsLcm(LcmBase): task_instantiation_info=tasks_dict_info, stage=stage ) - # TODO: scaling for kdu is not implemented yet. 
- kdu_name = vdu_info["osm_vdu_id"] - descriptor_config = get_configuration(db_vnfd, kdu_name) - if descriptor_config: - vdu_id = None - vdu_index = vdu_index - vdu_name = None - kdur = next(x for x in db_vnfr["kdur"] if x["kdu-name"] == kdu_name) - deploy_params_kdu = {"OSM": get_osm_params(db_vnfr)} - if kdur.get("additionalParams"): - deploy_params_kdu = parse_yaml_strings(kdur["additionalParams"]) - - self._deploy_n2vc( - logging_text=logging_text, - db_nsr=db_nsr, - db_vnfr=db_vnfr, - nslcmop_id=nslcmop_id, - nsr_id=nsr_id, - nsi_id=nsi_id, - vnfd_id=vnfd_id, - vdu_id=vdu_id, - kdu_name=kdu_name, - member_vnf_index=member_vnf_index, - vdu_index=vdu_index, - vdu_name=vdu_name, - deploy_params=deploy_params_kdu, - descriptor_config=descriptor_config, - base_folder=base_folder, - task_instantiation_info=tasks_dict_info, - stage=stage - ) # SCALE-UP VCA - END scale_process = None -- GitLab From 45966a0a302621ed3f2d03f50cd61a584fddf0ab Mon Sep 17 00:00:00 2001 From: aktas Date: Tue, 4 May 2021 19:32:45 +0300 Subject: [PATCH 26/35] Bug 1526 Fix If vnfd:id and vnfd:df:lcm-operations-configuration:operate-vnf-op-config:day1-2:id are not the same, it fails to add the relation defined in NSD. Change-Id: I15e0a23c726fe4257c1b628faae2b2b50c0675e9 Signed-off-by: aktas --- osm_lcm/ns.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/osm_lcm/ns.py b/osm_lcm/ns.py index dc69449..6fd9e5a 100644 --- a/osm_lcm/ns.py +++ b/osm_lcm/ns.py @@ -2110,8 +2110,11 @@ class NsLcm(LcmBase): db_vnfd_list = db_nsr.get('vnfd-id') if db_vnfd_list: for vnfd in db_vnfd_list: + db_vnf_relations = None db_vnfd = self.db.get_one("vnfds", {"_id": vnfd}) - db_vnf_relations = get_configuration(db_vnfd, db_vnfd["id"]).get("relation", []) + db_vnf_configuration = get_configuration(db_vnfd, db_vnfd["id"]) + if db_vnf_configuration: + db_vnf_relations = db_vnf_configuration.get("relation", []) if db_vnf_relations: for r in db_vnf_relations: # check if this VCA is in the relation -- GitLab From 6ec62b7495f91c1c8388f835696d581ef35c69d6 Mon Sep 17 00:00:00 2001 From: bravof Date: Thu, 25 Feb 2021 17:20:35 -0300 Subject: [PATCH 27/35] fix/feat(relations): external connection point ref now works with multiple KDU Change-Id: I01fac4167617450670999c87200bd3d8179f7293 Signed-off-by: bravof --- osm_lcm/ns.py | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/osm_lcm/ns.py b/osm_lcm/ns.py index 6fd9e5a..443a09e 100644 --- a/osm_lcm/ns.py +++ b/osm_lcm/ns.py @@ -2287,6 +2287,12 @@ class NsLcm(LcmBase): # Obtain management service info (if exists) vnfr_update_dict = {} + kdu_config = get_configuration(vnfd, kdud["name"]) + if kdu_config: + target_ee_list = kdu_config.get("execution-environment-list", []) + else: + target_ee_list = [] + if services: vnfr_update_dict["kdur.{}.services".format(kdu_index)] = services mgmt_services = [service for service in kdud.get("service", []) if service.get("mgmt-service")] @@ -2306,6 +2312,11 @@ class NsLcm(LcmBase): if deep_get(vnfd, ("mgmt-interface", "cp")) == service_external_cp: vnfr_update_dict["ip-address"] = ip + if find_in_list( + target_ee_list, + lambda ee: ee.get("external-connection-point-ref", "") == service_external_cp + ): + vnfr_update_dict["kdur.{}.ip-address".format(kdu_index)] = ip break else: self.logger.warn("Mgmt service name: {} not found".format(mgmt_service["name"])) -- GitLab From 021e70d2eb5caa25a7e731ed70f2a008cf8057d7 Mon Sep 17 00:00:00 2001 From: bravof Date: Thu, 11 Mar 2021 12:03:30 -0300 Subject: [PATCH 28/35] fix(FS): sync problems fixed 
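
The instantiate flow previously began with a blanket self.fs.sync() of
the whole package store, flagged with a TODO to make the sync partial.
The fix, sketched here from the hunks below, scopes each sync to the
descriptor that is about to be read:

    self.fs.sync(db_nsr["nsd-id"])  # mirror only this NS package
    self.fs.sync(vnfd_id)           # and each VNF package it references
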
syncing only the particular NSD and VNFD that the instantiation needs Change-Id: I7410f6f39aa58a57ab91410c45dc7d7042b0293d Signed-off-by: bravof --- osm_lcm/ns.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/osm_lcm/ns.py b/osm_lcm/ns.py index 443a09e..6b82da9 100644 --- a/osm_lcm/ns.py +++ b/osm_lcm/ns.py @@ -1696,9 +1696,6 @@ class NsLcm(LcmBase): # wait for any previous tasks in process await self.lcm_tasks.waitfor_related_HA('ns', 'nslcmops', nslcmop_id) - stage[1] = "Sync filesystem from database." - self.fs.sync() # TODO, make use of partial sync, only for the needed packages - # STEP 0: Reading database (nslcmops, nsrs, nsds, vnfrs, vnfds) stage[1] = "Reading from database." # nsState="BUILDING", currentOperation="INSTANTIATING", currentOperationID=nslcmop_id @@ -1731,6 +1728,7 @@ class NsLcm(LcmBase): db_nsr = self.db.get_one("nsrs", {"_id": nsr_id}) stage[1] = "Getting nsd={} from db.".format(db_nsr["nsd-id"]) nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]}) + self.fs.sync(db_nsr["nsd-id"]) db_nsr["nsd"] = nsd # nsr_name = db_nsr["name"] # TODO short-name?? @@ -1747,6 +1745,7 @@ class NsLcm(LcmBase): db_vnfrs[vnfr["member-vnf-index-ref"]] = vnfr vnfd_id = vnfr["vnfd-id"] vnfd_ref = vnfr["vnfd-ref"] + self.fs.sync(vnfd_id) # if we haven't this vnfd, read it from db if vnfd_id not in db_vnfds: -- GitLab From 5697b8b03a3acd17827ce536cb8aff15df8776ad Mon Sep 17 00:00:00 2001 From: garciadeblas Date: Wed, 24 Mar 2021 09:17:02 +0100 Subject: [PATCH 29/35] Reformat LCM to standardized format Change-Id: I2259f4fc330129ff2d6c805618c2e069aa74564c Signed-off-by: garciadeblas --- osm_lcm/ROclient.py | 723 ++-- osm_lcm/__init__.py | 5 +- osm_lcm/data_utils/database/database.py | 9 +- osm_lcm/data_utils/filesystem/filesystem.py | 9 +- osm_lcm/data_utils/vnfd.py | 61 +- osm_lcm/data_utils/vnfr.py | 13 +- osm_lcm/frontend_grpc.py | 21 +- osm_lcm/frontend_pb2.py | 429 ++- osm_lcm/lcm.py | 415 ++- osm_lcm/lcm_hc.py | 6 +- osm_lcm/lcm_helm_conn.py | 246 +- osm_lcm/lcm_utils.py | 179 +- osm_lcm/netslice.py | 456 ++- osm_lcm/ng_ro.py | 85 +- osm_lcm/ns.py | 3404 +++++++++++++------ osm_lcm/prometheus.py | 105 +- osm_lcm/tests/test_db_descriptors.py | 2 +- osm_lcm/tests/test_lcm_helm_conn.py | 87 +- osm_lcm/tests/test_ns.py | 460 ++- osm_lcm/tests/test_prometheus.py | 106 +- osm_lcm/vim_sdn.py | 814 +++-- setup.py | 24 +- tox.ini | 1 + 23 files changed, 5278 insertions(+), 2382 deletions(-) diff --git a/osm_lcm/ROclient.py b/osm_lcm/ROclient.py index de61c23..32dd1bf 100644 --- a/osm_lcm/ROclient.py +++ b/osm_lcm/ROclient.py @@ -57,23 +57,29 @@ def remove_envelop(item, indata=None): if not indata: return {} if item == "vnfd": - if clean_indata.get('vnfd:vnfd-catalog'): - clean_indata = clean_indata['vnfd:vnfd-catalog'] - elif clean_indata.get('vnfd-catalog'): - clean_indata = clean_indata['vnfd-catalog'] - if clean_indata.get('vnfd'): - if not isinstance(clean_indata['vnfd'], list) or len(clean_indata['vnfd']) != 1: + if clean_indata.get("vnfd:vnfd-catalog"): + clean_indata = clean_indata["vnfd:vnfd-catalog"] + elif clean_indata.get("vnfd-catalog"): + clean_indata = clean_indata["vnfd-catalog"] + if clean_indata.get("vnfd"): + if ( + not isinstance(clean_indata["vnfd"], list) + or len(clean_indata["vnfd"]) != 1 + ): raise ROClientException("'vnfd' must be a list only one element") - clean_indata = clean_indata['vnfd'][0] + clean_indata = clean_indata["vnfd"][0] elif item == "nsd": - if clean_indata.get('nsd:nsd-catalog'): - clean_indata = 
clean_indata['nsd:nsd-catalog'] - elif clean_indata.get('nsd-catalog'): - clean_indata = clean_indata['nsd-catalog'] - if clean_indata.get('nsd'): - if not isinstance(clean_indata['nsd'], list) or len(clean_indata['nsd']) != 1: + if clean_indata.get("nsd:nsd-catalog"): + clean_indata = clean_indata["nsd:nsd-catalog"] + elif clean_indata.get("nsd-catalog"): + clean_indata = clean_indata["nsd-catalog"] + if clean_indata.get("nsd"): + if ( + not isinstance(clean_indata["nsd"], list) + or len(clean_indata["nsd"]) != 1 + ): raise ROClientException("'nsd' must be a list only one element") - clean_indata = clean_indata['nsd'][0] + clean_indata = clean_indata["nsd"][0] elif item == "sdn": if len(indata) == 1 and "sdn_controller" in indata: clean_indata = indata["sdn_controller"] @@ -99,20 +105,28 @@ def remove_envelop(item, indata=None): class ROClient: - headers_req = {'Accept': 'application/yaml', 'content-type': 'application/yaml'} - client_to_RO = {'tenant': 'tenants', 'vim': 'datacenters', 'vim_account': 'datacenters', 'sdn': 'sdn_controllers', - 'vnfd': 'vnfs', 'nsd': 'scenarios', 'wim': 'wims', 'wim_account': 'wims', - 'ns': 'instances'} + headers_req = {"Accept": "application/yaml", "content-type": "application/yaml"} + client_to_RO = { + "tenant": "tenants", + "vim": "datacenters", + "vim_account": "datacenters", + "sdn": "sdn_controllers", + "vnfd": "vnfs", + "nsd": "scenarios", + "wim": "wims", + "wim_account": "wims", + "ns": "instances", + } mandatory_for_create = { - 'tenant': ("name", ), - 'vnfd': ("name", "id"), - 'nsd': ("name", "id"), - 'ns': ("name", "scenario", "datacenter"), - 'vim': ("name", "vim_url"), - 'wim': ("name", "wim_url"), - 'vim_account': (), - 'wim_account': (), - 'sdn': ("name", 'type'), + "tenant": ("name",), + "vnfd": ("name", "id"), + "nsd": ("name", "id"), + "ns": ("name", "scenario", "datacenter"), + "vim": ("name", "vim_url"), + "wim": ("name", "wim_url"), + "vim_account": (), + "wim_account": (), + "sdn": ("name", "type"), } timeout_large = 120 timeout_short = 30 @@ -127,7 +141,7 @@ class ROClient: self.tenant = None self.datacenter_id_name = kwargs.get("datacenter") self.datacenter = None - logger_name = kwargs.get('logger_name', 'lcm.ro') + logger_name = kwargs.get("logger_name", "lcm.ro") self.logger = logging.getLogger(logger_name) if kwargs.get("loglevel"): self.logger.setLevel(kwargs["loglevel"]) @@ -135,47 +149,55 @@ class ROClient: requests = kwargs.get("TODO remove") def __getitem__(self, index): - if index == 'tenant': + if index == "tenant": return self.tenant_id_name - elif index == 'datacenter': + elif index == "datacenter": return self.datacenter_id_name - elif index == 'username': + elif index == "username": return self.username - elif index == 'password': + elif index == "password": return self.password - elif index == 'uri': + elif index == "uri": return self.uri else: raise KeyError("Invalid key '{}'".format(index)) def __setitem__(self, index, value): - if index == 'tenant': + if index == "tenant": self.tenant_id_name = value - elif index == 'datacenter' or index == 'vim': + elif index == "datacenter" or index == "vim": self.datacenter_id_name = value - elif index == 'username': + elif index == "username": self.username = value - elif index == 'password': + elif index == "password": self.password = value - elif index == 'uri': + elif index == "uri": self.uri = value else: raise KeyError("Invalid key '{}'".format(index)) - self.tenant = None # force to reload tenant with different credentials + self.tenant = None # force to reload tenant 
with different credentials self.datacenter = None # force to reload datacenter with different credentials @staticmethod def _parse(descriptor, descriptor_format, response=False): - if descriptor_format and descriptor_format != "json" and descriptor_format != "yaml": - raise ROClientException("'descriptor_format' must be a 'json' or 'yaml' text") + if ( + descriptor_format + and descriptor_format != "json" + and descriptor_format != "yaml" + ): + raise ROClientException( + "'descriptor_format' must be a 'json' or 'yaml' text" + ) if descriptor_format != "json": try: return yaml.load(descriptor) except yaml.YAMLError as exc: error_pos = "" - if hasattr(exc, 'problem_mark'): + if hasattr(exc, "problem_mark"): mark = exc.problem_mark - error_pos = " at line:{} column:{}s".format(mark.line + 1, mark.column + 1) + error_pos = " at line:{} column:{}s".format( + mark.line + 1, mark.column + 1 + ) error_text = "yaml format error" + error_pos elif descriptor_format != "yaml": try: @@ -203,9 +225,11 @@ class ROClient: return yaml.load(descriptor, Loader=yaml.Loader) except yaml.YAMLError as exc: error_pos = "" - if hasattr(exc, 'problem_mark'): + if hasattr(exc, "problem_mark"): mark = exc.problem_mark - error_pos = " at line:{} column:{}s".format(mark.line + 1, mark.column + 1) + error_pos = " at line:{} column:{}s".format( + mark.line + 1, mark.column + 1 + ) error_text = "yaml format error" + error_pos if response: raise ROClientException(error_text) @@ -233,21 +257,21 @@ class ROClient: :return: a new dic with {: {indata} } where envelop can be e.g. tenant, datacenter, ... """ if item == "vnfd": - return {'vnfd-catalog': {'vnfd': [indata]}} + return {"vnfd-catalog": {"vnfd": [indata]}} elif item == "nsd": - return {'nsd-catalog': {'nsd': [indata]}} + return {"nsd-catalog": {"nsd": [indata]}} elif item == "tenant": - return {'tenant': indata} + return {"tenant": indata} elif item in ("vim", "vim_account", "datacenter"): - return {'datacenter': indata} + return {"datacenter": indata} elif item == "wim": - return {'wim': indata} + return {"wim": indata} elif item == "wim_account": - return {'wim_account': indata} + return {"wim_account": indata} elif item == "ns" or item == "instances": - return {'instance': indata} + return {"instance": indata} elif item == "sdn": - return {'sdn_controller': indata} + return {"sdn_controller": indata} else: assert False, "_create_envelop with unknown item {}".format(item) @@ -268,7 +292,10 @@ class ROClient: kitem_old = int(kitem) else: raise ROClientException( - "Invalid query string '{}'. Descriptor is not a list nor dict at '{}'".format(k, kitem)) + "Invalid query string '{}'. Descriptor is not a list nor dict at '{}'".format( + k, kitem + ) + ) if v == "__DELETE__": del update_content[kitem_old] else: @@ -276,13 +303,22 @@ class ROClient: return desc except KeyError: raise ROClientException( - "Invalid query string '{}'. Descriptor does not contain '{}'".format(k, kitem_old)) + "Invalid query string '{}'. Descriptor does not contain '{}'".format( + k, kitem_old + ) + ) except ValueError: - raise ROClientException("Invalid query string '{}'. Expected integer index list instead of '{}'".format( - k, kitem)) + raise ROClientException( + "Invalid query string '{}'. Expected integer index list instead of '{}'".format( + k, kitem + ) + ) except IndexError: raise ROClientException( - "Invalid query string '{}'. Index '{}' out of range".format(k, kitem_old)) + "Invalid query string '{}'. 
Index '{}' out of range".format( + k, kitem_old + ) + ) @staticmethod def check_ns_status(ns_descriptor): @@ -313,7 +349,10 @@ class ROClient: def _get_sdn_ref(sce_net_id): # look for the network associated to the SDN network and obtain the identification - net = next((x for x in ns_descriptor["nets"] if x.get("sce_net_id") == sce_net_id), None) + net = next( + (x for x in ns_descriptor["nets"] if x.get("sce_net_id") == sce_net_id), + None, + ) if not sce_net_id or not net: return "" return _get_ref(net) @@ -322,15 +361,23 @@ class ROClient: total["networks"] = len(ns_descriptor["nets"]) for net in ns_descriptor["nets"]: if net["status"] in ("ERROR", "VIM_ERROR"): - error_list.append("Error at VIM network {}: {}".format(_get_ref(net), net["error_msg"])) + error_list.append( + "Error at VIM network {}: {}".format( + _get_ref(net), net["error_msg"] + ) + ) elif net["status"] == "ACTIVE": done["networks"] += 1 total["SDN_networks"] = len(ns_descriptor["sdn_nets"]) for sdn_net in ns_descriptor["sdn_nets"]: if sdn_net["status"] in ("ERROR", "VIM_ERROR", "WIM_ERROR"): - error_list.append("Error at SDN network {}: {}".format(_get_sdn_ref(sdn_net.get("sce_net_id")), - sdn_net["error_msg"])) + error_list.append( + "Error at SDN network {}: {}".format( + _get_sdn_ref(sdn_net.get("sce_net_id")), + sdn_net["error_msg"], + ) + ) elif sdn_net["status"] == "ACTIVE": done["SDN_networks"] += 1 @@ -338,19 +385,35 @@ class ROClient: for vm in vnf["vms"]: total["VMs"] += 1 if vm["status"] in ("ERROR", "VIM_ERROR"): - error_list.append("Error at VIM VM {}: {}".format(_get_ref(vm), vm["error_msg"])) + error_list.append( + "Error at VIM VM {}: {}".format( + _get_ref(vm), vm["error_msg"] + ) + ) elif vm["status"] == "ACTIVE": done["VMs"] += 1 if error_list: # skip errors caused because other dependendent task is on error - return "ERROR", "; ".join([el for el in error_list if "because depends on failed ACTION" not in el]) + return "ERROR", "; ".join( + [ + el + for el in error_list + if "because depends on failed ACTION" not in el + ] + ) if all(total[x] == done[x] for x in total): # DONE == TOTAL for all items - return "ACTIVE", str({x: total[x] for x in total if total[x]}) # print only those which value is not 0 + return "ACTIVE", str( + {x: total[x] for x in total if total[x]} + ) # print only those which value is not 0 else: - return "BUILD", str({x: "{}/{}".format(done[x], total[x]) for x in total if total[x]}) + return "BUILD", str( + {x: "{}/{}".format(done[x], total[x]) for x in total if total[x]} + ) # print done/total for each item if total is not 0 except Exception as e: - raise ROClientException("Unexpected RO ns descriptor. Wrong version? {}".format(e)) from e + raise ROClientException( + "Unexpected RO ns descriptor. Wrong version? 
{}".format(e) + ) from e @staticmethod def check_action_status(action_descriptor): @@ -385,10 +448,13 @@ class ROClient: other_done += 1 if net_total == net_done and vm_total == vm_done and other_total == other_done: - return "ACTIVE", "VMs {}, networks: {}, other: {} ".format(vm_total, net_total, other_total) + return "ACTIVE", "VMs {}, networks: {}, other: {} ".format( + vm_total, net_total, other_total + ) else: - return "BUILD", "VMs: {}/{}, networks: {}/{}, other: {}/{}".format(vm_done, vm_total, net_done, net_total, - other_done, other_total) + return "BUILD", "VMs: {}/{}, networks: {}/{}, other: {}/{}".format( + vm_done, vm_total, net_done, net_total, other_done, other_total + ) @staticmethod def get_ns_vnf_info(ns_descriptor): @@ -410,27 +476,35 @@ class ROClient: ns_info = {} for vnf in ns_descriptor["vnfs"]: if not vnf.get("ip_address") and vnf.get("vms"): - raise ROClientException("ns member_vnf_index '{}' has no IP address".format( - vnf["member_vnf_index"]), http_code=409) - vnfr_info = { - "ip_address": vnf.get("ip_address"), - "vdur": {} - } + raise ROClientException( + "ns member_vnf_index '{}' has no IP address".format( + vnf["member_vnf_index"] + ), + http_code=409, + ) + vnfr_info = {"ip_address": vnf.get("ip_address"), "vdur": {}} for vm in vnf["vms"]: vdur = { "vim_id": vm.get("vim_vm_id"), "ip_address": vm.get("ip_address"), - "interfaces": {} + "interfaces": {}, } for iface in vm["interfaces"]: if iface.get("type") == "mgmt" and not iface.get("ip_address"): - raise ROClientException("ns member_vnf_index '{}' vm '{}' management interface '{}' has no IP " - "address".format(vnf["member_vnf_index"], vm["vdu_osm_id"], - iface["external_name"]), http_code=409) - vdur["interfaces"][iface["internal_name"]] = {"ip_address": iface.get("ip_address"), - "mac_address": iface.get("mac_address"), - "vim_id": iface.get("vim_interface_id"), - } + raise ROClientException( + "ns member_vnf_index '{}' vm '{}' management interface '{}' has no IP " + "address".format( + vnf["member_vnf_index"], + vm["vdu_osm_id"], + iface["external_name"], + ), + http_code=409, + ) + vdur["interfaces"][iface["internal_name"]] = { + "ip_address": iface.get("ip_address"), + "mac_address": iface.get("mac_address"), + "vim_id": iface.get("vim_interface_id"), + } vnfr_info["vdur"][vm["vdu_osm_id"]] = vdur ns_info[str(vnf["member_vnf_index"])] = vnfr_info return ns_info @@ -450,38 +524,62 @@ class ROClient: if self.check_if_uuid(item_id_name): item_id = item_id_name url += "/" + item_id_name - elif item_id_name and item_id_name.startswith("'") and item_id_name.endswith("'"): + elif ( + item_id_name and item_id_name.startswith("'") and item_id_name.endswith("'") + ): item_id_name = item_id_name[1:-1] self.logger.debug("RO GET %s", url) # timeout = aiohttp.ClientTimeout(total=self.timeout_short) async with session.get(url, headers=self.headers_req) as response: response_text = await response.read() - self.logger.debug("GET {} [{}] {}".format(url, response.status, response_text[:100])) + self.logger.debug( + "GET {} [{}] {}".format(url, response.status, response_text[:100]) + ) if response.status == 404: # NOT_FOUND - raise ROClientException("No {} found with id '{}'".format(item[:-1], item_id_name), - http_code=404) + raise ROClientException( + "No {} found with id '{}'".format(item[:-1], item_id_name), + http_code=404, + ) if response.status >= 300: - raise ROClientException(self._parse_error_yaml(response_text), http_code=response.status) + raise ROClientException( + self._parse_error_yaml(response_text), 
http_code=response.status + ) content = self._parse_yaml(response_text, response=True) if item_id: return item_id desc = content[item] - assert isinstance(desc, list), "_get_item_uuid get a non dict with a list inside {}".format(type(desc)) + assert isinstance( + desc, list + ), "_get_item_uuid get a non dict with a list inside {}".format(type(desc)) uuid = None for i in desc: if item_id_name and i["name"] != item_id_name: continue if uuid: # found more than one raise ROClientException( - "Found more than one {} with name '{}'. uuid must be used".format(item, item_id_name), - http_code=404) + "Found more than one {} with name '{}'. uuid must be used".format( + item, item_id_name + ), + http_code=404, + ) uuid = i["uuid"] if not uuid: - raise ROClientException("No {} found with name '{}'".format(item[:-1], item_id_name), http_code=404) + raise ROClientException( + "No {} found with name '{}'".format(item[:-1], item_id_name), + http_code=404, + ) return uuid - async def _get_item(self, session, item, item_id_name, extra_item=None, extra_item_id=None, all_tenants=False): + async def _get_item( + self, + session, + item, + item_id_name, + extra_item=None, + extra_item_id=None, + all_tenants=False, + ): if all_tenants: tenant_text = "/any" elif all_tenants is None: @@ -506,25 +604,41 @@ class ROClient: # timeout = aiohttp.ClientTimeout(total=self.timeout_short) async with session.get(url, headers=self.headers_req) as response: response_text = await response.read() - self.logger.debug("GET {} [{}] {}".format(url, response.status, response_text[:100])) + self.logger.debug( + "GET {} [{}] {}".format(url, response.status, response_text[:100]) + ) if response.status >= 300: - raise ROClientException(self._parse_error_yaml(response_text), http_code=response.status) + raise ROClientException( + self._parse_error_yaml(response_text), http_code=response.status + ) return self._parse_yaml(response_text, response=True) async def _get_tenant(self, session): if not self.tenant: - self.tenant = await self._get_item_uuid(session, "tenants", self.tenant_id_name, None) + self.tenant = await self._get_item_uuid( + session, "tenants", self.tenant_id_name, None + ) return self.tenant async def _get_datacenter(self, session): if not self.tenant: await self._get_tenant(session) if not self.datacenter: - self.datacenter = await self._get_item_uuid(session, "datacenters", self.datacenter_id_name, True) + self.datacenter = await self._get_item_uuid( + session, "datacenters", self.datacenter_id_name, True + ) return self.datacenter - async def _create_item(self, session, item, descriptor, item_id_name=None, action=None, all_tenants=False): + async def _create_item( + self, + session, + item, + descriptor, + item_id_name=None, + action=None, + all_tenants=False, + ): if all_tenants: tenant_text = "/any" elif all_tenants is None: @@ -559,15 +673,27 @@ class ROClient: else: action = "/{}".format(action) - url = "{}{apiver}{tenant}/{item}{id}{action}".format(self.uri, apiver=api_version_text, - tenant=tenant_text, item=item, id=uuid, action=action) + url = "{}{apiver}{tenant}/{item}{id}{action}".format( + self.uri, + apiver=api_version_text, + tenant=tenant_text, + item=item, + id=uuid, + action=action, + ) self.logger.debug("RO POST %s %s", url, payload_req) # timeout = aiohttp.ClientTimeout(total=self.timeout_large) - async with session.post(url, headers=self.headers_req, data=payload_req) as response: + async with session.post( + url, headers=self.headers_req, data=payload_req + ) as response: response_text = await 
response.read() - self.logger.debug("POST {} [{}] {}".format(url, response.status, response_text[:100])) + self.logger.debug( + "POST {} [{}] {}".format(url, response.status, response_text[:100]) + ) if response.status >= 300: - raise ROClientException(self._parse_error_yaml(response_text), http_code=response.status) + raise ROClientException( + self._parse_error_yaml(response_text), http_code=response.status + ) return self._parse_yaml(response_text, response=True) @@ -583,9 +709,11 @@ class ROClient: if not self.check_if_uuid(item_id_name): # check that exist _all_tenants = all_tenants - if item in ("datacenters", 'wims'): + if item in ("datacenters", "wims"): _all_tenants = True - uuid = await self._get_item_uuid(session, item, item_id_name, all_tenants=_all_tenants) + uuid = await self._get_item_uuid( + session, item, item_id_name, all_tenants=_all_tenants + ) else: uuid = item_id_name @@ -594,9 +722,13 @@ class ROClient: # timeout = aiohttp.ClientTimeout(total=self.timeout_short) async with session.delete(url, headers=self.headers_req) as response: response_text = await response.read() - self.logger.debug("DELETE {} [{}] {}".format(url, response.status, response_text[:100])) + self.logger.debug( + "DELETE {} [{}] {}".format(url, response.status, response_text[:100]) + ) if response.status >= 300: - raise ROClientException(self._parse_error_yaml(response_text), http_code=response.status) + raise ROClientException( + self._parse_error_yaml(response_text), http_code=response.status + ) return self._parse_yaml(response_text, response=True) @@ -620,9 +752,13 @@ class ROClient: # timeout = aiohttp.ClientTimeout(total=self.timeout_short) async with session.get(url, headers=self.headers_req) as response: response_text = await response.read() - self.logger.debug("GET {} [{}] {}".format(url, response.status, response_text[:100])) + self.logger.debug( + "GET {} [{}] {}".format(url, response.status, response_text[:100]) + ) if response.status >= 300: - raise ROClientException(self._parse_error_yaml(response_text), http_code=response.status) + raise ROClientException( + self._parse_error_yaml(response_text), http_code=response.status + ) return self._parse_yaml(response_text, response=True) @@ -642,11 +778,17 @@ class ROClient: url = "{}{}/{}/{}".format(self.uri, tenant_text, item, item_id) self.logger.debug("RO PUT %s %s", url, payload_req) # timeout = aiohttp.ClientTimeout(total=self.timeout_large) - async with session.put(url, headers=self.headers_req, data=payload_req) as response: + async with session.put( + url, headers=self.headers_req, data=payload_req + ) as response: response_text = await response.read() - self.logger.debug("PUT {} [{}] {}".format(url, response.status, response_text[:100])) + self.logger.debug( + "PUT {} [{}] {}".format(url, response.status, response_text[:100]) + ) if response.status >= 300: - raise ROClientException(self._parse_error_yaml(response_text), http_code=response.status) + raise ROClientException( + self._parse_error_yaml(response_text), http_code=response.status + ) return self._parse_yaml(response_text, response=True) @@ -663,22 +805,36 @@ class ROClient: # timeout = aiohttp.ClientTimeout(total=self.timeout_short) async with session.get(url, headers=self.headers_req) as response: response_text = await response.read() - self.logger.debug("GET {} [{}] {}".format(url, response.status, response_text[:100])) + self.logger.debug( + "GET {} [{}] {}".format( + url, response.status, response_text[:100] + ) + ) if response.status >= 300: - raise 
ROClientException(self._parse_error_yaml(response_text), http_code=response.status) + raise ROClientException( + self._parse_error_yaml(response_text), + http_code=response.status, + ) for word in str(response_text).split(" "): if "." in word: version_text, _, _ = word.partition("-") return version_text - raise ROClientException("Got invalid version text: '{}'".format(response_text), http_code=500) + raise ROClientException( + "Got invalid version text: '{}'".format(response_text), + http_code=500, + ) except (aiohttp.ClientOSError, aiohttp.ClientError) as e: raise ROClientException(e, http_code=504) except asyncio.TimeoutError: raise ROClientException("Timeout", http_code=504) except Exception as e: - raise ROClientException("Got invalid version text: '{}'; causing exception {}".format(response_text, e), - http_code=500) + raise ROClientException( + "Got invalid version text: '{}'; causing exception {}".format( + response_text, e + ), + http_code=500, + ) async def get_list(self, item, all_tenants=False, filter_by=None): """ @@ -691,25 +847,38 @@ class ROClient: try: if item not in self.client_to_RO: raise ROClientException("Invalid item {}".format(item)) - if item == 'tenant': + if item == "tenant": all_tenants = None async with aiohttp.ClientSession(loop=self.loop) as session: - content = await self._list_item(session, self.client_to_RO[item], all_tenants=all_tenants, - filter_dict=filter_by) + content = await self._list_item( + session, + self.client_to_RO[item], + all_tenants=all_tenants, + filter_dict=filter_by, + ) if isinstance(content, dict): if len(content) == 1: for _, v in content.items(): return v return content.values()[0] else: - raise ROClientException("Output not a list neither dict with len equal 1", http_code=500) + raise ROClientException( + "Output not a list neither dict with len equal 1", http_code=500 + ) return content except (aiohttp.ClientOSError, aiohttp.ClientError) as e: raise ROClientException(e, http_code=504) except asyncio.TimeoutError: raise ROClientException("Timeout", http_code=504) - async def show(self, item, item_id_name=None, extra_item=None, extra_item_id=None, all_tenants=False): + async def show( + self, + item, + item_id_name=None, + extra_item=None, + extra_item_id=None, + all_tenants=False, + ): """ Obtain the information of an item from its id or name :param item: can be 'tenant', 'vim', 'vnfd', 'nsd', 'ns' @@ -723,16 +892,22 @@ class ROClient: try: if item not in self.client_to_RO: raise ROClientException("Invalid item {}".format(item)) - if item == 'tenant': + if item == "tenant": all_tenants = None - elif item == 'vim': + elif item == "vim": all_tenants = True - elif item == 'vim_account': + elif item == "vim_account": all_tenants = False async with aiohttp.ClientSession(loop=self.loop) as session: - content = await self._get_item(session, self.client_to_RO[item], item_id_name, extra_item=extra_item, - extra_item_id=extra_item_id, all_tenants=all_tenants) + content = await self._get_item( + session, + self.client_to_RO[item], + item_id_name, + extra_item=extra_item, + extra_item_id=extra_item_id, + all_tenants=all_tenants, + ) return remove_envelop(item, content) except (aiohttp.ClientOSError, aiohttp.ClientError) as e: raise ROClientException(e, http_code=504) @@ -750,11 +925,16 @@ class ROClient: try: if item not in self.client_to_RO: raise ROClientException("Invalid item {}".format(item)) - if item in ('tenant', 'vim', 'wim'): + if item in ("tenant", "vim", "wim"): all_tenants = None async with aiohttp.ClientSession(loop=self.loop) as 
session: - result = await self._del_item(session, self.client_to_RO[item], item_id_name, all_tenants=all_tenants) + result = await self._del_item( + session, + self.client_to_RO[item], + item_id_name, + all_tenants=all_tenants, + ) # in case of ns delete, get the action_id embeded in text if item == "ns" and result.get("result"): _, _, action_id = result["result"].partition("action_id=") @@ -767,8 +947,10 @@ class ROClient: except asyncio.TimeoutError: raise ROClientException("Timeout", http_code=504) - async def edit(self, item, item_id_name, descriptor=None, descriptor_format=None, **kwargs): - """ Edit an item + async def edit( + self, item, item_id_name, descriptor=None, descriptor_format=None, **kwargs + ): + """Edit an item :param item: can be 'tenant', 'vim', 'vnfd', 'nsd', 'ns', 'vim' :param item_id_name: RO id or name of the item. Raise and exception if more than one found :param descriptor: can be a dict, or a yaml/json text. Autodetect unless descriptor_format is provided @@ -793,22 +975,31 @@ class ROClient: if kwargs: desc = self.update_descriptor(desc, kwargs) all_tenants = False - if item in ('tenant', 'vim'): + if item in ("tenant", "vim"): all_tenants = None create_desc = self._create_envelop(item, desc) async with aiohttp.ClientSession(loop=self.loop) as session: _all_tenants = all_tenants - if item == 'vim': + if item == "vim": _all_tenants = True - item_id = await self._get_item_uuid(session, self.client_to_RO[item], item_id_name, - all_tenants=_all_tenants) - if item == 'vim': + item_id = await self._get_item_uuid( + session, + self.client_to_RO[item], + item_id_name, + all_tenants=_all_tenants, + ) + if item == "vim": _all_tenants = None # await self._get_tenant(session) - outdata = await self._edit_item(session, self.client_to_RO[item], item_id, create_desc, - all_tenants=_all_tenants) + outdata = await self._edit_item( + session, + self.client_to_RO[item], + item_id, + create_desc, + all_tenants=_all_tenants, + ) return remove_envelop(item, outdata) except (aiohttp.ClientOSError, aiohttp.ClientError) as e: raise ROClientException(e, http_code=504) @@ -843,24 +1034,32 @@ class ROClient: for mandatory in self.mandatory_for_create[item]: if mandatory not in desc: - raise ROClientException("'{}' is mandatory parameter for {}".format(mandatory, item)) + raise ROClientException( + "'{}' is mandatory parameter for {}".format(mandatory, item) + ) all_tenants = False - if item in ('tenant', 'vim', 'wim'): + if item in ("tenant", "vim", "wim"): all_tenants = None create_desc = self._create_envelop(item, desc) async with aiohttp.ClientSession(loop=self.loop) as session: - outdata = await self._create_item(session, self.client_to_RO[item], create_desc, - all_tenants=all_tenants) + outdata = await self._create_item( + session, + self.client_to_RO[item], + create_desc, + all_tenants=all_tenants, + ) return remove_envelop(item, outdata) except (aiohttp.ClientOSError, aiohttp.ClientError) as e: raise ROClientException(e, http_code=504) except asyncio.TimeoutError: raise ROClientException("Timeout", http_code=504) - async def create_action(self, item, item_id_name, descriptor=None, descriptor_format=None, **kwargs): + async def create_action( + self, item, item_id_name, descriptor=None, descriptor_format=None, **kwargs + ): """ Performs an action over an item :param item: can be 'tenant', 'vnfd', 'nsd', 'ns', 'vim', 'vim_account', 'sdn' @@ -888,7 +1087,7 @@ class ROClient: desc = self.update_descriptor(desc, kwargs) all_tenants = False - if item in ('tenant', 'vim'): + if item in 
("tenant", "vim"): all_tenants = None action = None @@ -902,20 +1101,27 @@ class ROClient: async with aiohttp.ClientSession(loop=self.loop) as session: _all_tenants = all_tenants - if item == 'vim': + if item == "vim": _all_tenants = True # item_id = await self._get_item_uuid(session, self.client_to_RO[item], item_id_name, # all_tenants=_all_tenants) - outdata = await self._create_item(session, self.client_to_RO[item], create_desc, - item_id_name=item_id_name, # item_id_name=item_id - action=action, all_tenants=_all_tenants) + outdata = await self._create_item( + session, + self.client_to_RO[item], + create_desc, + item_id_name=item_id_name, # item_id_name=item_id + action=action, + all_tenants=_all_tenants, + ) return remove_envelop(item, outdata) except (aiohttp.ClientOSError, aiohttp.ClientError) as e: raise ROClientException(e, http_code=504) except asyncio.TimeoutError: raise ROClientException("Timeout", http_code=504) - async def attach(self, item, item_id_name=None, descriptor=None, descriptor_format=None, **kwargs): + async def attach( + self, item, item_id_name=None, descriptor=None, descriptor_format=None, **kwargs + ): """ Attach a datacenter or wim to a tenant, creating a vim_account, wim_account :param item: can be vim_account or wim_account @@ -943,27 +1149,47 @@ class ROClient: if item == "vim_account": if not desc.get("vim_tenant_name") and not desc.get("vim_tenant_id"): - raise ROClientException("Wrong descriptor. At least vim_tenant_name or vim_tenant_id must be " - "provided") + raise ROClientException( + "Wrong descriptor. At least vim_tenant_name or vim_tenant_id must be " + "provided" + ) elif item != "wim_account": - raise ROClientException("Attach with unknown item {}. Must be 'vim_account' or 'wim_account'". - format(item)) + raise ROClientException( + "Attach with unknown item {}. 
Must be 'vim_account' or 'wim_account'".format( + item + ) + ) create_desc = self._create_envelop(item, desc) payload_req = yaml.safe_dump(create_desc) async with aiohttp.ClientSession(loop=self.loop) as session: # check that exist - item_id = await self._get_item_uuid(session, self.client_to_RO[item], item_id_name, all_tenants=True) + item_id = await self._get_item_uuid( + session, self.client_to_RO[item], item_id_name, all_tenants=True + ) await self._get_tenant(session) - url = "{}/{tenant}/{item}/{item_id}".format(self.uri, tenant=self.tenant, - item=self.client_to_RO[item], item_id=item_id) + url = "{}/{tenant}/{item}/{item_id}".format( + self.uri, + tenant=self.tenant, + item=self.client_to_RO[item], + item_id=item_id, + ) self.logger.debug("RO POST %s %s", url, payload_req) # timeout = aiohttp.ClientTimeout(total=self.timeout_large) - async with session.post(url, headers=self.headers_req, data=payload_req) as response: + async with session.post( + url, headers=self.headers_req, data=payload_req + ) as response: response_text = await response.read() - self.logger.debug("POST {} [{}] {}".format(url, response.status, response_text[:100])) + self.logger.debug( + "POST {} [{}] {}".format( + url, response.status, response_text[:100] + ) + ) if response.status >= 300: - raise ROClientException(self._parse_error_yaml(response_text), http_code=response.status) + raise ROClientException( + self._parse_error_yaml(response_text), + http_code=response.status, + ) response_desc = self._parse_yaml(response_text, response=True) desc = remove_envelop(item, response_desc) @@ -978,19 +1204,32 @@ class ROClient: try: async with aiohttp.ClientSession(loop=self.loop) as session: # check that exist - item_id = await self._get_item_uuid(session, self.client_to_RO[item], item_id_name, all_tenants=False) + item_id = await self._get_item_uuid( + session, self.client_to_RO[item], item_id_name, all_tenants=False + ) tenant = await self._get_tenant(session) - url = "{}/{tenant}/{item}/{datacenter}".format(self.uri, tenant=tenant, - item=self.client_to_RO[item], datacenter=item_id) + url = "{}/{tenant}/{item}/{datacenter}".format( + self.uri, + tenant=tenant, + item=self.client_to_RO[item], + datacenter=item_id, + ) self.logger.debug("RO DELETE %s", url) # timeout = aiohttp.ClientTimeout(total=self.timeout_large) async with session.delete(url, headers=self.headers_req) as response: response_text = await response.read() - self.logger.debug("DELETE {} [{}] {}".format(url, response.status, response_text[:100])) + self.logger.debug( + "DELETE {} [{}] {}".format( + url, response.status, response_text[:100] + ) + ) if response.status >= 300: - raise ROClientException(self._parse_error_yaml(response_text), http_code=response.status) + raise ROClientException( + self._parse_error_yaml(response_text), + http_code=response.status, + ) response_desc = self._parse_yaml(response_text, response=True) desc = remove_envelop(item, response_desc) @@ -1003,8 +1242,15 @@ class ROClient: # TODO convert to asyncio # DATACENTERS - def edit_datacenter(self, uuid=None, name=None, descriptor=None, descriptor_format=None, all_tenants=False, - **kwargs): + def edit_datacenter( + self, + uuid=None, + name=None, + descriptor=None, + descriptor_format=None, + all_tenants=False, + **kwargs + ): """Edit the parameters of a datacenter Params: must supply a descriptor or/and a parameter to change uuid or/and name. 
If only name is supplied, there must be only one or an exception is raised @@ -1030,16 +1276,26 @@ class ROClient: else: raise ROClientException("Missing descriptor") - if 'datacenter' not in descriptor or len(descriptor) != 1: - raise ROClientException("Descriptor must contain only one 'datacenter' field") + if "datacenter" not in descriptor or len(descriptor) != 1: + raise ROClientException( + "Descriptor must contain only one 'datacenter' field" + ) for param in kwargs: - if param == 'new_name': - descriptor['datacenter']['name'] = kwargs[param] + if param == "new_name": + descriptor["datacenter"]["name"] = kwargs[param] else: - descriptor['datacenter'][param] = kwargs[param] + descriptor["datacenter"][param] = kwargs[param] return self._edit_item("datacenters", descriptor, uuid, name, all_tenants=None) - def edit_scenario(self, uuid=None, name=None, descriptor=None, descriptor_format=None, all_tenants=False, **kwargs): + def edit_scenario( + self, + uuid=None, + name=None, + descriptor=None, + descriptor_format=None, + all_tenants=False, + **kwargs + ): """Edit the parameters of a scenario Params: must supply a descriptor or/and a parameters to change uuid or/and name. If only name is supplied, there must be only one or an exception is raised @@ -1063,13 +1319,13 @@ class ROClient: else: raise ROClientException("Missing descriptor") - if 'scenario' not in descriptor or len(descriptor) > 2: + if "scenario" not in descriptor or len(descriptor) > 2: raise ROClientException("Descriptor must contain only one 'scenario' field") for param in kwargs: - if param == 'new_name': - descriptor['scenario']['name'] = kwargs[param] + if param == "new_name": + descriptor["scenario"]["name"] = kwargs[param] else: - descriptor['scenario'][param] = kwargs[param] + descriptor["scenario"][param] = kwargs[param] return self._edit_item("scenarios", descriptor, uuid, name, all_tenants=None) # VIM ACTIONS @@ -1091,20 +1347,31 @@ class ROClient: """ session = None # TODO remove when changed to asyncio if item not in ("tenants", "networks", "images"): - raise ROClientException("Unknown value for item '{}', must be 'tenants', 'nets' or " - "images".format(str(item))) + raise ROClientException( + "Unknown value for item '{}', must be 'tenants', 'nets' or " + "images".format(str(item)) + ) - image_actions = ['list', 'get', 'show', 'delete'] + image_actions = ["list", "get", "show", "delete"] if item == "images" and action not in image_actions: - raise ROClientException("Only available actions for item '{}' are {}\n" - "Requested action was '{}'".format(item, ', '.join(image_actions), action)) + raise ROClientException( + "Only available actions for item '{}' are {}\n" + "Requested action was '{}'".format( + item, ", ".join(image_actions), action + ) + ) if all_tenants: tenant_text = "/any" else: tenant_text = "/" + self._get_tenant() if "datacenter_id" in kwargs or "datacenter_name" in kwargs: - datacenter = self._get_item_uuid(session, "datacenters", kwargs.get("datacenter"), all_tenants=all_tenants) + datacenter = self._get_item_uuid( + session, + "datacenters", + kwargs.get("datacenter"), + all_tenants=all_tenants, + ) else: datacenter = self.get_datacenter(session) @@ -1119,7 +1386,9 @@ class ROClient: else: raise ROClientException(str(content), http_code=mano_response.status) elif action == "get" or action == "show": - url = "{}{}/vim/{}/{}/{}".format(self.uri, tenant_text, datacenter, item, uuid) + url = "{}{}/vim/{}/{}/{}".format( + self.uri, tenant_text, datacenter, item, uuid + ) self.logger.debug("GET %s", 
url) mano_response = requests.get(url, headers=self.headers_req) self.logger.debug("RO response: %s", mano_response.text) @@ -1129,7 +1398,9 @@ class ROClient: else: raise ROClientException(str(content), http_code=mano_response.status) elif action == "delete": - url = "{}{}/vim/{}/{}/{}".format(self.uri, tenant_text, datacenter, item, uuid) + url = "{}{}/vim/{}/{}/{}".format( + self.uri, tenant_text, datacenter, item, uuid + ) self.logger.debug("DELETE %s", url) mano_response = requests.delete(url, headers=self.headers_req) self.logger.debug("RO response: %s", mano_response.text) @@ -1141,7 +1412,9 @@ class ROClient: elif action == "create": if "descriptor" in kwargs: if isinstance(kwargs["descriptor"], str): - descriptor = self._parse(kwargs["descriptor"], kwargs.get("descriptor_format")) + descriptor = self._parse( + kwargs["descriptor"], kwargs.get("descriptor_format") + ) else: descriptor = kwargs["descriptor"] elif "name" in kwargs: @@ -1150,16 +1423,20 @@ class ROClient: raise ROClientException("Missing descriptor") if item[:-1] not in descriptor or len(descriptor) != 1: - raise ROClientException("Descriptor must contain only one 'tenant' field") + raise ROClientException( + "Descriptor must contain only one 'tenant' field" + ) if "name" in kwargs: - descriptor[item[:-1]]['name'] = kwargs["name"] + descriptor[item[:-1]]["name"] = kwargs["name"] if "description" in kwargs: - descriptor[item[:-1]]['description'] = kwargs["description"] + descriptor[item[:-1]]["description"] = kwargs["description"] payload_req = yaml.safe_dump(descriptor) # print payload_req url = "{}{}/vim/{}/{}".format(self.uri, tenant_text, datacenter, item) self.logger.debug("RO POST %s %s", url, payload_req) - mano_response = requests.post(url, headers=self.headers_req, data=payload_req) + mano_response = requests.post( + url, headers=self.headers_req, data=payload_req + ) self.logger.debug("RO response: %s", mano_response.text) content = self._parse_yaml(mano_response.text, response=True) if mano_response.status_code == 200: @@ -1170,7 +1447,7 @@ class ROClient: raise ROClientException("Unknown value for action '{}".format(str(action))) -if __name__ == '__main__': +if __name__ == "__main__": RO_URL = "http://localhost:9090/openmano" TEST_TENANT = "myTenant" TEST_VIM1 = "myvim" @@ -1198,37 +1475,67 @@ if __name__ == '__main__': tenant_id = True content = loop.run_until_complete(myClient.show("tenant", TEST_TENANT)) print("tenant", TEST_TENANT, content) - content = loop.run_until_complete(myClient.edit("tenant", TEST_TENANT, description="another description")) + content = loop.run_until_complete( + myClient.edit("tenant", TEST_TENANT, description="another description") + ) content = loop.run_until_complete(myClient.show("tenant", TEST_TENANT)) print("tenant edited", TEST_TENANT, content) myClient["tenant"] = TEST_TENANT # test VIM - content = loop.run_until_complete(myClient.create("vim", name=TEST_VIM1, type=TEST_TYPE1, vim_url=TEST_URL1, - config=TEST_CONFIG1)) + content = loop.run_until_complete( + myClient.create( + "vim", + name=TEST_VIM1, + type=TEST_TYPE1, + vim_url=TEST_URL1, + config=TEST_CONFIG1, + ) + ) vim_id = True content = loop.run_until_complete(myClient.get_list("vim")) print("vim", content) content = loop.run_until_complete(myClient.show("vim", TEST_VIM1)) print("vim", TEST_VIM1, content) - content = loop.run_until_complete(myClient.edit("vim", TEST_VIM1, description="another description", - name=TEST_VIM2, type=TEST_TYPE2, vim_url=TEST_URL2, - config=TEST_CONFIG2)) + content = 
loop.run_until_complete( + myClient.edit( + "vim", + TEST_VIM1, + description="another description", + name=TEST_VIM2, + type=TEST_TYPE2, + vim_url=TEST_URL2, + config=TEST_CONFIG2, + ) + ) content = loop.run_until_complete(myClient.show("vim", TEST_VIM2)) print("vim edited", TEST_VIM2, content) # test VIM_ACCOUNT - content = loop.run_until_complete(myClient.attach_datacenter(TEST_VIM2, vim_username='user', - vim_password='pass', vim_tenant_name='vimtenant1', - config=TEST_CONFIG1)) + content = loop.run_until_complete( + myClient.attach_datacenter( + TEST_VIM2, + vim_username="user", + vim_password="pass", + vim_tenant_name="vimtenant1", + config=TEST_CONFIG1, + ) + ) vim_id = True content = loop.run_until_complete(myClient.get_list("vim_account")) print("vim_account", content) content = loop.run_until_complete(myClient.show("vim_account", TEST_VIM2)) print("vim_account", TEST_VIM2, content) - content = loop.run_until_complete(myClient.edit("vim_account", TEST_VIM2, vim_username='user2', - vim_password='pass2', vim_tenant_name="vimtenant2", - config=TEST_CONFIG2)) + content = loop.run_until_complete( + myClient.edit( + "vim_account", + TEST_VIM2, + vim_username="user2", + vim_password="pass2", + vim_tenant_name="vimtenant2", + config=TEST_CONFIG2, + ) + ) content = loop.run_until_complete(myClient.show("vim_account", TEST_VIM2)) print("vim_account edited", TEST_VIM2, content) @@ -1237,9 +1544,13 @@ if __name__ == '__main__': except Exception as e: logger.error("Error {}".format(e), exc_info=True) - for item in (("vim_account", TEST_VIM1), ("vim", TEST_VIM1), - ("vim_account", TEST_VIM2), ("vim", TEST_VIM2), - ("tenant", TEST_TENANT)): + for item in ( + ("vim_account", TEST_VIM1), + ("vim", TEST_VIM1), + ("vim_account", TEST_VIM2), + ("vim", TEST_VIM2), + ("tenant", TEST_TENANT), + ): try: content = loop.run_until_complete(myClient.delete(item[0], item[1])) print("{} {} deleted; {}".format(item[0], item[1], content)) diff --git a/osm_lcm/__init__.py b/osm_lcm/__init__.py index d013101..b21755a 100644 --- a/osm_lcm/__init__.py +++ b/osm_lcm/__init__.py @@ -13,12 +13,13 @@ ## # version moved to lcm.py. 
uncomment if LCM is installed as library and installed -version = '7.0.1.post18' -version_date = '2020-04-08' +version = "7.0.1.post18" +version_date = "2020-04-08" # Try to get version from package using pkg_resources (available with setuptools) try: from pkg_resources import get_distribution + version = get_distribution("osm_lcm").version except Exception: pass diff --git a/osm_lcm/data_utils/database/database.py b/osm_lcm/data_utils/database/database.py index eeb44c4..dc7c8d9 100644 --- a/osm_lcm/data_utils/database/database.py +++ b/osm_lcm/data_utils/database/database.py @@ -30,7 +30,7 @@ from osm_common.dbbase import DbException class Database: class __Database: def __init__(self, config): - self.logger = logging.getLogger('lcm') + self.logger = logging.getLogger("lcm") try: # TODO check database version if config["database"]["driver"] == "mongo": @@ -40,8 +40,11 @@ class Database: self.db = dbmemory.DbMemory() self.db.db_connect(config["database"]) else: - raise Exception("Invalid configuration param '{}' at '[database]':'driver'".format( - config["database"]["driver"])) + raise Exception( + "Invalid configuration param '{}' at '[database]':'driver'".format( + config["database"]["driver"] + ) + ) except (DbException) as e: self.logger.critical(str(e), exc_info=True) raise Exception(str(e)) diff --git a/osm_lcm/data_utils/filesystem/filesystem.py b/osm_lcm/data_utils/filesystem/filesystem.py index bc26c96..c0110ef 100644 --- a/osm_lcm/data_utils/filesystem/filesystem.py +++ b/osm_lcm/data_utils/filesystem/filesystem.py @@ -30,7 +30,7 @@ from osm_common import fslocal, fsmongo class Filesystem: class __Filesystem: def __init__(self, config): - self.logger = logging.getLogger('lcm') + self.logger = logging.getLogger("lcm") try: if config["storage"]["driver"] == "local": self.fs = fslocal.FsLocal() @@ -39,8 +39,11 @@ class Filesystem: self.fs = fsmongo.FsMongo() self.fs.fs_connect(config["storage"]) else: - raise Exception("Invalid configuration param '{}' at '[storage]':'driver'".format( - config["storage"]["driver"])) + raise Exception( + "Invalid configuration param '{}' at '[storage]':'driver'".format( + config["storage"]["driver"] + ) + ) except (FsException) as e: self.logger.critical(str(e), exc_info=True) raise Exception(str(e)) diff --git a/osm_lcm/data_utils/vnfd.py b/osm_lcm/data_utils/vnfd.py index 1b45b53..5351c41 100644 --- a/osm_lcm/data_utils/vnfd.py +++ b/osm_lcm/data_utils/vnfd.py @@ -37,7 +37,9 @@ def get_kdu_list(vnfd): return vnfd.get("kdu", ()) -def get_ee_sorted_initial_config_primitive_list(primitive_list, vca_deployed, ee_descriptor_id): +def get_ee_sorted_initial_config_primitive_list( + primitive_list, vca_deployed, ee_descriptor_id +): """ Generates a list of initial-config-primitive based on the list provided by the descriptor. It includes internal primitives as verify-ssh-credentials, or config when needed @@ -50,10 +52,12 @@ def get_ee_sorted_initial_config_primitive_list(primitive_list, vca_deployed, ee """ primitive_list = primitive_list or [] primitive_list = [ - p for p in primitive_list if p.get("execution-environment-ref", ee_descriptor_id) == ee_descriptor_id + p + for p in primitive_list + if p.get("execution-environment-ref", ee_descriptor_id) == ee_descriptor_id ] if primitive_list: - primitive_list.sort(key=lambda val: int(val['seq'])) + primitive_list.sort(key=lambda val: int(val["seq"])) # look for primitive config, and get the position. 
None if not present config_position = None @@ -67,25 +71,34 @@ def get_ee_sorted_initial_config_primitive_list(primitive_list, vca_deployed, ee primitive_list.insert(0, {"name": "config", "parameter": []}) config_position = 0 # TODO revise if needed: for VNF/VDU add verify-ssh-credentials after config - if vca_deployed["member-vnf-index"] and config_position is not None and vca_deployed.get("ssh-public-key"): - primitive_list.insert(config_position + 1, {"name": "verify-ssh-credentials", "parameter": []}) + if ( + vca_deployed["member-vnf-index"] + and config_position is not None + and vca_deployed.get("ssh-public-key") + ): + primitive_list.insert( + config_position + 1, {"name": "verify-ssh-credentials", "parameter": []} + ) return primitive_list def get_ee_sorted_terminate_config_primitive_list(primitive_list, ee_descriptor_id): primitive_list = primitive_list or [] primitive_list = [ - p for p in primitive_list if p.get("execution-environment-ref", ee_descriptor_id) == ee_descriptor_id + p + for p in primitive_list + if p.get("execution-environment-ref", ee_descriptor_id) == ee_descriptor_id ] if primitive_list: - primitive_list.sort(key=lambda val: int(val['seq'])) + primitive_list.sort(key=lambda val: int(val["seq"])) return primitive_list def get_vdu_profile(vnfd, vdu_profile_id): return list_utils.find_in_list( vnfd.get("df", ())[0]["vdu-profile"], - lambda vdu_profile: vdu_profile["id"] == vdu_profile_id) + lambda vdu_profile: vdu_profile["id"] == vdu_profile_id, + ) def get_configuration(vnfd, entity_id): @@ -97,8 +110,8 @@ def get_configuration(vnfd, entity_id): return None day12ops = ops_vnf.get("day1-2", []) return list_utils.find_in_list( - day12ops, - lambda configuration: configuration["id"] == entity_id) + day12ops, lambda configuration: configuration["id"] == entity_id + ) def get_virtual_link_profiles(vnfd): @@ -106,15 +119,13 @@ def get_virtual_link_profiles(vnfd): def get_vdu(vnfd, vdu_id): - return list_utils.find_in_list( - vnfd.get("vdu", ()), - lambda vdu: vdu["id"] == vdu_id) + return list_utils.find_in_list(vnfd.get("vdu", ()), lambda vdu: vdu["id"] == vdu_id) def get_vdu_index(vnfd, vdu_id): target_vdu = list_utils.find_in_list( - vnfd.get("vdu", ()), - lambda vdu: vdu["id"] == vdu_id) + vnfd.get("vdu", ()), lambda vdu: vdu["id"] == vdu_id + ) if target_vdu: return vnfd.get("vdu", ()).index(target_vdu) else: @@ -127,25 +138,13 @@ def get_scaling_aspect(vnfd): def get_number_of_instances(vnfd, vdu_id): return list_utils.find_in_list( - vnfd.get( - "df", - () - )[0].get( - "instantiation-level", - () - )[0].get( - "vdu-level", - () - ), - lambda a_vdu: a_vdu["vdu-id"] == vdu_id + vnfd.get("df", ())[0].get("instantiation-level", ())[0].get("vdu-level", ()), + lambda a_vdu: a_vdu["vdu-id"] == vdu_id, ).get("number-of-instances", 1) def get_juju_ee_ref(vnfd, entity_id): return list_utils.find_in_list( - get_configuration(vnfd, entity_id).get( - "execution-environment-list", - [] - ), - lambda ee: "juju" in ee + get_configuration(vnfd, entity_id).get("execution-environment-list", []), + lambda ee: "juju" in ee, ) diff --git a/osm_lcm/data_utils/vnfr.py b/osm_lcm/data_utils/vnfr.py index 9c0b148..7e4d164 100644 --- a/osm_lcm/data_utils/vnfr.py +++ b/osm_lcm/data_utils/vnfr.py @@ -32,7 +32,8 @@ def find_VNFR_by_VDU_ID(vnfr, vdu_id): def get_osm_params(db_vnfr, vdu_id=None, vdu_count_index=0): osm_params = { - x.replace("-", "_"): db_vnfr[x] for x in ("ip-address", "vim-account-id", "vnfd-id", "vnfd-ref") + x.replace("-", "_"): db_vnfr[x] + for x in ("ip-address", 
"vim-account-id", "vnfd-id", "vnfd-ref") if db_vnfr.get(x) is not None } osm_params["ns_id"] = db_vnfr["nsr-id-ref"] @@ -44,14 +45,16 @@ def get_osm_params(db_vnfr, vdu_id=None, vdu_count_index=0): vdu = { "count_index": vdur["count-index"], "vdu_id": vdur["vdu-id-ref"], - "interfaces": {} + "interfaces": {}, } if vdur.get("ip-address"): vdu["ip_address"] = vdur["ip-address"] for iface in vdur["interfaces"]: - vdu["interfaces"][iface["name"]] = \ - {x.replace("-", "_"): iface[x] for x in ("mac-address", "ip-address", "name") - if iface.get(x) is not None} + vdu["interfaces"][iface["name"]] = { + x.replace("-", "_"): iface[x] + for x in ("mac-address", "ip-address", "name") + if iface.get(x) is not None + } vdu_id_index = "{}-{}".format(vdur["vdu-id-ref"], vdur["count-index"]) osm_params["vdu"][vdu_id_index] = vdu if vdu_id: diff --git a/osm_lcm/frontend_grpc.py b/osm_lcm/frontend_grpc.py index 88308e1..2c3a7d9 100644 --- a/osm_lcm/frontend_grpc.py +++ b/osm_lcm/frontend_grpc.py @@ -23,6 +23,7 @@ import typing import grpclib.const import grpclib.client + if typing.TYPE_CHECKING: import grpclib.server @@ -30,24 +31,29 @@ import osm_lcm.frontend_pb2 class FrontendExecutorBase(abc.ABC): - @abc.abstractmethod - async def RunPrimitive(self, stream: 'grpclib.server.Stream[osm_lcm.frontend_pb2.PrimitiveRequest, osm_lcm.frontend_pb2.PrimitiveReply]') -> None: + async def RunPrimitive( + self, + stream: "grpclib.server.Stream[osm_lcm.frontend_pb2.PrimitiveRequest, osm_lcm.frontend_pb2.PrimitiveReply]", + ) -> None: pass @abc.abstractmethod - async def GetSshKey(self, stream: 'grpclib.server.Stream[osm_lcm.frontend_pb2.SshKeyRequest, osm_lcm.frontend_pb2.SshKeyReply]') -> None: + async def GetSshKey( + self, + stream: "grpclib.server.Stream[osm_lcm.frontend_pb2.SshKeyRequest, osm_lcm.frontend_pb2.SshKeyReply]", + ) -> None: pass def __mapping__(self) -> typing.Dict[str, grpclib.const.Handler]: return { - '/osm_ee.FrontendExecutor/RunPrimitive': grpclib.const.Handler( + "/osm_ee.FrontendExecutor/RunPrimitive": grpclib.const.Handler( self.RunPrimitive, grpclib.const.Cardinality.UNARY_STREAM, osm_lcm.frontend_pb2.PrimitiveRequest, osm_lcm.frontend_pb2.PrimitiveReply, ), - '/osm_ee.FrontendExecutor/GetSshKey': grpclib.const.Handler( + "/osm_ee.FrontendExecutor/GetSshKey": grpclib.const.Handler( self.GetSshKey, grpclib.const.Cardinality.UNARY_UNARY, osm_lcm.frontend_pb2.SshKeyRequest, @@ -57,17 +63,16 @@ class FrontendExecutorBase(abc.ABC): class FrontendExecutorStub: - def __init__(self, channel: grpclib.client.Channel) -> None: self.RunPrimitive = grpclib.client.UnaryStreamMethod( channel, - '/osm_ee.FrontendExecutor/RunPrimitive', + "/osm_ee.FrontendExecutor/RunPrimitive", osm_lcm.frontend_pb2.PrimitiveRequest, osm_lcm.frontend_pb2.PrimitiveReply, ) self.GetSshKey = grpclib.client.UnaryUnaryMethod( channel, - '/osm_ee.FrontendExecutor/GetSshKey', + "/osm_ee.FrontendExecutor/GetSshKey", osm_lcm.frontend_pb2.SshKeyRequest, osm_lcm.frontend_pb2.SshKeyReply, ) diff --git a/osm_lcm/frontend_pb2.py b/osm_lcm/frontend_pb2.py index 3407e73..0e6c2a9 100644 --- a/osm_lcm/frontend_pb2.py +++ b/osm_lcm/frontend_pb2.py @@ -23,228 +23,295 @@ from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database + # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() - - DESCRIPTOR = _descriptor.FileDescriptor( - name='osm_lcm/frontend.proto', - 
package='osm_ee', - syntax='proto3', - serialized_options=b'\n\027com.etsi.osm.lcm.osm_eeB\014GrpcExecutorP\001\242\002\003OEE', - serialized_pb=b'\n\x16osm_lcm/frontend.proto\x12\x06osm_ee\"<\n\x10PrimitiveRequest\x12\n\n\x02id\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\x0e\n\x06params\x18\x03 \x01(\t\":\n\x0ePrimitiveReply\x12\x0e\n\x06status\x18\x01 \x01(\t\x12\x18\n\x10\x64\x65tailed_message\x18\x02 \x01(\t\"\x0f\n\rSshKeyRequest\"\x1e\n\x0bSshKeyReply\x12\x0f\n\x07message\x18\x01 \x01(\t2\x93\x01\n\x10\x46rontendExecutor\x12\x44\n\x0cRunPrimitive\x12\x18.osm_ee.PrimitiveRequest\x1a\x16.osm_ee.PrimitiveReply\"\x00\x30\x01\x12\x39\n\tGetSshKey\x12\x15.osm_ee.SshKeyRequest\x1a\x13.osm_ee.SshKeyReply\"\x00\x42/\n\x17\x63om.etsi.osm.lcm.osm_eeB\x0cGrpcExecutorP\x01\xa2\x02\x03OEEb\x06proto3' + name="osm_lcm/frontend.proto", + package="osm_ee", + syntax="proto3", + serialized_options=b"\n\027com.etsi.osm.lcm.osm_eeB\014GrpcExecutorP\001\242\002\003OEE", + serialized_pb=b'\n\x16osm_lcm/frontend.proto\x12\x06osm_ee"<\n\x10PrimitiveRequest\x12\n\n\x02id\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\x0e\n\x06params\x18\x03 \x01(\t":\n\x0ePrimitiveReply\x12\x0e\n\x06status\x18\x01 \x01(\t\x12\x18\n\x10\x64\x65tailed_message\x18\x02 \x01(\t"\x0f\n\rSshKeyRequest"\x1e\n\x0bSshKeyReply\x12\x0f\n\x07message\x18\x01 \x01(\t2\x93\x01\n\x10\x46rontendExecutor\x12\x44\n\x0cRunPrimitive\x12\x18.osm_ee.PrimitiveRequest\x1a\x16.osm_ee.PrimitiveReply"\x00\x30\x01\x12\x39\n\tGetSshKey\x12\x15.osm_ee.SshKeyRequest\x1a\x13.osm_ee.SshKeyReply"\x00\x42/\n\x17\x63om.etsi.osm.lcm.osm_eeB\x0cGrpcExecutorP\x01\xa2\x02\x03OEEb\x06proto3', ) - - _PRIMITIVEREQUEST = _descriptor.Descriptor( - name='PrimitiveRequest', - full_name='osm_ee.PrimitiveRequest', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='id', full_name='osm_ee.PrimitiveRequest.id', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='name', full_name='osm_ee.PrimitiveRequest.name', index=1, - number=2, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='params', full_name='osm_ee.PrimitiveRequest.params', index=2, - number=3, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - serialized_options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=34, - serialized_end=94, + name="PrimitiveRequest", + full_name="osm_ee.PrimitiveRequest", + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name="id", + full_name="osm_ee.PrimitiveRequest.id", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + 
extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="name", + full_name="osm_ee.PrimitiveRequest.name", + index=1, + number=2, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="params", + full_name="osm_ee.PrimitiveRequest.params", + index=2, + number=3, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=34, + serialized_end=94, ) _PRIMITIVEREPLY = _descriptor.Descriptor( - name='PrimitiveReply', - full_name='osm_ee.PrimitiveReply', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='status', full_name='osm_ee.PrimitiveReply.status', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='detailed_message', full_name='osm_ee.PrimitiveReply.detailed_message', index=1, - number=2, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - serialized_options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=96, - serialized_end=154, + name="PrimitiveReply", + full_name="osm_ee.PrimitiveReply", + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name="status", + full_name="osm_ee.PrimitiveReply.status", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="detailed_message", + full_name="osm_ee.PrimitiveReply.detailed_message", + index=1, + number=2, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=96, + serialized_end=154, ) _SSHKEYREQUEST = _descriptor.Descriptor( - name='SshKeyRequest', - full_name='osm_ee.SshKeyRequest', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - serialized_options=None, - is_extendable=False, - 
syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=156, - serialized_end=171, + name="SshKeyRequest", + full_name="osm_ee.SshKeyRequest", + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=156, + serialized_end=171, ) _SSHKEYREPLY = _descriptor.Descriptor( - name='SshKeyReply', - full_name='osm_ee.SshKeyReply', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='message', full_name='osm_ee.SshKeyReply.message', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - serialized_options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=173, - serialized_end=203, + name="SshKeyReply", + full_name="osm_ee.SshKeyReply", + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name="message", + full_name="osm_ee.SshKeyReply.message", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=173, + serialized_end=203, ) -DESCRIPTOR.message_types_by_name['PrimitiveRequest'] = _PRIMITIVEREQUEST -DESCRIPTOR.message_types_by_name['PrimitiveReply'] = _PRIMITIVEREPLY -DESCRIPTOR.message_types_by_name['SshKeyRequest'] = _SSHKEYREQUEST -DESCRIPTOR.message_types_by_name['SshKeyReply'] = _SSHKEYREPLY +DESCRIPTOR.message_types_by_name["PrimitiveRequest"] = _PRIMITIVEREQUEST +DESCRIPTOR.message_types_by_name["PrimitiveReply"] = _PRIMITIVEREPLY +DESCRIPTOR.message_types_by_name["SshKeyRequest"] = _SSHKEYREQUEST +DESCRIPTOR.message_types_by_name["SshKeyReply"] = _SSHKEYREPLY _sym_db.RegisterFileDescriptor(DESCRIPTOR) -PrimitiveRequest = _reflection.GeneratedProtocolMessageType('PrimitiveRequest', (_message.Message,), { - 'DESCRIPTOR' : _PRIMITIVEREQUEST, - '__module__' : 'osm_lcm.frontend_pb2' - # @@protoc_insertion_point(class_scope:osm_ee.PrimitiveRequest) - }) +PrimitiveRequest = _reflection.GeneratedProtocolMessageType( + "PrimitiveRequest", + (_message.Message,), + { + "DESCRIPTOR": _PRIMITIVEREQUEST, + "__module__": "osm_lcm.frontend_pb2" + # @@protoc_insertion_point(class_scope:osm_ee.PrimitiveRequest) + }, +) _sym_db.RegisterMessage(PrimitiveRequest) -PrimitiveReply = _reflection.GeneratedProtocolMessageType('PrimitiveReply', (_message.Message,), { - 'DESCRIPTOR' : _PRIMITIVEREPLY, - '__module__' : 'osm_lcm.frontend_pb2' - # @@protoc_insertion_point(class_scope:osm_ee.PrimitiveReply) - }) +PrimitiveReply = _reflection.GeneratedProtocolMessageType( + "PrimitiveReply", + (_message.Message,), + { + "DESCRIPTOR": _PRIMITIVEREPLY, + "__module__": "osm_lcm.frontend_pb2" + # @@protoc_insertion_point(class_scope:osm_ee.PrimitiveReply) + }, +) 
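# [editor's note, not part of the patch] frontend_pb2.py is protoc-generated
# code, so the hunks above only re-wrap it Black-style; the serialized
# descriptors themselves are unchanged. As a minimal sketch of how these
# generated messages are consumed, the grpclib stub declared in
# frontend_grpc.py can be driven like this (host and port are illustrative
# assumptions, not values taken from this patch):
#
#     import asyncio
#     from grpclib.client import Channel
#     import osm_lcm.frontend_pb2 as frontend_pb2
#     from osm_lcm.frontend_grpc import FrontendExecutorStub
#
#     async def fetch_ssh_key(host="127.0.0.1", port=50050):
#         # open a channel to the execution-environment gRPC server and call
#         # the unary-unary GetSshKey method of osm_ee.FrontendExecutor
#         channel = Channel(host, port)
#         try:
#             stub = FrontendExecutorStub(channel)
#             reply = await stub.GetSshKey(frontend_pb2.SshKeyRequest())
#             return reply.message
#         finally:
#             channel.close()
#
#     asyncio.get_event_loop().run_until_complete(fetch_ssh_key())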
_sym_db.RegisterMessage(PrimitiveReply) -SshKeyRequest = _reflection.GeneratedProtocolMessageType('SshKeyRequest', (_message.Message,), { - 'DESCRIPTOR' : _SSHKEYREQUEST, - '__module__' : 'osm_lcm.frontend_pb2' - # @@protoc_insertion_point(class_scope:osm_ee.SshKeyRequest) - }) +SshKeyRequest = _reflection.GeneratedProtocolMessageType( + "SshKeyRequest", + (_message.Message,), + { + "DESCRIPTOR": _SSHKEYREQUEST, + "__module__": "osm_lcm.frontend_pb2" + # @@protoc_insertion_point(class_scope:osm_ee.SshKeyRequest) + }, +) _sym_db.RegisterMessage(SshKeyRequest) -SshKeyReply = _reflection.GeneratedProtocolMessageType('SshKeyReply', (_message.Message,), { - 'DESCRIPTOR' : _SSHKEYREPLY, - '__module__' : 'osm_lcm.frontend_pb2' - # @@protoc_insertion_point(class_scope:osm_ee.SshKeyReply) - }) +SshKeyReply = _reflection.GeneratedProtocolMessageType( + "SshKeyReply", + (_message.Message,), + { + "DESCRIPTOR": _SSHKEYREPLY, + "__module__": "osm_lcm.frontend_pb2" + # @@protoc_insertion_point(class_scope:osm_ee.SshKeyReply) + }, +) _sym_db.RegisterMessage(SshKeyReply) DESCRIPTOR._options = None _FRONTENDEXECUTOR = _descriptor.ServiceDescriptor( - name='FrontendExecutor', - full_name='osm_ee.FrontendExecutor', - file=DESCRIPTOR, - index=0, - serialized_options=None, - serialized_start=206, - serialized_end=353, - methods=[ - _descriptor.MethodDescriptor( - name='RunPrimitive', - full_name='osm_ee.FrontendExecutor.RunPrimitive', + name="FrontendExecutor", + full_name="osm_ee.FrontendExecutor", + file=DESCRIPTOR, index=0, - containing_service=None, - input_type=_PRIMITIVEREQUEST, - output_type=_PRIMITIVEREPLY, - serialized_options=None, - ), - _descriptor.MethodDescriptor( - name='GetSshKey', - full_name='osm_ee.FrontendExecutor.GetSshKey', - index=1, - containing_service=None, - input_type=_SSHKEYREQUEST, - output_type=_SSHKEYREPLY, serialized_options=None, - ), -]) + serialized_start=206, + serialized_end=353, + methods=[ + _descriptor.MethodDescriptor( + name="RunPrimitive", + full_name="osm_ee.FrontendExecutor.RunPrimitive", + index=0, + containing_service=None, + input_type=_PRIMITIVEREQUEST, + output_type=_PRIMITIVEREPLY, + serialized_options=None, + ), + _descriptor.MethodDescriptor( + name="GetSshKey", + full_name="osm_ee.FrontendExecutor.GetSshKey", + index=1, + containing_service=None, + input_type=_SSHKEYREQUEST, + output_type=_SSHKEYREPLY, + serialized_options=None, + ), + ], +) _sym_db.RegisterServiceDescriptor(_FRONTENDEXECUTOR) -DESCRIPTOR.services_by_name['FrontendExecutor'] = _FRONTENDEXECUTOR +DESCRIPTOR.services_by_name["FrontendExecutor"] = _FRONTENDEXECUTOR # @@protoc_insertion_point(module_scope) diff --git a/osm_lcm/lcm.py b/osm_lcm/lcm.py index ebfca7e..851ba09 100644 --- a/osm_lcm/lcm.py +++ b/osm_lcm/lcm.py @@ -49,7 +49,7 @@ from random import choice as random_choice from n2vc import version as n2vc_version import traceback -if os.getenv('OSMLCM_PDB_DEBUG', None) is not None: +if os.getenv("OSMLCM_PDB_DEBUG", None) is not None: pdb.set_trace() @@ -58,14 +58,23 @@ min_RO_version = "6.0.2" min_n2vc_version = "0.0.2" min_common_version = "0.1.19" -health_check_file = path.expanduser("~") + "/time_last_ping" # TODO find better location for this file +health_check_file = ( + path.expanduser("~") + "/time_last_ping" +) # TODO find better location for this file class Lcm: - ping_interval_pace = 120 # how many time ping is send once is confirmed all is running - ping_interval_boot = 5 # how many time ping is sent when booting - cfg_logger_name = {"message": "lcm.msg", "database": 
"lcm.db", "storage": "lcm.fs", "tsdb": "lcm.prometheus"} + ping_interval_pace = ( + 120 # how many time ping is send once is confirmed all is running + ) + ping_interval_boot = 5 # how many time ping is sent when booting + cfg_logger_name = { + "message": "lcm.msg", + "database": "lcm.db", + "storage": "lcm.fs", + "tsdb": "lcm.prometheus", + } # ^ contains for each section at lcm.cfg the used logger name def __init__(self, config_file, loop=None): @@ -83,7 +92,7 @@ class Lcm: self.first_start = False # logging - self.logger = logging.getLogger('lcm') + self.logger = logging.getLogger("lcm") # get id self.worker_id = self.get_process_id() # load configuration @@ -97,24 +106,38 @@ class Lcm: "loglevel": config["RO"].get("loglevel", "ERROR"), } if not self.config["ro_config"]["uri"]: - self.config["ro_config"]["uri"] = "http://{}:{}/".format(config["RO"]["host"], config["RO"]["port"]) - elif "/ro" in self.config["ro_config"]["uri"][-4:] or "/openmano" in self.config["ro_config"]["uri"][-10:]: + self.config["ro_config"]["uri"] = "http://{}:{}/".format( + config["RO"]["host"], config["RO"]["port"] + ) + elif ( + "/ro" in self.config["ro_config"]["uri"][-4:] + or "/openmano" in self.config["ro_config"]["uri"][-10:] + ): # uri ends with '/ro', '/ro/', '/openmano', '/openmano/' index = self.config["ro_config"]["uri"][-1].rfind("/") - self.config["ro_config"]["uri"] = self.config["ro_config"]["uri"][index+1] + self.config["ro_config"]["uri"] = self.config["ro_config"]["uri"][index + 1] self.loop = loop or asyncio.get_event_loop() - self.ns = self.netslice = self.vim = self.wim = self.sdn = self.k8scluster = self.vca = self.k8srepo = None + self.ns = ( + self.netslice + ) = ( + self.vim + ) = self.wim = self.sdn = self.k8scluster = self.vca = self.k8srepo = None # logging - log_format_simple = "%(asctime)s %(levelname)s %(name)s %(filename)s:%(lineno)s %(message)s" - log_formatter_simple = logging.Formatter(log_format_simple, datefmt='%Y-%m-%dT%H:%M:%S') + log_format_simple = ( + "%(asctime)s %(levelname)s %(name)s %(filename)s:%(lineno)s %(message)s" + ) + log_formatter_simple = logging.Formatter( + log_format_simple, datefmt="%Y-%m-%dT%H:%M:%S" + ) config["database"]["logger_name"] = "lcm.db" config["storage"]["logger_name"] = "lcm.fs" config["message"]["logger_name"] = "lcm.msg" if config["global"].get("logfile"): - file_handler = logging.handlers.RotatingFileHandler(config["global"]["logfile"], - maxBytes=100e6, backupCount=9, delay=0) + file_handler = logging.handlers.RotatingFileHandler( + config["global"]["logfile"], maxBytes=100e6, backupCount=9, delay=0 + ) file_handler.setFormatter(log_formatter_simple) self.logger.addHandler(file_handler) if not config["global"].get("nologging"): @@ -130,24 +153,33 @@ class Lcm: config[k1]["logger_name"] = logname logger_module = logging.getLogger(logname) if config[k1].get("logfile"): - file_handler = logging.handlers.RotatingFileHandler(config[k1]["logfile"], - maxBytes=100e6, backupCount=9, delay=0) + file_handler = logging.handlers.RotatingFileHandler( + config[k1]["logfile"], maxBytes=100e6, backupCount=9, delay=0 + ) file_handler.setFormatter(log_formatter_simple) logger_module.addHandler(file_handler) if config[k1].get("loglevel"): logger_module.setLevel(config[k1]["loglevel"]) - self.logger.critical("starting osm/lcm version {} {}".format(lcm_version, lcm_version_date)) + self.logger.critical( + "starting osm/lcm version {} {}".format(lcm_version, lcm_version_date) + ) # check version of N2VC # TODO enhance with int conversion or from 
distutils.version import LooseVersion # or with list(map(int, version.split("."))) if versiontuple(n2vc_version) < versiontuple(min_n2vc_version): - raise LcmException("Not compatible osm/N2VC version '{}'. Needed '{}' or higher".format( - n2vc_version, min_n2vc_version)) + raise LcmException( + "Not compatible osm/N2VC version '{}'. Needed '{}' or higher".format( + n2vc_version, min_n2vc_version + ) + ) # check version of common if versiontuple(common_version) < versiontuple(min_common_version): - raise LcmException("Not compatible osm/common version '{}'. Needed '{}' or higher".format( - common_version, min_common_version)) + raise LcmException( + "Not compatible osm/common version '{}'. Needed '{}' or higher".format( + common_version, min_common_version + ) + ) try: self.db = Database(config).instance.db @@ -170,8 +202,11 @@ class Lcm: config_message.pop("group_id", None) self.msg_admin.connect(config_message) else: - raise LcmException("Invalid configuration param '{}' at '[message]':'driver'".format( - config["message"]["driver"])) + raise LcmException( + "Invalid configuration param '{}' at '[message]':'driver'".format( + config["message"]["driver"] + ) + ) except (DbException, FsException, MsgException) as e: self.logger.critical(str(e), exc_info=True) raise LcmException(str(e)) @@ -181,10 +216,15 @@ class Lcm: if self.config.get("tsdb") and self.config["tsdb"].get("driver"): if self.config["tsdb"]["driver"] == "prometheus": - self.prometheus = prometheus.Prometheus(self.config["tsdb"], self.worker_id, self.loop) + self.prometheus = prometheus.Prometheus( + self.config["tsdb"], self.worker_id, self.loop + ) else: - raise LcmException("Invalid configuration param '{}' at '[tsdb]':'driver'".format( - config["tsdb"]["driver"])) + raise LcmException( + "Invalid configuration param '{}' at '[tsdb]':'driver'".format( + config["tsdb"]["driver"] + ) + ) else: self.prometheus = None @@ -206,22 +246,32 @@ class Lcm: ro_version = await ro_server.get_version() self.config["ro_config"]["ng"] = False if versiontuple(ro_version) < versiontuple(min_RO_version): - raise LcmException("Not compatible osm/RO version '{}'. Needed '{}' or higher".format( - ro_version, min_RO_version)) - self.logger.info("Connected to RO version {} new-generation version {}". - format(ro_version, self.config["ro_config"]["ng"])) + raise LcmException( + "Not compatible osm/RO version '{}'. Needed '{}' or higher".format( + ro_version, min_RO_version + ) + ) + self.logger.info( + "Connected to RO version {} new-generation version {}".format( + ro_version, self.config["ro_config"]["ng"] + ) + ) return except (ROClientException, NgRoException) as e: self.config["ro_config"]["uri"] = ro_uri tries -= 1 traceback.print_tb(e.__traceback__) - error_text = "Error while connecting to RO on {}: {}".format(self.config["ro_config"]["uri"], e) + error_text = "Error while connecting to RO on {}: {}".format( + self.config["ro_config"]["uri"], e + ) if tries <= 0: self.logger.critical(error_text) raise LcmException(error_text) if last_error != error_text: last_error = error_text - self.logger.error(error_text + ". Waiting until {} seconds".format(5*tries)) + self.logger.error( + error_text + ". 
Waiting until {} seconds".format(5 * tries) + ) await asyncio.sleep(5) async def test(self, param=None): @@ -236,11 +286,22 @@ class Lcm: while True: try: await self.msg_admin.aiowrite( - "admin", "ping", - {"from": "lcm", "to": "lcm", "worker_id": self.worker_id, "version": lcm_version}, - self.loop) + "admin", + "ping", + { + "from": "lcm", + "to": "lcm", + "worker_id": self.worker_id, + "version": lcm_version, + }, + self.loop, + ) # time between pings are low when it is not received and at starting - wait_time = self.ping_interval_boot if not kafka_has_received else self.ping_interval_pace + wait_time = ( + self.ping_interval_boot + if not kafka_has_received + else self.ping_interval_pace + ) if not self.pings_not_received: kafka_has_received = True self.pings_not_received += 1 @@ -255,10 +316,16 @@ class Lcm: # if not first_start is the first time after starting. So leave more time and wait # to allow kafka starts if consecutive_errors == 8 if not first_start else 30: - self.logger.error("Task kafka_read task exit error too many errors. Exception: {}".format(e)) + self.logger.error( + "Task kafka_read task exit error too many errors. Exception: {}".format( + e + ) + ) raise consecutive_errors += 1 - self.logger.error("Task kafka_read retrying after Exception {}".format(e)) + self.logger.error( + "Task kafka_read retrying after Exception {}".format(e) + ) wait_time = 2 if not first_start else 5 await asyncio.sleep(wait_time, loop=self.loop) @@ -266,7 +333,9 @@ class Lcm: order_id = 1 if topic != "admin" and command != "ping": - self.logger.debug("Task kafka_read receives {} {}: {}".format(topic, command, params)) + self.logger.debug( + "Task kafka_read receives {} {}: {}".format(topic, command, params) + ) self.consecutive_errors = 0 self.first_start = False order_id += 1 @@ -292,7 +361,11 @@ class Lcm: with open(health_check_file, "w") as f: f.write(str(time())) except Exception as e: - self.logger.error("Cannot write into '{}' for healthcheck: {}".format(health_check_file, e)) + self.logger.error( + "Cannot write into '{}' for healthcheck: {}".format( + health_check_file, e + ) + ) return elif topic == "pla": if command == "placement": @@ -302,12 +375,16 @@ class Lcm: if command == "create" or command == "created": k8scluster_id = params.get("_id") task = asyncio.ensure_future(self.k8scluster.create(params, order_id)) - self.lcm_tasks.register("k8scluster", k8scluster_id, order_id, "k8scluster_create", task) + self.lcm_tasks.register( + "k8scluster", k8scluster_id, order_id, "k8scluster_create", task + ) return elif command == "delete" or command == "deleted": k8scluster_id = params.get("_id") task = asyncio.ensure_future(self.k8scluster.delete(params, order_id)) - self.lcm_tasks.register("k8scluster", k8scluster_id, order_id, "k8scluster_delete", task) + self.lcm_tasks.register( + "k8scluster", k8scluster_id, order_id, "k8scluster_delete", task + ) return elif topic == "vca": if command == "create" or command == "created": @@ -325,12 +402,16 @@ class Lcm: k8srepo_id = params.get("_id") self.logger.debug("k8srepo_id = {}".format(k8srepo_id)) task = asyncio.ensure_future(self.k8srepo.create(params, order_id)) - self.lcm_tasks.register("k8srepo", k8srepo_id, order_id, "k8srepo_create", task) + self.lcm_tasks.register( + "k8srepo", k8srepo_id, order_id, "k8srepo_create", task + ) return elif command == "delete" or command == "deleted": k8srepo_id = params.get("_id") task = asyncio.ensure_future(self.k8srepo.delete(params, order_id)) - self.lcm_tasks.register("k8srepo", k8srepo_id, 
order_id, "k8srepo_delete", task) + self.lcm_tasks.register( + "k8srepo", k8srepo_id, order_id, "k8srepo_delete", task + ) return elif topic == "ns": if command == "instantiate": @@ -339,7 +420,9 @@ class Lcm: nslcmop_id = nslcmop["_id"] nsr_id = nslcmop["nsInstanceId"] task = asyncio.ensure_future(self.ns.instantiate(nsr_id, nslcmop_id)) - self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "ns_instantiate", task) + self.lcm_tasks.register( + "ns", nsr_id, nslcmop_id, "ns_instantiate", task + ) return elif command == "terminate": # self.logger.debug("Deleting NS {}".format(nsr_id)) @@ -354,8 +437,12 @@ class Lcm: nslcmop = params nslcmop_id = nslcmop["_id"] nsr_id = nslcmop["nsInstanceId"] - task = asyncio.ensure_future(self.ns.vca_status_refresh(nsr_id, nslcmop_id)) - self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "ns_vca_status_refresh", task) + task = asyncio.ensure_future( + self.ns.vca_status_refresh(nsr_id, nslcmop_id) + ) + self.lcm_tasks.register( + "ns", nsr_id, nslcmop_id, "ns_vca_status_refresh", task + ) return elif command == "action": # self.logger.debug("Update NS {}".format(nsr_id)) @@ -377,18 +464,30 @@ class Lcm: nsr_id = params try: db_nsr = self.db.get_one("nsrs", {"_id": nsr_id}) - print("nsr:\n _id={}\n operational-status: {}\n config-status: {}" - "\n detailed-status: {}\n deploy: {}\n tasks: {}" - "".format(nsr_id, db_nsr["operational-status"], db_nsr["config-status"], - db_nsr["detailed-status"], - db_nsr["_admin"]["deployed"], self.lcm_ns_tasks.get(nsr_id))) + print( + "nsr:\n _id={}\n operational-status: {}\n config-status: {}" + "\n detailed-status: {}\n deploy: {}\n tasks: {}" + "".format( + nsr_id, + db_nsr["operational-status"], + db_nsr["config-status"], + db_nsr["detailed-status"], + db_nsr["_admin"]["deployed"], + self.lcm_ns_tasks.get(nsr_id), + ) + ) except Exception as e: print("nsr {} not found: {}".format(nsr_id, e)) sys.stdout.flush() return elif command == "deleted": return # TODO cleaning of task just in case should be done - elif command in ("terminated", "instantiated", "scaled", "actioned"): # "scaled-cooldown-time" + elif command in ( + "terminated", + "instantiated", + "scaled", + "actioned", + ): # "scaled-cooldown-time" return elif topic == "nsi": # netslice LCM processes (instantiate, terminate, etc) if command == "instantiate": @@ -396,8 +495,12 @@ class Lcm: nsilcmop = params nsilcmop_id = nsilcmop["_id"] # slice operation id nsir_id = nsilcmop["netsliceInstanceId"] # slice record id - task = asyncio.ensure_future(self.netslice.instantiate(nsir_id, nsilcmop_id)) - self.lcm_tasks.register("nsi", nsir_id, nsilcmop_id, "nsi_instantiate", task) + task = asyncio.ensure_future( + self.netslice.instantiate(nsir_id, nsilcmop_id) + ) + self.lcm_tasks.register( + "nsi", nsir_id, nsilcmop_id, "nsi_instantiate", task + ) return elif command == "terminate": # self.logger.debug("Terminating Network Slice NS {}".format(nsilcmop["netsliceInstanceId"])) @@ -405,37 +508,57 @@ class Lcm: nsilcmop_id = nsilcmop["_id"] # slice operation id nsir_id = nsilcmop["netsliceInstanceId"] # slice record id self.lcm_tasks.cancel(topic, nsir_id) - task = asyncio.ensure_future(self.netslice.terminate(nsir_id, nsilcmop_id)) - self.lcm_tasks.register("nsi", nsir_id, nsilcmop_id, "nsi_terminate", task) + task = asyncio.ensure_future( + self.netslice.terminate(nsir_id, nsilcmop_id) + ) + self.lcm_tasks.register( + "nsi", nsir_id, nsilcmop_id, "nsi_terminate", task + ) return elif command == "show": nsir_id = params try: db_nsir = self.db.get_one("nsirs", {"_id": nsir_id}) 
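# [editor's note, not part of the patch] kafka_read_callback follows one
# dispatch convention throughout these hunks: each (topic, command) pair
# spawns an asyncio task and registers it under the record id via
# self.lcm_tasks, and operations such as "terminate" or "delete" cancel any
# in-flight task for that id first. A condensed sketch, using a hypothetical
# helper name:
#
#     import asyncio
#
#     def launch(lcm, topic, record_id, op_id, op_name, coro, cancel_first=False):
#         if cancel_first:
#             # e.g. a terminate arriving while an instantiate is running
#             lcm.lcm_tasks.cancel(topic, record_id)
#         task = asyncio.ensure_future(coro)
#         lcm.lcm_tasks.register(topic, record_id, op_id, op_name, task)
#         return task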
- print("nsir:\n _id={}\n operational-status: {}\n config-status: {}" - "\n detailed-status: {}\n deploy: {}\n tasks: {}" - "".format(nsir_id, db_nsir["operational-status"], db_nsir["config-status"], - db_nsir["detailed-status"], - db_nsir["_admin"]["deployed"], self.lcm_netslice_tasks.get(nsir_id))) + print( + "nsir:\n _id={}\n operational-status: {}\n config-status: {}" + "\n detailed-status: {}\n deploy: {}\n tasks: {}" + "".format( + nsir_id, + db_nsir["operational-status"], + db_nsir["config-status"], + db_nsir["detailed-status"], + db_nsir["_admin"]["deployed"], + self.lcm_netslice_tasks.get(nsir_id), + ) + ) except Exception as e: print("nsir {} not found: {}".format(nsir_id, e)) sys.stdout.flush() return elif command == "deleted": return # TODO cleaning of task just in case should be done - elif command in ("terminated", "instantiated", "scaled", "actioned"): # "scaled-cooldown-time" + elif command in ( + "terminated", + "instantiated", + "scaled", + "actioned", + ): # "scaled-cooldown-time" return elif topic == "vim_account": vim_id = params["_id"] if command in ("create", "created"): if not self.config["ro_config"].get("ng"): task = asyncio.ensure_future(self.vim.create(params, order_id)) - self.lcm_tasks.register("vim_account", vim_id, order_id, "vim_create", task) + self.lcm_tasks.register( + "vim_account", vim_id, order_id, "vim_create", task + ) return elif command == "delete" or command == "deleted": self.lcm_tasks.cancel(topic, vim_id) task = asyncio.ensure_future(self.vim.delete(params, order_id)) - self.lcm_tasks.register("vim_account", vim_id, order_id, "vim_delete", task) + self.lcm_tasks.register( + "vim_account", vim_id, order_id, "vim_delete", task + ) return elif command == "show": print("not implemented show with vim_account") @@ -444,7 +567,9 @@ class Lcm: elif command in ("edit", "edited"): if not self.config["ro_config"].get("ng"): task = asyncio.ensure_future(self.vim.edit(params, order_id)) - self.lcm_tasks.register("vim_account", vim_id, order_id, "vim_edit", task) + self.lcm_tasks.register( + "vim_account", vim_id, order_id, "vim_edit", task + ) return elif command == "deleted": return # TODO cleaning of task just in case should be done @@ -453,12 +578,16 @@ class Lcm: if command in ("create", "created"): if not self.config["ro_config"].get("ng"): task = asyncio.ensure_future(self.wim.create(params, order_id)) - self.lcm_tasks.register("wim_account", wim_id, order_id, "wim_create", task) + self.lcm_tasks.register( + "wim_account", wim_id, order_id, "wim_create", task + ) return elif command == "delete" or command == "deleted": self.lcm_tasks.cancel(topic, wim_id) task = asyncio.ensure_future(self.wim.delete(params, order_id)) - self.lcm_tasks.register("wim_account", wim_id, order_id, "wim_delete", task) + self.lcm_tasks.register( + "wim_account", wim_id, order_id, "wim_delete", task + ) return elif command == "show": print("not implemented show with wim_account") @@ -466,7 +595,9 @@ class Lcm: return elif command in ("edit", "edited"): task = asyncio.ensure_future(self.wim.edit(params, order_id)) - self.lcm_tasks.register("wim_account", wim_id, order_id, "wim_edit", task) + self.lcm_tasks.register( + "wim_account", wim_id, order_id, "wim_edit", task + ) return elif command == "deleted": return # TODO cleaning of task just in case should be done @@ -475,7 +606,9 @@ class Lcm: if command in ("create", "created"): if not self.config["ro_config"].get("ng"): task = asyncio.ensure_future(self.sdn.create(params, order_id)) - self.lcm_tasks.register("sdn", _sdn_id, 
order_id, "sdn_create", task) + self.lcm_tasks.register( + "sdn", _sdn_id, order_id, "sdn_create", task + ) return elif command == "delete" or command == "deleted": self.lcm_tasks.cancel(topic, _sdn_id) @@ -491,17 +624,36 @@ class Lcm: self.logger.critical("unknown topic {} and command '{}'".format(topic, command)) async def kafka_read(self): - self.logger.debug("Task kafka_read Enter with worker_id={}".format(self.worker_id)) + self.logger.debug( + "Task kafka_read Enter with worker_id={}".format(self.worker_id) + ) # future = asyncio.Future() self.consecutive_errors = 0 self.first_start = True while self.consecutive_errors < 10: try: - topics = ("ns", "vim_account", "wim_account", "sdn", "nsi", "k8scluster", "vca", "k8srepo", "pla") - topics_admin = ("admin", ) + topics = ( + "ns", + "vim_account", + "wim_account", + "sdn", + "nsi", + "k8scluster", + "vca", + "k8srepo", + "pla", + ) + topics_admin = ("admin",) await asyncio.gather( - self.msg.aioread(topics, self.loop, self.kafka_read_callback, from_beginning=True), - self.msg_admin.aioread(topics_admin, self.loop, self.kafka_read_callback, group_id=False) + self.msg.aioread( + topics, self.loop, self.kafka_read_callback, from_beginning=True + ), + self.msg_admin.aioread( + topics_admin, + self.loop, + self.kafka_read_callback, + group_id=False, + ), ) except LcmExceptionExit: @@ -511,10 +663,16 @@ class Lcm: # if not first_start is the first time after starting. So leave more time and wait # to allow kafka starts if self.consecutive_errors == 8 if not self.first_start else 30: - self.logger.error("Task kafka_read task exit error too many errors. Exception: {}".format(e)) + self.logger.error( + "Task kafka_read task exit error too many errors. Exception: {}".format( + e + ) + ) raise self.consecutive_errors += 1 - self.logger.error("Task kafka_read retrying after Exception {}".format(e)) + self.logger.error( + "Task kafka_read retrying after Exception {}".format(e) + ) wait_time = 2 if not self.first_start else 5 await asyncio.sleep(wait_time, loop=self.loop) @@ -526,24 +684,30 @@ class Lcm: # check RO version self.loop.run_until_complete(self.check_RO_version()) - self.ns = ns.NsLcm(self.msg, self.lcm_tasks, self.config, self.loop, self.prometheus) - self.netslice = netslice.NetsliceLcm(self.msg, self.lcm_tasks, self.config, self.loop, - self.ns) + self.ns = ns.NsLcm( + self.msg, self.lcm_tasks, self.config, self.loop, self.prometheus + ) + self.netslice = netslice.NetsliceLcm( + self.msg, self.lcm_tasks, self.config, self.loop, self.ns + ) self.vim = vim_sdn.VimLcm(self.msg, self.lcm_tasks, self.config, self.loop) self.wim = vim_sdn.WimLcm(self.msg, self.lcm_tasks, self.config, self.loop) self.sdn = vim_sdn.SdnLcm(self.msg, self.lcm_tasks, self.config, self.loop) - self.k8scluster = vim_sdn.K8sClusterLcm(self.msg, self.lcm_tasks, self.config, self.loop) + self.k8scluster = vim_sdn.K8sClusterLcm( + self.msg, self.lcm_tasks, self.config, self.loop + ) self.vca = vim_sdn.VcaLcm(self.msg, self.lcm_tasks, self.config, self.loop) - self.k8srepo = vim_sdn.K8sRepoLcm(self.msg, self.lcm_tasks, self.config, self.loop) + self.k8srepo = vim_sdn.K8sRepoLcm( + self.msg, self.lcm_tasks, self.config, self.loop + ) # configure tsdb prometheus if self.prometheus: self.loop.run_until_complete(self.prometheus.start()) - self.loop.run_until_complete(asyncio.gather( - self.kafka_read(), - self.kafka_ping() - )) + self.loop.run_until_complete( + asyncio.gather(self.kafka_read(), self.kafka_ping()) + ) # TODO # self.logger.debug("Terminating cancelling 
creation tasks") # self.lcm_tasks.cancel("ALL", "create") @@ -574,7 +738,15 @@ class Lcm: with open(config_file) as f: conf = yaml.load(f, Loader=yaml.Loader) # Ensure all sections are not empty - for k in ("global", "timeout", "RO", "VCA", "database", "storage", "message"): + for k in ( + "global", + "timeout", + "RO", + "VCA", + "database", + "storage", + "message", + ): if not conf.get(k): conf[k] = {} @@ -594,27 +766,29 @@ class Lcm: else: conf[subject][item] = v except Exception as e: - self.logger.warning("skipping environ '{}' on exception '{}'".format(k, e)) + self.logger.warning( + "skipping environ '{}' on exception '{}'".format(k, e) + ) # backward compatibility of VCA parameters - if 'pubkey' in conf["VCA"]: - conf["VCA"]['public_key'] = conf["VCA"].pop('pubkey') - if 'cacert' in conf["VCA"]: - conf["VCA"]['ca_cert'] = conf["VCA"].pop('cacert') - if 'apiproxy' in conf["VCA"]: - conf["VCA"]['api_proxy'] = conf["VCA"].pop('apiproxy') + if "pubkey" in conf["VCA"]: + conf["VCA"]["public_key"] = conf["VCA"].pop("pubkey") + if "cacert" in conf["VCA"]: + conf["VCA"]["ca_cert"] = conf["VCA"].pop("cacert") + if "apiproxy" in conf["VCA"]: + conf["VCA"]["api_proxy"] = conf["VCA"].pop("apiproxy") - if 'enableosupgrade' in conf["VCA"]: - conf["VCA"]['enable_os_upgrade'] = conf["VCA"].pop('enableosupgrade') - if isinstance(conf["VCA"].get('enable_os_upgrade'), str): - if conf["VCA"]['enable_os_upgrade'].lower() == 'false': - conf["VCA"]['enable_os_upgrade'] = False - elif conf["VCA"]['enable_os_upgrade'].lower() == 'true': - conf["VCA"]['enable_os_upgrade'] = True + if "enableosupgrade" in conf["VCA"]: + conf["VCA"]["enable_os_upgrade"] = conf["VCA"].pop("enableosupgrade") + if isinstance(conf["VCA"].get("enable_os_upgrade"), str): + if conf["VCA"]["enable_os_upgrade"].lower() == "false": + conf["VCA"]["enable_os_upgrade"] = False + elif conf["VCA"]["enable_os_upgrade"].lower() == "true": + conf["VCA"]["enable_os_upgrade"] = True - if 'aptmirror' in conf["VCA"]: - conf["VCA"]['apt_mirror'] = conf["VCA"].pop('aptmirror') + if "aptmirror" in conf["VCA"]: + conf["VCA"]["apt_mirror"] = conf["VCA"].pop("aptmirror") return conf except Exception as e: @@ -633,26 +807,30 @@ class Lcm: with open("/proc/self/cgroup", "r") as f: text_id_ = f.readline() _, _, text_id = text_id_.rpartition("/") - text_id = text_id.replace('\n', '')[:12] + text_id = text_id.replace("\n", "")[:12] if text_id: return text_id except Exception: pass # Return a random id - return ''.join(random_choice("0123456789abcdef") for _ in range(12)) + return "".join(random_choice("0123456789abcdef") for _ in range(12)) def usage(): - print("""Usage: {} [options] + print( + """Usage: {} [options] -c|--config [configuration_file]: loads the configuration file (default: ./lcm.cfg) --health-check: do not run lcm, but inspect kafka bus to determine if lcm is healthy -h|--help: shows this help - """.format(sys.argv[0])) + """.format( + sys.argv[0] + ) + ) # --log-socket-host HOST: send logs to this host") # --log-socket-port PORT: send logs using this port (default: 9022)") -if __name__ == '__main__': +if __name__ == "__main__": try: # print("SYS.PATH='{}'".format(sys.path)) @@ -662,7 +840,9 @@ if __name__ == '__main__': # --config value # --help # --health-check - opts, args = getopt.getopt(sys.argv[1:], "hc:", ["config=", "help", "health-check"]) + opts, args = getopt.getopt( + sys.argv[1:], "hc:", ["config=", "help", "health-check"] + ) # TODO add "log-socket-host=", "log-socket-port=", "log-file=" config_file = None for o, a in opts: @@ 
-673,6 +853,7 @@ if __name__ == '__main__': config_file = a elif o == "--health-check": from osm_lcm.lcm_hc import health_check + health_check(health_check_file, Lcm.ping_interval_pace) # elif o == "--log-socket-port": # log_socket_port = a @@ -685,14 +866,24 @@ if __name__ == '__main__': if config_file: if not path.isfile(config_file): - print("configuration file '{}' does not exist".format(config_file), file=sys.stderr) + print( + "configuration file '{}' does not exist".format(config_file), + file=sys.stderr, + ) exit(1) else: - for config_file in (__file__[:__file__.rfind(".")] + ".cfg", "./lcm.cfg", "/etc/osm/lcm.cfg"): + for config_file in ( + __file__[: __file__.rfind(".")] + ".cfg", + "./lcm.cfg", + "/etc/osm/lcm.cfg", + ): if path.isfile(config_file): break else: - print("No configuration file 'lcm.cfg' found neither at local folder nor at /etc/osm/", file=sys.stderr) + print( + "No configuration file 'lcm.cfg' found neither at local folder nor at /etc/osm/", + file=sys.stderr, + ) exit(1) lcm = Lcm(config_file) lcm.start() diff --git a/osm_lcm/lcm_hc.py b/osm_lcm/lcm_hc.py index 701e5b6..0a685a2 100644 --- a/osm_lcm/lcm_hc.py +++ b/osm_lcm/lcm_hc.py @@ -35,7 +35,9 @@ def health_check(health_check_file=None, ping_interval_pace=120): with open(health_check_file, "r") as f: last_received_ping = f.read() - if time() - float(last_received_ping) < 2 * ping_interval_pace: # allow one ping not received every two + if ( + time() - float(last_received_ping) < 2 * ping_interval_pace + ): # allow one ping not received every two exit(0) except Exception as e: print(e, file=stderr) @@ -44,5 +46,5 @@ def health_check(health_check_file=None, ping_interval_pace=120): exit(1) -if __name__ == '__main__': +if __name__ == "__main__": health_check() diff --git a/osm_lcm/lcm_helm_conn.py b/osm_lcm/lcm_helm_conn.py index 845a413..40624ad 100644 --- a/osm_lcm/lcm_helm_conn.py +++ b/osm_lcm/lcm_helm_conn.py @@ -35,16 +35,18 @@ from osm_lcm.data_utils.filesystem.filesystem import Filesystem from n2vc.n2vc_conn import N2VCConnector from n2vc.k8s_helm_conn import K8sHelmConnector from n2vc.k8s_helm3_conn import K8sHelm3Connector -from n2vc.exceptions import N2VCBadArgumentsException, N2VCException, N2VCExecutionException +from n2vc.exceptions import ( + N2VCBadArgumentsException, + N2VCException, + N2VCExecutionException, +) from osm_lcm.lcm_utils import deep_get def retryer(max_wait_time_var="_initial_retry_time", delay_time_var="_retry_delay"): def wrapper(func): - retry_exceptions = ( - ConnectionRefusedError - ) + retry_exceptions = ConnectionRefusedError @functools.wraps(func) async def wrapped(*args, **kwargs): @@ -69,7 +71,9 @@ def retryer(max_wait_time_var="_initial_retry_time", delay_time_var="_retry_dela continue else: return ConnectionRefusedError + return wrapped + return wrapper @@ -85,11 +89,13 @@ class LCMHelmConn(N2VCConnector, LcmBase): # Time beetween retries, retry time after a connection error is raised _EE_RETRY_DELAY = 10 - def __init__(self, - log: object = None, - loop: object = None, - vca_config: dict = None, - on_update_db=None, ): + def __init__( + self, + log: object = None, + loop: object = None, + vca_config: dict = None, + on_update_db=None, + ): """ Initialize EE helm connector. 
""" @@ -99,12 +105,7 @@ class LCMHelmConn(N2VCConnector, LcmBase): # parent class constructor N2VCConnector.__init__( - self, - log=log, - loop=loop, - on_update_db=on_update_db, - db=self.db, - fs=self.fs + self, log=log, loop=loop, on_update_db=on_update_db, db=self.db, fs=self.fs ) self.vca_config = vca_config @@ -121,14 +122,18 @@ class LCMHelmConn(N2VCConnector, LcmBase): self.log.debug("Initial retry time: {}".format(self._initial_retry_time)) else: self._initial_retry_time = self._MAX_INITIAL_RETRY_TIME - self.log.debug("Applied default retry time: {}".format(self._initial_retry_time)) + self.log.debug( + "Applied default retry time: {}".format(self._initial_retry_time) + ) if self.vca_config and self.vca_config.get("eegrpctimeout"): self._max_retry_time = self.vca_config.get("eegrpctimeout") self.log.debug("Retry time: {}".format(self._max_retry_time)) else: self._max_retry_time = self._MAX_RETRY_TIME - self.log.debug("Applied default retry time: {}".format(self._max_retry_time)) + self.log.debug( + "Applied default retry time: {}".format(self._max_retry_time) + ) # initialize helm connector for helmv2 and helmv3 self._k8sclusterhelm2 = K8sHelmConnector( @@ -153,16 +158,19 @@ class LCMHelmConn(N2VCConnector, LcmBase): self.log.info("Helm N2VC connector initialized") # TODO - ¿reuse_ee_id? - async def create_execution_environment(self, - namespace: str, - db_dict: dict, - reuse_ee_id: str = None, - progress_timeout: float = None, - total_timeout: float = None, - config: dict = None, - artifact_path: str = None, - vca_type: str = None, - *kargs, **kwargs) -> (str, dict): + async def create_execution_environment( + self, + namespace: str, + db_dict: dict, + reuse_ee_id: str = None, + progress_timeout: float = None, + total_timeout: float = None, + config: dict = None, + artifact_path: str = None, + vca_type: str = None, + *kargs, + **kwargs, + ) -> (str, dict): """ Creates a new helm execution environment deploying the helm-chat indicated in the attifact_path @@ -184,8 +192,7 @@ class LCMHelmConn(N2VCConnector, LcmBase): self.log.info( "create_execution_environment: namespace: {}, artifact_path: {}, db_dict: {}, " - "reuse_ee_id: {}".format( - namespace, artifact_path, db_dict, reuse_ee_id) + "reuse_ee_id: {}".format(namespace, artifact_path, db_dict, reuse_ee_id) ) # Validate artifact-path is provided @@ -233,23 +240,29 @@ class LCMHelmConn(N2VCConnector, LcmBase): db_dict=db_dict, kdu_model=full_path, ) - await self._k8sclusterhelm2.install(system_cluster_uuid, kdu_model=full_path, - kdu_instance=helm_id, - namespace=self._KUBECTL_OSM_NAMESPACE, - params=config, - db_dict=db_dict, - timeout=progress_timeout) + await self._k8sclusterhelm2.install( + system_cluster_uuid, + kdu_model=full_path, + kdu_instance=helm_id, + namespace=self._KUBECTL_OSM_NAMESPACE, + params=config, + db_dict=db_dict, + timeout=progress_timeout, + ) else: helm_id = self._k8sclusterhelm2.generate_kdu_instance_name( db_dict=db_dict, kdu_model=full_path, ) - await self._k8sclusterhelm3.install(system_cluster_uuid, kdu_model=full_path, - kdu_instance=helm_id, - namespace=self._KUBECTL_OSM_NAMESPACE, - params=config, - db_dict=db_dict, - timeout=progress_timeout) + await self._k8sclusterhelm3.install( + system_cluster_uuid, + kdu_model=full_path, + kdu_instance=helm_id, + namespace=self._KUBECTL_OSM_NAMESPACE, + params=config, + db_dict=db_dict, + timeout=progress_timeout, + ) ee_id = "{}:{}.{}".format(vca_type, self._KUBECTL_OSM_NAMESPACE, helm_id) return ee_id, None @@ -259,9 +272,16 @@ class 
LCMHelmConn(N2VCConnector, LcmBase): self.log.error("Error deploying chart ee: {}".format(e), exc_info=True) raise N2VCException("Error deploying chart ee: {}".format(e)) - async def register_execution_environment(self, namespace: str, credentials: dict, db_dict: dict, - progress_timeout: float = None, total_timeout: float = None, - *kargs, **kwargs) -> str: + async def register_execution_environment( + self, + namespace: str, + credentials: dict, + db_dict: dict, + progress_timeout: float = None, + total_timeout: float = None, + *kargs, + **kwargs, + ) -> str: # nothing to do pass @@ -301,8 +321,7 @@ class LCMHelmConn(N2VCConnector, LcmBase): """ self.log.info( - "get_ee_ssh_public_key: ee_id: {}, db_dict: {}".format( - ee_id, db_dict) + "get_ee_ssh_public_key: ee_id: {}, db_dict: {}".format(ee_id, db_dict) ) # check arguments @@ -325,14 +344,14 @@ class LCMHelmConn(N2VCConnector, LcmBase): raise N2VCException("Error obtaining ee ssh_ke: {}".format(e)) async def exec_primitive( - self, - ee_id: str, - primitive_name: str, - params_dict: dict, - db_dict: dict = None, - progress_timeout: float = None, - total_timeout: float = None, - **kwargs, + self, + ee_id: str, + primitive_name: str, + params_dict: dict, + db_dict: dict = None, + progress_timeout: float = None, + total_timeout: float = None, + **kwargs, ) -> str: """ Execute a primitive in the execution environment @@ -354,9 +373,11 @@ class LCMHelmConn(N2VCConnector, LcmBase): :returns str: primitive result, if ok. It raises exceptions in case of fail """ - self.log.info("exec primitive for ee_id : {}, primitive_name: {}, params_dict: {}, db_dict: {}".format( - ee_id, primitive_name, params_dict, db_dict - )) + self.log.info( + "exec primitive for ee_id : {}, primitive_name: {}, params_dict: {}, db_dict: {}".format( + ee_id, primitive_name, params_dict, db_dict + ) + ) # check arguments if ee_id is None or len(ee_id) == 0: @@ -380,12 +401,20 @@ class LCMHelmConn(N2VCConnector, LcmBase): if primitive_name == "config": try: # Execute config primitive, higher timeout to check the case ee is starting - status, detailed_message = await self._execute_config_primitive(ip_addr, params_dict, db_dict=db_dict) - self.log.debug("Executed config primitive ee_id_ {}, status: {}, message: {}".format( - ee_id, status, detailed_message)) + status, detailed_message = await self._execute_config_primitive( + ip_addr, params_dict, db_dict=db_dict + ) + self.log.debug( + "Executed config primitive ee_id_ {}, status: {}, message: {}".format( + ee_id, status, detailed_message + ) + ) if status != "OK": - self.log.error("Error configuring helm ee, status: {}, message: {}".format( - status, detailed_message)) + self.log.error( + "Error configuring helm ee, status: {}, message: {}".format( + status, detailed_message + ) + ) raise N2VCExecutionException( message="Error configuring helm ee_id: {}, status: {}, message: {}: ".format( ee_id, status, detailed_message @@ -395,23 +424,26 @@ class LCMHelmConn(N2VCConnector, LcmBase): except Exception as e: self.log.error("Error configuring helm ee: {}".format(e)) raise N2VCExecutionException( - message="Error configuring helm ee_id: {}, {}".format( - ee_id, e - ), + message="Error configuring helm ee_id: {}, {}".format(ee_id, e), primitive_name=primitive_name, ) return "CONFIG OK" else: try: # Execute primitive - status, detailed_message = await self._execute_primitive(ip_addr, primitive_name, - params_dict, db_dict=db_dict) - self.log.debug("Executed primitive {} ee_id_ {}, status: {}, message: {}".format( - 
primitive_name, ee_id, status, detailed_message)) + status, detailed_message = await self._execute_primitive( + ip_addr, primitive_name, params_dict, db_dict=db_dict + ) + self.log.debug( + "Executed primitive {} ee_id_ {}, status: {}, message: {}".format( + primitive_name, ee_id, status, detailed_message + ) + ) if status != "OK" and status != "PROCESSING": self.log.error( "Execute primitive {} returned not ok status: {}, message: {}".format( - primitive_name, status, detailed_message) + primitive_name, status, detailed_message + ) ) raise N2VCExecutionException( message="Execute primitive {} returned not ok status: {}, message: {}".format( @@ -479,10 +511,14 @@ class LCMHelmConn(N2VCConnector, LcmBase): except N2VCException: raise except Exception as e: - self.log.error("Error deleting ee id: {}: {}".format(ee_id, e), exc_info=True) + self.log.error( + "Error deleting ee id: {}: {}".format(ee_id, e), exc_info=True + ) raise N2VCException("Error deleting ee id {}: {}".format(ee_id, e)) - async def delete_namespace(self, namespace: str, db_dict: dict = None, total_timeout: float = None): + async def delete_namespace( + self, namespace: str, db_dict: dict = None, total_timeout: float = None + ): # method not implemented for this connector, execution environments must be deleted individually pass @@ -495,7 +531,8 @@ class LCMHelmConn(N2VCConnector, LcmBase): progress_timeout: float = None, total_timeout: float = None, config: dict = None, - *kargs, **kwargs + *kargs, + **kwargs, ) -> str: pass @@ -512,13 +549,19 @@ class LCMHelmConn(N2VCConnector, LcmBase): @retryer(max_wait_time_var="_initial_retry_time", delay_time_var="_retry_delay") async def _execute_config_primitive(self, ip_addr, params, db_dict=None): - return await self._execute_primitive_internal(ip_addr, "config", params, db_dict=db_dict) + return await self._execute_primitive_internal( + ip_addr, "config", params, db_dict=db_dict + ) @retryer(max_wait_time_var="_max_retry_time", delay_time_var="_retry_delay") async def _execute_primitive(self, ip_addr, primitive_name, params, db_dict=None): - return await self._execute_primitive_internal(ip_addr, primitive_name, params, db_dict=db_dict) + return await self._execute_primitive_internal( + ip_addr, primitive_name, params, db_dict=db_dict + ) - async def _execute_primitive_internal(self, ip_addr, primitive_name, params, db_dict=None): + async def _execute_primitive_internal( + self, ip_addr, primitive_name, params, db_dict=None + ): channel = Channel(ip_addr, self._ee_service_port) try: @@ -526,16 +569,25 @@ class LCMHelmConn(N2VCConnector, LcmBase): async with stub.RunPrimitive.open() as stream: primitive_id = str(uuid.uuid1()) result = None - self.log.debug("Execute primitive internal: id:{}, name:{}, params: {}". 
- format(primitive_id, primitive_name, params)) + self.log.debug( + "Execute primitive internal: id:{}, name:{}, params: {}".format( + primitive_id, primitive_name, params + ) + ) await stream.send_message( - PrimitiveRequest(id=primitive_id, name=primitive_name, params=yaml.dump(params)), end=True) + PrimitiveRequest( + id=primitive_id, name=primitive_name, params=yaml.dump(params) + ), + end=True, + ) async for reply in stream: self.log.debug("Received reply: {}".format(reply)) result = reply # If db_dict provided write notifs in database if db_dict: - self._write_op_detailed_status(db_dict, reply.status, reply.detailed_message) + self._write_op_detailed_status( + db_dict, reply.status, reply.detailed_message + ) if result: return reply.status, reply.detailed_message else: @@ -564,24 +616,34 @@ class LCMHelmConn(N2VCConnector, LcmBase): async def _get_system_cluster_id(self): if not self._system_cluster_id: - db_k8cluster = self.db.get_one("k8sclusters", {"name": self._KUBECTL_OSM_CLUSTER_NAME}) + db_k8cluster = self.db.get_one( + "k8sclusters", {"name": self._KUBECTL_OSM_CLUSTER_NAME} + ) k8s_hc_id = deep_get(db_k8cluster, ("_admin", "helm-chart-v3", "id")) if not k8s_hc_id: try: # backward compatibility for existing clusters that have not been initialized for helm v3 cluster_id = db_k8cluster.get("_id") k8s_credentials = yaml.safe_dump(db_k8cluster.get("credentials")) - k8s_hc_id, uninstall_sw = await self._k8sclusterhelm3.init_env(k8s_credentials, - reuse_cluster_uuid=cluster_id) - db_k8scluster_update = {"_admin.helm-chart-v3.error_msg": None, - "_admin.helm-chart-v3.id": k8s_hc_id, - "_admin.helm-chart-v3}.created": uninstall_sw, - "_admin.helm-chart-v3.operationalState": "ENABLED"} + k8s_hc_id, uninstall_sw = await self._k8sclusterhelm3.init_env( + k8s_credentials, reuse_cluster_uuid=cluster_id + ) + db_k8scluster_update = { + "_admin.helm-chart-v3.error_msg": None, + "_admin.helm-chart-v3.id": k8s_hc_id, + "_admin.helm-chart-v3}.created": uninstall_sw, + "_admin.helm-chart-v3.operationalState": "ENABLED", + } self.update_db_2("k8sclusters", cluster_id, db_k8scluster_update) except Exception as e: - self.log.error("error initializing helm-v3 cluster: {}".format(str(e))) - raise N2VCException("K8s system cluster '{}' has not been initialized for helm-chart-v3".format( - cluster_id)) + self.log.error( + "error initializing helm-v3 cluster: {}".format(str(e)) + ) + raise N2VCException( + "K8s system cluster '{}' has not been initialized for helm-chart-v3".format( + cluster_id + ) + ) self._system_cluster_id = k8s_hc_id return self._system_cluster_id @@ -591,6 +653,6 @@ class LCMHelmConn(N2VCConnector, LcmBase): namespace.helm_id for backward compatibility If exists helm version can be helm-v3 or helm (helm-v2 old version) """ - version, _, part_id = ee_id.rpartition(':') - namespace, _, helm_id = part_id.rpartition('.') + version, _, part_id = ee_id.rpartition(":") + namespace, _, helm_id = part_id.rpartition(".") return version, namespace, helm_id diff --git a/osm_lcm/lcm_utils.py b/osm_lcm/lcm_utils.py index a1569c1..16d5b33 100644 --- a/osm_lcm/lcm_utils.py +++ b/osm_lcm/lcm_utils.py @@ -97,7 +97,6 @@ def populate_dict(target_dict, key_list, value): class LcmBase: - def __init__(self, msg, logger): """ @@ -145,24 +144,23 @@ class TaskRegistry(LcmBase): """ # NS/NSI: "services" VIM/WIM/SDN: "accounts" - topic_service_list = ['ns', 'nsi'] - topic_account_list = ['vim', 'wim', 'sdn', 'k8scluster', 'vca', 'k8srepo'] + topic_service_list = ["ns", "nsi"] + topic_account_list = ["vim", 
"wim", "sdn", "k8scluster", "vca", "k8srepo"] # Map topic to InstanceID - topic2instid_dict = { - 'ns': 'nsInstanceId', - 'nsi': 'netsliceInstanceId'} + topic2instid_dict = {"ns": "nsInstanceId", "nsi": "netsliceInstanceId"} # Map topic to DB table name topic2dbtable_dict = { - 'ns': 'nslcmops', - 'nsi': 'nsilcmops', - 'vim': 'vim_accounts', - 'wim': 'wim_accounts', - 'sdn': 'sdns', - 'k8scluster': 'k8sclusters', - 'vca': 'vca', - 'k8srepo': 'k8srepos'} + "ns": "nslcmops", + "nsi": "nsilcmops", + "vim": "vim_accounts", + "wim": "wim_accounts", + "sdn": "sdns", + "k8scluster": "k8sclusters", + "vca": "vca", + "k8srepo": "k8srepos", + } def __init__(self, worker_id=None, logger=None): self.task_registry = { @@ -271,7 +269,7 @@ class TaskRegistry(LcmBase): def _get_account_and_op_HA(self, op_id): if not op_id: return None, None - account_id, _, op_index = op_id.rpartition(':') + account_id, _, op_index = op_id.rpartition(":") if not account_id or not op_index.isdigit(): return None, None return account_id, op_index @@ -280,7 +278,7 @@ class TaskRegistry(LcmBase): def _get_instance_id_HA(self, topic, op_type, op_id): _id = None # Special operation 'ANY', for SDN account associated to a VIM account: op_id as '_id' - if op_type == 'ANY': + if op_type == "ANY": _id = op_id # NS/NSI: Use op_id as '_id' elif self._is_service_type_HA(topic): @@ -295,8 +293,8 @@ class TaskRegistry(LcmBase): _filter = {} # Special operation 'ANY', for SDN account associated to a VIM account: op_id as '_id' # In this special case, the timestamp is ignored - if op_type == 'ANY': - _filter = {'operationState': 'PROCESSING'} + if op_type == "ANY": + _filter = {"operationState": "PROCESSING"} # Otherwise, get 'startTime' timestamp for this operation else: # NS/NSI @@ -305,19 +303,23 @@ class TaskRegistry(LcmBase): starttime_this_op = db_lcmop.get("startTime") instance_id_label = self.topic2instid_dict.get(topic) instance_id = db_lcmop.get(instance_id_label) - _filter = {instance_id_label: instance_id, - 'operationState': 'PROCESSING', - 'startTime.lt': starttime_this_op, - "_admin.modified.gt": now - 2*3600, # ignore if tow hours of inactivity - } + _filter = { + instance_id_label: instance_id, + "operationState": "PROCESSING", + "startTime.lt": starttime_this_op, + "_admin.modified.gt": now + - 2 * 3600, # ignore if tow hours of inactivity + } # VIM/WIM/SDN/K8scluster elif self._is_account_type_HA(topic): _, op_index = self._get_account_and_op_HA(op_id) - _ops = db_lcmop['_admin']['operations'] + _ops = db_lcmop["_admin"]["operations"] _this_op = _ops[int(op_index)] - starttime_this_op = _this_op.get('startTime', None) - _filter = {'operationState': 'PROCESSING', - 'startTime.lt': starttime_this_op} + starttime_this_op = _this_op.get("startTime", None) + _filter = { + "operationState": "PROCESSING", + "startTime.lt": starttime_this_op, + } return _filter # Get DB params for any topic and operation @@ -326,19 +328,24 @@ class TaskRegistry(LcmBase): update_dict = {} # NS/NSI if self._is_service_type_HA(topic): - q_filter = {'_id': op_id, '_admin.worker': None} - update_dict = {'_admin.worker': self.worker_id} + q_filter = {"_id": op_id, "_admin.worker": None} + update_dict = {"_admin.worker": self.worker_id} # VIM/WIM/SDN elif self._is_account_type_HA(topic): account_id, op_index = self._get_account_and_op_HA(op_id) if not account_id: return None, None - if op_type == 'create': + if op_type == "create": # Creating a VIM/WIM/SDN account implies setting '_admin.current_operation' = 0 op_index = 0 - q_filter = {'_id': 
account_id, "_admin.operations.{}.worker".format(op_index): None} - update_dict = {'_admin.operations.{}.worker'.format(op_index): self.worker_id, - '_admin.current_operation': op_index} + q_filter = { + "_id": account_id, + "_admin.operations.{}.worker".format(op_index): None, + } + update_dict = { + "_admin.operations.{}.worker".format(op_index): self.worker_id, + "_admin.current_operation": op_index, + } return q_filter, update_dict def lock_HA(self, topic, op_type, op_id): @@ -366,24 +373,36 @@ class TaskRegistry(LcmBase): # Try to lock this task db_table_name = self.topic2dbtable_dict[topic] q_filter, update_dict = self._get_dbparams_for_lock_HA(topic, op_type, op_id) - db_lock_task = self.db.set_one(db_table_name, - q_filter=q_filter, - update_dict=update_dict, - fail_on_empty=False) + db_lock_task = self.db.set_one( + db_table_name, + q_filter=q_filter, + update_dict=update_dict, + fail_on_empty=False, + ) if db_lock_task is None: - self.logger.debug("Task {} operation={} already locked by another worker".format(topic, op_id)) + self.logger.debug( + "Task {} operation={} already locked by another worker".format( + topic, op_id + ) + ) return False else: # Set 'detailed-status' to 'In progress' for VIM/WIM/SDN operations if self._is_account_type_HA(topic): - detailed_status = 'In progress' + detailed_status = "In progress" account_id, op_index = self._get_account_and_op_HA(op_id) - q_filter = {'_id': account_id} - update_dict = {'_admin.operations.{}.detailed-status'.format(op_index): detailed_status} - self.db.set_one(db_table_name, - q_filter=q_filter, - update_dict=update_dict, - fail_on_empty=False) + q_filter = {"_id": account_id} + update_dict = { + "_admin.operations.{}.detailed-status".format( + op_index + ): detailed_status + } + self.db.set_one( + db_table_name, + q_filter=q_filter, + update_dict=update_dict, + fail_on_empty=False, + ) return True def unlock_HA(self, topic, op_type, op_id, operationState, detailed_status): @@ -406,15 +425,19 @@ class TaskRegistry(LcmBase): # If this is a 'delete' operation, the account may have been deleted (SUCCESS) or may still exist (FAILED) # If the account exist, register the HA task. 
# Update DB for HA tasks - q_filter = {'_id': account_id} - update_dict = {'_admin.operations.{}.operationState'.format(op_index): operationState, - '_admin.operations.{}.detailed-status'.format(op_index): detailed_status, - '_admin.operations.{}.worker'.format(op_index): None, - '_admin.current_operation': None} - self.db.set_one(db_table_name, - q_filter=q_filter, - update_dict=update_dict, - fail_on_empty=False) + q_filter = {"_id": account_id} + update_dict = { + "_admin.operations.{}.operationState".format(op_index): operationState, + "_admin.operations.{}.detailed-status".format(op_index): detailed_status, + "_admin.operations.{}.worker".format(op_index): None, + "_admin.current_operation": None, + } + self.db.set_one( + db_table_name, + q_filter=q_filter, + update_dict=update_dict, + fail_on_empty=False, + ) return async def waitfor_related_HA(self, topic, op_type, op_id=None): @@ -423,7 +446,9 @@ class TaskRegistry(LcmBase): """ # Backward compatibility - if not (self._is_service_type_HA(topic) or self._is_account_type_HA(topic)) and (op_id is None): + if not ( + self._is_service_type_HA(topic) or self._is_account_type_HA(topic) + ) and (op_id is None): return # Get DB table name @@ -432,9 +457,7 @@ class TaskRegistry(LcmBase): # Get instance ID _id = self._get_instance_id_HA(topic, op_type, op_id) _filter = {"_id": _id} - db_lcmop = self.db.get_one(db_table_name, - _filter, - fail_on_empty=False) + db_lcmop = self.db.get_one(db_table_name, _filter, fail_on_empty=False) if not db_lcmop: return @@ -442,17 +465,18 @@ class TaskRegistry(LcmBase): _filter = self._get_waitfor_filter_HA(db_lcmop, topic, op_type, op_id) # For HA, get list of tasks from DB instead of from dictionary (in-memory) variable. - timeout_wait_for_task = 3600 # Max time (seconds) to wait for a related task to finish + timeout_wait_for_task = ( + 3600 # Max time (seconds) to wait for a related task to finish + ) # interval_wait_for_task = 30 # A too long polling interval slows things down considerably - interval_wait_for_task = 10 # Interval in seconds for polling related tasks + interval_wait_for_task = 10 # Interval in seconds for polling related tasks time_left = timeout_wait_for_task old_num_related_tasks = 0 while True: # Get related tasks (operations within the same instance as this) which are # still running (operationState='PROCESSING') and which were started before this task. # In the case of op_type='ANY', get any related tasks with operationState='PROCESSING', ignore timestamps. - db_waitfor_related_task = self.db.get_list(db_table_name, - q_filter=_filter) + db_waitfor_related_task = self.db.get_list(db_table_name, q_filter=_filter) new_num_related_tasks = len(db_waitfor_related_task) # If there are no related tasks, there is nothing to wait for, so return. if not new_num_related_tasks: @@ -460,28 +484,39 @@ class TaskRegistry(LcmBase): # If number of pending related tasks have changed, # update the 'detailed-status' field and log the change. # Do NOT update the 'detailed-status' for SDNC-associated-to-VIM operations ('ANY'). 
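# The surrounding loop is a plain poll-with-timeout: re-query the earlier
# PROCESSING operations every interval_wait_for_task seconds, refresh the
# queue position only when the count changes, and give up once the hour-long
# budget is spent. Reduced to a skeleton (count_pending is a hypothetical
# stand-in for the db.get_list query used here):

import asyncio

async def wait_for_related(count_pending, timeout=3600, interval=10):
    time_left = timeout
    while count_pending():  # earlier related operations still PROCESSING
        time_left -= interval
        if time_left < 0:
            raise TimeoutError("related tasks did not finish in time")
        await asyncio.sleep(interval)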
- if (op_type != 'ANY') and (new_num_related_tasks != old_num_related_tasks): - step = "Waiting for {} related tasks to be completed.".format(new_num_related_tasks) + if (op_type != "ANY") and (new_num_related_tasks != old_num_related_tasks): + step = "Waiting for {} related tasks to be completed.".format( + new_num_related_tasks + ) update_dict = {} - q_filter = {'_id': _id} + q_filter = {"_id": _id} # NS/NSI if self._is_service_type_HA(topic): - update_dict = {'detailed-status': step, 'queuePosition': new_num_related_tasks} + update_dict = { + "detailed-status": step, + "queuePosition": new_num_related_tasks, + } # VIM/WIM/SDN elif self._is_account_type_HA(topic): _, op_index = self._get_account_and_op_HA(op_id) - update_dict = {'_admin.operations.{}.detailed-status'.format(op_index): step} + update_dict = { + "_admin.operations.{}.detailed-status".format(op_index): step + } self.logger.debug("Task {} operation={} {}".format(topic, _id, step)) - self.db.set_one(db_table_name, - q_filter=q_filter, - update_dict=update_dict, - fail_on_empty=False) + self.db.set_one( + db_table_name, + q_filter=q_filter, + update_dict=update_dict, + fail_on_empty=False, + ) old_num_related_tasks = new_num_related_tasks time_left -= interval_wait_for_task if time_left < 0: raise LcmException( "Timeout ({}) when waiting for related tasks to be completed".format( - timeout_wait_for_task)) + timeout_wait_for_task + ) + ) await asyncio.sleep(interval_wait_for_task) return diff --git a/osm_lcm/netslice.py b/osm_lcm/netslice.py index c3a48e6..3a8002c 100644 --- a/osm_lcm/netslice.py +++ b/osm_lcm/netslice.py @@ -18,7 +18,13 @@ import logging import logging.handlers import traceback from osm_lcm import ROclient -from osm_lcm.lcm_utils import LcmException, LcmBase, populate_dict, get_iterable, deep_get +from osm_lcm.lcm_utils import ( + LcmException, + LcmBase, + populate_dict, + get_iterable, + deep_get, +) from osm_common.dbbase import DbException from time import time from copy import deepcopy @@ -38,7 +44,7 @@ class NetsliceLcm(LcmBase): :return: None """ # logging - self.logger = logging.getLogger('lcm.netslice') + self.logger = logging.getLogger("lcm.netslice") self.loop = loop self.lcm_tasks = lcm_tasks self.ns = ns @@ -67,12 +73,14 @@ class NetsliceLcm(LcmBase): nsi_update_nsir["vld.{}".format(vld_index)] = vld break else: - raise LcmException("ns_update_nsir: Not found vld={} at RO info".format(vld["id"])) + raise LcmException( + "ns_update_nsir: Not found vld={} at RO info".format(vld["id"]) + ) async def instantiate(self, nsir_id, nsilcmop_id): # Try to lock HA task here - task_is_locked_by_me = self.lcm_tasks.lock_HA('nsi', 'nsilcmops', nsilcmop_id) + task_is_locked_by_me = self.lcm_tasks.lock_HA("nsi", "nsilcmops", nsilcmop_id) if not task_is_locked_by_me: return @@ -95,7 +103,7 @@ class NetsliceLcm(LcmBase): if isinstance(RO_ip_profile["dns-server"], list): RO_ip_profile["dns-address"] = [] for ds in RO_ip_profile.pop("dns-server"): - RO_ip_profile["dns-address"].append(ds['address']) + RO_ip_profile["dns-address"].append(ds["address"]) else: RO_ip_profile["dns-address"] = RO_ip_profile.pop("dns-server") if RO_ip_profile.get("ip-version") == "ipv4": @@ -117,13 +125,18 @@ class NetsliceLcm(LcmBase): db_vim = self.db.get_one("vim_accounts", {"_id": vim_account}) if db_vim["_admin"]["operationalState"] != "ENABLED": - raise LcmException("VIM={} is not available. operationalState={}".format( - vim_account, db_vim["_admin"]["operationalState"])) + raise LcmException( + "VIM={} is not available. 
operationalState={}".format( + vim_account, db_vim["_admin"]["operationalState"] + ) + ) RO_vim_id = db_vim["_admin"]["deployed"]["RO"] vim_2_RO[vim_account] = RO_vim_id return RO_vim_id - async def netslice_scenario_create(self, vld_item, nsir_id, db_nsir, db_nsir_admin, db_nsir_update): + async def netslice_scenario_create( + self, vld_item, nsir_id, db_nsir, db_nsir_admin, db_nsir_update + ): """ Create a network slice VLD through RO Scenario :param vld_id The VLD id inside nsir to be created @@ -140,12 +153,20 @@ class NetsliceLcm(LcmBase): vld_shared = None for shared_nsrs_item in get_iterable(vld_item, "shared-nsrs-list"): - _filter = {"_id.ne": nsir_id, "_admin.nsrs-detailed-list.ANYINDEX.nsrId": shared_nsrs_item} - shared_nsi = self.db.get_one("nsis", _filter, fail_on_empty=False, fail_on_more=False) + _filter = { + "_id.ne": nsir_id, + "_admin.nsrs-detailed-list.ANYINDEX.nsrId": shared_nsrs_item, + } + shared_nsi = self.db.get_one( + "nsis", _filter, fail_on_empty=False, fail_on_more=False + ) if shared_nsi: for vlds in get_iterable(shared_nsi["_admin"]["deployed"], "RO"): if vld_id == vlds["vld_id"]: - vld_shared = {"instance_scenario_id": vlds["netslice_scenario_id"], "osm_id": vld_id} + vld_shared = { + "instance_scenario_id": vlds["netslice_scenario_id"], + "osm_id": vld_id, + } break break @@ -159,7 +180,9 @@ class NetsliceLcm(LcmBase): else: # TODO: Check VDU type in all descriptors finding SRIOV / PT # Updating network names and datacenters from instantiation parameters for each VLD - for instantiation_params_vld in get_iterable(db_nsir["instantiation_parameters"], "netslice-vld"): + for instantiation_params_vld in get_iterable( + db_nsir["instantiation_parameters"], "netslice-vld" + ): if instantiation_params_vld.get("name") == netslice_vld["name"]: ip_vld = deepcopy(instantiation_params_vld) ip_vld.pop("name") @@ -168,14 +191,19 @@ class NetsliceLcm(LcmBase): db_nsir_update_RO = {} db_nsir_update_RO["vld_id"] = netslice_vld["name"] if self.ro_config["ng"]: - db_nsir_update_RO["netslice_scenario_id"] = vld_shared.get("instance_scenario_id") if vld_shared \ + db_nsir_update_RO["netslice_scenario_id"] = ( + vld_shared.get("instance_scenario_id") + if vld_shared else "nsir:{}:vld.{}".format(nsir_id, netslice_vld["name"]) + ) else: # if not self.ro_config["ng"]: if netslice_vld.get("mgmt-network"): mgmt_network = True RO_ns_params = {} RO_ns_params["name"] = netslice_vld["name"] - RO_ns_params["datacenter"] = vim_account_2_RO(db_nsir["instantiation_parameters"]["vimAccountId"]) + RO_ns_params["datacenter"] = vim_account_2_RO( + db_nsir["instantiation_parameters"]["vimAccountId"] + ) # Creating scenario if vim-network-name / vim-network-id are present as instantiation parameter # Use vim-network-id instantiation parameter @@ -186,46 +214,87 @@ class NetsliceLcm(LcmBase): elif ip_vld.get("vim-network-name"): vim_network_option = "vim-network-name" if ip_vld.get("ip-profile"): - populate_dict(RO_ns_params, ("networks", netslice_vld["name"], "ip-profile"), - ip_profile_2_RO(ip_vld["ip-profile"])) + populate_dict( + RO_ns_params, + ("networks", netslice_vld["name"], "ip-profile"), + ip_profile_2_RO(ip_vld["ip-profile"]), + ) if vim_network_option: if ip_vld.get(vim_network_option): if isinstance(ip_vld.get(vim_network_option), list): for vim_net_id in ip_vld.get(vim_network_option): for vim_account, vim_net in vim_net_id.items(): - RO_vld_sites.append({ - "netmap-use": vim_net, - "datacenter": vim_account_2_RO(vim_account) - }) + RO_vld_sites.append( + { + "netmap-use": vim_net, + 
"datacenter": vim_account_2_RO( + vim_account + ), + } + ) elif isinstance(ip_vld.get(vim_network_option), dict): - for vim_account, vim_net in ip_vld.get(vim_network_option).items(): - RO_vld_sites.append({ - "netmap-use": vim_net, - "datacenter": vim_account_2_RO(vim_account) - }) + for vim_account, vim_net in ip_vld.get( + vim_network_option + ).items(): + RO_vld_sites.append( + { + "netmap-use": vim_net, + "datacenter": vim_account_2_RO(vim_account), + } + ) else: - RO_vld_sites.append({ - "netmap-use": ip_vld[vim_network_option], - "datacenter": vim_account_2_RO(netslice_vld["vimAccountId"])}) + RO_vld_sites.append( + { + "netmap-use": ip_vld[vim_network_option], + "datacenter": vim_account_2_RO( + netslice_vld["vimAccountId"] + ), + } + ) # Use default netslice vim-network-name from template else: - for nss_conn_point_ref in get_iterable(netslice_vld, "nss-connection-point-ref"): + for nss_conn_point_ref in get_iterable( + netslice_vld, "nss-connection-point-ref" + ): if nss_conn_point_ref.get("vimAccountId"): - if nss_conn_point_ref["vimAccountId"] != netslice_vld["vimAccountId"]: - RO_vld_sites.append({ - "netmap-create": None, - "datacenter": vim_account_2_RO(nss_conn_point_ref["vimAccountId"])}) + if ( + nss_conn_point_ref["vimAccountId"] + != netslice_vld["vimAccountId"] + ): + RO_vld_sites.append( + { + "netmap-create": None, + "datacenter": vim_account_2_RO( + nss_conn_point_ref["vimAccountId"] + ), + } + ) if vld_shared: - populate_dict(RO_ns_params, ("networks", netslice_vld["name"], "use-network"), vld_shared) + populate_dict( + RO_ns_params, + ("networks", netslice_vld["name"], "use-network"), + vld_shared, + ) if RO_vld_sites: - populate_dict(RO_ns_params, ("networks", netslice_vld["name"], "sites"), RO_vld_sites) - - RO_ns_params["scenario"] = {"nets": [{"name": netslice_vld["name"], - "external": mgmt_network, "type": "bridge"}]} + populate_dict( + RO_ns_params, + ("networks", netslice_vld["name"], "sites"), + RO_vld_sites, + ) + + RO_ns_params["scenario"] = { + "nets": [ + { + "name": netslice_vld["name"], + "external": mgmt_network, + "type": "bridge", + } + ] + } # self.logger.debug(logging_text + step) desc = await RO.create("ns", descriptor=RO_ns_params) @@ -242,16 +311,32 @@ class NetsliceLcm(LcmBase): RO_list = db_nsir_admin["deployed"]["RO"] for ro_item_index, RO_item in enumerate(RO_list): - netslice_vld = next((n for n in get_iterable(db_nsir["_admin"], "netslice-vld") - if RO_item.get("vld_id") == n.get("id")), None) + netslice_vld = next( + ( + n + for n in get_iterable(db_nsir["_admin"], "netslice-vld") + if RO_item.get("vld_id") == n.get("id") + ), + None, + ) if not netslice_vld: continue # if is equal vld of _admin with vld of netslice-vld then go for the CPs # Search the cp of netslice-vld that match with nst:netslice-subnet - for nss_cp_item in get_iterable(netslice_vld, "nss-connection-point-ref"): + for nss_cp_item in get_iterable( + netslice_vld, "nss-connection-point-ref" + ): # Search the netslice-subnet of nst that match - nss = next((nss for nss in get_iterable(db_nsir["_admin"], "netslice-subnet") - if nss_cp_item["nss-ref"] == nss["nss-id"]), None) + nss = next( + ( + nss + for nss in get_iterable( + db_nsir["_admin"], "netslice-subnet" + ) + if nss_cp_item["nss-ref"] == nss["nss-id"] + ), + None, + ) # Compare nss-ref equal nss from nst if not nss: continue @@ -260,27 +345,42 @@ class NetsliceLcm(LcmBase): for cp_nsd in db_nsds.get("sapd", ()): if cp_nsd["id"] == nss_cp_item["nsd-connection-point-ref"]: if nslcmop.get("operationParams"): - if 
nslcmop["operationParams"].get("nsName") == nss["nsName"]: + if ( + nslcmop["operationParams"].get("nsName") + == nss["nsName"] + ): vld_id = RO_item["vld_id"] - netslice_scenario_id = RO_item["netslice_scenario_id"] + netslice_scenario_id = RO_item[ + "netslice_scenario_id" + ] nslcmop_vld = {} nslcmop_vld["name"] = cp_nsd["virtual-link-desc"] - for vld in get_iterable(nslcmop["operationParams"], "vld"): + for vld in get_iterable( + nslcmop["operationParams"], "vld" + ): if vld["name"] == cp_nsd["virtual-link-desc"]: nslcmop_vld.update(vld) if self.ro_config["ng"]: nslcmop_vld["common_id"] = netslice_scenario_id - nslcmop_vld.update(nsi_vld_instantiationi_params.get(RO_item["vld_id"], {})) + nslcmop_vld.update( + nsi_vld_instantiationi_params.get( + RO_item["vld_id"], {} + ) + ) else: - nslcmop_vld["ns-net"] = {vld_id: netslice_scenario_id} + nslcmop_vld["ns-net"] = { + vld_id: netslice_scenario_id + } vld_op_list.append(nslcmop_vld) nslcmop["operationParams"]["vld"] = vld_op_list - self.update_db_2("nslcmops", nslcmop["_id"], {"operationParams.vld": vld_op_list}) + self.update_db_2( + "nslcmops", nslcmop["_id"], {"operationParams.vld": vld_op_list} + ) return nsr_id, nslcmop try: # wait for any previous tasks in process - await self.lcm_tasks.waitfor_related_HA('nsi', 'nsilcmops', nsilcmop_id) + await self.lcm_tasks.waitfor_related_HA("nsi", "nsilcmops", nsilcmop_id) step = "Getting nsir={} from db".format(nsir_id) db_nsir = self.db.get_one("nsis", {"_id": nsir_id}) @@ -292,7 +392,9 @@ class NetsliceLcm(LcmBase): if nsi_params and nsi_params.get("timeout_nsi_deploy"): timeout_nsi_deploy = nsi_params["timeout_nsi_deploy"] else: - timeout_nsi_deploy = self.timeout.get("nsi_deploy", self.timeout_nsi_deploy) + timeout_nsi_deploy = self.timeout.get( + "nsi_deploy", self.timeout_nsi_deploy + ) # Empty list to keep track of network service records status in the netslice nsir_admin = db_nsir_admin = db_nsir.get("_admin") @@ -309,7 +411,9 @@ class NetsliceLcm(LcmBase): self.update_db_2("nsis", nsir_id, db_nsir_update) db_nsir_update["_admin.deployed.RO"] = db_nsir_admin["deployed"]["RO"] for vld_item in get_iterable(nsir_admin, "netslice-vld"): - await netslice_scenario_create(self, vld_item, nsir_id, db_nsir, db_nsir_admin, db_nsir_update) + await netslice_scenario_create( + self, vld_item, nsir_id, db_nsir, db_nsir_admin, db_nsir_update + ) step = "Instantiating netslice subnets" db_nsir_update["detailed-status"] = step @@ -337,7 +441,9 @@ class NetsliceLcm(LcmBase): nsr_id, nslcmop = overwrite_nsd_params(self, db_nsir, nslcmop) step = "Launching ns={} instantiate={} task".format(nsr_id, nslcmop_id) task = asyncio.ensure_future(self.ns.instantiate(nsr_id, nslcmop_id)) - self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "ns_instantiate", task) + self.lcm_tasks.register( + "ns", nsr_id, nslcmop_id, "ns_instantiate", task + ) # Wait until Network Slice is ready step = " Waiting nsi ready." 
@@ -357,24 +463,41 @@ class NetsliceLcm(LcmBase): # TODO: (future improvement) other possible status: ROLLING_BACK,ROLLED_BACK for nss in nsrs_detailed_list: if nss["nsrId"] == nslcmop["nsInstanceId"]: - nss.update({"nsrId": nslcmop["nsInstanceId"], "status": nslcmop["operationState"], - "detailed-status": nslcmop.get("detailed-status"), - "instantiated": True}) + nss.update( + { + "nsrId": nslcmop["nsInstanceId"], + "status": nslcmop["operationState"], + "detailed-status": nslcmop.get("detailed-status"), + "instantiated": True, + } + ) nsrs_detailed_list_new.append(nss) - if status not in ["COMPLETED", "PARTIALLY_COMPLETED", "FAILED", "FAILED_TEMP"]: + if status not in [ + "COMPLETED", + "PARTIALLY_COMPLETED", + "FAILED", + "FAILED_TEMP", + ]: nsi_ready = False if nsrs_detailed_list_new != nsrs_detailed_list_old: nsrs_detailed_list_old = nsrs_detailed_list_new - self.update_db_2("nsis", nsir_id, {"_admin.nsrs-detailed-list": nsrs_detailed_list_new}) + self.update_db_2( + "nsis", + nsir_id, + {"_admin.nsrs-detailed-list": nsrs_detailed_list_new}, + ) if nsi_ready: error_list = [] step = "Network Slice Instance instantiated" for nss in nsrs_detailed_list: if nss["status"] in ("FAILED", "FAILED_TEMP"): - error_list.append("NS {} {}: {}".format(nss["nsrId"], nss["status"], - nss["detailed-status"])) + error_list.append( + "NS {} {}: {}".format( + nss["nsrId"], nss["status"], nss["detailed-status"] + ) + ) if error_list: step = "instantiating" raise LcmException("; ".join(error_list)) @@ -383,27 +506,36 @@ class NetsliceLcm(LcmBase): # TODO: future improvement due to synchronism -> await asyncio.wait(vca_task_list, timeout=300) await asyncio.sleep(5, loop=self.loop) - else: # timeout_nsi_deploy reached: + else: # timeout_nsi_deploy reached: raise LcmException("Timeout waiting nsi to be ready.") db_nsir_update["operational-status"] = "running" db_nsir_update["detailed-status"] = "done" db_nsir_update["config-status"] = "configured" - db_nsilcmop_update["operationState"] = nsilcmop_operation_state = "COMPLETED" + db_nsilcmop_update[ + "operationState" + ] = nsilcmop_operation_state = "COMPLETED" db_nsilcmop_update["statusEnteredTime"] = time() db_nsilcmop_update["detailed-status"] = "done" return except (LcmException, DbException) as e: - self.logger.error(logging_text + "Exit Exception while '{}': {}".format(step, e)) + self.logger.error( + logging_text + "Exit Exception while '{}': {}".format(step, e) + ) exc = e except asyncio.CancelledError: - self.logger.error(logging_text + "Cancelled Exception while '{}'".format(step)) + self.logger.error( + logging_text + "Cancelled Exception while '{}'".format(step) + ) exc = "Operation was cancelled" except Exception as e: exc = traceback.format_exc() - self.logger.critical(logging_text + "Exit Exception {} while '{}': {}".format(type(e).__name__, step, e), - exc_info=True) + self.logger.critical( + logging_text + + "Exit Exception {} while '{}': {}".format(type(e).__name__, step, e), + exc_info=True, + ) finally: if exc: if db_nsir: @@ -411,8 +543,12 @@ class NetsliceLcm(LcmBase): db_nsir_update["operational-status"] = "failed" db_nsir_update["config-status"] = "configured" if db_nsilcmop: - db_nsilcmop_update["detailed-status"] = "FAILED {}: {}".format(step, exc) - db_nsilcmop_update["operationState"] = nsilcmop_operation_state = "FAILED" + db_nsilcmop_update["detailed-status"] = "FAILED {}: {}".format( + step, exc + ) + db_nsilcmop_update[ + "operationState" + ] = nsilcmop_operation_state = "FAILED" db_nsilcmop_update["statusEnteredTime"] = time() 
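# The chained assignment above writes the same "FAILED" value to both the
# persisted update dict and the local nsilcmop_operation_state; the kafka
# notification below only fires when that local value has been set.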
try: if db_nsir: @@ -424,17 +560,26 @@ class NetsliceLcm(LcmBase): self.logger.error(logging_text + "Cannot update database: {}".format(e)) if nsilcmop_operation_state: try: - await self.msg.aiowrite("nsi", "instantiated", {"nsir_id": nsir_id, "nsilcmop_id": nsilcmop_id, - "operationState": nsilcmop_operation_state}) + await self.msg.aiowrite( + "nsi", + "instantiated", + { + "nsir_id": nsir_id, + "nsilcmop_id": nsilcmop_id, + "operationState": nsilcmop_operation_state, + }, + ) except Exception as e: - self.logger.error(logging_text + "kafka_write notification Exception {}".format(e)) + self.logger.error( + logging_text + "kafka_write notification Exception {}".format(e) + ) self.logger.debug(logging_text + "Exit") self.lcm_tasks.remove("nsi", nsir_id, nsilcmop_id, "nsi_instantiate") async def terminate(self, nsir_id, nsilcmop_id): # Try to lock HA task here - task_is_locked_by_me = self.lcm_tasks.lock_HA('nsi', 'nsilcmops', nsilcmop_id) + task_is_locked_by_me = self.lcm_tasks.lock_HA("nsi", "nsilcmops", nsilcmop_id) if not task_is_locked_by_me: return @@ -447,12 +592,12 @@ class NetsliceLcm(LcmBase): db_nsilcmop_update = {} RO = ROclient.ROClient(self.loop, **self.ro_config) nsir_deployed = None - failed_detail = [] # annotates all failed error messages + failed_detail = [] # annotates all failed error messages nsilcmop_operation_state = None autoremove = False # autoremove after terminated try: # wait for any previous tasks in process - await self.lcm_tasks.waitfor_related_HA('nsi', 'nsilcmops', nsilcmop_id) + await self.lcm_tasks.waitfor_related_HA("nsi", "nsilcmops", nsilcmop_id) step = "Getting nsir={} from db".format(nsir_id) db_nsir = self.db.get_one("nsis", {"_id": nsir_id}) @@ -484,11 +629,18 @@ class NetsliceLcm(LcmBase): for nslcmop_id in nslcmop_ids: nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id}) nsr_id = nslcmop["operationParams"].get("nsInstanceId") - nss_in_use = self.db.get_list("nsis", {"_admin.netslice-vld.ANYINDEX.shared-nsrs-list": nsr_id, - "operational-status": {"$nin": ["terminated", "failed"]}}) + nss_in_use = self.db.get_list( + "nsis", + { + "_admin.netslice-vld.ANYINDEX.shared-nsrs-list": nsr_id, + "operational-status": {"$nin": ["terminated", "failed"]}, + }, + ) if len(nss_in_use) < 2: task = asyncio.ensure_future(self.ns.terminate(nsr_id, nslcmop_id)) - self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "ns_instantiate", task) + self.lcm_tasks.register( + "ns", nsr_id, nslcmop_id, "ns_instantiate", task + ) nslcmop_new.append(nslcmop_id) else: # Update shared nslcmop shared with active nsi @@ -497,16 +649,25 @@ class NetsliceLcm(LcmBase): if db_nsir["_id"] != nsis_item["_id"]: netsliceInstanceId = nsis_item["_id"] break - self.db.set_one("nslcmops", {"_id": nslcmop_id}, - {"operationParams.netsliceInstanceId": netsliceInstanceId}) - self.db.set_one("nsilcmops", {"_id": nsilcmop_id}, {"operationParams.nslcmops_ids": nslcmop_new}) + self.db.set_one( + "nslcmops", + {"_id": nslcmop_id}, + {"operationParams.netsliceInstanceId": netsliceInstanceId}, + ) + self.db.set_one( + "nsilcmops", + {"_id": nsilcmop_id}, + {"operationParams.nslcmops_ids": nslcmop_new}, + ) # Wait until Network Slice is terminated - step = nsir_status_detailed = " Waiting nsi terminated. nsi_id={}".format(nsir_id) + step = nsir_status_detailed = " Waiting nsi terminated. 
nsi_id={}".format( + nsir_id + ) nsrs_detailed_list_old = None self.logger.debug(logging_text + step) - termination_timeout = 2 * 3600 # Two hours + termination_timeout = 2 * 3600 # Two hours while termination_timeout > 0: # Check ns termination status nsi_ready = True @@ -516,43 +677,67 @@ class NetsliceLcm(LcmBase): for nslcmop_item in nslcmop_ids: nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_item}) status = nslcmop["operationState"] - # TODO: (future improvement) other possible status: ROLLING_BACK,ROLLED_BACK + # TODO: (future improvement) other possible status: ROLLING_BACK,ROLLED_BACK for nss in nsrs_detailed_list: if nss["nsrId"] == nslcmop["nsInstanceId"]: - nss.update({"nsrId": nslcmop["nsInstanceId"], "status": nslcmop["operationState"], - "detailed-status": - nsir_status_detailed + "; {}".format(nslcmop.get("detailed-status"))}) + nss.update( + { + "nsrId": nslcmop["nsInstanceId"], + "status": nslcmop["operationState"], + "detailed-status": nsir_status_detailed + + "; {}".format(nslcmop.get("detailed-status")), + } + ) nsrs_detailed_list_new.append(nss) - if status not in ["COMPLETED", "PARTIALLY_COMPLETED", "FAILED", "FAILED_TEMP"]: + if status not in [ + "COMPLETED", + "PARTIALLY_COMPLETED", + "FAILED", + "FAILED_TEMP", + ]: nsi_ready = False if nsrs_detailed_list_new != nsrs_detailed_list_old: nsrs_detailed_list_old = nsrs_detailed_list_new - self.update_db_2("nsis", nsir_id, {"_admin.nsrs-detailed-list": nsrs_detailed_list_new}) + self.update_db_2( + "nsis", + nsir_id, + {"_admin.nsrs-detailed-list": nsrs_detailed_list_new}, + ) if nsi_ready: # Check if it is the last used nss and mark isinstantiate: False db_nsir = self.db.get_one("nsis", {"_id": nsir_id}) nsrs_detailed_list = db_nsir["_admin"].get("nsrs-detailed-list") for nss in nsrs_detailed_list: - _filter = {"_admin.nsrs-detailed-list.ANYINDEX.nsrId": nss["nsrId"], - "operational-status.ne": "terminated", - "_id.ne": nsir_id} - nsis_list = self.db.get_one("nsis", _filter, fail_on_empty=False, fail_on_more=False) + _filter = { + "_admin.nsrs-detailed-list.ANYINDEX.nsrId": nss["nsrId"], + "operational-status.ne": "terminated", + "_id.ne": nsir_id, + } + nsis_list = self.db.get_one( + "nsis", _filter, fail_on_empty=False, fail_on_more=False + ) if not nsis_list: nss.update({"instantiated": False}) - step = "Network Slice Instance is terminated. nsi_id={}".format(nsir_id) + step = "Network Slice Instance is terminated. nsi_id={}".format( + nsir_id + ) for items in nsrs_detailed_list: if "FAILED" in items.values(): - raise LcmException("Error terminating NSI: {}".format(nsir_id)) + raise LcmException( + "Error terminating NSI: {}".format(nsir_id) + ) break await asyncio.sleep(5, loop=self.loop) termination_timeout -= 5 if termination_timeout <= 0: - raise LcmException("Timeout waiting nsi to be terminated. nsi_id={}".format(nsir_id)) + raise LcmException( + "Timeout waiting nsi to be terminated. 
nsi_id={}".format(nsir_id) + ) # Delete netslice-vlds RO_nsir_id = RO_delete_action = None @@ -560,8 +745,12 @@ class NetsliceLcm(LcmBase): RO_nsir_id = nsir_deployed_RO.get("netslice_scenario_id") try: if not self.ro_config["ng"]: - step = db_nsir_update["detailed-status"] = "Deleting netslice-vld at RO" - db_nsilcmop_update["detailed-status"] = "Deleting netslice-vld at RO" + step = db_nsir_update[ + "detailed-status" + ] = "Deleting netslice-vld at RO" + db_nsilcmop_update[ + "detailed-status" + ] = "Deleting netslice-vld at RO" self.logger.debug(logging_text + step) desc = await RO.delete("ns", RO_nsir_id) RO_delete_action = desc["action_id"] @@ -571,32 +760,47 @@ class NetsliceLcm(LcmBase): self.update_db_2("nsis", nsir_id, db_nsir_update) if RO_delete_action: # wait until NS is deleted from VIM - step = "Waiting ns deleted from VIM. RO_id={}".format(RO_nsir_id) + step = "Waiting ns deleted from VIM. RO_id={}".format( + RO_nsir_id + ) self.logger.debug(logging_text + step) except ROclient.ROClientException as e: if e.http_code == 404: # not found nsir_deployed_RO["vld_id"] = None nsir_deployed_RO["vld_status"] = "DELETED" - self.logger.debug(logging_text + "RO_ns_id={} already deleted".format(RO_nsir_id)) - elif e.http_code == 409: # conflict - failed_detail.append("RO_ns_id={} delete conflict: {}".format(RO_nsir_id, e)) + self.logger.debug( + logging_text + + "RO_ns_id={} already deleted".format(RO_nsir_id) + ) + elif e.http_code == 409: # conflict + failed_detail.append( + "RO_ns_id={} delete conflict: {}".format(RO_nsir_id, e) + ) self.logger.debug(logging_text + failed_detail[-1]) else: - failed_detail.append("RO_ns_id={} delete error: {}".format(RO_nsir_id, e)) + failed_detail.append( + "RO_ns_id={} delete error: {}".format(RO_nsir_id, e) + ) self.logger.error(logging_text + failed_detail[-1]) if failed_detail: self.logger.error(logging_text + " ;".join(failed_detail)) db_nsir_update["operational-status"] = "failed" - db_nsir_update["detailed-status"] = "Deletion errors " + "; ".join(failed_detail) + db_nsir_update["detailed-status"] = "Deletion errors " + "; ".join( + failed_detail + ) db_nsilcmop_update["detailed-status"] = "; ".join(failed_detail) - db_nsilcmop_update["operationState"] = nsilcmop_operation_state = "FAILED" + db_nsilcmop_update[ + "operationState" + ] = nsilcmop_operation_state = "FAILED" db_nsilcmop_update["statusEnteredTime"] = time() else: db_nsir_update["operational-status"] = "terminating" db_nsir_update["config-status"] = "terminating" db_nsir_update["_admin.nsiState"] = "NOT_INSTANTIATED" - db_nsilcmop_update["operationState"] = nsilcmop_operation_state = "COMPLETED" + db_nsilcmop_update[ + "operationState" + ] = nsilcmop_operation_state = "COMPLETED" db_nsilcmop_update["statusEnteredTime"] = time() if db_nsilcmop["operationParams"].get("autoremove"): autoremove = True @@ -609,15 +813,22 @@ class NetsliceLcm(LcmBase): return except (LcmException, DbException) as e: - self.logger.error(logging_text + "Exit Exception while '{}': {}".format(step, e)) + self.logger.error( + logging_text + "Exit Exception while '{}': {}".format(step, e) + ) exc = e except asyncio.CancelledError: - self.logger.error(logging_text + "Cancelled Exception while '{}'".format(step)) + self.logger.error( + logging_text + "Cancelled Exception while '{}'".format(step) + ) exc = "Operation was cancelled" except Exception as e: exc = traceback.format_exc() - self.logger.critical(logging_text + "Exit Exception {} while '{}': {}".format(type(e).__name__, step, e), - exc_info=True) + 
self.logger.critical(
+                logging_text
+                + "Exit Exception {} while '{}': {}".format(type(e).__name__, step, e),
+                exc_info=True,
+            )
         finally:
             if exc:
                 if db_nsir:
@@ -625,8 +836,12 @@ class NetsliceLcm(LcmBase):
                     db_nsir_update["detailed-status"] = "ERROR {}: {}".format(step, exc)
                     db_nsir_update["operational-status"] = "failed"
                 if db_nsilcmop:
-                    db_nsilcmop_update["detailed-status"] = "FAILED {}: {}".format(step, exc)
-                    db_nsilcmop_update["operationState"] = nsilcmop_operation_state = "FAILED"
+                    db_nsilcmop_update["detailed-status"] = "FAILED {}: {}".format(
+                        step, exc
+                    )
+                    db_nsilcmop_update[
+                        "operationState"
+                    ] = nsilcmop_operation_state = "FAILED"
                     db_nsilcmop_update["statusEnteredTime"] = time()
             try:
                 if db_nsir:
@@ -640,11 +855,20 @@ class NetsliceLcm(LcmBase):
 
         if nsilcmop_operation_state:
             try:
-                await self.msg.aiowrite("nsi", "terminated", {"nsir_id": nsir_id, "nsilcmop_id": nsilcmop_id,
-                                                              "operationState": nsilcmop_operation_state,
-                                                              "autoremove": autoremove},
-                                        loop=self.loop)
+                await self.msg.aiowrite(
+                    "nsi",
+                    "terminated",
+                    {
+                        "nsir_id": nsir_id,
+                        "nsilcmop_id": nsilcmop_id,
+                        "operationState": nsilcmop_operation_state,
+                        "autoremove": autoremove,
+                    },
+                    loop=self.loop,
+                )
             except Exception as e:
-                self.logger.error(logging_text + "kafka_write notification Exception {}".format(e))
+                self.logger.error(
+                    logging_text + "kafka_write notification Exception {}".format(e)
+                )
         self.logger.debug(logging_text + "Exit")
         self.lcm_tasks.remove("nsi", nsir_id, nsilcmop_id, "nsi_terminate")
diff --git a/osm_lcm/ng_ro.py b/osm_lcm/ng_ro.py
index 6e9f683..b8893e8 100644
--- a/osm_lcm/ng_ro.py
+++ b/osm_lcm/ng_ro.py
@@ -41,20 +41,28 @@ class NgRoException(Exception):
 
 
 class NgRoClient:
-    headers_req = {'Accept': 'application/yaml', 'content-type': 'application/yaml'}
-    client_to_RO = {'tenant': 'tenants', 'vim': 'datacenters', 'vim_account': 'datacenters', 'sdn': 'sdn_controllers',
-                    'vnfd': 'vnfs', 'nsd': 'scenarios', 'wim': 'wims', 'wim_account': 'wims',
-                    'ns': 'instances'}
+    headers_req = {"Accept": "application/yaml", "content-type": "application/yaml"}
+    client_to_RO = {
+        "tenant": "tenants",
+        "vim": "datacenters",
+        "vim_account": "datacenters",
+        "sdn": "sdn_controllers",
+        "vnfd": "vnfs",
+        "nsd": "scenarios",
+        "wim": "wims",
+        "wim_account": "wims",
+        "ns": "instances",
+    }
     mandatory_for_create = {
-        'tenant': ("name", ),
-        'vnfd': ("name", "id"),
-        'nsd': ("name", "id"),
-        'ns': ("name", "scenario", "datacenter"),
-        'vim': ("name", "vim_url"),
-        'wim': ("name", "wim_url"),
-        'vim_account': (),
-        'wim_account': (),
-        'sdn': ("name", 'type'),
+        "tenant": ("name",),
+        "vnfd": ("name", "id"),
+        "nsd": ("name", "id"),
+        "ns": ("name", "scenario", "datacenter"),
+        "vim": ("name", "vim_url"),
+        "wim": ("name", "wim_url"),
+        "vim_account": (),
+        "wim_account": (),
+        "sdn": ("name", "type"),
     }
     timeout_large = 120
     timeout_short = 30
@@ -73,7 +81,7 @@ class NgRoClient:
         self.tenant = None
         self.datacenter_id_name = kwargs.get("datacenter")
         self.datacenter = None
-        logger_name = kwargs.get('logger_name', 'lcm.ro')
+        logger_name = kwargs.get("logger_name", "lcm.ro")
         self.logger = logging.getLogger(logger_name)
         if kwargs.get("loglevel"):
             self.logger.setLevel(kwargs["loglevel"])
@@ -98,9 +106,15 @@ class NgRoClient:
             async with aiohttp.ClientSession(loop=self.loop) as session:
                 self.logger.debug("NG-RO POST %s %s", url, payload_req)
                 # timeout = aiohttp.ClientTimeout(total=self.timeout_large)
-                async with session.post(url, headers=self.headers_req, data=payload_req) as response:
+                async with session.post(
+                    url, headers=self.headers_req, data=payload_req
+                ) as response:
                     response_text = await response.read()
-                    self.logger.debug("POST {} [{}] {}".format(url, response.status, response_text[:100]))
+                    self.logger.debug(
+                        "POST {} [{}] {}".format(
+                            url, response.status, response_text[:100]
+                        )
+                    )
                     if response.status >= 300:
                         raise NgRoException(response_text, http_code=response.status)
                     return self._parse_yaml(response_text, response=True)
@@ -111,13 +125,19 @@ class NgRoClient:
 
     async def status(self, nsr_id, action_id):
         try:
-            url = "{}/ns/v1/deploy/{nsr_id}/{action_id}".format(self.endpoint_url, nsr_id=nsr_id, action_id=action_id)
+            url = "{}/ns/v1/deploy/{nsr_id}/{action_id}".format(
+                self.endpoint_url, nsr_id=nsr_id, action_id=action_id
+            )
             async with aiohttp.ClientSession(loop=self.loop) as session:
                 self.logger.debug("GET %s", url)
                 # timeout = aiohttp.ClientTimeout(total=self.timeout_short)
                 async with session.get(url, headers=self.headers_req) as response:
                     response_text = await response.read()
-                    self.logger.debug("GET {} [{}] {}".format(url, response.status, response_text[:100]))
+                    self.logger.debug(
+                        "GET {} [{}] {}".format(
+                            url, response.status, response_text[:100]
+                        )
+                    )
                     if response.status >= 300:
                         raise NgRoException(response_text, http_code=response.status)
                     return self._parse_yaml(response_text, response=True)
@@ -136,7 +156,9 @@ class NgRoClient:
                 async with session.delete(url, headers=self.headers_req) as response:
                     self.logger.debug("DELETE {} [{}]".format(url, response.status))
                     if response.status >= 300:
-                        raise NgRoException("Delete {}".format(nsr_id), http_code=response.status)
+                        raise NgRoException(
+                            "Delete {}".format(nsr_id), http_code=response.status
+                        )
                     return
 
         except (aiohttp.ClientOSError, aiohttp.ClientError) as e:
@@ -157,7 +179,11 @@ class NgRoClient:
                 # timeout = aiohttp.ClientTimeout(total=self.timeout_short)
                 async with session.get(url, headers=self.headers_req) as response:
                     response_text = await response.read()
-                    self.logger.debug("GET {} [{}] {}".format(url, response.status, response_text[:100]))
+                    self.logger.debug(
+                        "GET {} [{}] {}".format(
+                            url, response.status, response_text[:100]
+                        )
+                    )
                     if response.status >= 300:
                         raise NgRoException(response_text, http_code=response.status)
 
@@ -165,14 +191,21 @@ class NgRoClient:
                         if "."
in word: version_text, _, _ = word.partition("-") return version_text - raise NgRoException("Got invalid version text: '{}'".format(response_text), http_code=500) + raise NgRoException( + "Got invalid version text: '{}'".format(response_text), + http_code=500, + ) except (aiohttp.ClientOSError, aiohttp.ClientError) as e: raise NgRoException(e, http_code=504) except asyncio.TimeoutError: raise NgRoException("Timeout", http_code=504) except Exception as e: - raise NgRoException("Got invalid version text: '{}'; causing exception {}".format(response_text, e), - http_code=500) + raise NgRoException( + "Got invalid version text: '{}'; causing exception {}".format( + response_text, e + ), + http_code=500, + ) @staticmethod def _parse_yaml(descriptor, response=False): @@ -180,9 +213,11 @@ class NgRoClient: return yaml.safe_load(descriptor) except yaml.YAMLError as exc: error_pos = "" - if hasattr(exc, 'problem_mark'): + if hasattr(exc, "problem_mark"): mark = exc.problem_mark - error_pos = " at line:{} column:{}s".format(mark.line + 1, mark.column + 1) + error_pos = " at line:{} column:{}s".format( + mark.line + 1, mark.column + 1 + ) error_text = "yaml format error" + error_pos if response: raise NgRoException("reponse with " + error_text) diff --git a/osm_lcm/ns.py b/osm_lcm/ns.py index 6b82da9..8b2be1e 100644 --- a/osm_lcm/ns.py +++ b/osm_lcm/ns.py @@ -22,16 +22,39 @@ import logging import logging.handlers import traceback import json -from jinja2 import Environment, TemplateError, TemplateNotFound, StrictUndefined, UndefinedError +from jinja2 import ( + Environment, + TemplateError, + TemplateNotFound, + StrictUndefined, + UndefinedError, +) from osm_lcm import ROclient from osm_lcm.ng_ro import NgRoClient, NgRoException -from osm_lcm.lcm_utils import LcmException, LcmExceptionNoMgmtIP, LcmBase, deep_get, get_iterable, populate_dict +from osm_lcm.lcm_utils import ( + LcmException, + LcmExceptionNoMgmtIP, + LcmBase, + deep_get, + get_iterable, + populate_dict, +) from osm_lcm.data_utils.nsd import get_vnf_profiles -from osm_lcm.data_utils.vnfd import get_vdu_list, get_vdu_profile, \ - get_ee_sorted_initial_config_primitive_list, get_ee_sorted_terminate_config_primitive_list, \ - get_kdu_list, get_virtual_link_profiles, get_vdu, get_configuration, \ - get_vdu_index, get_scaling_aspect, get_number_of_instances, get_juju_ee_ref +from osm_lcm.data_utils.vnfd import ( + get_vdu_list, + get_vdu_profile, + get_ee_sorted_initial_config_primitive_list, + get_ee_sorted_terminate_config_primitive_list, + get_kdu_list, + get_virtual_link_profiles, + get_vdu, + get_configuration, + get_vdu_index, + get_scaling_aspect, + get_number_of_instances, + get_juju_ee_ref, +) from osm_lcm.data_utils.list_utils import find_in_list from osm_lcm.data_utils.vnfr import get_osm_params, get_vdur_index from osm_lcm.data_utils.dict_utils import parse_yaml_strings @@ -61,12 +84,16 @@ __author__ = "Alfonso Tierno " class NsLcm(LcmBase): - timeout_vca_on_error = 5 * 60 # Time for charm from first time at blocked,error status to mark as failed - timeout_ns_deploy = 2 * 3600 # default global timeout for deployment a ns - timeout_ns_terminate = 1800 # default global timeout for un deployment a ns + timeout_vca_on_error = ( + 5 * 60 + ) # Time for charm from first time at blocked,error status to mark as failed + timeout_ns_deploy = 2 * 3600 # default global timeout for deployment a ns + timeout_ns_terminate = 1800 # default global timeout for un deployment a ns timeout_charm_delete = 10 * 60 timeout_primitive = 30 * 60 # timeout 
for primitive execution - timeout_progress_primitive = 10 * 60 # timeout for some progress in a primitive execution + timeout_progress_primitive = ( + 10 * 60 + ) # timeout for some progress in a primitive execution SUBOPERATION_STATUS_NOT_FOUND = -1 SUBOPERATION_STATUS_NEW = -2 @@ -79,10 +106,7 @@ class NsLcm(LcmBase): :param config: two level dictionary with configuration. Top level should contain 'database', 'storage', :return: None """ - super().__init__( - msg=msg, - logger=logging.getLogger('lcm.ns') - ) + super().__init__(msg=msg, logger=logging.getLogger("lcm.ns")) self.db = Database().instance.db self.fs = Filesystem().instance.fs @@ -99,14 +123,14 @@ class NsLcm(LcmBase): loop=self.loop, on_update_db=self._on_update_n2vc_db, fs=self.fs, - db=self.db + db=self.db, ) self.conn_helm_ee = LCMHelmConn( log=self.logger, loop=self.loop, vca_config=self.vca_config, - on_update_db=self._on_update_n2vc_db + on_update_db=self._on_update_n2vc_db, ) self.k8sclusterhelm2 = K8sHelmConnector( @@ -115,7 +139,7 @@ class NsLcm(LcmBase): log=self.logger, on_update_db=None, fs=self.fs, - db=self.db + db=self.db, ) self.k8sclusterhelm3 = K8sHelm3Connector( @@ -134,7 +158,7 @@ class NsLcm(LcmBase): loop=self.loop, on_update_db=self._on_update_k8s_db, fs=self.fs, - db=self.db + db=self.db, ) self.k8scluster_map = { @@ -150,7 +174,7 @@ class NsLcm(LcmBase): "native_charm": self.n2vc, "k8s_proxy_charm": self.n2vc, "helm": self.conn_helm_ee, - "helm-v3": self.conn_helm_ee + "helm-v3": self.conn_helm_ee, } self.prometheus = prometheus @@ -173,7 +197,9 @@ class NsLcm(LcmBase): if i > 0: i += 1 # format in hex, len can be 2 for mac or 4 for ipv6 - return ("{}{:0" + str(len(ip_mac) - i) + "x}").format(ip_mac[:i], int(ip_mac[i:], 16) + vm_index) + return ("{}{:0" + str(len(ip_mac) - i) + "x}").format( + ip_mac[:i], int(ip_mac[i:], 16) + vm_index + ) except Exception: pass return None @@ -188,92 +214,108 @@ class NsLcm(LcmBase): # write to database db_dict = dict() # db_dict['deploymentStatus'] = yaml.dump(ro_descriptor, default_flow_style=False, indent=2) - db_dict['deploymentStatus'] = ro_descriptor + db_dict["deploymentStatus"] = ro_descriptor self.update_db_2("nsrs", nsrs_id, db_dict) except Exception as e: - self.logger.warn('Cannot write database RO deployment for ns={} -> {}'.format(nsrs_id, e)) + self.logger.warn( + "Cannot write database RO deployment for ns={} -> {}".format(nsrs_id, e) + ) async def _on_update_n2vc_db(self, table, filter, path, updated_data, vca_id=None): # remove last dot from path (if exists) - if path.endswith('.'): + if path.endswith("."): path = path[:-1] # self.logger.debug('_on_update_n2vc_db(table={}, filter={}, path={}, updated_data={}' # .format(table, filter, path, updated_data)) try: - nsr_id = filter.get('_id') + nsr_id = filter.get("_id") # read ns record from database - nsr = self.db.get_one(table='nsrs', q_filter=filter) - current_ns_status = nsr.get('nsState') + nsr = self.db.get_one(table="nsrs", q_filter=filter) + current_ns_status = nsr.get("nsState") # get vca status for NS - status_dict = await self.n2vc.get_status(namespace='.' + nsr_id, yaml_format=False, vca_id=vca_id) + status_dict = await self.n2vc.get_status( + namespace="." 
+ nsr_id, yaml_format=False, vca_id=vca_id + ) # vcaStatus db_dict = dict() - db_dict['vcaStatus'] = status_dict - await self.n2vc.update_vca_status(db_dict['vcaStatus'], vca_id=vca_id) + db_dict["vcaStatus"] = status_dict + await self.n2vc.update_vca_status(db_dict["vcaStatus"], vca_id=vca_id) # update configurationStatus for this VCA try: - vca_index = int(path[path.rfind(".")+1:]) + vca_index = int(path[path.rfind(".") + 1 :]) - vca_list = deep_get(target_dict=nsr, key_list=('_admin', 'deployed', 'VCA')) - vca_status = vca_list[vca_index].get('status') + vca_list = deep_get( + target_dict=nsr, key_list=("_admin", "deployed", "VCA") + ) + vca_status = vca_list[vca_index].get("status") - configuration_status_list = nsr.get('configurationStatus') - config_status = configuration_status_list[vca_index].get('status') + configuration_status_list = nsr.get("configurationStatus") + config_status = configuration_status_list[vca_index].get("status") - if config_status == 'BROKEN' and vca_status != 'failed': - db_dict['configurationStatus'][vca_index] = 'READY' - elif config_status != 'BROKEN' and vca_status == 'failed': - db_dict['configurationStatus'][vca_index] = 'BROKEN' + if config_status == "BROKEN" and vca_status != "failed": + db_dict["configurationStatus"][vca_index] = "READY" + elif config_status != "BROKEN" and vca_status == "failed": + db_dict["configurationStatus"][vca_index] = "BROKEN" except Exception as e: # not update configurationStatus - self.logger.debug('Error updating vca_index (ignore): {}'.format(e)) + self.logger.debug("Error updating vca_index (ignore): {}".format(e)) # if nsState = 'READY' check if juju is reporting some error => nsState = 'DEGRADED' # if nsState = 'DEGRADED' check if all is OK is_degraded = False - if current_ns_status in ('READY', 'DEGRADED'): - error_description = '' + if current_ns_status in ("READY", "DEGRADED"): + error_description = "" # check machines - if status_dict.get('machines'): - for machine_id in status_dict.get('machines'): - machine = status_dict.get('machines').get(machine_id) + if status_dict.get("machines"): + for machine_id in status_dict.get("machines"): + machine = status_dict.get("machines").get(machine_id) # check machine agent-status - if machine.get('agent-status'): - s = machine.get('agent-status').get('status') - if s != 'started': + if machine.get("agent-status"): + s = machine.get("agent-status").get("status") + if s != "started": is_degraded = True - error_description += 'machine {} agent-status={} ; '.format(machine_id, s) + error_description += ( + "machine {} agent-status={} ; ".format( + machine_id, s + ) + ) # check machine instance status - if machine.get('instance-status'): - s = machine.get('instance-status').get('status') - if s != 'running': + if machine.get("instance-status"): + s = machine.get("instance-status").get("status") + if s != "running": is_degraded = True - error_description += 'machine {} instance-status={} ; '.format(machine_id, s) + error_description += ( + "machine {} instance-status={} ; ".format( + machine_id, s + ) + ) # check applications - if status_dict.get('applications'): - for app_id in status_dict.get('applications'): - app = status_dict.get('applications').get(app_id) + if status_dict.get("applications"): + for app_id in status_dict.get("applications"): + app = status_dict.get("applications").get(app_id) # check application status - if app.get('status'): - s = app.get('status').get('status') - if s != 'active': + if app.get("status"): + s = app.get("status").get("status") + if s != 
"active": is_degraded = True - error_description += 'application {} status={} ; '.format(app_id, s) + error_description += ( + "application {} status={} ; ".format(app_id, s) + ) if error_description: - db_dict['errorDescription'] = error_description - if current_ns_status == 'READY' and is_degraded: - db_dict['nsState'] = 'DEGRADED' - if current_ns_status == 'DEGRADED' and not is_degraded: - db_dict['nsState'] = 'READY' + db_dict["errorDescription"] = error_description + if current_ns_status == "READY" and is_degraded: + db_dict["nsState"] = "DEGRADED" + if current_ns_status == "DEGRADED" and not is_degraded: + db_dict["nsState"] = "READY" # write to database self.update_db_2("nsrs", nsr_id, db_dict) @@ -281,9 +323,11 @@ class NsLcm(LcmBase): except (asyncio.CancelledError, asyncio.TimeoutError): raise except Exception as e: - self.logger.warn('Error updating NS state for ns={}: {}'.format(nsr_id, e)) + self.logger.warn("Error updating NS state for ns={}: {}".format(nsr_id, e)) - async def _on_update_k8s_db(self, cluster_uuid, kdu_instance, filter=None, vca_id=None): + async def _on_update_k8s_db( + self, cluster_uuid, kdu_instance, filter=None, vca_id=None + ): """ Updating vca status in NSR record :param cluster_uuid: UUID of a k8s cluster @@ -296,7 +340,7 @@ class NsLcm(LcmBase): # .format(cluster_uuid, kdu_instance, filter)) try: - nsr_id = filter.get('_id') + nsr_id = filter.get("_id") # get vca status for NS vca_status = await self.k8sclusterjuju.status_kdu( @@ -308,10 +352,10 @@ class NsLcm(LcmBase): ) # vcaStatus db_dict = dict() - db_dict['vcaStatus'] = {nsr_id: vca_status} + db_dict["vcaStatus"] = {nsr_id: vca_status} await self.k8sclusterjuju.update_vca_status( - db_dict['vcaStatus'], + db_dict["vcaStatus"], kdu_instance, vca_id=vca_id, ) @@ -322,7 +366,7 @@ class NsLcm(LcmBase): except (asyncio.CancelledError, asyncio.TimeoutError): raise except Exception as e: - self.logger.warn('Error updating NS state for ns={}: {}'.format(nsr_id, e)) + self.logger.warn("Error updating NS state for ns={}: {}".format(nsr_id, e)) @staticmethod def _parse_cloud_init(cloud_init_text, additional_params, vnfd_id, vdu_id): @@ -331,20 +375,28 @@ class NsLcm(LcmBase): template = env.from_string(cloud_init_text) return template.render(additional_params or {}) except UndefinedError as e: - raise LcmException("Variable {} at vnfd[id={}]:vdu[id={}]:cloud-init/cloud-init-" - "file, must be provided in the instantiation parameters inside the " - "'additionalParamsForVnf/Vdu' block".format(e, vnfd_id, vdu_id)) + raise LcmException( + "Variable {} at vnfd[id={}]:vdu[id={}]:cloud-init/cloud-init-" + "file, must be provided in the instantiation parameters inside the " + "'additionalParamsForVnf/Vdu' block".format(e, vnfd_id, vdu_id) + ) except (TemplateError, TemplateNotFound) as e: - raise LcmException("Error parsing Jinja2 to cloud-init content at vnfd[id={}]:vdu[id={}]: {}". 
- format(vnfd_id, vdu_id, e)) + raise LcmException( + "Error parsing Jinja2 to cloud-init content at vnfd[id={}]:vdu[id={}]: {}".format( + vnfd_id, vdu_id, e + ) + ) def _get_vdu_cloud_init_content(self, vdu, vnfd): cloud_init_content = cloud_init_file = None try: if vdu.get("cloud-init-file"): base_folder = vnfd["_admin"]["storage"] - cloud_init_file = "{}/{}/cloud_init/{}".format(base_folder["folder"], base_folder["pkg-dir"], - vdu["cloud-init-file"]) + cloud_init_file = "{}/{}/cloud_init/{}".format( + base_folder["folder"], + base_folder["pkg-dir"], + vdu["cloud-init-file"], + ) with self.fs.file_open(cloud_init_file, "r") as ci_file: cloud_init_content = ci_file.read() elif vdu.get("cloud-init"): @@ -352,11 +404,16 @@ class NsLcm(LcmBase): return cloud_init_content except FsException as e: - raise LcmException("Error reading vnfd[id={}]:vdu[id={}]:cloud-init-file={}: {}". - format(vnfd["id"], vdu["id"], cloud_init_file, e)) + raise LcmException( + "Error reading vnfd[id={}]:vdu[id={}]:cloud-init-file={}: {}".format( + vnfd["id"], vdu["id"], cloud_init_file, e + ) + ) def _get_vdu_additional_params(self, db_vnfr, vdu_id): - vdur = next(vdur for vdur in db_vnfr.get("vdur") if vdu_id == vdur["vdu-id-ref"]) + vdur = next( + vdur for vdur in db_vnfr.get("vdur") if vdu_id == vdur["vdu-id-ref"] + ) additional_params = vdur.get("additionalParams") return parse_yaml_strings(additional_params) @@ -393,7 +450,7 @@ class NsLcm(LcmBase): if isinstance(RO_ip_profile["dns-server"], list): RO_ip_profile["dns-address"] = [] for ds in RO_ip_profile.pop("dns-server"): - RO_ip_profile["dns-address"].append(ds['address']) + RO_ip_profile["dns-address"].append(ds["address"]) else: RO_ip_profile["dns-address"] = RO_ip_profile.pop("dns-server") if RO_ip_profile.get("ip-version") == "ipv4": @@ -407,8 +464,11 @@ class NsLcm(LcmBase): def _get_ro_vim_id_for_vim_account(self, vim_account): db_vim = self.db.get_one("vim_accounts", {"_id": vim_account}) if db_vim["_admin"]["operationalState"] != "ENABLED": - raise LcmException("VIM={} is not available. operationalState={}".format( - vim_account, db_vim["_admin"]["operationalState"])) + raise LcmException( + "VIM={} is not available. operationalState={}".format( + vim_account, db_vim["_admin"]["operationalState"] + ) + ) RO_vim_id = db_vim["_admin"]["deployed"]["RO"] return RO_vim_id @@ -416,8 +476,11 @@ class NsLcm(LcmBase): if isinstance(wim_account, str): db_wim = self.db.get_one("wim_accounts", {"_id": wim_account}) if db_wim["_admin"]["operationalState"] != "ENABLED": - raise LcmException("WIM={} is not available. operationalState={}".format( - wim_account, db_wim["_admin"]["operationalState"])) + raise LcmException( + "WIM={} is not available. operationalState={}".format( + wim_account, db_wim["_admin"]["operationalState"] + ) + ) RO_wim_id = db_wim["_admin"]["deployed"]["RO-account"] return RO_wim_id else: @@ -429,10 +492,20 @@ class NsLcm(LcmBase): db_update = {"_admin.modified": time()} if vdu_create: for vdu_id, vdu_count in vdu_create.items(): - vdur = next((vdur for vdur in reversed(db_vnfr["vdur"]) if vdur["vdu-id-ref"] == vdu_id), None) + vdur = next( + ( + vdur + for vdur in reversed(db_vnfr["vdur"]) + if vdur["vdu-id-ref"] == vdu_id + ), + None, + ) if not vdur: - raise LcmException("Error scaling OUT VNFR for {}. There is not any existing vnfr. Scaled to 0?". - format(vdu_id)) + raise LcmException( + "Error scaling OUT VNFR for {}. There is not any existing vnfr. 
Scaled to 0?".format( + vdu_id + ) + ) for count in range(vdu_count): vdur_copy = deepcopy(vdur) @@ -441,30 +514,56 @@ class NsLcm(LcmBase): vdur_copy["ip-address"]: None vdur_copy["_id"] = str(uuid4()) vdur_copy["count-index"] += count + 1 - vdur_copy["id"] = "{}-{}".format(vdur_copy["vdu-id-ref"], vdur_copy["count-index"]) + vdur_copy["id"] = "{}-{}".format( + vdur_copy["vdu-id-ref"], vdur_copy["count-index"] + ) vdur_copy.pop("vim_info", None) for iface in vdur_copy["interfaces"]: if iface.get("fixed-ip"): - iface["ip-address"] = self.increment_ip_mac(iface["ip-address"], count+1) + iface["ip-address"] = self.increment_ip_mac( + iface["ip-address"], count + 1 + ) else: iface.pop("ip-address", None) if iface.get("fixed-mac"): - iface["mac-address"] = self.increment_ip_mac(iface["mac-address"], count+1) + iface["mac-address"] = self.increment_ip_mac( + iface["mac-address"], count + 1 + ) else: iface.pop("mac-address", None) - iface.pop("mgmt_vnf", None) # only first vdu can be managment of vnf + iface.pop( + "mgmt_vnf", None + ) # only first vdu can be managment of vnf db_vdu_push_list.append(vdur_copy) # self.logger.debug("scale out, adding vdu={}".format(vdur_copy)) if vdu_delete: for vdu_id, vdu_count in vdu_delete.items(): if mark_delete: - indexes_to_delete = [iv[0] for iv in enumerate(db_vnfr["vdur"]) if iv[1]["vdu-id-ref"] == vdu_id] - db_update.update({"vdur.{}.status".format(i): "DELETING" for i in indexes_to_delete[-vdu_count:]}) + indexes_to_delete = [ + iv[0] + for iv in enumerate(db_vnfr["vdur"]) + if iv[1]["vdu-id-ref"] == vdu_id + ] + db_update.update( + { + "vdur.{}.status".format(i): "DELETING" + for i in indexes_to_delete[-vdu_count:] + } + ) else: # it must be deleted one by one because common.db does not allow otherwise - vdus_to_delete = [v for v in reversed(db_vnfr["vdur"]) if v["vdu-id-ref"] == vdu_id] + vdus_to_delete = [ + v + for v in reversed(db_vnfr["vdur"]) + if v["vdu-id-ref"] == vdu_id + ] for vdu in vdus_to_delete[:vdu_count]: - self.db.set_one("vnfrs", {"_id": db_vnfr["_id"]}, None, pull={"vdur": {"_id": vdu["_id"]}}) + self.db.set_one( + "vnfrs", + {"_id": db_vnfr["_id"]}, + None, + pull={"vdur": {"_id": vdu["_id"]}}, + ) db_push = {"vdur": db_vdu_push_list} if db_vdu_push_list else None self.db.set_one("vnfrs", {"_id": db_vnfr["_id"]}, db_update, push_list=db_push) # modify passed dictionary db_vnfr @@ -491,7 +590,9 @@ class NsLcm(LcmBase): ns_update_nsr["vld.{}".format(vld_index)] = vld break else: - raise LcmException("ns_update_nsr: Not found vld={} at RO info".format(vld["id"])) + raise LcmException( + "ns_update_nsr: Not found vld={} at RO info".format(vld["id"]) + ) def set_vnfr_at_error(self, db_vnfrs, error_text): try: @@ -503,7 +604,9 @@ class NsLcm(LcmBase): vnfr_update["vdur.{}.status".format(vdu_index)] = "ERROR" if error_text: vdur["status-detailed"] = str(error_text) - vnfr_update["vdur.{}.status-detailed".format(vdu_index)] = "ERROR" + vnfr_update[ + "vdur.{}.status-detailed".format(vdu_index) + ] = "ERROR" self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update) except DbException as e: self.logger.error("Cannot update vnf. 
{}".format(e)) @@ -521,10 +624,16 @@ class NsLcm(LcmBase): continue vnfr_update = {} if vnf_RO.get("ip_address"): - db_vnfr["ip-address"] = vnfr_update["ip-address"] = vnf_RO["ip_address"].split(";")[0] + db_vnfr["ip-address"] = vnfr_update["ip-address"] = vnf_RO[ + "ip_address" + ].split(";")[0] elif not db_vnfr.get("ip-address"): - if db_vnfr.get("vdur"): # if not VDUs, there is not ip_address - raise LcmExceptionNoMgmtIP("ns member_vnf_index '{}' has no IP address".format(vnf_index)) + if db_vnfr.get("vdur"): # if not VDUs, there is not ip_address + raise LcmExceptionNoMgmtIP( + "ns member_vnf_index '{}' has no IP address".format( + vnf_index + ) + ) for vdu_index, vdur in enumerate(get_iterable(db_vnfr, "vdur")): vdur_RO_count_index = 0 @@ -548,18 +657,29 @@ class NsLcm(LcmBase): for ifacer in get_iterable(vdur, "interfaces"): for interface_RO in get_iterable(vdur_RO, "interfaces"): if ifacer["name"] == interface_RO.get("internal_name"): - ifacer["ip-address"] = interface_RO.get("ip_address") - ifacer["mac-address"] = interface_RO.get("mac_address") + ifacer["ip-address"] = interface_RO.get( + "ip_address" + ) + ifacer["mac-address"] = interface_RO.get( + "mac_address" + ) break else: - raise LcmException("ns_update_vnfr: Not found member_vnf_index={} vdur={} interface={} " - "from VIM info" - .format(vnf_index, vdur["vdu-id-ref"], ifacer["name"])) + raise LcmException( + "ns_update_vnfr: Not found member_vnf_index={} vdur={} interface={} " + "from VIM info".format( + vnf_index, vdur["vdu-id-ref"], ifacer["name"] + ) + ) vnfr_update["vdur.{}".format(vdu_index)] = vdur break else: - raise LcmException("ns_update_vnfr: Not found member_vnf_index={} vdur={} count_index={} from " - "VIM info".format(vnf_index, vdur["vdu-id-ref"], vdur["count-index"])) + raise LcmException( + "ns_update_vnfr: Not found member_vnf_index={} vdur={} count_index={} from " + "VIM info".format( + vnf_index, vdur["vdu-id-ref"], vdur["count-index"] + ) + ) for vld_index, vld in enumerate(get_iterable(db_vnfr, "vld")): for net_RO in get_iterable(nsr_desc_RO, "nets"): @@ -572,14 +692,21 @@ class NsLcm(LcmBase): vnfr_update["vld.{}".format(vld_index)] = vld break else: - raise LcmException("ns_update_vnfr: Not found member_vnf_index={} vld={} from VIM info".format( - vnf_index, vld["id"])) + raise LcmException( + "ns_update_vnfr: Not found member_vnf_index={} vld={} from VIM info".format( + vnf_index, vld["id"] + ) + ) self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update) break else: - raise LcmException("ns_update_vnfr: Not found member_vnf_index={} from VIM info".format(vnf_index)) + raise LcmException( + "ns_update_vnfr: Not found member_vnf_index={} from VIM info".format( + vnf_index + ) + ) def _get_ns_config_info(self, nsr_id): """ @@ -599,12 +726,27 @@ class NsLcm(LcmBase): if not vca["vdu_id"]: mapping[vca["member-vnf-index"]] = vca["application"] else: - mapping["{}.{}.{}".format(vca["member-vnf-index"], vca["vdu_id"], vca["vdu_count_index"])] =\ - vca["application"] + mapping[ + "{}.{}.{}".format( + vca["member-vnf-index"], vca["vdu_id"], vca["vdu_count_index"] + ) + ] = vca["application"] return ns_config_info - async def _instantiate_ng_ro(self, logging_text, nsr_id, nsd, db_nsr, db_nslcmop, db_vnfrs, db_vnfds, - n2vc_key_list, stage, start_deploy, timeout_ns_deploy): + async def _instantiate_ng_ro( + self, + logging_text, + nsr_id, + nsd, + db_nsr, + db_nslcmop, + db_vnfrs, + db_vnfds, + n2vc_key_list, + stage, + start_deploy, + timeout_ns_deploy, + ): db_vims = {} @@ -617,13 +759,21 @@ class 
NsLcm(LcmBase): return db_vim # modify target_vld info with instantiation parameters - def parse_vld_instantiation_params(target_vim, target_vld, vld_params, target_sdn): + def parse_vld_instantiation_params( + target_vim, target_vld, vld_params, target_sdn + ): if vld_params.get("ip-profile"): - target_vld["vim_info"][target_vim]["ip_profile"] = vld_params["ip-profile"] + target_vld["vim_info"][target_vim]["ip_profile"] = vld_params[ + "ip-profile" + ] if vld_params.get("provider-network"): - target_vld["vim_info"][target_vim]["provider_network"] = vld_params["provider-network"] + target_vld["vim_info"][target_vim]["provider_network"] = vld_params[ + "provider-network" + ] if "sdn-ports" in vld_params["provider-network"] and target_sdn: - target_vld["vim_info"][target_sdn]["sdn-ports"] = vld_params["provider-network"]["sdn-ports"] + target_vld["vim_info"][target_sdn]["sdn-ports"] = vld_params[ + "provider-network" + ]["sdn-ports"] if vld_params.get("wimAccountId"): target_wim = "wim:{}".format(vld_params["wimAccountId"]) target_vld["vim_info"][target_wim] = {} @@ -632,9 +782,15 @@ class NsLcm(LcmBase): if isinstance(vld_params[param], dict): for vim, vim_net in vld_params[param].items(): other_target_vim = "vim:" + vim - populate_dict(target_vld["vim_info"], (other_target_vim, param.replace("-", "_")), vim_net) + populate_dict( + target_vld["vim_info"], + (other_target_vim, param.replace("-", "_")), + vim_net, + ) else: # isinstance str - target_vld["vim_info"][target_vim][param.replace("-", "_")] = vld_params[param] + target_vld["vim_info"][target_vim][ + param.replace("-", "_") + ] = vld_params[param] if vld_params.get("common_id"): target_vld["common_id"] = vld_params.get("common_id") @@ -655,8 +811,13 @@ class NsLcm(LcmBase): if db_nslcmop.get("lcmOperationType") != "instantiate": # get parameters of instantiation: - db_nslcmop_instantiate = self.db.get_list("nslcmops", {"nsInstanceId": db_nslcmop["nsInstanceId"], - "lcmOperationType": "instantiate"})[-1] + db_nslcmop_instantiate = self.db.get_list( + "nslcmops", + { + "nsInstanceId": db_nslcmop["nsInstanceId"], + "lcmOperationType": "instantiate", + }, + )[-1] ns_params = db_nslcmop_instantiate.get("operationParams") else: ns_params = db_nslcmop.get("operationParams") @@ -674,9 +835,9 @@ class NsLcm(LcmBase): "vim_info": { target_vim: { "vim_network_name": vld.get("vim-network-name"), - "vim_account_id": ns_params["vimAccountId"] + "vim_account_id": ns_params["vimAccountId"], } - } + }, } # check if this network needs SDN assist if vld.get("pci-interfaces"): @@ -686,32 +847,53 @@ class NsLcm(LcmBase): sdn_vld = "nsrs:{}:vld.{}".format(nsr_id, vld["id"]) target_sdn = "sdn:{}".format(sdnc_id) target_vld["vim_info"][target_sdn] = { - "sdn": True, "target_vim": target_vim, "vlds": [sdn_vld], "type": vld.get("type")} + "sdn": True, + "target_vim": target_vim, + "vlds": [sdn_vld], + "type": vld.get("type"), + } nsd_vnf_profiles = get_vnf_profiles(nsd) for nsd_vnf_profile in nsd_vnf_profiles: for cp in nsd_vnf_profile["virtual-link-connectivity"]: if cp["virtual-link-profile-id"] == vld["id"]: - cp2target["member_vnf:{}.{}".format( - cp["constituent-cpd-id"][0]["constituent-base-element-id"], - cp["constituent-cpd-id"][0]["constituent-cpd-id"] - )] = "nsrs:{}:vld.{}".format(nsr_id, vld_index) + cp2target[ + "member_vnf:{}.{}".format( + cp["constituent-cpd-id"][0][ + "constituent-base-element-id" + ], + cp["constituent-cpd-id"][0]["constituent-cpd-id"], + ) + ] = "nsrs:{}:vld.{}".format(nsr_id, vld_index) # check at nsd descriptor, if there 
is an ip-profile vld_params = {} nsd_vlp = find_in_list( get_virtual_link_profiles(nsd), - lambda a_link_profile: a_link_profile["virtual-link-desc-id"] == vld["id"]) - if nsd_vlp and nsd_vlp.get("virtual-link-protocol-data") and \ - nsd_vlp["virtual-link-protocol-data"].get("l3-protocol-data"): - ip_profile_source_data = nsd_vlp["virtual-link-protocol-data"]["l3-protocol-data"] + lambda a_link_profile: a_link_profile["virtual-link-desc-id"] + == vld["id"], + ) + if ( + nsd_vlp + and nsd_vlp.get("virtual-link-protocol-data") + and nsd_vlp["virtual-link-protocol-data"].get("l3-protocol-data") + ): + ip_profile_source_data = nsd_vlp["virtual-link-protocol-data"][ + "l3-protocol-data" + ] ip_profile_dest_data = {} if "ip-version" in ip_profile_source_data: - ip_profile_dest_data["ip-version"] = ip_profile_source_data["ip-version"] + ip_profile_dest_data["ip-version"] = ip_profile_source_data[ + "ip-version" + ] if "cidr" in ip_profile_source_data: - ip_profile_dest_data["subnet-address"] = ip_profile_source_data["cidr"] + ip_profile_dest_data["subnet-address"] = ip_profile_source_data[ + "cidr" + ] if "gateway-ip" in ip_profile_source_data: - ip_profile_dest_data["gateway-address"] = ip_profile_source_data["gateway-ip"] + ip_profile_dest_data["gateway-address"] = ip_profile_source_data[ + "gateway-ip" + ] if "dhcp-enabled" in ip_profile_source_data: ip_profile_dest_data["dhcp-params"] = { "enabled": ip_profile_source_data["dhcp-enabled"] @@ -719,29 +901,41 @@ class NsLcm(LcmBase): vld_params["ip-profile"] = ip_profile_dest_data # update vld_params with instantiation params - vld_instantiation_params = find_in_list(get_iterable(ns_params, "vld"), - lambda a_vld: a_vld["name"] in (vld["name"], vld["id"])) + vld_instantiation_params = find_in_list( + get_iterable(ns_params, "vld"), + lambda a_vld: a_vld["name"] in (vld["name"], vld["id"]), + ) if vld_instantiation_params: vld_params.update(vld_instantiation_params) parse_vld_instantiation_params(target_vim, target_vld, vld_params, None) target["ns"]["vld"].append(target_vld) for vnfr in db_vnfrs.values(): - vnfd = find_in_list(db_vnfds, lambda db_vnf: db_vnf["id"] == vnfr["vnfd-ref"]) - vnf_params = find_in_list(get_iterable(ns_params, "vnf"), - lambda a_vnf: a_vnf["member-vnf-index"] == vnfr["member-vnf-index-ref"]) + vnfd = find_in_list( + db_vnfds, lambda db_vnf: db_vnf["id"] == vnfr["vnfd-ref"] + ) + vnf_params = find_in_list( + get_iterable(ns_params, "vnf"), + lambda a_vnf: a_vnf["member-vnf-index"] == vnfr["member-vnf-index-ref"], + ) target_vnf = deepcopy(vnfr) target_vim = "vim:{}".format(vnfr["vim-account-id"]) for vld in target_vnf.get("vld", ()): # check if connected to a ns.vld, to fill target' - vnf_cp = find_in_list(vnfd.get("int-virtual-link-desc", ()), - lambda cpd: cpd.get("id") == vld["id"]) + vnf_cp = find_in_list( + vnfd.get("int-virtual-link-desc", ()), + lambda cpd: cpd.get("id") == vld["id"], + ) if vnf_cp: - ns_cp = "member_vnf:{}.{}".format(vnfr["member-vnf-index-ref"], vnf_cp["id"]) + ns_cp = "member_vnf:{}.{}".format( + vnfr["member-vnf-index-ref"], vnf_cp["id"] + ) if cp2target.get(ns_cp): vld["target"] = cp2target[ns_cp] - vld["vim_info"] = {target_vim: {"vim_network_name": vld.get("vim-network-name")}} + vld["vim_info"] = { + target_vim: {"vim_network_name": vld.get("vim-network-name")} + } # check if this network needs SDN assist target_sdn = None if vld.get("pci-interfaces"): @@ -751,24 +945,39 @@ class NsLcm(LcmBase): sdn_vld = "vnfrs:{}:vld.{}".format(target_vnf["_id"], vld["id"]) target_sdn = 
"sdn:{}".format(sdnc_id) vld["vim_info"][target_sdn] = { - "sdn": True, "target_vim": target_vim, "vlds": [sdn_vld], "type": vld.get("type")} + "sdn": True, + "target_vim": target_vim, + "vlds": [sdn_vld], + "type": vld.get("type"), + } # check at vnfd descriptor, if there is an ip-profile vld_params = {} vnfd_vlp = find_in_list( get_virtual_link_profiles(vnfd), - lambda a_link_profile: a_link_profile["id"] == vld["id"] + lambda a_link_profile: a_link_profile["id"] == vld["id"], ) - if vnfd_vlp and vnfd_vlp.get("virtual-link-protocol-data") and \ - vnfd_vlp["virtual-link-protocol-data"].get("l3-protocol-data"): - ip_profile_source_data = vnfd_vlp["virtual-link-protocol-data"]["l3-protocol-data"] + if ( + vnfd_vlp + and vnfd_vlp.get("virtual-link-protocol-data") + and vnfd_vlp["virtual-link-protocol-data"].get("l3-protocol-data") + ): + ip_profile_source_data = vnfd_vlp["virtual-link-protocol-data"][ + "l3-protocol-data" + ] ip_profile_dest_data = {} if "ip-version" in ip_profile_source_data: - ip_profile_dest_data["ip-version"] = ip_profile_source_data["ip-version"] + ip_profile_dest_data["ip-version"] = ip_profile_source_data[ + "ip-version" + ] if "cidr" in ip_profile_source_data: - ip_profile_dest_data["subnet-address"] = ip_profile_source_data["cidr"] + ip_profile_dest_data["subnet-address"] = ip_profile_source_data[ + "cidr" + ] if "gateway-ip" in ip_profile_source_data: - ip_profile_dest_data["gateway-address"] = ip_profile_source_data["gateway-ip"] + ip_profile_dest_data[ + "gateway-address" + ] = ip_profile_source_data["gateway-ip"] if "dhcp-enabled" in ip_profile_source_data: ip_profile_dest_data["dhcp-params"] = { "enabled": ip_profile_source_data["dhcp-enabled"] @@ -777,8 +986,10 @@ class NsLcm(LcmBase): vld_params["ip-profile"] = ip_profile_dest_data # update vld_params with instantiation params if vnf_params: - vld_instantiation_params = find_in_list(get_iterable(vnf_params, "internal-vld"), - lambda i_vld: i_vld["name"] == vld["id"]) + vld_instantiation_params = find_in_list( + get_iterable(vnf_params, "internal-vld"), + lambda i_vld: i_vld["name"] == vld["id"], + ) if vld_instantiation_params: vld_params.update(vld_instantiation_params) parse_vld_instantiation_params(target_vim, vld, vld_params, target_sdn) @@ -794,17 +1005,28 @@ class NsLcm(LcmBase): if ssh_keys_all: vdu_configuration = get_configuration(vnfd, vdur["vdu-id-ref"]) vnf_configuration = get_configuration(vnfd, vnfd["id"]) - if vdu_configuration and vdu_configuration.get("config-access") and \ - vdu_configuration.get("config-access").get("ssh-access"): + if ( + vdu_configuration + and vdu_configuration.get("config-access") + and vdu_configuration.get("config-access").get("ssh-access") + ): vdur["ssh-keys"] = ssh_keys_all - vdur["ssh-access-required"] = vdu_configuration["config-access"]["ssh-access"]["required"] - elif vnf_configuration and vnf_configuration.get("config-access") and \ - vnf_configuration.get("config-access").get("ssh-access") and \ - any(iface.get("mgmt-vnf") for iface in vdur["interfaces"]): + vdur["ssh-access-required"] = vdu_configuration[ + "config-access" + ]["ssh-access"]["required"] + elif ( + vnf_configuration + and vnf_configuration.get("config-access") + and vnf_configuration.get("config-access").get("ssh-access") + and any(iface.get("mgmt-vnf") for iface in vdur["interfaces"]) + ): vdur["ssh-keys"] = ssh_keys_all - vdur["ssh-access-required"] = vnf_configuration["config-access"]["ssh-access"]["required"] - elif ssh_keys_instantiation and \ - find_in_list(vdur["interfaces"], lambda 
iface: iface.get("mgmt-vnf")): + vdur["ssh-access-required"] = vnf_configuration[ + "config-access" + ]["ssh-access"]["required"] + elif ssh_keys_instantiation and find_in_list( + vdur["interfaces"], lambda iface: iface.get("mgmt-vnf") + ): vdur["ssh-keys"] = ssh_keys_instantiation self.logger.debug("NS > vdur > {}".format(vdur)) @@ -812,21 +1034,36 @@ class NsLcm(LcmBase): vdud = get_vdu(vnfd, vdur["vdu-id-ref"]) # cloud-init if vdud.get("cloud-init-file"): - vdur["cloud-init"] = "{}:file:{}".format(vnfd["_id"], vdud.get("cloud-init-file")) + vdur["cloud-init"] = "{}:file:{}".format( + vnfd["_id"], vdud.get("cloud-init-file") + ) # read file and put content at target.cloul_init_content. Avoid ng_ro to use shared package system if vdur["cloud-init"] not in target["cloud_init_content"]: base_folder = vnfd["_admin"]["storage"] - cloud_init_file = "{}/{}/cloud_init/{}".format(base_folder["folder"], base_folder["pkg-dir"], - vdud.get("cloud-init-file")) + cloud_init_file = "{}/{}/cloud_init/{}".format( + base_folder["folder"], + base_folder["pkg-dir"], + vdud.get("cloud-init-file"), + ) with self.fs.file_open(cloud_init_file, "r") as ci_file: - target["cloud_init_content"][vdur["cloud-init"]] = ci_file.read() + target["cloud_init_content"][ + vdur["cloud-init"] + ] = ci_file.read() elif vdud.get("cloud-init"): - vdur["cloud-init"] = "{}:vdu:{}".format(vnfd["_id"], get_vdu_index(vnfd, vdur["vdu-id-ref"])) + vdur["cloud-init"] = "{}:vdu:{}".format( + vnfd["_id"], get_vdu_index(vnfd, vdur["vdu-id-ref"]) + ) # put content at target.cloul_init_content. Avoid ng_ro read vnfd descriptor - target["cloud_init_content"][vdur["cloud-init"]] = vdud["cloud-init"] + target["cloud_init_content"][vdur["cloud-init"]] = vdud[ + "cloud-init" + ] vdur["additionalParams"] = vdur.get("additionalParams") or {} - deploy_params_vdu = self._format_additional_params(vdur.get("additionalParams") or {}) - deploy_params_vdu["OSM"] = get_osm_params(vnfr, vdur["vdu-id-ref"], vdur["count-index"]) + deploy_params_vdu = self._format_additional_params( + vdur.get("additionalParams") or {} + ) + deploy_params_vdu["OSM"] = get_osm_params( + vnfr, vdur["vdu-id-ref"], vdur["count-index"] + ) vdur["additionalParams"] = deploy_params_vdu # flavor @@ -845,7 +1082,9 @@ class NsLcm(LcmBase): ns_alt_image = target["image"][int(alt_image_id)] if vim_type == ns_alt_image.get("vim-type"): # must use alternative image - self.logger.debug("use alternative image id: {}".format(alt_image_id)) + self.logger.debug( + "use alternative image id: {}".format(alt_image_id) + ) ns_image_id = alt_image_id vdur["ns-image-id"] = ns_image_id break @@ -865,20 +1104,32 @@ class NsLcm(LcmBase): desc = await self.RO.deploy(nsr_id, target) self.logger.debug("RO return > {}".format(desc)) action_id = desc["action_id"] - await self._wait_ng_ro(nsr_id, action_id, nslcmop_id, start_deploy, timeout_ns_deploy, stage) + await self._wait_ng_ro( + nsr_id, action_id, nslcmop_id, start_deploy, timeout_ns_deploy, stage + ) # Updating NSR db_nsr_update = { "_admin.deployed.RO.operational-status": "running", - "detailed-status": " ".join(stage) + "detailed-status": " ".join(stage), } # db_nsr["_admin.deployed.RO.detailed-status"] = "Deployed at VIM" self.update_db_2("nsrs", nsr_id, db_nsr_update) self._write_op_status(nslcmop_id, stage) - self.logger.debug(logging_text + "ns deployed at RO. RO_id={}".format(action_id)) + self.logger.debug( + logging_text + "ns deployed at RO. 
RO_id={}".format(action_id) + ) return - async def _wait_ng_ro(self, nsr_id, action_id, nslcmop_id=None, start_time=None, timeout=600, stage=None): + async def _wait_ng_ro( + self, + nsr_id, + action_id, + nslcmop_id=None, + start_time=None, + timeout=600, + stage=None, + ): detailed_status_old = None db_nsr_update = {} start_time = start_time or time() @@ -895,7 +1146,9 @@ class NsLcm(LcmBase): stage[2] = "Deployed at VIM" break else: - assert False, "ROclient.check_ns_status returns unknown {}".format(desc_status["status"]) + assert False, "ROclient.check_ns_status returns unknown {}".format( + desc_status["status"] + ) if stage and nslcmop_id and stage[2] != detailed_status_old: detailed_status_old = stage[2] db_nsr_update["detailed-status"] = " ".join(stage) @@ -905,7 +1158,9 @@ class NsLcm(LcmBase): else: # timeout_ns_deploy raise NgRoException("Timeout waiting ns to deploy") - async def _terminate_ng_ro(self, logging_text, nsr_deployed, nsr_id, nslcmop_id, stage): + async def _terminate_ng_ro( + self, logging_text, nsr_deployed, nsr_id, nslcmop_id, stage + ): db_nsr_update = {} failed_detail = [] action_id = None @@ -916,17 +1171,22 @@ class NsLcm(LcmBase): "vnf": [], "image": [], "flavor": [], - "action_id": nslcmop_id + "action_id": nslcmop_id, } desc = await self.RO.deploy(nsr_id, target) action_id = desc["action_id"] db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = action_id db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETING" - self.logger.debug(logging_text + "ns terminate action at RO. action_id={}".format(action_id)) + self.logger.debug( + logging_text + + "ns terminate action at RO. action_id={}".format(action_id) + ) # wait until done delete_timeout = 20 * 60 # 20 minutes - await self._wait_ng_ro(nsr_id, action_id, nslcmop_id, start_deploy, delete_timeout, stage) + await self._wait_ng_ro( + nsr_id, action_id, nslcmop_id, start_deploy, delete_timeout, stage + ) db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED" @@ -937,13 +1197,21 @@ class NsLcm(LcmBase): db_nsr_update["_admin.deployed.RO.nsr_id"] = None db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED" db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None - self.logger.debug(logging_text + "RO_action_id={} already deleted".format(action_id)) + self.logger.debug( + logging_text + "RO_action_id={} already deleted".format(action_id) + ) elif isinstance(e, NgRoException) and e.http_code == 409: # conflict failed_detail.append("delete conflict: {}".format(e)) - self.logger.debug(logging_text + "RO_action_id={} delete conflict: {}".format(action_id, e)) + self.logger.debug( + logging_text + + "RO_action_id={} delete conflict: {}".format(action_id, e) + ) else: failed_detail.append("delete error: {}".format(e)) - self.logger.error(logging_text + "RO_action_id={} delete error: {}".format(action_id, e)) + self.logger.error( + logging_text + + "RO_action_id={} delete error: {}".format(action_id, e) + ) if failed_detail: stage[2] = "Error deleting from VIM" @@ -957,8 +1225,18 @@ class NsLcm(LcmBase): raise LcmException("; ".join(failed_detail)) return - async def instantiate_RO(self, logging_text, nsr_id, nsd, db_nsr, db_nslcmop, db_vnfrs, db_vnfds, - n2vc_key_list, stage): + async def instantiate_RO( + self, + logging_text, + nsr_id, + nsd, + db_nsr, + db_nslcmop, + db_vnfrs, + db_vnfds, + n2vc_key_list, + stage, + ): """ Instantiate at RO :param logging_text: preffix text to use at logging @@ -978,7 +1256,9 @@ class 
NsLcm(LcmBase): if ns_params and ns_params.get("timeout_ns_deploy"): timeout_ns_deploy = ns_params["timeout_ns_deploy"] else: - timeout_ns_deploy = self.timeout.get("ns_deploy", self.timeout_ns_deploy) + timeout_ns_deploy = self.timeout.get( + "ns_deploy", self.timeout_ns_deploy + ) # Check for and optionally request placement optimization. Database will be updated if placement activated stage[2] = "Waiting for Placement." @@ -990,14 +1270,34 @@ class NsLcm(LcmBase): else: ns_params["vimAccountId"] == vnfr["vim-account-id"] - return await self._instantiate_ng_ro(logging_text, nsr_id, nsd, db_nsr, db_nslcmop, db_vnfrs, - db_vnfds, n2vc_key_list, stage, start_deploy, timeout_ns_deploy) + return await self._instantiate_ng_ro( + logging_text, + nsr_id, + nsd, + db_nsr, + db_nslcmop, + db_vnfrs, + db_vnfds, + n2vc_key_list, + stage, + start_deploy, + timeout_ns_deploy, + ) except Exception as e: stage[2] = "ERROR deploying at VIM" self.set_vnfr_at_error(db_vnfrs, str(e)) - self.logger.error("Error deploying at VIM {}".format(e), - exc_info=not isinstance(e, (ROclient.ROClientException, LcmException, DbException, - NgRoException))) + self.logger.error( + "Error deploying at VIM {}".format(e), + exc_info=not isinstance( + e, + ( + ROclient.ROClientException, + LcmException, + DbException, + NgRoException, + ), + ), + ) raise async def wait_kdu_up(self, logging_text, nsr_id, vnfr_id, kdu_name): @@ -1015,20 +1315,33 @@ class NsLcm(LcmBase): while nb_tries < 360: db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id}) - kdur = next((x for x in get_iterable(db_vnfr, "kdur") if x.get("kdu-name") == kdu_name), None) + kdur = next( + ( + x + for x in get_iterable(db_vnfr, "kdur") + if x.get("kdu-name") == kdu_name + ), + None, + ) if not kdur: - raise LcmException("Not found vnfr_id={}, kdu_name={}".format(vnfr_id, kdu_name)) + raise LcmException( + "Not found vnfr_id={}, kdu_name={}".format(vnfr_id, kdu_name) + ) if kdur.get("status"): if kdur["status"] in ("READY", "ENABLED"): return kdur.get("ip-address") else: - raise LcmException("target KDU={} is in error state".format(kdu_name)) + raise LcmException( + "target KDU={} is in error state".format(kdu_name) + ) await asyncio.sleep(10, loop=self.loop) nb_tries += 1 raise LcmException("Timeout waiting KDU={} instantiated".format(kdu_name)) - async def wait_vm_up_insert_key_ro(self, logging_text, nsr_id, vnfr_id, vdu_id, vdu_index, pub_key=None, user=None): + async def wait_vm_up_insert_key_ro( + self, logging_text, nsr_id, vnfr_id, vdu_id, vdu_index, pub_key=None, user=None + ): """ Wait for ip addres at RO, and optionally, insert public key in virtual machine :param logging_text: prefix use for logging @@ -1052,7 +1365,9 @@ class NsLcm(LcmBase): ro_retries += 1 if ro_retries >= 360: # 1 hour - raise LcmException("Not found _admin.deployed.RO.nsr_id for nsr_id: {}".format(nsr_id)) + raise LcmException( + "Not found _admin.deployed.RO.nsr_id for nsr_id: {}".format(nsr_id) + ) await asyncio.sleep(10, loop=self.loop) @@ -1062,33 +1377,62 @@ class NsLcm(LcmBase): if not vdu_id: # for the VNF case if db_vnfr.get("status") == "ERROR": - raise LcmException("Cannot inject ssh-key because target VNF is in error state") + raise LcmException( + "Cannot inject ssh-key because target VNF is in error state" + ) ip_address = db_vnfr.get("ip-address") if not ip_address: continue - vdur = next((x for x in get_iterable(db_vnfr, "vdur") if x.get("ip-address") == ip_address), None) + vdur = next( + ( + x + for x in get_iterable(db_vnfr, "vdur") + if x.get("ip-address") == 
ip_address + ), + None, + ) else: # VDU case - vdur = next((x for x in get_iterable(db_vnfr, "vdur") - if x.get("vdu-id-ref") == vdu_id and x.get("count-index") == vdu_index), None) + vdur = next( + ( + x + for x in get_iterable(db_vnfr, "vdur") + if x.get("vdu-id-ref") == vdu_id + and x.get("count-index") == vdu_index + ), + None, + ) - if not vdur and len(db_vnfr.get("vdur", ())) == 1: # If only one, this should be the target vdu + if ( + not vdur and len(db_vnfr.get("vdur", ())) == 1 + ): # If only one, this should be the target vdu vdur = db_vnfr["vdur"][0] if not vdur: - raise LcmException("Not found vnfr_id={}, vdu_id={}, vdu_index={}".format(vnfr_id, vdu_id, - vdu_index)) + raise LcmException( + "Not found vnfr_id={}, vdu_id={}, vdu_index={}".format( + vnfr_id, vdu_id, vdu_index + ) + ) # New generation RO stores information at "vim_info" ng_ro_status = None target_vim = None if vdur.get("vim_info"): - target_vim = next(t for t in vdur["vim_info"]) # there should be only one key + target_vim = next( + t for t in vdur["vim_info"] + ) # there should be only one key ng_ro_status = vdur["vim_info"][target_vim].get("vim_status") - if vdur.get("pdu-type") or vdur.get("status") == "ACTIVE" or ng_ro_status == "ACTIVE": + if ( + vdur.get("pdu-type") + or vdur.get("status") == "ACTIVE" + or ng_ro_status == "ACTIVE" + ): ip_address = vdur.get("ip-address") if not ip_address: continue target_vdu_id = vdur["vdu-id-ref"] elif vdur.get("status") == "ERROR" or ng_ro_status == "ERROR": - raise LcmException("Cannot inject ssh-key because target VM is in error state") + raise LcmException( + "Cannot inject ssh-key because target VM is in error state" + ) if not target_vdu_id: continue @@ -1101,11 +1445,18 @@ class NsLcm(LcmBase): self.logger.error(logging_text + "Cannot inject ssh-ky to a PDU") return ip_address try: - ro_vm_id = "{}-{}".format(db_vnfr["member-vnf-index-ref"], target_vdu_id) # TODO add vdu_index + ro_vm_id = "{}-{}".format( + db_vnfr["member-vnf-index-ref"], target_vdu_id + ) # TODO add vdu_index if self.ng_ro: - target = {"action": {"action": "inject_ssh_key", "key": pub_key, "user": user}, - "vnf": [{"_id": vnfr_id, "vdur": [{"id": vdur["id"]}]}], - } + target = { + "action": { + "action": "inject_ssh_key", + "key": pub_key, + "user": user, + }, + "vnf": [{"_id": vnfr_id, "vdur": [{"id": vdur["id"]}]}], + } desc = await self.RO.deploy(nsr_id, target) action_id = desc["action_id"] await self._wait_ng_ro(nsr_id, action_id, timeout=600) @@ -1114,33 +1465,52 @@ class NsLcm(LcmBase): # wait until NS is deployed at RO if not ro_nsr_id: db_nsrs = self.db.get_one("nsrs", {"_id": nsr_id}) - ro_nsr_id = deep_get(db_nsrs, ("_admin", "deployed", "RO", "nsr_id")) + ro_nsr_id = deep_get( + db_nsrs, ("_admin", "deployed", "RO", "nsr_id") + ) if not ro_nsr_id: continue result_dict = await self.RO.create_action( item="ns", item_id_name=ro_nsr_id, - descriptor={"add_public_key": pub_key, "vms": [ro_vm_id], "user": user} + descriptor={ + "add_public_key": pub_key, + "vms": [ro_vm_id], + "user": user, + }, ) # result_dict contains the format {VM-id: {vim_result: 200, description: text}} if not result_dict or not isinstance(result_dict, dict): - raise LcmException("Unknown response from RO when injecting key") + raise LcmException( + "Unknown response from RO when injecting key" + ) for result in result_dict.values(): if result.get("vim_result") == 200: break else: - raise ROclient.ROClientException("error injecting key: {}".format( - result.get("description"))) + raise ROclient.ROClientException( + 
"error injecting key: {}".format( + result.get("description") + ) + ) break except NgRoException as e: - raise LcmException("Reaching max tries injecting key. Error: {}".format(e)) + raise LcmException( + "Reaching max tries injecting key. Error: {}".format(e) + ) except ROclient.ROClientException as e: if not nb_tries: - self.logger.debug(logging_text + "error injecting key: {}. Retrying until {} seconds". - format(e, 20*10)) + self.logger.debug( + logging_text + + "error injecting key: {}. Retrying until {} seconds".format( + e, 20 * 10 + ) + ) nb_tries += 1 if nb_tries >= 20: - raise LcmException("Reaching max tries injecting key. Error: {}".format(e)) + raise LcmException( + "Reaching max tries injecting key. Error: {}".format(e) + ) else: break @@ -1163,13 +1533,17 @@ class NsLcm(LcmBase): if index == vca_index: # myself continue - if not my_vca.get("member-vnf-index") or \ - (vca_deployed.get("member-vnf-index") == my_vca.get("member-vnf-index")): + if not my_vca.get("member-vnf-index") or ( + vca_deployed.get("member-vnf-index") + == my_vca.get("member-vnf-index") + ): internal_status = configuration_status_list[index].get("status") - if internal_status == 'READY': + if internal_status == "READY": continue - elif internal_status == 'BROKEN': - raise LcmException("Configuration aborted because dependent charm/s has failed") + elif internal_status == "BROKEN": + raise LcmException( + "Configuration aborted because dependent charm/s has failed" + ) else: break else: @@ -1181,28 +1555,43 @@ class NsLcm(LcmBase): raise LcmException("Configuration aborted because dependent charm/s timeout") def get_vca_id(self, db_vnfr: dict, db_nsr: dict): - return ( - deep_get(db_vnfr, ("vca-id",)) or - deep_get(db_nsr, ("instantiate_params", "vcaId")) + return deep_get(db_vnfr, ("vca-id",)) or deep_get( + db_nsr, ("instantiate_params", "vcaId") ) - async def instantiate_N2VC(self, logging_text, vca_index, nsi_id, db_nsr, db_vnfr, vdu_id, kdu_name, vdu_index, - config_descriptor, deploy_params, base_folder, nslcmop_id, stage, vca_type, vca_name, - ee_config_descriptor): + async def instantiate_N2VC( + self, + logging_text, + vca_index, + nsi_id, + db_nsr, + db_vnfr, + vdu_id, + kdu_name, + vdu_index, + config_descriptor, + deploy_params, + base_folder, + nslcmop_id, + stage, + vca_type, + vca_name, + ee_config_descriptor, + ): nsr_id = db_nsr["_id"] db_update_entry = "_admin.deployed.VCA.{}.".format(vca_index) vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"] vca_deployed = db_nsr["_admin"]["deployed"]["VCA"][vca_index] osm_config = {"osm": {"ns_id": db_nsr["_id"]}} db_dict = { - 'collection': 'nsrs', - 'filter': {'_id': nsr_id}, - 'path': db_update_entry + "collection": "nsrs", + "filter": {"_id": nsr_id}, + "path": db_update_entry, } step = "" try: - element_type = 'NS' + element_type = "NS" element_under_configuration = nsr_id vnfr_id = None @@ -1210,22 +1599,20 @@ class NsLcm(LcmBase): vnfr_id = db_vnfr["_id"] osm_config["osm"]["vnf_id"] = vnfr_id - namespace = "{nsi}.{ns}".format( - nsi=nsi_id if nsi_id else "", - ns=nsr_id) + namespace = "{nsi}.{ns}".format(nsi=nsi_id if nsi_id else "", ns=nsr_id) if vnfr_id: - element_type = 'VNF' + element_type = "VNF" element_under_configuration = vnfr_id namespace += ".{}-{}".format(vnfr_id, vdu_index or 0) if vdu_id: namespace += ".{}-{}".format(vdu_id, vdu_index or 0) - element_type = 'VDU' + element_type = "VDU" element_under_configuration = "{}-{}".format(vdu_id, vdu_index or 0) osm_config["osm"]["vdu_id"] = vdu_id elif kdu_name: namespace += 
".{}.{}".format(kdu_name, vdu_index or 0) - element_type = 'KDU' + element_type = "KDU" element_under_configuration = kdu_name osm_config["osm"]["kdu_name"] = kdu_name @@ -1233,24 +1620,37 @@ class NsLcm(LcmBase): artifact_path = "{}/{}/{}/{}".format( base_folder["folder"], base_folder["pkg-dir"], - "charms" if vca_type in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm") else "helm-charts", - vca_name + "charms" + if vca_type in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm") + else "helm-charts", + vca_name, ) self.logger.debug("Artifact path > {}".format(artifact_path)) # get initial_config_primitive_list that applies to this element - initial_config_primitive_list = config_descriptor.get('initial-config-primitive') + initial_config_primitive_list = config_descriptor.get( + "initial-config-primitive" + ) - self.logger.debug("Initial config primitive list > {}".format(initial_config_primitive_list)) + self.logger.debug( + "Initial config primitive list > {}".format( + initial_config_primitive_list + ) + ) # add config if not present for NS charm ee_descriptor_id = ee_config_descriptor.get("id") self.logger.debug("EE Descriptor > {}".format(ee_descriptor_id)) - initial_config_primitive_list = get_ee_sorted_initial_config_primitive_list(initial_config_primitive_list, - vca_deployed, ee_descriptor_id) + initial_config_primitive_list = get_ee_sorted_initial_config_primitive_list( + initial_config_primitive_list, vca_deployed, ee_descriptor_id + ) - self.logger.debug("Initial config primitive list #2 > {}".format(initial_config_primitive_list)) + self.logger.debug( + "Initial config primitive list #2 > {}".format( + initial_config_primitive_list + ) + ) # n2vc_redesign STEP 3.1 # find old ee_id if exists ee_id = vca_deployed.get("ee_id") @@ -1262,35 +1662,39 @@ class NsLcm(LcmBase): self._write_configuration_status( nsr_id=nsr_id, vca_index=vca_index, - status='CREATING', + status="CREATING", element_under_configuration=element_under_configuration, - element_type=element_type + element_type=element_type, ) step = "create execution environment" - self.logger.debug(logging_text + step) + self.logger.debug(logging_text + step) ee_id = None credentials = None if vca_type == "k8s_proxy_charm": ee_id = await self.vca_map[vca_type].install_k8s_proxy_charm( - charm_name=artifact_path[artifact_path.rfind("/") + 1:], + charm_name=artifact_path[artifact_path.rfind("/") + 1 :], namespace=namespace, artifact_path=artifact_path, db_dict=db_dict, vca_id=vca_id, ) - elif vca_type == "helm" or vca_type == "helm-v3": - ee_id, credentials = await self.vca_map[vca_type].create_execution_environment( + elif vca_type == "helm" or vca_type == "helm-v3": + ee_id, credentials = await self.vca_map[ + vca_type + ].create_execution_environment( namespace=namespace, reuse_ee_id=ee_id, db_dict=db_dict, config=osm_config, artifact_path=artifact_path, - vca_type=vca_type + vca_type=vca_type, ) - else: - ee_id, credentials = await self.vca_map[vca_type].create_execution_environment( + else: + ee_id, credentials = await self.vca_map[ + vca_type + ].create_execution_environment( namespace=namespace, reuse_ee_id=ee_id, db_dict=db_dict, @@ -1300,11 +1704,20 @@ class NsLcm(LcmBase): elif vca_type == "native_charm": step = "Waiting to VM being up and getting IP address" self.logger.debug(logging_text + step) - rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(logging_text, nsr_id, vnfr_id, vdu_id, vdu_index, - user=None, pub_key=None) + rw_mgmt_ip = await self.wait_vm_up_insert_key_ro( + logging_text, + nsr_id, + 
vnfr_id, + vdu_id, + vdu_index, + user=None, + pub_key=None, + ) credentials = {"hostname": rw_mgmt_ip} # get username - username = deep_get(config_descriptor, ("config-access", "ssh-access", "default-user")) + username = deep_get( + config_descriptor, ("config-access", "ssh-access", "default-user") + ) # TODO remove this when changes on IM regarding config-access:ssh-access:default-user were # merged. Meanwhile let's get username from initial-config-primitive if not username and initial_config_primitive_list: @@ -1314,17 +1727,19 @@ class NsLcm(LcmBase): username = param["value"] break if not username: - raise LcmException("Cannot determine the username neither with 'initial-config-primitive' nor with " - "'config-access.ssh-access.default-user'") + raise LcmException( + "Cannot determine the username neither with 'initial-config-primitive' nor with " + "'config-access.ssh-access.default-user'" + ) credentials["username"] = username # n2vc_redesign STEP 3.2 self._write_configuration_status( nsr_id=nsr_id, vca_index=vca_index, - status='REGISTERING', + status="REGISTERING", element_under_configuration=element_under_configuration, - element_type=element_type + element_type=element_type, ) step = "register execution environment {}".format(credentials) @@ -1338,7 +1753,7 @@ class NsLcm(LcmBase): # for compatibility with MON/POL modules, the need model and application name at database # TODO ask MON/POL if needed to not assuming anymore the format "model_name.application_name" - ee_id_parts = ee_id.split('.') + ee_id_parts = ee_id.split(".") db_nsr_update = {db_update_entry + "ee_id": ee_id} if len(ee_id_parts) >= 2: model_name = ee_id_parts[0] @@ -1352,22 +1767,23 @@ class NsLcm(LcmBase): self._write_configuration_status( nsr_id=nsr_id, vca_index=vca_index, - status='INSTALLING SW', + status="INSTALLING SW", element_under_configuration=element_under_configuration, element_type=element_type, - other_update=db_nsr_update + other_update=db_nsr_update, ) # TODO check if already done self.logger.debug(logging_text + step) config = None if vca_type == "native_charm": - config_primitive = next((p for p in initial_config_primitive_list if p["name"] == "config"), None) + config_primitive = next( + (p for p in initial_config_primitive_list if p["name"] == "config"), + None, + ) if config_primitive: config = self._map_primitive_params( - config_primitive, - {}, - deploy_params + config_primitive, {}, deploy_params ) num_units = 1 if vca_type == "lxc_proxy_charm": @@ -1391,11 +1807,18 @@ class NsLcm(LcmBase): ) # write in db flag of configuration_sw already installed - self.update_db_2("nsrs", nsr_id, {db_update_entry + "config_sw_installed": True}) + self.update_db_2( + "nsrs", nsr_id, {db_update_entry + "config_sw_installed": True} + ) # add relations for this VCA (wait for other peers related with this VCA) - await self._add_vca_relations(logging_text=logging_text, nsr_id=nsr_id, - vca_index=vca_index, vca_id=vca_id, vca_type=vca_type) + await self._add_vca_relations( + logging_text=logging_text, + nsr_id=nsr_id, + vca_index=vca_index, + vca_id=vca_id, + vca_type=vca_type, + ) # if SSH access is required, then get execution environment SSH public # if native charm we have waited already to VM be UP @@ -1403,18 +1826,23 @@ class NsLcm(LcmBase): pub_key = None user = None # self.logger.debug("get ssh key block") - if deep_get(config_descriptor, ("config-access", "ssh-access", "required")): + if deep_get( + config_descriptor, ("config-access", "ssh-access", "required") + ): # self.logger.debug("ssh key 
needed") # Needed to inject a ssh key - user = deep_get(config_descriptor, ("config-access", "ssh-access", "default-user")) + user = deep_get( + config_descriptor, + ("config-access", "ssh-access", "default-user"), + ) step = "Install configuration Software, getting public ssh key" pub_key = await self.vca_map[vca_type].get_ee_ssh_public__key( - ee_id=ee_id, - db_dict=db_dict, - vca_id=vca_id + ee_id=ee_id, db_dict=db_dict, vca_id=vca_id ) - step = "Insert public key into VM user={} ssh_key={}".format(user, pub_key) + step = "Insert public key into VM user={} ssh_key={}".format( + user, pub_key + ) else: # self.logger.debug("no need to get ssh key") step = "Waiting to VM being up and getting IP address" @@ -1424,20 +1852,29 @@ class NsLcm(LcmBase): # wait for RO (ip-address) Insert pub_key into VM if vnfr_id: if kdu_name: - rw_mgmt_ip = await self.wait_kdu_up(logging_text, nsr_id, vnfr_id, kdu_name) + rw_mgmt_ip = await self.wait_kdu_up( + logging_text, nsr_id, vnfr_id, kdu_name + ) else: - rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(logging_text, nsr_id, vnfr_id, vdu_id, - vdu_index, user=user, pub_key=pub_key) + rw_mgmt_ip = await self.wait_vm_up_insert_key_ro( + logging_text, + nsr_id, + vnfr_id, + vdu_id, + vdu_index, + user=user, + pub_key=pub_key, + ) else: - rw_mgmt_ip = None # This is for a NS configuration + rw_mgmt_ip = None # This is for a NS configuration - self.logger.debug(logging_text + ' VM_ip_address={}'.format(rw_mgmt_ip)) + self.logger.debug(logging_text + " VM_ip_address={}".format(rw_mgmt_ip)) # store rw_mgmt_ip in deploy params for later replacement deploy_params["rw_mgmt_ip"] = rw_mgmt_ip # n2vc_redesign STEP 6 Execute initial config primitive - step = 'execute initial config primitive' + step = "execute initial config primitive" # wait for dependent primitives execution (NS -> VNF -> VDU) if initial_config_primitive_list: @@ -1447,34 +1884,35 @@ class NsLcm(LcmBase): my_vca = vca_deployed_list[vca_index] if my_vca.get("vdu_id") or my_vca.get("kdu_name"): # VDU or KDU - stage[0] = 'Stage 3/5: running Day-1 primitives for VDU.' + stage[0] = "Stage 3/5: running Day-1 primitives for VDU." elif my_vca.get("member-vnf-index"): # VNF - stage[0] = 'Stage 4/5: running Day-1 primitives for VNF.' + stage[0] = "Stage 4/5: running Day-1 primitives for VNF." else: # NS - stage[0] = 'Stage 5/5: running Day-1 primitives for NS.' + stage[0] = "Stage 5/5: running Day-1 primitives for NS." 
self._write_configuration_status( - nsr_id=nsr_id, - vca_index=vca_index, - status='EXECUTING PRIMITIVE' + nsr_id=nsr_id, vca_index=vca_index, status="EXECUTING PRIMITIVE" ) - self._write_op_status( - op_id=nslcmop_id, - stage=stage - ) + self._write_op_status(op_id=nslcmop_id, stage=stage) check_if_terminated_needed = True for initial_config_primitive in initial_config_primitive_list: # adding information on the vca_deployed if it is a NS execution environment if not vca_deployed["member-vnf-index"]: - deploy_params["ns_config_info"] = json.dumps(self._get_ns_config_info(nsr_id)) + deploy_params["ns_config_info"] = json.dumps( + self._get_ns_config_info(nsr_id) + ) # TODO check if already done - primitive_params_ = self._map_primitive_params(initial_config_primitive, {}, deploy_params) + primitive_params_ = self._map_primitive_params( + initial_config_primitive, {}, deploy_params + ) - step = "execute primitive '{}' params '{}'".format(initial_config_primitive["name"], primitive_params_) + step = "execute primitive '{}' params '{}'".format( + initial_config_primitive["name"], primitive_params_ + ) self.logger.debug(logging_text + step) await self.vca_map[vca_type].exec_primitive( ee_id=ee_id, @@ -1485,8 +1923,10 @@ class NsLcm(LcmBase): ) # Once some primitive has been exec, check and write at db if it needs to exec terminated primitives if check_if_terminated_needed: - if config_descriptor.get('terminate-config-primitive'): - self.update_db_2("nsrs", nsr_id, {db_update_entry + "needed_terminate": True}) + if config_descriptor.get("terminate-config-primitive"): + self.update_db_2( + "nsrs", nsr_id, {db_update_entry + "needed_terminate": True} + ) check_if_terminated_needed = False # TODO register in database that primitive is done @@ -1502,30 +1942,42 @@ class NsLcm(LcmBase): target_ip=rw_mgmt_ip, ) if prometheus_jobs: - self.update_db_2("nsrs", nsr_id, {db_update_entry + "prometheus_jobs": prometheus_jobs}) + self.update_db_2( + "nsrs", + nsr_id, + {db_update_entry + "prometheus_jobs": prometheus_jobs}, + ) step = "instantiated at VCA" self.logger.debug(logging_text + step) self._write_configuration_status( - nsr_id=nsr_id, - vca_index=vca_index, - status='READY' + nsr_id=nsr_id, vca_index=vca_index, status="READY" ) except Exception as e: # TODO not use Exception but N2VC exception # self.update_db_2("nsrs", nsr_id, {db_update_entry + "instantiation": "FAILED"}) - if not isinstance(e, (DbException, N2VCException, LcmException, asyncio.CancelledError)): - self.logger.error("Exception while {} : {}".format(step, e), exc_info=True) + if not isinstance( + e, (DbException, N2VCException, LcmException, asyncio.CancelledError) + ): + self.logger.error( + "Exception while {} : {}".format(step, e), exc_info=True + ) self._write_configuration_status( - nsr_id=nsr_id, - vca_index=vca_index, - status='BROKEN' + nsr_id=nsr_id, vca_index=vca_index, status="BROKEN" ) raise LcmException("{} {}".format(step, e)) from e - def _write_ns_status(self, nsr_id: str, ns_state: str, current_operation: str, current_operation_id: str, - error_description: str = None, error_detail: str = None, other_update: dict = None): + def _write_ns_status( + self, + nsr_id: str, + ns_state: str, + current_operation: str, + current_operation_id: str, + error_description: str = None, + error_detail: str = None, + other_update: dict = None, + ): """ Update db_nsr fields. 
:param nsr_id: @@ -1539,9 +1991,13 @@ class NsLcm(LcmBase): """ try: db_dict = other_update or {} - db_dict["_admin.nslcmop"] = current_operation_id # for backward compatibility + db_dict[ + "_admin.nslcmop" + ] = current_operation_id # for backward compatibility db_dict["_admin.current-operation"] = current_operation_id - db_dict["_admin.operation-type"] = current_operation if current_operation != "IDLE" else None + db_dict["_admin.operation-type"] = ( + current_operation if current_operation != "IDLE" else None + ) db_dict["currentOperation"] = current_operation db_dict["currentOperationID"] = current_operation_id db_dict["errorDescription"] = error_description @@ -1551,62 +2007,87 @@ class NsLcm(LcmBase): db_dict["nsState"] = ns_state self.update_db_2("nsrs", nsr_id, db_dict) except DbException as e: - self.logger.warn('Error writing NS status, ns={}: {}'.format(nsr_id, e)) + self.logger.warn("Error writing NS status, ns={}: {}".format(nsr_id, e)) - def _write_op_status(self, op_id: str, stage: list = None, error_message: str = None, queuePosition: int = 0, - operation_state: str = None, other_update: dict = None): + def _write_op_status( + self, + op_id: str, + stage: list = None, + error_message: str = None, + queuePosition: int = 0, + operation_state: str = None, + other_update: dict = None, + ): try: db_dict = other_update or {} - db_dict['queuePosition'] = queuePosition + db_dict["queuePosition"] = queuePosition if isinstance(stage, list): - db_dict['stage'] = stage[0] - db_dict['detailed-status'] = " ".join(stage) + db_dict["stage"] = stage[0] + db_dict["detailed-status"] = " ".join(stage) elif stage is not None: - db_dict['stage'] = str(stage) + db_dict["stage"] = str(stage) if error_message is not None: - db_dict['errorMessage'] = error_message + db_dict["errorMessage"] = error_message if operation_state is not None: - db_dict['operationState'] = operation_state + db_dict["operationState"] = operation_state db_dict["statusEnteredTime"] = time() self.update_db_2("nslcmops", op_id, db_dict) except DbException as e: - self.logger.warn('Error writing OPERATION status for op_id: {} -> {}'.format(op_id, e)) + self.logger.warn( + "Error writing OPERATION status for op_id: {} -> {}".format(op_id, e) + ) def _write_all_config_status(self, db_nsr: dict, status: str): try: nsr_id = db_nsr["_id"] # configurationStatus - config_status = db_nsr.get('configurationStatus') + config_status = db_nsr.get("configurationStatus") if config_status: - db_nsr_update = {"configurationStatus.{}.status".format(index): status for index, v in - enumerate(config_status) if v} + db_nsr_update = { + "configurationStatus.{}.status".format(index): status + for index, v in enumerate(config_status) + if v + } # update status self.update_db_2("nsrs", nsr_id, db_nsr_update) except DbException as e: - self.logger.warn('Error writing all configuration status, ns={}: {}'.format(nsr_id, e)) + self.logger.warn( + "Error writing all configuration status, ns={}: {}".format(nsr_id, e) + ) - def _write_configuration_status(self, nsr_id: str, vca_index: int, status: str = None, - element_under_configuration: str = None, element_type: str = None, - other_update: dict = None): + def _write_configuration_status( + self, + nsr_id: str, + vca_index: int, + status: str = None, + element_under_configuration: str = None, + element_type: str = None, + other_update: dict = None, + ): # self.logger.debug('_write_configuration_status(): vca_index={}, status={}' # .format(vca_index, status)) try: - db_path = 
'configurationStatus.{}.'.format(vca_index) + db_path = "configurationStatus.{}.".format(vca_index) db_dict = other_update or {} if status: - db_dict[db_path + 'status'] = status + db_dict[db_path + "status"] = status if element_under_configuration: - db_dict[db_path + 'elementUnderConfiguration'] = element_under_configuration + db_dict[ + db_path + "elementUnderConfiguration" + ] = element_under_configuration if element_type: - db_dict[db_path + 'elementType'] = element_type + db_dict[db_path + "elementType"] = element_type self.update_db_2("nsrs", nsr_id, db_dict) except DbException as e: - self.logger.warn('Error writing configuration status={}, ns={}, vca_index={}: {}' - .format(status, nsr_id, vca_index, e)) + self.logger.warn( + "Error writing configuration status={}, ns={}, vca_index={}: {}".format( + status, nsr_id, vca_index, e + ) + ) async def _do_placement(self, logging_text, db_nslcmop, db_vnfrs): """ @@ -1620,11 +2101,15 @@ class NsLcm(LcmBase): computed 'vim-account-id' """ modified = False - nslcmop_id = db_nslcmop['_id'] - placement_engine = deep_get(db_nslcmop, ('operationParams', 'placement-engine')) + nslcmop_id = db_nslcmop["_id"] + placement_engine = deep_get(db_nslcmop, ("operationParams", "placement-engine")) if placement_engine == "PLA": - self.logger.debug(logging_text + "Invoke and wait for placement optimization") - await self.msg.aiowrite("pla", "get_placement", {'nslcmopId': nslcmop_id}, loop=self.loop) + self.logger.debug( + logging_text + "Invoke and wait for placement optimization" + ) + await self.msg.aiowrite( + "pla", "get_placement", {"nslcmopId": nslcmop_id}, loop=self.loop + ) db_poll_interval = 5 wait = db_poll_interval * 10 pla_result = None @@ -1632,27 +2117,35 @@ class NsLcm(LcmBase): await asyncio.sleep(db_poll_interval) wait -= db_poll_interval db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id}) - pla_result = deep_get(db_nslcmop, ('_admin', 'pla')) + pla_result = deep_get(db_nslcmop, ("_admin", "pla")) if not pla_result: - raise LcmException("Placement timeout for nslcmopId={}".format(nslcmop_id)) + raise LcmException( + "Placement timeout for nslcmopId={}".format(nslcmop_id) + ) - for pla_vnf in pla_result['vnf']: - vnfr = db_vnfrs.get(pla_vnf['member-vnf-index']) - if not pla_vnf.get('vimAccountId') or not vnfr: + for pla_vnf in pla_result["vnf"]: + vnfr = db_vnfrs.get(pla_vnf["member-vnf-index"]) + if not pla_vnf.get("vimAccountId") or not vnfr: continue modified = True - self.db.set_one("vnfrs", {"_id": vnfr["_id"]}, {"vim-account-id": pla_vnf['vimAccountId']}) + self.db.set_one( + "vnfrs", + {"_id": vnfr["_id"]}, + {"vim-account-id": pla_vnf["vimAccountId"]}, + ) # Modifies db_vnfrs - vnfr["vim-account-id"] = pla_vnf['vimAccountId'] + vnfr["vim-account-id"] = pla_vnf["vimAccountId"] return modified def update_nsrs_with_pla_result(self, params): try: - nslcmop_id = deep_get(params, ('placement', 'nslcmopId')) - self.update_db_2("nslcmops", nslcmop_id, {"_admin.pla": params.get('placement')}) + nslcmop_id = deep_get(params, ("placement", "nslcmopId")) + self.update_db_2( + "nslcmops", nslcmop_id, {"_admin.pla": params.get("placement")} + ) except Exception as e: - self.logger.warn('Update failed for nslcmop_id={}:{}'.format(nslcmop_id, e)) + self.logger.warn("Update failed for nslcmop_id={}:{}".format(nslcmop_id, e)) async def instantiate(self, nsr_id, nslcmop_id): """ @@ -1663,9 +2156,11 @@ class NsLcm(LcmBase): """ # Try to lock HA task here - task_is_locked_by_me = self.lcm_tasks.lock_HA('ns', 'nslcmops', nslcmop_id) + 
task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id) if not task_is_locked_by_me: - self.logger.debug('instantiate() task is not locked by me, ns={}'.format(nsr_id)) + self.logger.debug( + "instantiate() task is not locked by me, ns={}".format(nsr_id) + ) return logging_text = "Task ns={} instantiate={} ".format(nsr_id, nslcmop_id) @@ -1685,16 +2180,20 @@ class NsLcm(LcmBase): db_nslcmop_update = {} nslcmop_operation_state = None - db_vnfrs = {} # vnf's info indexed by member-index + db_vnfrs = {} # vnf's info indexed by member-index # n2vc_info = {} tasks_dict_info = {} # from task to info text exc = None error_list = [] - stage = ['Stage 1/5: preparation of the environment.', "Waiting for previous operations to terminate.", ""] + stage = [ + "Stage 1/5: preparation of the environment.", + "Waiting for previous operations to terminate.", + "", + ] # ^ stage, step, VIM progress try: # wait for any previous tasks in process - await self.lcm_tasks.waitfor_related_HA('ns', 'nslcmops', nslcmop_id) + await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id) # STEP 0: Reading database (nslcmops, nsrs, nsds, vnfrs, vnfds) stage[1] = "Reading from database." @@ -1706,13 +2205,9 @@ class NsLcm(LcmBase): ns_state="BUILDING", current_operation="INSTANTIATING", current_operation_id=nslcmop_id, - other_update=db_nsr_update - ) - self._write_op_status( - op_id=nslcmop_id, - stage=stage, - queuePosition=0 + other_update=db_nsr_update, ) + self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0) # read from db: operation stage[1] = "Getting nslcmop={} from db.".format(nslcmop_id) @@ -1721,7 +2216,9 @@ class NsLcm(LcmBase): if ns_params and ns_params.get("timeout_ns_deploy"): timeout_ns_deploy = ns_params["timeout_ns_deploy"] else: - timeout_ns_deploy = self.timeout.get("ns_deploy", self.timeout_ns_deploy) + timeout_ns_deploy = self.timeout.get( + "ns_deploy", self.timeout_ns_deploy + ) # read from db: ns stage[1] = "Getting nsr={} from db.".format(nsr_id) @@ -1738,7 +2235,7 @@ class NsLcm(LcmBase): db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id}) # read from db: vnfd's for every vnf - db_vnfds = [] # every vnfd data + db_vnfds = [] # every vnfd data # for each vnf in ns, read vnfd for vnfr in db_vnfrs_list: @@ -1750,7 +2247,9 @@ class NsLcm(LcmBase): # if we haven't this vnfd, read it from db if vnfd_id not in db_vnfds: # read from db - stage[1] = "Getting vnfd={} id='{}' from db.".format(vnfd_id, vnfd_ref) + stage[1] = "Getting vnfd={} id='{}' from db.".format( + vnfd_id, vnfd_ref + ) self.logger.debug(logging_text + stage[1]) vnfd = self.db.get_one("vnfds", {"_id": vnfd_id}) @@ -1774,21 +2273,22 @@ class NsLcm(LcmBase): db_nsr_update["_admin.deployed.VCA"] = vca_deployed_list populate_dict(db_nsr, ("_admin", "deployed", "VCA"), vca_deployed_list) - if not isinstance(deep_get(db_nsr, ("_admin", "deployed", "RO", "vnfd")), list): + if not isinstance( + deep_get(db_nsr, ("_admin", "deployed", "RO", "vnfd")), list + ): populate_dict(db_nsr, ("_admin", "deployed", "RO", "vnfd"), []) db_nsr_update["_admin.deployed.RO.vnfd"] = [] # set state to INSTANTIATED. 
When instantiated NBI will not delete directly db_nsr_update["_admin.nsState"] = "INSTANTIATED" self.update_db_2("nsrs", nsr_id, db_nsr_update) - self.db.set_list("vnfrs", {"nsr-id-ref": nsr_id}, {"_admin.nsState": "INSTANTIATED"}) + self.db.set_list( + "vnfrs", {"nsr-id-ref": nsr_id}, {"_admin.nsState": "INSTANTIATED"} + ) # n2vc_redesign STEP 2 Deploy Network Scenario - stage[0] = 'Stage 2/5: deployment of KDUs, VMs and execution environments.' - self._write_op_status( - op_id=nslcmop_id, - stage=stage - ) + stage[0] = "Stage 2/5: deployment of KDUs, VMs and execution environments." + self._write_op_status(op_id=nslcmop_id, stage=stage) stage[1] = "Deploying KDUs." # self.logger.debug(logging_text + "Before deploy_kdus") @@ -1821,7 +2321,7 @@ class NsLcm(LcmBase): db_vnfrs=db_vnfrs, db_vnfds=db_vnfds, n2vc_key_list=n2vc_key_list, - stage=stage + stage=stage, ) ) self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "instantiate_RO", task_ro) @@ -1846,12 +2346,15 @@ class NsLcm(LcmBase): # Get additional parameters deploy_params = {"OSM": get_osm_params(db_vnfr)} if db_vnfr.get("additionalParamsForVnf"): - deploy_params.update(parse_yaml_strings(db_vnfr["additionalParamsForVnf"].copy())) + deploy_params.update( + parse_yaml_strings(db_vnfr["additionalParamsForVnf"].copy()) + ) descriptor_config = get_configuration(vnfd, vnfd["id"]) if descriptor_config: self._deploy_n2vc( - logging_text=logging_text + "member_vnf_index={} ".format(member_vnf_index), + logging_text=logging_text + + "member_vnf_index={} ".format(member_vnf_index), db_nsr=db_nsr, db_vnfr=db_vnfr, nslcmop_id=nslcmop_id, @@ -1867,32 +2370,42 @@ class NsLcm(LcmBase): descriptor_config=descriptor_config, base_folder=base_folder, task_instantiation_info=tasks_dict_info, - stage=stage + stage=stage, ) # Deploy charms for each VDU that supports one. 
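                # Hedged sketch of the deployment pattern implemented by the loop below
                # (get_vdu_list, get_configuration and get_vdu_profile are the helpers
                # already used in this module; iter_vdu_deployments itself is illustrative):

                def iter_vdu_deployments(vnfd: dict) -> list:
                    """Return (vdu_id, vdu_index) pairs that need an execution environment."""
                    pairs = []
                    for vdud in get_vdu_list(vnfd):
                        vdu_id = vdud["id"]
                        if not get_configuration(vnfd, vdu_id):
                            continue  # this VDU carries no charm/EE configuration
                        count = get_vdu_profile(vnfd, vdu_id).get("max-number-of-instances", 1)
                        pairs.extend((vdu_id, index) for index in range(count))
                    return pairs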
for vdud in get_vdu_list(vnfd): vdu_id = vdud["id"] descriptor_config = get_configuration(vnfd, vdu_id) - vdur = find_in_list(db_vnfr["vdur"], lambda vdu: vdu["vdu-id-ref"] == vdu_id) + vdur = find_in_list( + db_vnfr["vdur"], lambda vdu: vdu["vdu-id-ref"] == vdu_id + ) if vdur.get("additionalParams"): deploy_params_vdu = parse_yaml_strings(vdur["additionalParams"]) else: deploy_params_vdu = deploy_params - deploy_params_vdu["OSM"] = get_osm_params(db_vnfr, vdu_id, vdu_count_index=0) - vdud_count = get_vdu_profile(vnfd, vdu_id).get("max-number-of-instances", 1) + deploy_params_vdu["OSM"] = get_osm_params( + db_vnfr, vdu_id, vdu_count_index=0 + ) + vdud_count = get_vdu_profile(vnfd, vdu_id).get( + "max-number-of-instances", 1 + ) self.logger.debug("VDUD > {}".format(vdud)) - self.logger.debug("Descriptor config > {}".format(descriptor_config)) + self.logger.debug( + "Descriptor config > {}".format(descriptor_config) + ) if descriptor_config: vdu_name = None kdu_name = None for vdu_index in range(vdud_count): # TODO vnfr_params["rw_mgmt_ip"] = vdur["ip-address"] self._deploy_n2vc( - logging_text=logging_text + "member_vnf_index={}, vdu_id={}, vdu_index={} ".format( - member_vnf_index, vdu_id, vdu_index), + logging_text=logging_text + + "member_vnf_index={}, vdu_id={}, vdu_index={} ".format( + member_vnf_index, vdu_id, vdu_index + ), db_nsr=db_nsr, db_vnfr=db_vnfr, nslcmop_id=nslcmop_id, @@ -1908,7 +2421,7 @@ class NsLcm(LcmBase): descriptor_config=descriptor_config, base_folder=base_folder, task_instantiation_info=tasks_dict_info, - stage=stage + stage=stage, ) for kdud in get_kdu_list(vnfd): kdu_name = kdud["name"] @@ -1917,10 +2430,14 @@ class NsLcm(LcmBase): vdu_id = None vdu_index = 0 vdu_name = None - kdur = next(x for x in db_vnfr["kdur"] if x["kdu-name"] == kdu_name) + kdur = next( + x for x in db_vnfr["kdur"] if x["kdu-name"] == kdu_name + ) deploy_params_kdu = {"OSM": get_osm_params(db_vnfr)} if kdur.get("additionalParams"): - deploy_params_kdu = parse_yaml_strings(kdur["additionalParams"]) + deploy_params_kdu = parse_yaml_strings( + kdur["additionalParams"] + ) self._deploy_n2vc( logging_text=logging_text, @@ -1939,7 +2456,7 @@ class NsLcm(LcmBase): descriptor_config=descriptor_config, base_folder=base_folder, task_instantiation_info=tasks_dict_info, - stage=stage + stage=stage, ) # Check if this NS has a charm configuration @@ -1956,7 +2473,9 @@ class NsLcm(LcmBase): # Get additional parameters deploy_params = {"OSM": {"vim_account_id": ns_params["vimAccountId"]}} if db_nsr.get("additionalParamsForNs"): - deploy_params.update(parse_yaml_strings(db_nsr["additionalParamsForNs"].copy())) + deploy_params.update( + parse_yaml_strings(db_nsr["additionalParamsForNs"].copy()) + ) base_folder = nsd["_admin"]["storage"] self._deploy_n2vc( logging_text=logging_text, @@ -1975,20 +2494,32 @@ class NsLcm(LcmBase): descriptor_config=descriptor_config, base_folder=base_folder, task_instantiation_info=tasks_dict_info, - stage=stage + stage=stage, ) # rest of staff will be done at finally - except (ROclient.ROClientException, DbException, LcmException, N2VCException) as e: - self.logger.error(logging_text + "Exit Exception while '{}': {}".format(stage[1], e)) + except ( + ROclient.ROClientException, + DbException, + LcmException, + N2VCException, + ) as e: + self.logger.error( + logging_text + "Exit Exception while '{}': {}".format(stage[1], e) + ) exc = e except asyncio.CancelledError: - self.logger.error(logging_text + "Cancelled Exception while '{}'".format(stage[1])) + self.logger.error( + 
logging_text + "Cancelled Exception while '{}'".format(stage[1]) + ) exc = "Operation was cancelled" except Exception as e: exc = traceback.format_exc() - self.logger.critical(logging_text + "Exit Exception while '{}': {}".format(stage[1], e), exc_info=True) + self.logger.critical( + logging_text + "Exit Exception while '{}': {}".format(stage[1], e), + exc_info=True, + ) finally: if exc: error_list.append(str(exc)) @@ -1997,8 +2528,14 @@ class NsLcm(LcmBase): if tasks_dict_info: stage[1] = "Waiting for instantiate pending tasks." self.logger.debug(logging_text + stage[1]) - error_list += await self._wait_for_tasks(logging_text, tasks_dict_info, timeout_ns_deploy, - stage, nslcmop_id, nsr_id=nsr_id) + error_list += await self._wait_for_tasks( + logging_text, + tasks_dict_info, + timeout_ns_deploy, + stage, + nslcmop_id, + nsr_id=nsr_id, + ) stage[1] = stage[2] = "" except asyncio.CancelledError: error_list.append("Cancelled") @@ -2023,10 +2560,16 @@ class NsLcm(LcmBase): if error_list: error_detail = ". ".join(error_list) self.logger.error(logging_text + error_detail) - error_description_nslcmop = '{} Detail: {}'.format(stage[0], error_detail) - error_description_nsr = 'Operation: INSTANTIATING.{}, {}'.format(nslcmop_id, stage[0]) + error_description_nslcmop = "{} Detail: {}".format( + stage[0], error_detail + ) + error_description_nsr = "Operation: INSTANTIATING.{}, {}".format( + nslcmop_id, stage[0] + ) - db_nsr_update["detailed-status"] = error_description_nsr + " Detail: " + error_detail + db_nsr_update["detailed-status"] = ( + error_description_nsr + " Detail: " + error_detail + ) db_nslcmop_update["detailed-status"] = error_detail nslcmop_operation_state = "FAILED" ns_state = "BROKEN" @@ -2046,7 +2589,7 @@ class NsLcm(LcmBase): current_operation_id=None, error_description=error_description_nsr, error_detail=error_detail, - other_update=db_nsr_update + other_update=db_nsr_update, ) self._write_op_status( op_id=nslcmop_id, @@ -2058,11 +2601,20 @@ class NsLcm(LcmBase): if nslcmop_operation_state: try: - await self.msg.aiowrite("ns", "instantiated", {"nsr_id": nsr_id, "nslcmop_id": nslcmop_id, - "operationState": nslcmop_operation_state}, - loop=self.loop) + await self.msg.aiowrite( + "ns", + "instantiated", + { + "nsr_id": nsr_id, + "nslcmop_id": nslcmop_id, + "operationState": nslcmop_operation_state, + }, + loop=self.loop, + ) except Exception as e: - self.logger.error(logging_text + "kafka_write notification Exception {}".format(e)) + self.logger.error( + logging_text + "kafka_write notification Exception {}".format(e) + ) self.logger.debug(logging_text + "Exit") self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_instantiate") @@ -2092,21 +2644,23 @@ class NsLcm(LcmBase): nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]}) # this VCA data - my_vca = deep_get(db_nsr, ('_admin', 'deployed', 'VCA'))[vca_index] + my_vca = deep_get(db_nsr, ("_admin", "deployed", "VCA"))[vca_index] # read all ns-configuration relations ns_relations = list() - db_ns_relations = deep_get(nsd, ('ns-configuration', 'relation')) + db_ns_relations = deep_get(nsd, ("ns-configuration", "relation")) if db_ns_relations: for r in db_ns_relations: # check if this VCA is in the relation - if my_vca.get('member-vnf-index') in\ - (r.get('entities')[0].get('id'), r.get('entities')[1].get('id')): + if my_vca.get("member-vnf-index") in ( + r.get("entities")[0].get("id"), + r.get("entities")[1].get("id"), + ): ns_relations.append(r) # read all vnf-configuration relations vnf_relations = list() - db_vnfd_list = 
db_nsr.get('vnfd-id') + db_vnfd_list = db_nsr.get("vnfd-id") if db_vnfd_list: for vnfd in db_vnfd_list: db_vnf_relations = None @@ -2117,15 +2671,23 @@ class NsLcm(LcmBase): if db_vnf_relations: for r in db_vnf_relations: # check if this VCA is in the relation - if my_vca.get('vdu_id') in (r.get('entities')[0].get('id'), r.get('entities')[1].get('id')): + if my_vca.get("vdu_id") in ( + r.get("entities")[0].get("id"), + r.get("entities")[1].get("id"), + ): vnf_relations.append(r) # if no relations, terminate if not ns_relations and not vnf_relations: - self.logger.debug(logging_text + ' No relations') + self.logger.debug(logging_text + " No relations") return True - self.logger.debug(logging_text + ' adding relations\n {}\n {}'.format(ns_relations, vnf_relations)) + self.logger.debug( + logging_text + + " adding relations\n {}\n {}".format( + ns_relations, vnf_relations + ) + ) # add all relations start = time() @@ -2133,7 +2695,7 @@ class NsLcm(LcmBase): # check timeout now = time() if now - start >= timeout: - self.logger.error(logging_text + ' : timeout adding relations') + self.logger.error(logging_text + " : timeout adding relations") return False # reload nsr from database (we need to update record: _admin.deloyed.VCA) @@ -2145,16 +2707,18 @@ class NsLcm(LcmBase): to_vca_ee_id = None from_vca_endpoint = None to_vca_endpoint = None - vca_list = deep_get(db_nsr, ('_admin', 'deployed', 'VCA')) + vca_list = deep_get(db_nsr, ("_admin", "deployed", "VCA")) for vca in vca_list: - if vca.get('member-vnf-index') == r.get('entities')[0].get('id') \ - and vca.get('config_sw_installed'): - from_vca_ee_id = vca.get('ee_id') - from_vca_endpoint = r.get('entities')[0].get('endpoint') - if vca.get('member-vnf-index') == r.get('entities')[1].get('id') \ - and vca.get('config_sw_installed'): - to_vca_ee_id = vca.get('ee_id') - to_vca_endpoint = r.get('entities')[1].get('endpoint') + if vca.get("member-vnf-index") == r.get("entities")[0].get( + "id" + ) and vca.get("config_sw_installed"): + from_vca_ee_id = vca.get("ee_id") + from_vca_endpoint = r.get("entities")[0].get("endpoint") + if vca.get("member-vnf-index") == r.get("entities")[1].get( + "id" + ) and vca.get("config_sw_installed"): + to_vca_ee_id = vca.get("ee_id") + to_vca_endpoint = r.get("entities")[1].get("endpoint") if from_vca_ee_id and to_vca_ee_id: # add relation await self.vca_map[vca_type].add_relation( @@ -2169,17 +2733,21 @@ class NsLcm(LcmBase): else: # check failed peers try: - vca_status_list = db_nsr.get('configurationStatus') + vca_status_list = db_nsr.get("configurationStatus") if vca_status_list: for i in range(len(vca_list)): vca = vca_list[i] vca_status = vca_status_list[i] - if vca.get('member-vnf-index') == r.get('entities')[0].get('id'): - if vca_status.get('status') == 'BROKEN': + if vca.get("member-vnf-index") == r.get("entities")[ + 0 + ].get("id"): + if vca_status.get("status") == "BROKEN": # peer broken: remove relation from list ns_relations.remove(r) - if vca.get('member-vnf-index') == r.get('entities')[1].get('id'): - if vca_status.get('status') == 'BROKEN': + if vca.get("member-vnf-index") == r.get("entities")[ + 1 + ].get("id"): + if vca_status.get("status") == "BROKEN": # peer broken: remove relation from list ns_relations.remove(r) except Exception: @@ -2192,17 +2760,21 @@ class NsLcm(LcmBase): to_vca_ee_id = None from_vca_endpoint = None to_vca_endpoint = None - vca_list = deep_get(db_nsr, ('_admin', 'deployed', 'VCA')) + vca_list = deep_get(db_nsr, ("_admin", "deployed", "VCA")) for vca in vca_list: 
key_to_check = "vdu_id" if vca.get("vdu_id") is None: key_to_check = "vnfd_id" - if vca.get(key_to_check) == r.get('entities')[0].get('id') and vca.get('config_sw_installed'): - from_vca_ee_id = vca.get('ee_id') - from_vca_endpoint = r.get('entities')[0].get('endpoint') - if vca.get(key_to_check) == r.get('entities')[1].get('id') and vca.get('config_sw_installed'): - to_vca_ee_id = vca.get('ee_id') - to_vca_endpoint = r.get('entities')[1].get('endpoint') + if vca.get(key_to_check) == r.get("entities")[0].get( + "id" + ) and vca.get("config_sw_installed"): + from_vca_ee_id = vca.get("ee_id") + from_vca_endpoint = r.get("entities")[0].get("endpoint") + if vca.get(key_to_check) == r.get("entities")[1].get( + "id" + ) and vca.get("config_sw_installed"): + to_vca_ee_id = vca.get("ee_id") + to_vca_endpoint = r.get("entities")[1].get("endpoint") if from_vca_ee_id and to_vca_ee_id: # add relation await self.vca_map[vca_type].add_relation( @@ -2217,17 +2789,21 @@ class NsLcm(LcmBase): else: # check failed peers try: - vca_status_list = db_nsr.get('configurationStatus') + vca_status_list = db_nsr.get("configurationStatus") if vca_status_list: for i in range(len(vca_list)): vca = vca_list[i] vca_status = vca_status_list[i] - if vca.get('vdu_id') == r.get('entities')[0].get('id'): - if vca_status.get('status') == 'BROKEN': + if vca.get("vdu_id") == r.get("entities")[0].get( + "id" + ): + if vca_status.get("status") == "BROKEN": # peer broken: remove relation from list vnf_relations.remove(r) - if vca.get('vdu_id') == r.get('entities')[1].get('id'): - if vca_status.get('status') == 'BROKEN': + if vca.get("vdu_id") == r.get("entities")[1].get( + "id" + ): + if vca_status.get("status") == "BROKEN": # peer broken: remove relation from list vnf_relations.remove(r) except Exception: @@ -2238,32 +2814,48 @@ class NsLcm(LcmBase): await asyncio.sleep(5.0) if not ns_relations and not vnf_relations: - self.logger.debug('Relations added') + self.logger.debug("Relations added") break return True except Exception as e: - self.logger.warn(logging_text + ' ERROR adding relations: {}'.format(e)) + self.logger.warn(logging_text + " ERROR adding relations: {}".format(e)) return False - async def _install_kdu(self, nsr_id: str, nsr_db_path: str, vnfr_data: dict, kdu_index: int, kdud: dict, - vnfd: dict, k8s_instance_info: dict, k8params: dict = None, timeout: int = 600, - vca_id: str = None): + async def _install_kdu( + self, + nsr_id: str, + nsr_db_path: str, + vnfr_data: dict, + kdu_index: int, + kdud: dict, + vnfd: dict, + k8s_instance_info: dict, + k8params: dict = None, + timeout: int = 600, + vca_id: str = None, + ): try: k8sclustertype = k8s_instance_info["k8scluster-type"] # Instantiate kdu - db_dict_install = {"collection": "nsrs", - "filter": {"_id": nsr_id}, - "path": nsr_db_path} + db_dict_install = { + "collection": "nsrs", + "filter": {"_id": nsr_id}, + "path": nsr_db_path, + } - kdu_instance = self.k8scluster_map[k8sclustertype].generate_kdu_instance_name( + kdu_instance = self.k8scluster_map[ + k8sclustertype + ].generate_kdu_instance_name( db_dict=db_dict_install, kdu_model=k8s_instance_info["kdu-model"], kdu_name=k8s_instance_info["kdu-name"], ) - self.update_db_2("nsrs", nsr_id, {nsr_db_path + ".kdu-instance": kdu_instance}) + self.update_db_2( + "nsrs", nsr_id, {nsr_db_path + ".kdu-instance": kdu_instance} + ) await self.k8scluster_map[k8sclustertype].install( cluster_uuid=k8s_instance_info["k8scluster-uuid"], kdu_model=k8s_instance_info["kdu-model"], @@ -2276,13 +2868,16 @@ class NsLcm(LcmBase): 
kdu_instance=kdu_instance, vca_id=vca_id, ) - self.update_db_2("nsrs", nsr_id, {nsr_db_path + ".kdu-instance": kdu_instance}) + self.update_db_2( + "nsrs", nsr_id, {nsr_db_path + ".kdu-instance": kdu_instance} + ) # Obtain services to obtain management service ip services = await self.k8scluster_map[k8sclustertype].get_services( cluster_uuid=k8s_instance_info["k8scluster-uuid"], kdu_instance=kdu_instance, - namespace=k8s_instance_info["namespace"]) + namespace=k8s_instance_info["namespace"], + ) # Obtain management service info (if exists) vnfr_update_dict = {} @@ -2294,7 +2889,11 @@ class NsLcm(LcmBase): if services: vnfr_update_dict["kdur.{}.services".format(kdu_index)] = services - mgmt_services = [service for service in kdud.get("service", []) if service.get("mgmt-service")] + mgmt_services = [ + service + for service in kdud.get("service", []) + if service.get("mgmt-service") + ] for mgmt_service in mgmt_services: for service in services: if service["name"].startswith(mgmt_service["name"]): @@ -2303,51 +2902,81 @@ class NsLcm(LcmBase): if isinstance(ip, list) and len(ip) == 1: ip = ip[0] - vnfr_update_dict["kdur.{}.ip-address".format(kdu_index)] = ip + vnfr_update_dict[ + "kdur.{}.ip-address".format(kdu_index) + ] = ip # Check if must update also mgmt ip at the vnf - service_external_cp = mgmt_service.get("external-connection-point-ref") + service_external_cp = mgmt_service.get( + "external-connection-point-ref" + ) if service_external_cp: - if deep_get(vnfd, ("mgmt-interface", "cp")) == service_external_cp: + if ( + deep_get(vnfd, ("mgmt-interface", "cp")) + == service_external_cp + ): vnfr_update_dict["ip-address"] = ip if find_in_list( target_ee_list, - lambda ee: ee.get("external-connection-point-ref", "") == service_external_cp + lambda ee: ee.get( + "external-connection-point-ref", "" + ) + == service_external_cp, ): - vnfr_update_dict["kdur.{}.ip-address".format(kdu_index)] = ip + vnfr_update_dict[ + "kdur.{}.ip-address".format(kdu_index) + ] = ip break else: - self.logger.warn("Mgmt service name: {} not found".format(mgmt_service["name"])) + self.logger.warn( + "Mgmt service name: {} not found".format( + mgmt_service["name"] + ) + ) vnfr_update_dict["kdur.{}.status".format(kdu_index)] = "READY" self.update_db_2("vnfrs", vnfr_data.get("_id"), vnfr_update_dict) kdu_config = get_configuration(vnfd, k8s_instance_info["kdu-name"]) - if kdu_config and kdu_config.get("initial-config-primitive") and \ - get_juju_ee_ref(vnfd, k8s_instance_info["kdu-name"]) is None: - initial_config_primitive_list = kdu_config.get("initial-config-primitive") + if ( + kdu_config + and kdu_config.get("initial-config-primitive") + and get_juju_ee_ref(vnfd, k8s_instance_info["kdu-name"]) is None + ): + initial_config_primitive_list = kdu_config.get( + "initial-config-primitive" + ) initial_config_primitive_list.sort(key=lambda val: int(val["seq"])) for initial_config_primitive in initial_config_primitive_list: - primitive_params_ = self._map_primitive_params(initial_config_primitive, {}, {}) + primitive_params_ = self._map_primitive_params( + initial_config_primitive, {}, {} + ) await asyncio.wait_for( self.k8scluster_map[k8sclustertype].exec_primitive( cluster_uuid=k8s_instance_info["k8scluster-uuid"], kdu_instance=kdu_instance, primitive_name=initial_config_primitive["name"], - params=primitive_params_, db_dict=db_dict_install, + params=primitive_params_, + db_dict=db_dict_install, vca_id=vca_id, ), - timeout=timeout + timeout=timeout, ) except Exception as e: # Prepare update db with error and raise 
exception try: - self.update_db_2("nsrs", nsr_id, {nsr_db_path + ".detailed-status": str(e)}) - self.update_db_2("vnfrs", vnfr_data.get("_id"), {"kdur.{}.status".format(kdu_index): "ERROR"}) + self.update_db_2( + "nsrs", nsr_id, {nsr_db_path + ".detailed-status": str(e)} + ) + self.update_db_2( + "vnfrs", + vnfr_data.get("_id"), + {"kdur.{}.status".format(kdu_index): "ERROR"}, + ) except Exception: # ignore to keep original exception pass @@ -2356,10 +2985,22 @@ class NsLcm(LcmBase): return kdu_instance - async def deploy_kdus(self, logging_text, nsr_id, nslcmop_id, db_vnfrs, db_vnfds, task_instantiation_info): + async def deploy_kdus( + self, + logging_text, + nsr_id, + nslcmop_id, + db_vnfrs, + db_vnfds, + task_instantiation_info, + ): # Launch kdus if present in the descriptor - k8scluster_id_2_uuic = {"helm-chart-v3": {}, "helm-chart": {}, "juju-bundle": {}} + k8scluster_id_2_uuic = { + "helm-chart-v3": {}, + "helm-chart": {}, + "juju-bundle": {}, + } async def _get_cluster_id(cluster_id, cluster_type): nonlocal k8scluster_id_2_uuic @@ -2367,13 +3008,19 @@ class NsLcm(LcmBase): return k8scluster_id_2_uuic[cluster_type][cluster_id] # check if K8scluster is creating and wait look if previous tasks in process - task_name, task_dependency = self.lcm_tasks.lookfor_related("k8scluster", cluster_id) + task_name, task_dependency = self.lcm_tasks.lookfor_related( + "k8scluster", cluster_id + ) if task_dependency: - text = "Waiting for related tasks '{}' on k8scluster {} to be completed".format(task_name, cluster_id) + text = "Waiting for related tasks '{}' on k8scluster {} to be completed".format( + task_name, cluster_id + ) self.logger.debug(logging_text + text) await asyncio.wait(task_dependency, timeout=3600) - db_k8scluster = self.db.get_one("k8sclusters", {"_id": cluster_id}, fail_on_empty=False) + db_k8scluster = self.db.get_one( + "k8sclusters", {"_id": cluster_id}, fail_on_empty=False + ) if not db_k8scluster: raise LcmException("K8s cluster {} cannot be found".format(cluster_id)) @@ -2382,22 +3029,40 @@ class NsLcm(LcmBase): if cluster_type == "helm-chart-v3": try: # backward compatibility for existing clusters that have not been initialized for helm v3 - k8s_credentials = yaml.safe_dump(db_k8scluster.get("credentials")) - k8s_id, uninstall_sw = await self.k8sclusterhelm3.init_env(k8s_credentials, - reuse_cluster_uuid=cluster_id) + k8s_credentials = yaml.safe_dump( + db_k8scluster.get("credentials") + ) + k8s_id, uninstall_sw = await self.k8sclusterhelm3.init_env( + k8s_credentials, reuse_cluster_uuid=cluster_id + ) db_k8scluster_update = {} db_k8scluster_update["_admin.helm-chart-v3.error_msg"] = None db_k8scluster_update["_admin.helm-chart-v3.id"] = k8s_id - db_k8scluster_update["_admin.helm-chart-v3.created"] = uninstall_sw - db_k8scluster_update["_admin.helm-chart-v3.operationalState"] = "ENABLED" - self.update_db_2("k8sclusters", cluster_id, db_k8scluster_update) + db_k8scluster_update[ + "_admin.helm-chart-v3.created" + ] = uninstall_sw + db_k8scluster_update[ + "_admin.helm-chart-v3.operationalState" + ] = "ENABLED" + self.update_db_2( + "k8sclusters", cluster_id, db_k8scluster_update + ) except Exception as e: - self.logger.error(logging_text + "error initializing helm-v3 cluster: {}".format(str(e))) - raise LcmException("K8s cluster '{}' has not been initialized for '{}'".format(cluster_id, - cluster_type)) - else: - raise LcmException("K8s cluster '{}' has not been initialized for '{}'". 
- format(cluster_id, cluster_type)) + self.logger.error( + logging_text + + "error initializing helm-v3 cluster: {}".format(str(e)) + ) + raise LcmException( + "K8s cluster '{}' has not been initialized for '{}'".format( + cluster_id, cluster_type + ) + ) + else: + raise LcmException( + "K8s cluster '{}' has not been initialized for '{}'".format( + cluster_id, cluster_type + ) + ) k8scluster_id_2_uuic[cluster_type][cluster_id] = k8s_id return k8s_id @@ -2416,83 +3081,159 @@ class NsLcm(LcmBase): for kdu_index, kdur in enumerate(get_iterable(vnfr_data, "kdur")): # Step 0: Prepare and set parameters desc_params = parse_yaml_strings(kdur.get("additionalParams")) - vnfd_id = vnfr_data.get('vnfd-id') - vnfd_with_id = find_in_list(db_vnfds, lambda vnfd: vnfd["_id"] == vnfd_id) - kdud = next(kdud for kdud in vnfd_with_id["kdu"] if kdud["name"] == kdur["kdu-name"]) + vnfd_id = vnfr_data.get("vnfd-id") + vnfd_with_id = find_in_list( + db_vnfds, lambda vnfd: vnfd["_id"] == vnfd_id + ) + kdud = next( + kdud + for kdud in vnfd_with_id["kdu"] + if kdud["name"] == kdur["kdu-name"] + ) namespace = kdur.get("k8s-namespace") if kdur.get("helm-chart"): kdumodel = kdur["helm-chart"] # Default version: helm3, if helm-version is v2 assign v2 k8sclustertype = "helm-chart-v3" self.logger.debug("kdur: {}".format(kdur)) - if kdur.get("helm-version") and kdur.get("helm-version") == "v2": + if ( + kdur.get("helm-version") + and kdur.get("helm-version") == "v2" + ): k8sclustertype = "helm-chart" elif kdur.get("juju-bundle"): kdumodel = kdur["juju-bundle"] k8sclustertype = "juju-bundle" else: - raise LcmException("kdu type for kdu='{}.{}' is neither helm-chart nor " - "juju-bundle. Maybe an old NBI version is running". - format(vnfr_data["member-vnf-index-ref"], kdur["kdu-name"])) + raise LcmException( + "kdu type for kdu='{}.{}' is neither helm-chart nor " + "juju-bundle. 
Maybe an old NBI version is running".format( + vnfr_data["member-vnf-index-ref"], kdur["kdu-name"] + ) + ) # check if kdumodel is a file and exists try: - vnfd_with_id = find_in_list(db_vnfds, lambda vnfd: vnfd["_id"] == vnfd_id) - storage = deep_get(vnfd_with_id, ('_admin', 'storage')) - if storage and storage.get('pkg-dir'): # may be not present if vnfd has not artifacts + vnfd_with_id = find_in_list( + db_vnfds, lambda vnfd: vnfd["_id"] == vnfd_id + ) + storage = deep_get(vnfd_with_id, ("_admin", "storage")) + if storage and storage.get( + "pkg-dir" + ): # may be not present if vnfd has not artifacts # path format: /vnfdid/pkkdir/helm-charts|juju-bundles/kdumodel - filename = '{}/{}/{}s/{}'.format(storage["folder"], storage["pkg-dir"], k8sclustertype, - kdumodel) - if self.fs.file_exists(filename, mode='file') or self.fs.file_exists(filename, mode='dir'): + filename = "{}/{}/{}s/{}".format( + storage["folder"], + storage["pkg-dir"], + k8sclustertype, + kdumodel, + ) + if self.fs.file_exists( + filename, mode="file" + ) or self.fs.file_exists(filename, mode="dir"): kdumodel = self.fs.path + filename except (asyncio.TimeoutError, asyncio.CancelledError): raise - except Exception: # it is not a file + except Exception: # it is not a file pass k8s_cluster_id = kdur["k8s-cluster"]["id"] - step = "Synchronize repos for k8s cluster '{}'".format(k8s_cluster_id) + step = "Synchronize repos for k8s cluster '{}'".format( + k8s_cluster_id + ) cluster_uuid = await _get_cluster_id(k8s_cluster_id, k8sclustertype) # Synchronize repos - if (k8sclustertype == "helm-chart" and cluster_uuid not in updated_cluster_list)\ - or (k8sclustertype == "helm-chart-v3" and cluster_uuid not in updated_v3_cluster_list): + if ( + k8sclustertype == "helm-chart" + and cluster_uuid not in updated_cluster_list + ) or ( + k8sclustertype == "helm-chart-v3" + and cluster_uuid not in updated_v3_cluster_list + ): del_repo_list, added_repo_dict = await asyncio.ensure_future( - self.k8scluster_map[k8sclustertype].synchronize_repos(cluster_uuid=cluster_uuid)) + self.k8scluster_map[k8sclustertype].synchronize_repos( + cluster_uuid=cluster_uuid + ) + ) if del_repo_list or added_repo_dict: if k8sclustertype == "helm-chart": - unset = {'_admin.helm_charts_added.' + item: None for item in del_repo_list} - updated = {'_admin.helm_charts_added.' + - item: name for item, name in added_repo_dict.items()} + unset = { + "_admin.helm_charts_added." + item: None + for item in del_repo_list + } + updated = { + "_admin.helm_charts_added." + item: name + for item, name in added_repo_dict.items() + } updated_cluster_list.append(cluster_uuid) elif k8sclustertype == "helm-chart-v3": - unset = {'_admin.helm_charts_v3_added.' + item: None for item in del_repo_list} - updated = {'_admin.helm_charts_v3_added.' + - item: name for item, name in added_repo_dict.items()} + unset = { + "_admin.helm_charts_v3_added." + item: None + for item in del_repo_list + } + updated = { + "_admin.helm_charts_v3_added." + item: name + for item, name in added_repo_dict.items() + } updated_v3_cluster_list.append(cluster_uuid) - self.logger.debug(logging_text + "repos synchronized on k8s cluster " - "'{}' to_delete: {}, to_add: {}". 
- format(k8s_cluster_id, del_repo_list, added_repo_dict)) - self.db.set_one("k8sclusters", {"_id": k8s_cluster_id}, updated, unset=unset) + self.logger.debug( + logging_text + "repos synchronized on k8s cluster " + "'{}' to_delete: {}, to_add: {}".format( + k8s_cluster_id, del_repo_list, added_repo_dict + ) + ) + self.db.set_one( + "k8sclusters", + {"_id": k8s_cluster_id}, + updated, + unset=unset, + ) # Instantiate kdu - step = "Instantiating KDU {}.{} in k8s cluster {}".format(vnfr_data["member-vnf-index-ref"], - kdur["kdu-name"], k8s_cluster_id) - k8s_instance_info = {"kdu-instance": None, - "k8scluster-uuid": cluster_uuid, - "k8scluster-type": k8sclustertype, - "member-vnf-index": vnfr_data["member-vnf-index-ref"], - "kdu-name": kdur["kdu-name"], - "kdu-model": kdumodel, - "namespace": namespace} + step = "Instantiating KDU {}.{} in k8s cluster {}".format( + vnfr_data["member-vnf-index-ref"], + kdur["kdu-name"], + k8s_cluster_id, + ) + k8s_instance_info = { + "kdu-instance": None, + "k8scluster-uuid": cluster_uuid, + "k8scluster-type": k8sclustertype, + "member-vnf-index": vnfr_data["member-vnf-index-ref"], + "kdu-name": kdur["kdu-name"], + "kdu-model": kdumodel, + "namespace": namespace, + } db_path = "_admin.deployed.K8s.{}".format(index) db_nsr_update[db_path] = k8s_instance_info self.update_db_2("nsrs", nsr_id, db_nsr_update) - vnfd_with_id = find_in_list(db_vnfds, lambda vnf: vnf["_id"] == vnfd_id) + vnfd_with_id = find_in_list( + db_vnfds, lambda vnf: vnf["_id"] == vnfd_id + ) task = asyncio.ensure_future( - self._install_kdu(nsr_id, db_path, vnfr_data, kdu_index, kdud, vnfd_with_id, - k8s_instance_info, k8params=desc_params, timeout=600, vca_id=vca_id)) - self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "instantiate_KDU-{}".format(index), task) - task_instantiation_info[task] = "Deploying KDU {}".format(kdur["kdu-name"]) + self._install_kdu( + nsr_id, + db_path, + vnfr_data, + kdu_index, + kdud, + vnfd_with_id, + k8s_instance_info, + k8params=desc_params, + timeout=600, + vca_id=vca_id, + ) + ) + self.lcm_tasks.register( + "ns", + nsr_id, + nslcmop_id, + "instantiate_KDU-{}".format(index), + task, + ) + task_instantiation_info[task] = "Deploying KDU {}".format( + kdur["kdu-name"] + ) index += 1 @@ -2509,54 +3250,89 @@ class NsLcm(LcmBase): if db_nsr_update: self.update_db_2("nsrs", nsr_id, db_nsr_update) - def _deploy_n2vc(self, logging_text, db_nsr, db_vnfr, nslcmop_id, nsr_id, nsi_id, vnfd_id, vdu_id, - kdu_name, member_vnf_index, vdu_index, vdu_name, deploy_params, descriptor_config, - base_folder, task_instantiation_info, stage): + def _deploy_n2vc( + self, + logging_text, + db_nsr, + db_vnfr, + nslcmop_id, + nsr_id, + nsi_id, + vnfd_id, + vdu_id, + kdu_name, + member_vnf_index, + vdu_index, + vdu_name, + deploy_params, + descriptor_config, + base_folder, + task_instantiation_info, + stage, + ): # launch instantiate_N2VC in a asyncio task and register task object # Look where information of this charm is at database ._admin.deployed.VCA # if not found, create one entry and update database # fill db_nsr._admin.deployed.VCA. 
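        # For orientation, each slot written at _admin.deployed.VCA.<index> by this
        # method is a dict along the lines below (keys taken from the code further
        # down; the concrete values are illustrative only):
        #
        #   {
        #       "member-vnf-index": "1",
        #       "vdu_id": "mgmtVM",            # set for VDU-level charms, else None
        #       "kdu_name": None,              # set for KDU-level charms
        #       "vdu_count_index": 0,
        #       "operational-status": "init",
        #       "detailed-status": "",
        #       "step": "initial-deploy",
        #       "vnfd_id": "hackfest_vnfd",    # illustrative descriptor id
        #       "vdu_name": None,
        #       "type": "lxc_proxy_charm",     # one of the vca_type values below
        #       "ee_descriptor_id": "monitor-ee",  # illustrative EE descriptor id
        #   }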
- self.logger.debug(logging_text + "_deploy_n2vc vnfd_id={}, vdu_id={}".format(vnfd_id, vdu_id)) + self.logger.debug( + logging_text + "_deploy_n2vc vnfd_id={}, vdu_id={}".format(vnfd_id, vdu_id) + ) if "execution-environment-list" in descriptor_config: ee_list = descriptor_config.get("execution-environment-list", []) else: # other types as script are not supported ee_list = [] for ee_item in ee_list: - self.logger.debug(logging_text + "_deploy_n2vc ee_item juju={}, helm={}".format(ee_item.get('juju'), - ee_item.get("helm-chart"))) + self.logger.debug( + logging_text + + "_deploy_n2vc ee_item juju={}, helm={}".format( + ee_item.get("juju"), ee_item.get("helm-chart") + ) + ) ee_descriptor_id = ee_item.get("id") if ee_item.get("juju"): - vca_name = ee_item['juju'].get('charm') - vca_type = "lxc_proxy_charm" if ee_item['juju'].get('charm') is not None else "native_charm" - if ee_item['juju'].get('cloud') == "k8s": + vca_name = ee_item["juju"].get("charm") + vca_type = ( + "lxc_proxy_charm" + if ee_item["juju"].get("charm") is not None + else "native_charm" + ) + if ee_item["juju"].get("cloud") == "k8s": vca_type = "k8s_proxy_charm" - elif ee_item['juju'].get('proxy') is False: + elif ee_item["juju"].get("proxy") is False: vca_type = "native_charm" elif ee_item.get("helm-chart"): - vca_name = ee_item['helm-chart'] + vca_name = ee_item["helm-chart"] if ee_item.get("helm-version") and ee_item.get("helm-version") == "v2": vca_type = "helm" else: vca_type = "helm-v3" else: - self.logger.debug(logging_text + "skipping non juju neither charm configuration") + self.logger.debug( + logging_text + "skipping non juju neither charm configuration" + ) continue vca_index = -1 - for vca_index, vca_deployed in enumerate(db_nsr["_admin"]["deployed"]["VCA"]): + for vca_index, vca_deployed in enumerate( + db_nsr["_admin"]["deployed"]["VCA"] + ): if not vca_deployed: continue - if vca_deployed.get("member-vnf-index") == member_vnf_index and \ - vca_deployed.get("vdu_id") == vdu_id and \ - vca_deployed.get("kdu_name") == kdu_name and \ - vca_deployed.get("vdu_count_index", 0) == vdu_index and \ - vca_deployed.get("ee_descriptor_id") == ee_descriptor_id: + if ( + vca_deployed.get("member-vnf-index") == member_vnf_index + and vca_deployed.get("vdu_id") == vdu_id + and vca_deployed.get("kdu_name") == kdu_name + and vca_deployed.get("vdu_count_index", 0) == vdu_index + and vca_deployed.get("ee_descriptor_id") == ee_descriptor_id + ): break else: # not found, create one. 
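            # The scan above is a find-or-create lookup: when no deployed VCA matches,
            # the for/else falls through to the creation branch below. The same match
            # in isolated, simplified form (hedged sketch; note the real loop defaults
            # vdu_count_index to 0 rather than requiring an exact key):

            def find_vca_index(vca_list: list, match: dict) -> int:
                """Return the index of the first deployed VCA matching all keys, else -1."""
                for index, vca in enumerate(vca_list):
                    if vca and all(vca.get(key) == value for key, value in match.items()):
                        return index
                return -1

            # match = {"member-vnf-index": member_vnf_index, "vdu_id": vdu_id,
            #          "kdu_name": kdu_name, "vdu_count_index": vdu_index,
            #          "ee_descriptor_id": ee_descriptor_id}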
- target = "ns" if not member_vnf_index else "vnf/{}".format(member_vnf_index) + target = ( + "ns" if not member_vnf_index else "vnf/{}".format(member_vnf_index) + ) if vdu_id: target += "/vdu/{}/{}".format(vdu_id, vdu_index or 0) elif kdu_name: @@ -2570,18 +3346,18 @@ class NsLcm(LcmBase): "vdu_count_index": vdu_index, "operational-status": "init", # TODO revise "detailed-status": "", # TODO revise - "step": "initial-deploy", # TODO revise + "step": "initial-deploy", # TODO revise "vnfd_id": vnfd_id, "vdu_name": vdu_name, "type": vca_type, - "ee_descriptor_id": ee_descriptor_id + "ee_descriptor_id": ee_descriptor_id, } vca_index += 1 # create VCA and configurationStatus in db db_dict = { "_admin.deployed.VCA.{}".format(vca_index): vca_deployed, - "configurationStatus.{}".format(vca_index): dict() + "configurationStatus.{}".format(vca_index): dict(), } self.update_db_2("nsrs", nsr_id, db_dict) @@ -2609,12 +3385,21 @@ class NsLcm(LcmBase): stage=stage, vca_type=vca_type, vca_name=vca_name, - ee_config_descriptor=ee_item + ee_config_descriptor=ee_item, ) ) - self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "instantiate_N2VC-{}".format(vca_index), task_n2vc) - task_instantiation_info[task_n2vc] = self.task_name_deploy_vca + " {}.{}".format( - member_vnf_index or "", vdu_id or "") + self.lcm_tasks.register( + "ns", + nsr_id, + nslcmop_id, + "instantiate_N2VC-{}".format(vca_index), + task_n2vc, + ) + task_instantiation_info[ + task_n2vc + ] = self.task_name_deploy_vca + " {}.{}".format( + member_vnf_index or "", vdu_id or "" + ) @staticmethod def _create_nslcmop(nsr_id, operation, params): @@ -2628,7 +3413,8 @@ class NsLcm(LcmBase): # Raise exception if invalid arguments if not (nsr_id and operation and params): raise LcmException( - "Parameters 'nsr_id', 'operation' and 'params' needed to create primitive not provided") + "Parameters 'nsr_id', 'operation' and 'params' needed to create primitive not provided" + ) now = time() _id = str(uuid4()) nslcmop = { @@ -2646,7 +3432,7 @@ class NsLcm(LcmBase): "links": { "self": "/osm/nslcm/v1/ns_lcm_op_occs/" + _id, "nsInstance": "/osm/nslcm/v1/ns_instances/" + nsr_id, - } + }, } return nslcmop @@ -2658,7 +3444,7 @@ class NsLcm(LcmBase): return params def _get_terminate_primitive_params(self, seq, vnf_index): - primitive = seq.get('name') + primitive = seq.get("name") primitive_params = {} params = { "member_vnf_index": vnf_index, @@ -2671,8 +3457,8 @@ class NsLcm(LcmBase): # sub-operations def _retry_or_skip_suboperation(self, db_nslcmop, op_index): - op = deep_get(db_nslcmop, ('_admin', 'operations'), [])[op_index] - if op.get('operationState') == 'COMPLETED': + op = deep_get(db_nslcmop, ("_admin", "operations"), [])[op_index] + if op.get("operationState") == "COMPLETED": # b. Skip sub-operation # _ns_execute_primitive() or RO.create_action() will NOT be executed return self.SUBOPERATION_STATUS_SKIP @@ -2680,10 +3466,11 @@ class NsLcm(LcmBase): # c. retry executing sub-operation # The sub-operation exists, and operationState != 'COMPLETED' # Update operationState = 'PROCESSING' to indicate a retry. 
- operationState = 'PROCESSING' - detailed_status = 'In progress' + operationState = "PROCESSING" + detailed_status = "In progress" self._update_suboperation_status( - db_nslcmop, op_index, operationState, detailed_status) + db_nslcmop, op_index, operationState, detailed_status + ) # Return the sub-operation index # _ns_execute_primitive() or RO.create_action() will be called from scale() # with arguments extracted from the sub-operation @@ -2693,22 +3480,25 @@ class NsLcm(LcmBase): # Returns the index of the matching sub-operation, or SUBOPERATION_STATUS_NOT_FOUND if no match def _find_suboperation(self, db_nslcmop, match): if db_nslcmop and match: - op_list = db_nslcmop.get('_admin', {}).get('operations', []) + op_list = db_nslcmop.get("_admin", {}).get("operations", []) for i, op in enumerate(op_list): if all(op.get(k) == match[k] for k in match): return i return self.SUBOPERATION_STATUS_NOT_FOUND # Update status for a sub-operation given its index - def _update_suboperation_status(self, db_nslcmop, op_index, operationState, detailed_status): + def _update_suboperation_status( + self, db_nslcmop, op_index, operationState, detailed_status + ): # Update DB for HA tasks - q_filter = {'_id': db_nslcmop['_id']} - update_dict = {'_admin.operations.{}.operationState'.format(op_index): operationState, - '_admin.operations.{}.detailed-status'.format(op_index): detailed_status} - self.db.set_one("nslcmops", - q_filter=q_filter, - update_dict=update_dict, - fail_on_empty=False) + q_filter = {"_id": db_nslcmop["_id"]} + update_dict = { + "_admin.operations.{}.operationState".format(op_index): operationState, + "_admin.operations.{}.detailed-status".format(op_index): detailed_status, + } + self.db.set_one( + "nslcmops", q_filter=q_filter, update_dict=update_dict, fail_on_empty=False + ) # Add sub-operation, return the index of the added sub-operation # Optionally, set operationState, detailed-status, and operationType @@ -2717,40 +3507,54 @@ class NsLcm(LcmBase): # 'detailed-status' : status message # 'operationType': may be any type, in the case of scaling: 'PRE-SCALE' | 'POST-SCALE' # Status and operation type are currently only used for 'scale', but NOT for 'terminate' sub-operations. 
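
# Illustrative aside (not part of the patch): the retry/skip bookkeeping being
# reformatted above hinges on one idiom -- a sub-operation "matches" when every
# key of a match dict equals the stored value, and completed sub-operations are
# skipped on retry. A minimal, self-contained sketch of that lookup and the
# skip decision, using plain dicts instead of the Mongo-backed nslcmop record
# (the PROCESSING status write is omitted here):

SUBOPERATION_STATUS_NOT_FOUND = -1
SUBOPERATION_STATUS_SKIP = -2

def find_suboperation(op_list, match):
    # same test as _find_suboperation: all keys in `match` must be equal
    for i, op in enumerate(op_list):
        if all(op.get(k) == match[k] for k in match):
            return i
    return SUBOPERATION_STATUS_NOT_FOUND

def retry_or_skip(op_list, op_index):
    # mirrors _retry_or_skip_suboperation: completed work is never re-run
    if op_list[op_index].get("operationState") == "COMPLETED":
        return SUBOPERATION_STATUS_SKIP
    return op_index  # caller re-executes with the stored parameters

ops = [{"member_vnf_index": "1", "primitive": "touch", "operationState": "COMPLETED"}]
idx = find_suboperation(ops, {"member_vnf_index": "1", "primitive": "touch"})
assert retry_or_skip(ops, idx) == SUBOPERATION_STATUS_SKIP
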
- def _add_suboperation(self, db_nslcmop, vnf_index, vdu_id, vdu_count_index, vdu_name, primitive, - mapped_primitive_params, operationState=None, detailed_status=None, operationType=None, - RO_nsr_id=None, RO_scaling_info=None): + def _add_suboperation( + self, + db_nslcmop, + vnf_index, + vdu_id, + vdu_count_index, + vdu_name, + primitive, + mapped_primitive_params, + operationState=None, + detailed_status=None, + operationType=None, + RO_nsr_id=None, + RO_scaling_info=None, + ): if not db_nslcmop: return self.SUBOPERATION_STATUS_NOT_FOUND # Get the "_admin.operations" list, if it exists - db_nslcmop_admin = db_nslcmop.get('_admin', {}) - op_list = db_nslcmop_admin.get('operations') + db_nslcmop_admin = db_nslcmop.get("_admin", {}) + op_list = db_nslcmop_admin.get("operations") # Create or append to the "_admin.operations" list - new_op = {'member_vnf_index': vnf_index, - 'vdu_id': vdu_id, - 'vdu_count_index': vdu_count_index, - 'primitive': primitive, - 'primitive_params': mapped_primitive_params} + new_op = { + "member_vnf_index": vnf_index, + "vdu_id": vdu_id, + "vdu_count_index": vdu_count_index, + "primitive": primitive, + "primitive_params": mapped_primitive_params, + } if operationState: - new_op['operationState'] = operationState + new_op["operationState"] = operationState if detailed_status: - new_op['detailed-status'] = detailed_status + new_op["detailed-status"] = detailed_status if operationType: - new_op['lcmOperationType'] = operationType + new_op["lcmOperationType"] = operationType if RO_nsr_id: - new_op['RO_nsr_id'] = RO_nsr_id + new_op["RO_nsr_id"] = RO_nsr_id if RO_scaling_info: - new_op['RO_scaling_info'] = RO_scaling_info + new_op["RO_scaling_info"] = RO_scaling_info if not op_list: # No existing operations, create key 'operations' with current operation as first list element - db_nslcmop_admin.update({'operations': [new_op]}) - op_list = db_nslcmop_admin.get('operations') + db_nslcmop_admin.update({"operations": [new_op]}) + op_list = db_nslcmop_admin.get("operations") else: # Existing operations, append operation to list op_list.append(new_op) - db_nslcmop_update = {'_admin.operations': op_list} - self.update_db_2("nslcmops", db_nslcmop['_id'], db_nslcmop_update) + db_nslcmop_update = {"_admin.operations": op_list} + self.update_db_2("nslcmops", db_nslcmop["_id"], db_nslcmop_update) op_index = len(op_list) - 1 return op_index @@ -2761,22 +3565,30 @@ class NsLcm(LcmBase): # a. New: First time execution, return SUBOPERATION_STATUS_NEW # b. Skip: Existing sub-operation exists, operationState == 'COMPLETED', return SUBOPERATION_STATUS_SKIP # c. 
retry: Existing sub-operation exists, operationState != 'COMPLETED', return op_index to re-execute - def _check_or_add_scale_suboperation(self, db_nslcmop, vnf_index, vnf_config_primitive, primitive_params, - operationType, RO_nsr_id=None, RO_scaling_info=None): + def _check_or_add_scale_suboperation( + self, + db_nslcmop, + vnf_index, + vnf_config_primitive, + primitive_params, + operationType, + RO_nsr_id=None, + RO_scaling_info=None, + ): # Find this sub-operation if RO_nsr_id and RO_scaling_info: - operationType = 'SCALE-RO' + operationType = "SCALE-RO" match = { - 'member_vnf_index': vnf_index, - 'RO_nsr_id': RO_nsr_id, - 'RO_scaling_info': RO_scaling_info, + "member_vnf_index": vnf_index, + "RO_nsr_id": RO_nsr_id, + "RO_scaling_info": RO_scaling_info, } else: match = { - 'member_vnf_index': vnf_index, - 'primitive': vnf_config_primitive, - 'primitive_params': primitive_params, - 'lcmOperationType': operationType + "member_vnf_index": vnf_index, + "primitive": vnf_config_primitive, + "primitive_params": primitive_params, + "lcmOperationType": operationType, } op_index = self._find_suboperation(db_nslcmop, match) if op_index == self.SUBOPERATION_STATUS_NOT_FOUND: @@ -2794,21 +3606,23 @@ class NsLcm(LcmBase): RO_nsr_id = None RO_scaling_info = None # Initial status for sub-operation - operationState = 'PROCESSING' - detailed_status = 'In progress' + operationState = "PROCESSING" + detailed_status = "In progress" # Add sub-operation for pre/post-scaling (zero or more operations) - self._add_suboperation(db_nslcmop, - vnf_index, - vdu_id, - vdu_count_index, - vdu_name, - vnf_config_primitive, - primitive_params, - operationState, - detailed_status, - operationType, - RO_nsr_id, - RO_scaling_info) + self._add_suboperation( + db_nslcmop, + vnf_index, + vdu_id, + vdu_count_index, + vdu_name, + vnf_config_primitive, + primitive_params, + operationState, + detailed_status, + operationType, + RO_nsr_id, + RO_scaling_info, + ) return self.SUBOPERATION_STATUS_NEW else: # Return either SUBOPERATION_STATUS_SKIP (operationState == 'COMPLETED'), @@ -2850,7 +3664,8 @@ class NsLcm(LcmBase): """ self.logger.debug( - logging_text + " vca_index: {}, vca_deployed: {}, config_descriptor: {}, destroy_ee: {}".format( + logging_text + + " vca_index: {}, vca_deployed: {}, config_descriptor: {}, destroy_ee: {}".format( vca_index, vca_deployed, config_descriptor, destroy_ee ) ) @@ -2860,7 +3675,9 @@ class NsLcm(LcmBase): # execute terminate_primitives if exec_primitives: terminate_primitives = get_ee_sorted_terminate_config_primitive_list( - config_descriptor.get("terminate-config-primitive"), vca_deployed.get("ee_descriptor_id")) + config_descriptor.get("terminate-config-primitive"), + vca_deployed.get("ee_descriptor_id"), + ) vdu_id = vca_deployed.get("vdu_id") vdu_count_index = vca_deployed.get("vdu_count_index") vdu_name = vca_deployed.get("vdu_name") @@ -2869,24 +3686,30 @@ class NsLcm(LcmBase): for seq in terminate_primitives: # For each sequence in list, get primitive and call _ns_execute_primitive() step = "Calling terminate action for vnf_member_index={} primitive={}".format( - vnf_index, seq.get("name")) + vnf_index, seq.get("name") + ) self.logger.debug(logging_text + step) # Create the primitive for each sequence, i.e. 
"primitive": "touch" - primitive = seq.get('name') - mapped_primitive_params = self._get_terminate_primitive_params(seq, vnf_index) + primitive = seq.get("name") + mapped_primitive_params = self._get_terminate_primitive_params( + seq, vnf_index + ) # Add sub-operation - self._add_suboperation(db_nslcmop, - vnf_index, - vdu_id, - vdu_count_index, - vdu_name, - primitive, - mapped_primitive_params) + self._add_suboperation( + db_nslcmop, + vnf_index, + vdu_id, + vdu_count_index, + vdu_name, + primitive, + mapped_primitive_params, + ) # Sub-operations: Call _ns_execute_primitive() instead of action() try: result, result_detail = await self._ns_execute_primitive( - vca_deployed["ee_id"], primitive, + vca_deployed["ee_id"], + primitive, mapped_primitive_params, vca_type=vca_type, vca_id=vca_id, @@ -2894,13 +3717,19 @@ class NsLcm(LcmBase): except LcmException: # this happens when VCA is not deployed. In this case it is not needed to terminate continue - result_ok = ['COMPLETED', 'PARTIALLY_COMPLETED'] + result_ok = ["COMPLETED", "PARTIALLY_COMPLETED"] if result not in result_ok: - raise LcmException("terminate_primitive {} for vnf_member_index={} fails with " - "error {}".format(seq.get("name"), vnf_index, result_detail)) + raise LcmException( + "terminate_primitive {} for vnf_member_index={} fails with " + "error {}".format(seq.get("name"), vnf_index, result_detail) + ) # set that this VCA do not need terminated - db_update_entry = "_admin.deployed.VCA.{}.needed_terminate".format(vca_index) - self.update_db_2("nsrs", db_nslcmop["nsInstanceId"], {db_update_entry: False}) + db_update_entry = "_admin.deployed.VCA.{}.needed_terminate".format( + vca_index + ) + self.update_db_2( + "nsrs", db_nslcmop["nsInstanceId"], {db_update_entry: False} + ) if vca_deployed.get("prometheus_jobs") and self.prometheus: await self.prometheus.update(remove_jobs=vca_deployed["prometheus_jobs"]) @@ -2913,7 +3742,7 @@ class NsLcm(LcmBase): ) async def _delete_all_N2VC(self, db_nsr: dict, vca_id: str = None): - self._write_all_config_status(db_nsr=db_nsr, status='TERMINATING') + self._write_all_config_status(db_nsr=db_nsr, status="TERMINATING") namespace = "." + db_nsr["_id"] try: await self.n2vc.delete_namespace( @@ -2923,9 +3752,11 @@ class NsLcm(LcmBase): ) except N2VCNotFound: # already deleted. Skip pass - self._write_all_config_status(db_nsr=db_nsr, status='DELETED') + self._write_all_config_status(db_nsr=db_nsr, status="DELETED") - async def _terminate_RO(self, logging_text, nsr_deployed, nsr_id, nslcmop_id, stage): + async def _terminate_RO( + self, logging_text, nsr_deployed, nsr_id, nslcmop_id, stage + ): """ Terminates a deployment from RO :param logging_text: @@ -2952,15 +3783,22 @@ class NsLcm(LcmBase): self._write_op_status(nslcmop_id, stage) desc = await self.RO.delete("ns", ro_nsr_id) ro_delete_action = desc["action_id"] - db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = ro_delete_action + db_nsr_update[ + "_admin.deployed.RO.nsr_delete_action_id" + ] = ro_delete_action db_nsr_update["_admin.deployed.RO.nsr_id"] = None db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED" if ro_delete_action: # wait until NS is deleted from VIM stage[2] = "Waiting ns deleted from VIM." 
detailed_status_old = None - self.logger.debug(logging_text + stage[2] + " RO_id={} ro_delete_action={}".format(ro_nsr_id, - ro_delete_action)) + self.logger.debug( + logging_text + + stage[2] + + " RO_id={} ro_delete_action={}".format( + ro_nsr_id, ro_delete_action + ) + ) self.update_db_2("nsrs", nsr_id, db_nsr_update) self._write_op_status(nslcmop_id, stage) @@ -2970,7 +3808,8 @@ class NsLcm(LcmBase): "ns", item_id_name=ro_nsr_id, extra_item="action", - extra_item_id=ro_delete_action) + extra_item_id=ro_delete_action, + ) # deploymentStatus self._on_update_ro_db(nsrs_id=nsr_id, ro_descriptor=desc) @@ -2985,7 +3824,11 @@ class NsLcm(LcmBase): db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED" break else: - assert False, "ROclient.check_action_status returns unknown {}".format(ns_status) + assert ( + False + ), "ROclient.check_action_status returns unknown {}".format( + ns_status + ) if stage[2] != detailed_status_old: detailed_status_old = stage[2] db_nsr_update["detailed-status"] = " ".join(stage) @@ -2994,21 +3837,34 @@ class NsLcm(LcmBase): await asyncio.sleep(5, loop=self.loop) delete_timeout -= 5 else: # delete_timeout <= 0: - raise ROclient.ROClientException("Timeout waiting ns deleted from VIM") + raise ROclient.ROClientException( + "Timeout waiting ns deleted from VIM" + ) except Exception as e: self.update_db_2("nsrs", nsr_id, db_nsr_update) - if isinstance(e, ROclient.ROClientException) and e.http_code == 404: # not found + if ( + isinstance(e, ROclient.ROClientException) and e.http_code == 404 + ): # not found db_nsr_update["_admin.deployed.RO.nsr_id"] = None db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED" db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None - self.logger.debug(logging_text + "RO_ns_id={} already deleted".format(ro_nsr_id)) - elif isinstance(e, ROclient.ROClientException) and e.http_code == 409: # conflict + self.logger.debug( + logging_text + "RO_ns_id={} already deleted".format(ro_nsr_id) + ) + elif ( + isinstance(e, ROclient.ROClientException) and e.http_code == 409 + ): # conflict failed_detail.append("delete conflict: {}".format(e)) - self.logger.debug(logging_text + "RO_ns_id={} delete conflict: {}".format(ro_nsr_id, e)) + self.logger.debug( + logging_text + + "RO_ns_id={} delete conflict: {}".format(ro_nsr_id, e) + ) else: failed_detail.append("delete error: {}".format(e)) - self.logger.error(logging_text + "RO_ns_id={} delete error: {}".format(ro_nsr_id, e)) + self.logger.error( + logging_text + "RO_ns_id={} delete error: {}".format(ro_nsr_id, e) + ) # Delete nsd if not failed_detail and deep_get(nsr_deployed, ("RO", "nsd_id")): @@ -3019,17 +3875,29 @@ class NsLcm(LcmBase): self.update_db_2("nsrs", nsr_id, db_nsr_update) self._write_op_status(nslcmop_id, stage) await self.RO.delete("nsd", ro_nsd_id) - self.logger.debug(logging_text + "ro_nsd_id={} deleted".format(ro_nsd_id)) + self.logger.debug( + logging_text + "ro_nsd_id={} deleted".format(ro_nsd_id) + ) db_nsr_update["_admin.deployed.RO.nsd_id"] = None except Exception as e: - if isinstance(e, ROclient.ROClientException) and e.http_code == 404: # not found + if ( + isinstance(e, ROclient.ROClientException) and e.http_code == 404 + ): # not found db_nsr_update["_admin.deployed.RO.nsd_id"] = None - self.logger.debug(logging_text + "ro_nsd_id={} already deleted".format(ro_nsd_id)) - elif isinstance(e, ROclient.ROClientException) and e.http_code == 409: # conflict - failed_detail.append("ro_nsd_id={} delete conflict: {}".format(ro_nsd_id, e)) + self.logger.debug( + 
logging_text + "ro_nsd_id={} already deleted".format(ro_nsd_id) + ) + elif ( + isinstance(e, ROclient.ROClientException) and e.http_code == 409 + ): # conflict + failed_detail.append( + "ro_nsd_id={} delete conflict: {}".format(ro_nsd_id, e) + ) self.logger.debug(logging_text + failed_detail[-1]) else: - failed_detail.append("ro_nsd_id={} delete error: {}".format(ro_nsd_id, e)) + failed_detail.append( + "ro_nsd_id={} delete error: {}".format(ro_nsd_id, e) + ) self.logger.error(logging_text + failed_detail[-1]) if not failed_detail and deep_get(nsr_deployed, ("RO", "vnfd")): @@ -3038,23 +3906,41 @@ class NsLcm(LcmBase): continue try: ro_vnfd_id = vnf_deployed["id"] - stage[2] = "Deleting member_vnf_index={} ro_vnfd_id={} from RO.".format( - vnf_deployed["member-vnf-index"], ro_vnfd_id) + stage[ + 2 + ] = "Deleting member_vnf_index={} ro_vnfd_id={} from RO.".format( + vnf_deployed["member-vnf-index"], ro_vnfd_id + ) db_nsr_update["detailed-status"] = " ".join(stage) self.update_db_2("nsrs", nsr_id, db_nsr_update) self._write_op_status(nslcmop_id, stage) await self.RO.delete("vnfd", ro_vnfd_id) - self.logger.debug(logging_text + "ro_vnfd_id={} deleted".format(ro_vnfd_id)) + self.logger.debug( + logging_text + "ro_vnfd_id={} deleted".format(ro_vnfd_id) + ) db_nsr_update["_admin.deployed.RO.vnfd.{}.id".format(index)] = None except Exception as e: - if isinstance(e, ROclient.ROClientException) and e.http_code == 404: # not found - db_nsr_update["_admin.deployed.RO.vnfd.{}.id".format(index)] = None - self.logger.debug(logging_text + "ro_vnfd_id={} already deleted ".format(ro_vnfd_id)) - elif isinstance(e, ROclient.ROClientException) and e.http_code == 409: # conflict - failed_detail.append("ro_vnfd_id={} delete conflict: {}".format(ro_vnfd_id, e)) + if ( + isinstance(e, ROclient.ROClientException) and e.http_code == 404 + ): # not found + db_nsr_update[ + "_admin.deployed.RO.vnfd.{}.id".format(index) + ] = None + self.logger.debug( + logging_text + + "ro_vnfd_id={} already deleted ".format(ro_vnfd_id) + ) + elif ( + isinstance(e, ROclient.ROClientException) and e.http_code == 409 + ): # conflict + failed_detail.append( + "ro_vnfd_id={} delete conflict: {}".format(ro_vnfd_id, e) + ) self.logger.debug(logging_text + failed_detail[-1]) else: - failed_detail.append("ro_vnfd_id={} delete error: {}".format(ro_vnfd_id, e)) + failed_detail.append( + "ro_vnfd_id={} delete error: {}".format(ro_vnfd_id, e) + ) self.logger.error(logging_text + failed_detail[-1]) if failed_detail: @@ -3070,7 +3956,7 @@ class NsLcm(LcmBase): async def terminate(self, nsr_id, nslcmop_id): # Try to lock HA task here - task_is_locked_by_me = self.lcm_tasks.lock_HA('ns', 'nslcmops', nslcmop_id) + task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id) if not task_is_locked_by_me: return @@ -3081,16 +3967,20 @@ class NsLcm(LcmBase): db_nslcmop = None operation_params = None exc = None - error_list = [] # annotates all failed error messages + error_list = [] # annotates all failed error messages db_nslcmop_update = {} autoremove = False # autoremove after terminated tasks_dict_info = {} db_nsr_update = {} - stage = ["Stage 1/3: Preparing task.", "Waiting for previous operations to terminate.", ""] + stage = [ + "Stage 1/3: Preparing task.", + "Waiting for previous operations to terminate.", + "", + ] # ^ contains [stage, step, VIM-status] try: # wait for any previous tasks in process - await self.lcm_tasks.waitfor_related_HA("ns", 'nslcmops', nslcmop_id) + await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", 
nslcmop_id) stage[1] = "Getting nslcmop={} from db.".format(nslcmop_id) db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id}) @@ -3107,20 +3997,18 @@ class NsLcm(LcmBase): ns_state="TERMINATING", current_operation="TERMINATING", current_operation_id=nslcmop_id, - other_update=db_nsr_update - ) - self._write_op_status( - op_id=nslcmop_id, - queuePosition=0, - stage=stage + other_update=db_nsr_update, ) + self._write_op_status(op_id=nslcmop_id, queuePosition=0, stage=stage) nsr_deployed = deepcopy(db_nsr["_admin"].get("deployed")) or {} if db_nsr["_admin"]["nsState"] == "NOT_INSTANTIATED": return stage[1] = "Getting vnf descriptors from db." db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id}) - db_vnfrs_dict = {db_vnfr["member-vnf-index-ref"]: db_vnfr for db_vnfr in db_vnfrs_list} + db_vnfrs_dict = { + db_vnfr["member-vnf-index-ref"]: db_vnfr for db_vnfr in db_vnfrs_list + } db_vnfds_from_id = {} db_vnfds_from_member_index = {} # Loop over VNFRs @@ -3129,7 +4017,9 @@ class NsLcm(LcmBase): if vnfd_id not in db_vnfds_from_id: vnfd = self.db.get_one("vnfds", {"_id": vnfd_id}) db_vnfds_from_id[vnfd_id] = vnfd - db_vnfds_from_member_index[vnfr["member-vnf-index-ref"]] = db_vnfds_from_id[vnfd_id] + db_vnfds_from_member_index[ + vnfr["member-vnf-index-ref"] + ] = db_vnfds_from_id[vnfd_id] # Destroy individual execution environments when there are terminating primitives. # Rest of EE will be deleted at once @@ -3160,11 +4050,14 @@ class NsLcm(LcmBase): db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]] config_descriptor = get_configuration(db_vnfd, db_vnfd["id"]) vca_type = vca.get("type") - exec_terminate_primitives = (not operation_params.get("skip_terminate_primitives") and - vca.get("needed_terminate")) + exec_terminate_primitives = not operation_params.get( + "skip_terminate_primitives" + ) and vca.get("needed_terminate") # For helm we must destroy_ee. Also for native_charm, as juju_model cannot be deleted if there are # pending native charms - destroy_ee = True if vca_type in ("helm", "helm-v3", "native_charm") else False + destroy_ee = ( + True if vca_type in ("helm", "helm-v3", "native_charm") else False + ) # self.logger.debug(logging_text + "vca_index: {}, ee_id: {}, vca_type: {} destroy_ee: {}".format( # vca_index, vca.get("ee_id"), vca_type, destroy_ee)) task = asyncio.ensure_future( @@ -3183,13 +4076,20 @@ class NsLcm(LcmBase): # wait for pending tasks of terminate primitives if tasks_dict_info: - self.logger.debug(logging_text + 'Waiting for tasks {}'.format(list(tasks_dict_info.keys()))) - error_list = await self._wait_for_tasks(logging_text, tasks_dict_info, - min(self.timeout_charm_delete, timeout_ns_terminate), - stage, nslcmop_id) + self.logger.debug( + logging_text + + "Waiting for tasks {}".format(list(tasks_dict_info.keys())) + ) + error_list = await self._wait_for_tasks( + logging_text, + tasks_dict_info, + min(self.timeout_charm_delete, timeout_ns_terminate), + stage, + nslcmop_id, + ) tasks_dict_info.clear() if error_list: - return # raise LcmException("; ".join(error_list)) + return # raise LcmException("; ".join(error_list)) # remove All execution environments at once stage[0] = "Stage 3/3 delete all." @@ -3201,7 +4101,7 @@ class NsLcm(LcmBase): task_delete_ee = asyncio.ensure_future( asyncio.wait_for( self._delete_all_N2VC(db_nsr=db_nsr, vca_id=vca_id), - timeout=self.timeout_charm_delete + timeout=self.timeout_charm_delete, ) ) # task_delete_ee = asyncio.ensure_future(self.n2vc.delete_namespace(namespace="." 
+ nsr_id)) @@ -3226,32 +4126,54 @@ class NsLcm(LcmBase): ) ) else: - self.logger.error(logging_text + "Unknown k8s deployment type {}". - format(kdu.get("k8scluster-type"))) + self.logger.error( + logging_text + + "Unknown k8s deployment type {}".format( + kdu.get("k8scluster-type") + ) + ) continue - tasks_dict_info[task_delete_kdu_instance] = "Terminating KDU '{}'".format(kdu.get("kdu-name")) + tasks_dict_info[ + task_delete_kdu_instance + ] = "Terminating KDU '{}'".format(kdu.get("kdu-name")) # remove from RO stage[1] = "Deleting ns from VIM." if self.ng_ro: task_delete_ro = asyncio.ensure_future( - self._terminate_ng_ro(logging_text, nsr_deployed, nsr_id, nslcmop_id, stage)) + self._terminate_ng_ro( + logging_text, nsr_deployed, nsr_id, nslcmop_id, stage + ) + ) else: task_delete_ro = asyncio.ensure_future( - self._terminate_RO(logging_text, nsr_deployed, nsr_id, nslcmop_id, stage)) + self._terminate_RO( + logging_text, nsr_deployed, nsr_id, nslcmop_id, stage + ) + ) tasks_dict_info[task_delete_ro] = "Removing deployment from VIM" # rest of staff will be done at finally - except (ROclient.ROClientException, DbException, LcmException, N2VCException) as e: + except ( + ROclient.ROClientException, + DbException, + LcmException, + N2VCException, + ) as e: self.logger.error(logging_text + "Exit Exception {}".format(e)) exc = e except asyncio.CancelledError: - self.logger.error(logging_text + "Cancelled Exception while '{}'".format(stage[1])) + self.logger.error( + logging_text + "Cancelled Exception while '{}'".format(stage[1]) + ) exc = "Operation was cancelled" except Exception as e: exc = traceback.format_exc() - self.logger.critical(logging_text + "Exit Exception while '{}': {}".format(stage[1], e), exc_info=True) + self.logger.critical( + logging_text + "Exit Exception while '{}': {}".format(stage[1], e), + exc_info=True, + ) finally: if exc: error_list.append(str(exc)) @@ -3260,8 +4182,13 @@ class NsLcm(LcmBase): if tasks_dict_info: stage[1] = "Waiting for terminate pending tasks." 
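
# Illustrative aside (not part of the patch): terminate() above tracks every
# spawned coroutine in tasks_dict_info (task -> human-readable label) and then
# drains them, turning failures into labelled error strings instead of letting
# one exception abort the rest. A simplified, single-wait sketch of that
# pattern (the real _wait_for_tasks loops with FIRST_COMPLETED to report
# progress incrementally); nothing here is taken from OSM itself:

import asyncio

async def drain(tasks_dict_info, timeout):
    """Await all tasks; return a list of 'label: error' strings."""
    errors = []
    done, pending = await asyncio.wait(tasks_dict_info.keys(), timeout=timeout)
    for task in pending:  # deadline hit before completion
        errors.append(tasks_dict_info[task] + ": Timeout")
        task.cancel()
    for task in done:
        exc = task.exception()
        if exc:
            errors.append("{}: {}".format(tasks_dict_info[task], exc))
    return errors

async def _demo():
    async def ok():
        return "fine"
    async def boom():
        raise RuntimeError("delete conflict")
    info = {asyncio.ensure_future(ok()): "Removing deployment from VIM",
            asyncio.ensure_future(boom()): "Terminating KDU 'x'"}
    print(await drain(info, timeout=5))  # -> ["Terminating KDU 'x': delete conflict"]

asyncio.run(_demo())
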
self.logger.debug(logging_text + stage[1]) - error_list += await self._wait_for_tasks(logging_text, tasks_dict_info, timeout_ns_terminate, - stage, nslcmop_id) + error_list += await self._wait_for_tasks( + logging_text, + tasks_dict_info, + timeout_ns_terminate, + stage, + nslcmop_id, + ) stage[1] = stage[2] = "" except asyncio.CancelledError: error_list.append("Cancelled") @@ -3272,11 +4199,17 @@ class NsLcm(LcmBase): if error_list: error_detail = "; ".join(error_list) # self.logger.error(logging_text + error_detail) - error_description_nslcmop = '{} Detail: {}'.format(stage[0], error_detail) - error_description_nsr = 'Operation: TERMINATING.{}, {}.'.format(nslcmop_id, stage[0]) + error_description_nslcmop = "{} Detail: {}".format( + stage[0], error_detail + ) + error_description_nsr = "Operation: TERMINATING.{}, {}.".format( + nslcmop_id, stage[0] + ) db_nsr_update["operational-status"] = "failed" - db_nsr_update["detailed-status"] = error_description_nsr + " Detail: " + error_detail + db_nsr_update["detailed-status"] = ( + error_description_nsr + " Detail: " + error_detail + ) db_nslcmop_update["detailed-status"] = error_detail nslcmop_operation_state = "FAILED" ns_state = "BROKEN" @@ -3298,7 +4231,7 @@ class NsLcm(LcmBase): current_operation_id=None, error_description=error_description_nsr, error_detail=error_detail, - other_update=db_nsr_update + other_update=db_nsr_update, ) self._write_op_status( op_id=nslcmop_id, @@ -3309,25 +4242,44 @@ class NsLcm(LcmBase): ) if ns_state == "NOT_INSTANTIATED": try: - self.db.set_list("vnfrs", {"nsr-id-ref": nsr_id}, {"_admin.nsState": "NOT_INSTANTIATED"}) + self.db.set_list( + "vnfrs", + {"nsr-id-ref": nsr_id}, + {"_admin.nsState": "NOT_INSTANTIATED"}, + ) except DbException as e: - self.logger.warn(logging_text + 'Error writing VNFR status for nsr-id-ref: {} -> {}'. 
- format(nsr_id, e)) + self.logger.warn( + logging_text + + "Error writing VNFR status for nsr-id-ref: {} -> {}".format( + nsr_id, e + ) + ) if operation_params: autoremove = operation_params.get("autoremove", False) if nslcmop_operation_state: try: - await self.msg.aiowrite("ns", "terminated", {"nsr_id": nsr_id, "nslcmop_id": nslcmop_id, - "operationState": nslcmop_operation_state, - "autoremove": autoremove}, - loop=self.loop) + await self.msg.aiowrite( + "ns", + "terminated", + { + "nsr_id": nsr_id, + "nslcmop_id": nslcmop_id, + "operationState": nslcmop_operation_state, + "autoremove": autoremove, + }, + loop=self.loop, + ) except Exception as e: - self.logger.error(logging_text + "kafka_write notification Exception {}".format(e)) + self.logger.error( + logging_text + "kafka_write notification Exception {}".format(e) + ) self.logger.debug(logging_text + "Exit") self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_terminate") - async def _wait_for_tasks(self, logging_text, created_tasks_info, timeout, stage, nslcmop_id, nsr_id=None): + async def _wait_for_tasks( + self, logging_text, created_tasks_info, timeout, stage, nslcmop_id, nsr_id=None + ): time_start = time() error_detail_list = [] error_list = [] @@ -3339,10 +4291,11 @@ class NsLcm(LcmBase): while pending_tasks: new_error = None _timeout = timeout + time_start - time() - done, pending_tasks = await asyncio.wait(pending_tasks, timeout=_timeout, - return_when=asyncio.FIRST_COMPLETED) + done, pending_tasks = await asyncio.wait( + pending_tasks, timeout=_timeout, return_when=asyncio.FIRST_COMPLETED + ) num_done += len(done) - if not done: # Timeout + if not done: # Timeout for task in pending_tasks: new_error = created_tasks_info[task] + ": Timeout" error_detail_list.append(new_error) @@ -3359,20 +4312,45 @@ class NsLcm(LcmBase): new_error = created_tasks_info[task] + ": {}".format(exc) error_list.append(created_tasks_info[task]) error_detail_list.append(new_error) - if isinstance(exc, (str, DbException, N2VCException, ROclient.ROClientException, LcmException, - K8sException, NgRoException)): + if isinstance( + exc, + ( + str, + DbException, + N2VCException, + ROclient.ROClientException, + LcmException, + K8sException, + NgRoException, + ), + ): self.logger.error(logging_text + new_error) else: - exc_traceback = "".join(traceback.format_exception(None, exc, exc.__traceback__)) - self.logger.error(logging_text + created_tasks_info[task] + " " + exc_traceback) + exc_traceback = "".join( + traceback.format_exception(None, exc, exc.__traceback__) + ) + self.logger.error( + logging_text + + created_tasks_info[task] + + " " + + exc_traceback + ) else: - self.logger.debug(logging_text + created_tasks_info[task] + ": Done") + self.logger.debug( + logging_text + created_tasks_info[task] + ": Done" + ) stage[1] = "{}/{}.".format(num_done, num_tasks) if new_error: stage[1] += " Errors: " + ". ".join(error_detail_list) + "." if nsr_id: # update also nsr - self.update_db_2("nsrs", nsr_id, {"errorDescription": "Error at: " + ", ".join(error_list), - "errorDetail": ". ".join(error_detail_list)}) + self.update_db_2( + "nsrs", + nsr_id, + { + "errorDescription": "Error at: " + ", ".join(error_list), + "errorDetail": ". 
".join(error_detail_list), + }, + ) self._write_op_status(nslcmop_id, stage) return error_detail_list @@ -3396,46 +4374,77 @@ class NsLcm(LcmBase): calculated_params[param_name] = parameter["value"] else: calculated_params[param_name] = parameter["default-value"] - if isinstance(calculated_params[param_name], str) and calculated_params[param_name].startswith("<") \ - and calculated_params[param_name].endswith(">"): + if ( + isinstance(calculated_params[param_name], str) + and calculated_params[param_name].startswith("<") + and calculated_params[param_name].endswith(">") + ): if calculated_params[param_name][1:-1] in instantiation_params: - calculated_params[param_name] = instantiation_params[calculated_params[param_name][1:-1]] + calculated_params[param_name] = instantiation_params[ + calculated_params[param_name][1:-1] + ] else: - raise LcmException("Parameter {} needed to execute primitive {} not provided". - format(calculated_params[param_name], primitive_desc["name"])) + raise LcmException( + "Parameter {} needed to execute primitive {} not provided".format( + calculated_params[param_name], primitive_desc["name"] + ) + ) else: - raise LcmException("Parameter {} needed to execute primitive {} not provided". - format(param_name, primitive_desc["name"])) + raise LcmException( + "Parameter {} needed to execute primitive {} not provided".format( + param_name, primitive_desc["name"] + ) + ) if isinstance(calculated_params[param_name], (dict, list, tuple)): - calculated_params[param_name] = yaml.safe_dump(calculated_params[param_name], - default_flow_style=True, width=256) - elif isinstance(calculated_params[param_name], str) and calculated_params[param_name].startswith("!!yaml "): + calculated_params[param_name] = yaml.safe_dump( + calculated_params[param_name], default_flow_style=True, width=256 + ) + elif isinstance(calculated_params[param_name], str) and calculated_params[ + param_name + ].startswith("!!yaml "): calculated_params[param_name] = calculated_params[param_name][7:] if parameter.get("data-type") == "INTEGER": try: calculated_params[param_name] = int(calculated_params[param_name]) except ValueError: # error converting string to int raise LcmException( - "Parameter {} of primitive {} must be integer".format(param_name, primitive_desc["name"])) + "Parameter {} of primitive {} must be integer".format( + param_name, primitive_desc["name"] + ) + ) elif parameter.get("data-type") == "BOOLEAN": - calculated_params[param_name] = not ((str(calculated_params[param_name])).lower() == 'false') + calculated_params[param_name] = not ( + (str(calculated_params[param_name])).lower() == "false" + ) # add always ns_config_info if primitive name is config if primitive_desc["name"] == "config": if "ns_config_info" in instantiation_params: - calculated_params["ns_config_info"] = instantiation_params["ns_config_info"] + calculated_params["ns_config_info"] = instantiation_params[ + "ns_config_info" + ] return calculated_params - def _look_for_deployed_vca(self, deployed_vca, member_vnf_index, vdu_id, vdu_count_index, kdu_name=None, - ee_descriptor_id=None): + def _look_for_deployed_vca( + self, + deployed_vca, + member_vnf_index, + vdu_id, + vdu_count_index, + kdu_name=None, + ee_descriptor_id=None, + ): # find vca_deployed record for this action. Raise LcmException if not found or there is not any id. 
for vca in deployed_vca: if not vca: continue if member_vnf_index != vca["member-vnf-index"] or vdu_id != vca["vdu_id"]: continue - if vdu_count_index is not None and vdu_count_index != vca["vdu_count_index"]: + if ( + vdu_count_index is not None + and vdu_count_index != vca["vdu_count_index"] + ): continue if kdu_name and kdu_name != vca["kdu_name"]: continue @@ -3444,16 +4453,28 @@ class NsLcm(LcmBase): break else: # vca_deployed not found - raise LcmException("charm for member_vnf_index={} vdu_id={}.{} kdu_name={} execution-environment-list.id={}" - " is not deployed".format(member_vnf_index, vdu_id, vdu_count_index, kdu_name, - ee_descriptor_id)) + raise LcmException( + "charm for member_vnf_index={} vdu_id={}.{} kdu_name={} execution-environment-list.id={}" + " is not deployed".format( + member_vnf_index, + vdu_id, + vdu_count_index, + kdu_name, + ee_descriptor_id, + ) + ) # get ee_id ee_id = vca.get("ee_id") - vca_type = vca.get("type", "lxc_proxy_charm") # default value for backward compatibility - proxy charm + vca_type = vca.get( + "type", "lxc_proxy_charm" + ) # default value for backward compatibility - proxy charm if not ee_id: - raise LcmException("charm for member_vnf_index={} vdu_id={} kdu_name={} vdu_count_index={} has not " - "execution environment" - .format(member_vnf_index, vdu_id, kdu_name, vdu_count_index)) + raise LcmException( + "charm for member_vnf_index={} vdu_id={} kdu_name={} vdu_count_index={} has not " + "execution environment".format( + member_vnf_index, vdu_id, kdu_name, vdu_count_index + ) + ) return ee_id, vca_type async def _ns_execute_primitive( @@ -3486,7 +4507,8 @@ class NsLcm(LcmBase): db_dict=db_dict, vca_id=vca_id, ), - timeout=timeout or self.timeout_primitive) + timeout=timeout or self.timeout_primitive, + ) # execution was OK break except asyncio.CancelledError: @@ -3496,18 +4518,22 @@ class NsLcm(LcmBase): e = "Timeout" retries -= 1 if retries >= 0: - self.logger.debug('Error executing action {} on {} -> {}'.format(primitive, ee_id, e)) + self.logger.debug( + "Error executing action {} on {} -> {}".format( + primitive, ee_id, e + ) + ) # wait and retry await asyncio.sleep(retries_interval, loop=self.loop) else: - return 'FAILED', str(e) + return "FAILED", str(e) - return 'COMPLETED', output + return "COMPLETED", output except (LcmException, asyncio.CancelledError): raise except Exception as e: - return 'FAIL', 'Error executing action {}: {}'.format(primitive, e) + return "FAIL", "Error executing action {}: {}".format(primitive, e) async def vca_status_refresh(self, nsr_id, nslcmop_id): """ @@ -3520,12 +4546,14 @@ class NsLcm(LcmBase): self.logger.debug("Task ns={} action={} Enter".format(nsr_id, nslcmop_id)) db_nsr = self.db.get_one("nsrs", {"_id": nsr_id}) vca_id = self.get_vca_id({}, db_nsr) - if db_nsr['_admin']['deployed']['K8s']: - for k8s_index, k8s in enumerate(db_nsr['_admin']['deployed']['K8s']): + if db_nsr["_admin"]["deployed"]["K8s"]: + for k8s_index, k8s in enumerate(db_nsr["_admin"]["deployed"]["K8s"]): cluster_uuid, kdu_instance = k8s["k8scluster-uuid"], k8s["kdu-instance"] - await self._on_update_k8s_db(cluster_uuid, kdu_instance, filter={'_id': nsr_id}, vca_id=vca_id) + await self._on_update_k8s_db( + cluster_uuid, kdu_instance, filter={"_id": nsr_id}, vca_id=vca_id + ) else: - for vca_index, _ in enumerate(db_nsr['_admin']['deployed']['VCA']): + for vca_index, _ in enumerate(db_nsr["_admin"]["deployed"]["VCA"]): table, filter = "nsrs", {"_id": nsr_id} path = "_admin.deployed.VCA.{}.".format(vca_index) await 
self._on_update_n2vc_db(table, filter, path, {}) @@ -3535,7 +4563,7 @@ class NsLcm(LcmBase): async def action(self, nsr_id, nslcmop_id): # Try to lock HA task here - task_is_locked_by_me = self.lcm_tasks.lock_HA('ns', 'nslcmops', nslcmop_id) + task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id) if not task_is_locked_by_me: return @@ -3552,13 +4580,13 @@ class NsLcm(LcmBase): try: # wait for any previous tasks in process step = "Waiting for previous operations to terminate" - await self.lcm_tasks.waitfor_related_HA('ns', 'nslcmops', nslcmop_id) + await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id) self._write_ns_status( nsr_id=nsr_id, ns_state=None, current_operation="RUNNING ACTION", - current_operation_id=nslcmop_id + current_operation_id=nslcmop_id, ) step = "Getting information from database" @@ -3572,11 +4600,15 @@ class NsLcm(LcmBase): vdu_count_index = db_nslcmop["operationParams"].get("vdu_count_index") primitive = db_nslcmop["operationParams"]["primitive"] primitive_params = db_nslcmop["operationParams"]["primitive_params"] - timeout_ns_action = db_nslcmop["operationParams"].get("timeout_ns_action", self.timeout_primitive) + timeout_ns_action = db_nslcmop["operationParams"].get( + "timeout_ns_action", self.timeout_primitive + ) if vnf_index: step = "Getting vnfr from database" - db_vnfr = self.db.get_one("vnfrs", {"member-vnf-index-ref": vnf_index, "nsr-id-ref": nsr_id}) + db_vnfr = self.db.get_one( + "vnfrs", {"member-vnf-index-ref": vnf_index, "nsr-id-ref": nsr_id} + ) step = "Getting vnfd from database" db_vnfd = self.db.get_one("vnfds", {"_id": db_vnfr["vnfd-id"]}) else: @@ -3601,7 +4633,9 @@ class NsLcm(LcmBase): else: descriptor_configuration = db_nsd.get("ns-configuration") - if descriptor_configuration and descriptor_configuration.get("config-primitive"): + if descriptor_configuration and descriptor_configuration.get( + "config-primitive" + ): for config_primitive in descriptor_configuration["config-primitive"]: if config_primitive["name"] == primitive: config_primitive_desc = config_primitive @@ -3609,23 +4643,36 @@ class NsLcm(LcmBase): if not config_primitive_desc: if not (kdu_name and primitive in ("upgrade", "rollback", "status")): - raise LcmException("Primitive {} not found at [ns|vnf|vdu]-configuration:config-primitive ". 
- format(primitive)) + raise LcmException( + "Primitive {} not found at [ns|vnf|vdu]-configuration:config-primitive ".format( + primitive + ) + ) primitive_name = primitive ee_descriptor_id = None else: - primitive_name = config_primitive_desc.get("execution-environment-primitive", primitive) - ee_descriptor_id = config_primitive_desc.get("execution-environment-ref") + primitive_name = config_primitive_desc.get( + "execution-environment-primitive", primitive + ) + ee_descriptor_id = config_primitive_desc.get( + "execution-environment-ref" + ) if vnf_index: if vdu_id: - vdur = next((x for x in db_vnfr["vdur"] if x["vdu-id-ref"] == vdu_id), None) + vdur = next( + (x for x in db_vnfr["vdur"] if x["vdu-id-ref"] == vdu_id), None + ) desc_params = parse_yaml_strings(vdur.get("additionalParams")) elif kdu_name: - kdur = next((x for x in db_vnfr["kdur"] if x["kdu-name"] == kdu_name), None) + kdur = next( + (x for x in db_vnfr["kdur"] if x["kdu-name"] == kdu_name), None + ) desc_params = parse_yaml_strings(kdur.get("additionalParams")) else: - desc_params = parse_yaml_strings(db_vnfr.get("additionalParamsForVnf")) + desc_params = parse_yaml_strings( + db_vnfr.get("additionalParamsForVnf") + ) else: desc_params = parse_yaml_strings(db_nsr.get("additionalParamsForNs")) if kdu_name and get_configuration(db_vnfd, kdu_name): @@ -3638,25 +4685,39 @@ class NsLcm(LcmBase): kdu_action = True if primitive_name in actions else False # TODO check if ns is in a proper status - if kdu_name and (primitive_name in ("upgrade", "rollback", "status") or kdu_action): + if kdu_name and ( + primitive_name in ("upgrade", "rollback", "status") or kdu_action + ): # kdur and desc_params already set from before if primitive_params: desc_params.update(primitive_params) # TODO Check if we will need something at vnf level for index, kdu in enumerate(get_iterable(nsr_deployed, "K8s")): - if kdu_name == kdu["kdu-name"] and kdu["member-vnf-index"] == vnf_index: + if ( + kdu_name == kdu["kdu-name"] + and kdu["member-vnf-index"] == vnf_index + ): break else: - raise LcmException("KDU '{}' for vnf '{}' not deployed".format(kdu_name, vnf_index)) + raise LcmException( + "KDU '{}' for vnf '{}' not deployed".format(kdu_name, vnf_index) + ) if kdu.get("k8scluster-type") not in self.k8scluster_map: - msg = "unknown k8scluster-type '{}'".format(kdu.get("k8scluster-type")) + msg = "unknown k8scluster-type '{}'".format( + kdu.get("k8scluster-type") + ) raise LcmException(msg) - db_dict = {"collection": "nsrs", - "filter": {"_id": nsr_id}, - "path": "_admin.deployed.K8s.{}".format(index)} - self.logger.debug(logging_text + "Exec k8s {} on {}.{}".format(primitive_name, vnf_index, kdu_name)) + db_dict = { + "collection": "nsrs", + "filter": {"_id": nsr_id}, + "path": "_admin.deployed.K8s.{}".format(index), + } + self.logger.debug( + logging_text + + "Exec k8s {} on {}.{}".format(primitive_name, vnf_index, kdu_name) + ) step = "Executing kdu {}".format(primitive_name) if primitive_name == "upgrade": if desc_params.get("kdu_model"): @@ -3672,18 +4733,26 @@ class NsLcm(LcmBase): self.k8scluster_map[kdu["k8scluster-type"]].upgrade( cluster_uuid=kdu.get("k8scluster-uuid"), kdu_instance=kdu.get("kdu-instance"), - atomic=True, kdu_model=kdu_model, - params=desc_params, db_dict=db_dict, - timeout=timeout_ns_action), - timeout=timeout_ns_action + 10) - self.logger.debug(logging_text + " Upgrade of kdu {} done".format(detailed_status)) + atomic=True, + kdu_model=kdu_model, + params=desc_params, + db_dict=db_dict, + timeout=timeout_ns_action, + ), + 
timeout=timeout_ns_action + 10, + ) + self.logger.debug( + logging_text + " Upgrade of kdu {} done".format(detailed_status) + ) elif primitive_name == "rollback": detailed_status = await asyncio.wait_for( self.k8scluster_map[kdu["k8scluster-type"]].rollback( cluster_uuid=kdu.get("k8scluster-uuid"), kdu_instance=kdu.get("kdu-instance"), - db_dict=db_dict), - timeout=timeout_ns_action) + db_dict=db_dict, + ), + timeout=timeout_ns_action, + ) elif primitive_name == "status": detailed_status = await asyncio.wait_for( self.k8scluster_map[kdu["k8scluster-type"]].status_kdu( @@ -3691,43 +4760,61 @@ class NsLcm(LcmBase): kdu_instance=kdu.get("kdu-instance"), vca_id=vca_id, ), - timeout=timeout_ns_action + timeout=timeout_ns_action, ) else: - kdu_instance = kdu.get("kdu-instance") or "{}-{}".format(kdu["kdu-name"], nsr_id) - params = self._map_primitive_params(config_primitive_desc, primitive_params, desc_params) + kdu_instance = kdu.get("kdu-instance") or "{}-{}".format( + kdu["kdu-name"], nsr_id + ) + params = self._map_primitive_params( + config_primitive_desc, primitive_params, desc_params + ) detailed_status = await asyncio.wait_for( self.k8scluster_map[kdu["k8scluster-type"]].exec_primitive( cluster_uuid=kdu.get("k8scluster-uuid"), kdu_instance=kdu_instance, primitive_name=primitive_name, - params=params, db_dict=db_dict, + params=params, + db_dict=db_dict, timeout=timeout_ns_action, vca_id=vca_id, ), - timeout=timeout_ns_action + timeout=timeout_ns_action, ) if detailed_status: - nslcmop_operation_state = 'COMPLETED' + nslcmop_operation_state = "COMPLETED" else: - detailed_status = '' - nslcmop_operation_state = 'FAILED' + detailed_status = "" + nslcmop_operation_state = "FAILED" else: - ee_id, vca_type = self._look_for_deployed_vca(nsr_deployed["VCA"], member_vnf_index=vnf_index, - vdu_id=vdu_id, vdu_count_index=vdu_count_index, - ee_descriptor_id=ee_descriptor_id) - for vca_index, vca_deployed in enumerate(db_nsr['_admin']['deployed']['VCA']): + ee_id, vca_type = self._look_for_deployed_vca( + nsr_deployed["VCA"], + member_vnf_index=vnf_index, + vdu_id=vdu_id, + vdu_count_index=vdu_count_index, + ee_descriptor_id=ee_descriptor_id, + ) + for vca_index, vca_deployed in enumerate( + db_nsr["_admin"]["deployed"]["VCA"] + ): if vca_deployed.get("member-vnf-index") == vnf_index: - db_dict = {"collection": "nsrs", - "filter": {"_id": nsr_id}, - "path": "_admin.deployed.VCA.{}.".format(vca_index)} + db_dict = { + "collection": "nsrs", + "filter": {"_id": nsr_id}, + "path": "_admin.deployed.VCA.{}.".format(vca_index), + } break - nslcmop_operation_state, detailed_status = await self._ns_execute_primitive( + ( + nslcmop_operation_state, + detailed_status, + ) = await self._ns_execute_primitive( ee_id, primitive=primitive_name, - primitive_params=self._map_primitive_params(config_primitive_desc, primitive_params, desc_params), + primitive_params=self._map_primitive_params( + config_primitive_desc, primitive_params, desc_params + ), timeout=timeout_ns_action, vca_type=vca_type, db_dict=db_dict, @@ -3735,61 +4822,91 @@ class NsLcm(LcmBase): ) db_nslcmop_update["detailed-status"] = detailed_status - error_description_nslcmop = detailed_status if nslcmop_operation_state == "FAILED" else "" - self.logger.debug(logging_text + " task Done with result {} {}".format(nslcmop_operation_state, - detailed_status)) + error_description_nslcmop = ( + detailed_status if nslcmop_operation_state == "FAILED" else "" + ) + self.logger.debug( + logging_text + + " task Done with result {} {}".format( + 
nslcmop_operation_state, detailed_status + ) + ) return # database update is called inside finally except (DbException, LcmException, N2VCException, K8sException) as e: self.logger.error(logging_text + "Exit Exception {}".format(e)) exc = e except asyncio.CancelledError: - self.logger.error(logging_text + "Cancelled Exception while '{}'".format(step)) + self.logger.error( + logging_text + "Cancelled Exception while '{}'".format(step) + ) exc = "Operation was cancelled" except asyncio.TimeoutError: self.logger.error(logging_text + "Timeout while '{}'".format(step)) exc = "Timeout" except Exception as e: exc = traceback.format_exc() - self.logger.critical(logging_text + "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True) + self.logger.critical( + logging_text + "Exit Exception {} {}".format(type(e).__name__, e), + exc_info=True, + ) finally: if exc: - db_nslcmop_update["detailed-status"] = detailed_status = error_description_nslcmop = \ - "FAILED {}: {}".format(step, exc) + db_nslcmop_update[ + "detailed-status" + ] = ( + detailed_status + ) = error_description_nslcmop = "FAILED {}: {}".format(step, exc) nslcmop_operation_state = "FAILED" if db_nsr: self._write_ns_status( nsr_id=nsr_id, - ns_state=db_nsr["nsState"], # TODO check if degraded. For the moment use previous status + ns_state=db_nsr[ + "nsState" + ], # TODO check if degraded. For the moment use previous status current_operation="IDLE", current_operation_id=None, # error_description=error_description_nsr, # error_detail=error_detail, - other_update=db_nsr_update + other_update=db_nsr_update, ) - self._write_op_status(op_id=nslcmop_id, stage="", error_message=error_description_nslcmop, - operation_state=nslcmop_operation_state, other_update=db_nslcmop_update) + self._write_op_status( + op_id=nslcmop_id, + stage="", + error_message=error_description_nslcmop, + operation_state=nslcmop_operation_state, + other_update=db_nslcmop_update, + ) if nslcmop_operation_state: try: - await self.msg.aiowrite("ns", "actioned", {"nsr_id": nsr_id, "nslcmop_id": nslcmop_id, - "operationState": nslcmop_operation_state}, - loop=self.loop) + await self.msg.aiowrite( + "ns", + "actioned", + { + "nsr_id": nsr_id, + "nslcmop_id": nslcmop_id, + "operationState": nslcmop_operation_state, + }, + loop=self.loop, + ) except Exception as e: - self.logger.error(logging_text + "kafka_write notification Exception {}".format(e)) + self.logger.error( + logging_text + "kafka_write notification Exception {}".format(e) + ) self.logger.debug(logging_text + "Exit") self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_action") return nslcmop_operation_state, detailed_status async def scale(self, nsr_id, nslcmop_id): # Try to lock HA task here - task_is_locked_by_me = self.lcm_tasks.lock_HA('ns', 'nslcmops', nslcmop_id) + task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id) if not task_is_locked_by_me: return logging_text = "Task ns={} scale={} ".format(nsr_id, nslcmop_id) - stage = ['', '', ''] + stage = ["", "", ""] tasks_dict_info = {} # ^ stage, step, VIM progress self.logger.debug(logging_text + "Enter") @@ -3806,12 +4923,18 @@ class NsLcm(LcmBase): try: # wait for any previous tasks in process step = "Waiting for previous operations to terminate" - await self.lcm_tasks.waitfor_related_HA('ns', 'nslcmops', nslcmop_id) - self._write_ns_status(nsr_id=nsr_id, ns_state=None, - current_operation="SCALING", current_operation_id=nslcmop_id) + await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id) + self._write_ns_status( + 
nsr_id=nsr_id, + ns_state=None, + current_operation="SCALING", + current_operation_id=nslcmop_id, + ) step = "Getting nslcmop from database" - self.logger.debug(step + " after having waited for previous tasks to be completed") + self.logger.debug( + step + " after having waited for previous tasks to be completed" + ) db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id}) step = "Getting nsr from database" @@ -3832,8 +4955,12 @@ class NsLcm(LcmBase): # vdu_name = db_nslcmop["operationParams"].get("vdu_name") ####### - vnf_index = db_nslcmop["operationParams"]["scaleVnfData"]["scaleByStepData"]["member-vnf-index"] - scaling_group = db_nslcmop["operationParams"]["scaleVnfData"]["scaleByStepData"]["scaling-group-descriptor"] + vnf_index = db_nslcmop["operationParams"]["scaleVnfData"][ + "scaleByStepData" + ]["member-vnf-index"] + scaling_group = db_nslcmop["operationParams"]["scaleVnfData"][ + "scaleByStepData" + ]["scaling-group-descriptor"] scaling_type = db_nslcmop["operationParams"]["scaleVnfData"]["scaleVnfType"] # for backward compatibility if nsr_deployed and isinstance(nsr_deployed.get("VCA"), dict): @@ -3842,7 +4969,9 @@ class NsLcm(LcmBase): self.update_db_2("nsrs", nsr_id, db_nsr_update) step = "Getting vnfr from database" - db_vnfr = self.db.get_one("vnfrs", {"member-vnf-index-ref": vnf_index, "nsr-id-ref": nsr_id}) + db_vnfr = self.db.get_one( + "vnfrs", {"member-vnf-index-ref": vnf_index, "nsr-id-ref": nsr_id} + ) vca_id = self.get_vca_id(db_vnfr, db_nsr) @@ -3853,29 +4982,41 @@ class NsLcm(LcmBase): step = "Getting scaling-group-descriptor" scaling_descriptor = find_in_list( - get_scaling_aspect( - db_vnfd - ), - lambda scale_desc: scale_desc["name"] == scaling_group + get_scaling_aspect(db_vnfd), + lambda scale_desc: scale_desc["name"] == scaling_group, ) if not scaling_descriptor: - raise LcmException("input parameter 'scaleByStepData':'scaling-group-descriptor':'{}' is not present " - "at vnfd:scaling-group-descriptor".format(scaling_group)) + raise LcmException( + "input parameter 'scaleByStepData':'scaling-group-descriptor':'{}' is not present " + "at vnfd:scaling-group-descriptor".format(scaling_group) + ) step = "Sending scale order to VIM" # TODO check if ns is in a proper status nb_scale_op = 0 if not db_nsr["_admin"].get("scaling-group"): - self.update_db_2("nsrs", nsr_id, {"_admin.scaling-group": [{"name": scaling_group, "nb-scale-op": 0}]}) + self.update_db_2( + "nsrs", + nsr_id, + { + "_admin.scaling-group": [ + {"name": scaling_group, "nb-scale-op": 0} + ] + }, + ) admin_scale_index = 0 else: - for admin_scale_index, admin_scale_info in enumerate(db_nsr["_admin"]["scaling-group"]): + for admin_scale_index, admin_scale_info in enumerate( + db_nsr["_admin"]["scaling-group"] + ): if admin_scale_info["name"] == scaling_group: nb_scale_op = admin_scale_info.get("nb-scale-op", 0) break else: # not found, set index one plus last element and add new entry with the name admin_scale_index += 1 - db_nsr_update["_admin.scaling-group.{}.name".format(admin_scale_index)] = scaling_group + db_nsr_update[ + "_admin.scaling-group.{}.name".format(admin_scale_index) + ] = scaling_group RO_scaling_info = [] VCA_scaling_info = [] vdu_scaling_info = {"scaling_group_name": scaling_group, "vdu": []} @@ -3895,17 +5036,26 @@ class NsLcm(LcmBase): for vdu_delta in delta["vdu-delta"]: vdud = get_vdu(db_vnfd, vdu_delta["id"]) vdu_index = get_vdur_index(db_vnfr, vdu_delta) - cloud_init_text = self._get_vdu_cloud_init_content(vdud, db_vnfd) + cloud_init_text = 
self._get_vdu_cloud_init_content( + vdud, db_vnfd + ) if cloud_init_text: - additional_params = self._get_vdu_additional_params(db_vnfr, vdud["id"]) or {} + additional_params = ( + self._get_vdu_additional_params(db_vnfr, vdud["id"]) + or {} + ) cloud_init_list = [] vdu_profile = get_vdu_profile(db_vnfd, vdu_delta["id"]) max_instance_count = 10 if vdu_profile and "max-number-of-instances" in vdu_profile: - max_instance_count = vdu_profile.get("max-number-of-instances", 10) - - default_instance_num = get_number_of_instances(db_vnfd, vdud["id"]) + max_instance_count = vdu_profile.get( + "max-number-of-instances", 10 + ) + + default_instance_num = get_number_of_instances( + db_vnfd, vdud["id"] + ) nb_scale_op += vdu_delta.get("number-of-instances", 1) @@ -3913,22 +5063,22 @@ class NsLcm(LcmBase): raise LcmException( "reached the limit of {} (max-instance-count) " "scaling-out operations for the " - "scaling-group-descriptor '{}'".format(nb_scale_op, scaling_group) + "scaling-group-descriptor '{}'".format( + nb_scale_op, scaling_group + ) ) for x in range(vdu_delta.get("number-of-instances", 1)): if cloud_init_text: # TODO Information of its own ip is not available because db_vnfr is not updated. additional_params["OSM"] = get_osm_params( - db_vnfr, - vdu_delta["id"], - vdu_index + x + db_vnfr, vdu_delta["id"], vdu_index + x ) cloud_init_list.append( self._parse_cloud_init( cloud_init_text, additional_params, db_vnfd["id"], - vdud["id"] + vdud["id"], ) ) VCA_scaling_info.append( @@ -3936,7 +5086,7 @@ class NsLcm(LcmBase): "osm_vdu_id": vdu_delta["id"], "member-vnf-index": vnf_index, "type": "create", - "vdu_index": vdu_index + x + "vdu_index": vdu_index + x, } ) RO_scaling_info.append( @@ -3944,17 +5094,22 @@ class NsLcm(LcmBase): "osm_vdu_id": vdu_delta["id"], "member-vnf-index": vnf_index, "type": "create", - "count": vdu_delta.get("number-of-instances", 1) + "count": vdu_delta.get("number-of-instances", 1), } ) if cloud_init_list: RO_scaling_info[-1]["cloud_init"] = cloud_init_list - vdu_scaling_info["vdu-create"][vdu_delta["id"]] = vdu_delta.get("number-of-instances", 1) + vdu_scaling_info["vdu-create"][vdu_delta["id"]] = vdu_delta.get( + "number-of-instances", 1 + ) elif scaling_type == "SCALE_IN": - if "min-instance-count" in scaling_descriptor and scaling_descriptor["min-instance-count"] is not None: + if ( + "min-instance-count" in scaling_descriptor + and scaling_descriptor["min-instance-count"] is not None + ): min_instance_count = int(scaling_descriptor["min-instance-count"]) - + vdu_scaling_info["scaling_direction"] = "IN" vdu_scaling_info["vdu-delete"] = {} deltas = scaling_descriptor.get("aspect-delta-details")["deltas"] @@ -3966,27 +5121,39 @@ class NsLcm(LcmBase): if vdu_profile and "min-number-of-instances" in vdu_profile: min_instance_count = vdu_profile["min-number-of-instances"] - default_instance_num = get_number_of_instances(db_vnfd, vdu_delta["id"]) + default_instance_num = get_number_of_instances( + db_vnfd, vdu_delta["id"] + ) nb_scale_op -= vdu_delta.get("number-of-instances", 1) if nb_scale_op + default_instance_num < min_instance_count: raise LcmException( "reached the limit of {} (min-instance-count) scaling-in operations for the " - "scaling-group-descriptor '{}'".format(nb_scale_op, scaling_group) + "scaling-group-descriptor '{}'".format( + nb_scale_op, scaling_group + ) ) - RO_scaling_info.append({"osm_vdu_id": vdu_delta["id"], "member-vnf-index": vnf_index, - "type": "delete", "count": vdu_delta.get("number-of-instances", 1), - "vdu_index": vdu_index - 1}) + 
RO_scaling_info.append( + { + "osm_vdu_id": vdu_delta["id"], + "member-vnf-index": vnf_index, + "type": "delete", + "count": vdu_delta.get("number-of-instances", 1), + "vdu_index": vdu_index - 1, + } + ) for x in range(vdu_delta.get("number-of-instances", 1)): VCA_scaling_info.append( { "osm_vdu_id": vdu_delta["id"], "member-vnf-index": vnf_index, "type": "delete", - "vdu_index": vdu_index - 1 - x + "vdu_index": vdu_index - 1 - x, } ) - vdu_scaling_info["vdu-delete"][vdu_delta["id"]] = vdu_delta.get("number-of-instances", 1) + vdu_scaling_info["vdu-delete"][vdu_delta["id"]] = vdu_delta.get( + "number-of-instances", 1 + ) # update VDU_SCALING_INFO with the VDUs to delete ip_addresses vdu_delete = copy(vdu_scaling_info.get("vdu-delete")) @@ -3994,40 +5161,57 @@ class NsLcm(LcmBase): for vdur in reversed(db_vnfr["vdur"]): if vdu_delete.get(vdur["vdu-id-ref"]): vdu_delete[vdur["vdu-id-ref"]] -= 1 - vdu_scaling_info["vdu"].append({ - "name": vdur.get("name") or vdur.get("vdu-name"), - "vdu_id": vdur["vdu-id-ref"], - "interface": [] - }) + vdu_scaling_info["vdu"].append( + { + "name": vdur.get("name") or vdur.get("vdu-name"), + "vdu_id": vdur["vdu-id-ref"], + "interface": [], + } + ) for interface in vdur["interfaces"]: - vdu_scaling_info["vdu"][-1]["interface"].append({ - "name": interface["name"], - "ip_address": interface["ip-address"], - "mac_address": interface.get("mac-address"), - }) + vdu_scaling_info["vdu"][-1]["interface"].append( + { + "name": interface["name"], + "ip_address": interface["ip-address"], + "mac_address": interface.get("mac-address"), + } + ) # vdu_delete = vdu_scaling_info.pop("vdu-delete") # PRE-SCALE BEGIN step = "Executing pre-scale vnf-config-primitive" if scaling_descriptor.get("scaling-config-action"): - for scaling_config_action in scaling_descriptor["scaling-config-action"]: - if (scaling_config_action.get("trigger") == "pre-scale-in" and scaling_type == "SCALE_IN") \ - or (scaling_config_action.get("trigger") == "pre-scale-out" and scaling_type == "SCALE_OUT"): - vnf_config_primitive = scaling_config_action["vnf-config-primitive-name-ref"] - step = db_nslcmop_update["detailed-status"] = \ - "executing pre-scale scaling-config-action '{}'".format(vnf_config_primitive) + for scaling_config_action in scaling_descriptor[ + "scaling-config-action" + ]: + if ( + scaling_config_action.get("trigger") == "pre-scale-in" + and scaling_type == "SCALE_IN" + ) or ( + scaling_config_action.get("trigger") == "pre-scale-out" + and scaling_type == "SCALE_OUT" + ): + vnf_config_primitive = scaling_config_action[ + "vnf-config-primitive-name-ref" + ] + step = db_nslcmop_update[ + "detailed-status" + ] = "executing pre-scale scaling-config-action '{}'".format( + vnf_config_primitive + ) # look for primitive - for config_primitive in (get_configuration( - db_vnfd, db_vnfd["id"] - ) or {}).get("config-primitive", ()): + for config_primitive in ( + get_configuration(db_vnfd, db_vnfd["id"]) or {} + ).get("config-primitive", ()): if config_primitive["name"] == vnf_config_primitive: break else: raise LcmException( "Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:scaling-config-action" "[vnf-config-primitive-name-ref='{}'] does not match any vnf-configuration:config-" - "primitive".format(scaling_group, vnf_config_primitive)) + "primitive".format(scaling_group, vnf_config_primitive) + ) vnfr_params = {"VDU_SCALE_INFO": vdu_scaling_info} if db_vnfr.get("additionalParamsForVnf"): @@ -4035,52 +5219,87 @@ class NsLcm(LcmBase): scale_process = "VCA" 
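
# Illustrative aside (not part of the patch): the SCALE_OUT / SCALE_IN branches
# above share one invariant -- the running scale counter plus the default
# instance count must stay within [min-number-of-instances,
# max-number-of-instances] from the VDU profile, otherwise an LcmException is
# raised. A minimal sketch of that bounds check, independent of the
# descriptor plumbing (function and parameter names are invented):

def apply_scale_op(nb_scale_op, delta, default_instances,
                   min_instances=0, max_instances=10, scale_in=False):
    """Return the updated scale counter, or raise if a bound is crossed."""
    nb_scale_op += -delta if scale_in else delta
    total = nb_scale_op + default_instances
    if not scale_in and total > max_instances:
        raise ValueError("reached the limit of max-instance-count")
    if scale_in and total < min_instances:
        raise ValueError("reached the limit of min-instance-count")
    return nb_scale_op

n = apply_scale_op(0, 2, default_instances=1, max_instances=5)  # total 3, ok
n = apply_scale_op(n, 1, default_instances=1, min_instances=1, scale_in=True)
assert n == 1
try:
    apply_scale_op(4, 2, default_instances=1, max_instances=5)  # total 7 > 5
except ValueError as e:
    print(e)
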
db_nsr_update["config-status"] = "configuring pre-scaling" - primitive_params = self._map_primitive_params(config_primitive, {}, vnfr_params) + primitive_params = self._map_primitive_params( + config_primitive, {}, vnfr_params + ) # Pre-scale retry check: Check if this sub-operation has been executed before op_index = self._check_or_add_scale_suboperation( - db_nslcmop, nslcmop_id, vnf_index, vnf_config_primitive, primitive_params, 'PRE-SCALE') + db_nslcmop, + nslcmop_id, + vnf_index, + vnf_config_primitive, + primitive_params, + "PRE-SCALE", + ) if op_index == self.SUBOPERATION_STATUS_SKIP: # Skip sub-operation - result = 'COMPLETED' - result_detail = 'Done' - self.logger.debug(logging_text + - "vnf_config_primitive={} Skipped sub-operation, result {} {}".format( - vnf_config_primitive, result, result_detail)) + result = "COMPLETED" + result_detail = "Done" + self.logger.debug( + logging_text + + "vnf_config_primitive={} Skipped sub-operation, result {} {}".format( + vnf_config_primitive, result, result_detail + ) + ) else: if op_index == self.SUBOPERATION_STATUS_NEW: # New sub-operation: Get index of this sub-operation - op_index = len(db_nslcmop.get('_admin', {}).get('operations')) - 1 - self.logger.debug(logging_text + "vnf_config_primitive={} New sub-operation". - format(vnf_config_primitive)) + op_index = ( + len(db_nslcmop.get("_admin", {}).get("operations")) + - 1 + ) + self.logger.debug( + logging_text + + "vnf_config_primitive={} New sub-operation".format( + vnf_config_primitive + ) + ) else: # retry: Get registered params for this existing sub-operation - op = db_nslcmop.get('_admin', {}).get('operations', [])[op_index] - vnf_index = op.get('member_vnf_index') - vnf_config_primitive = op.get('primitive') - primitive_params = op.get('primitive_params') - self.logger.debug(logging_text + "vnf_config_primitive={} Sub-operation retry". 
- format(vnf_config_primitive)) + op = db_nslcmop.get("_admin", {}).get("operations", [])[ + op_index + ] + vnf_index = op.get("member_vnf_index") + vnf_config_primitive = op.get("primitive") + primitive_params = op.get("primitive_params") + self.logger.debug( + logging_text + + "vnf_config_primitive={} Sub-operation retry".format( + vnf_config_primitive + ) + ) # Execute the primitive, either with new (first-time) or registered (reintent) args - ee_descriptor_id = config_primitive.get("execution-environment-ref") - primitive_name = config_primitive.get("execution-environment-primitive", - vnf_config_primitive) - ee_id, vca_type = self._look_for_deployed_vca(nsr_deployed["VCA"], - member_vnf_index=vnf_index, - vdu_id=None, - vdu_count_index=None, - ee_descriptor_id=ee_descriptor_id) + ee_descriptor_id = config_primitive.get( + "execution-environment-ref" + ) + primitive_name = config_primitive.get( + "execution-environment-primitive", vnf_config_primitive + ) + ee_id, vca_type = self._look_for_deployed_vca( + nsr_deployed["VCA"], + member_vnf_index=vnf_index, + vdu_id=None, + vdu_count_index=None, + ee_descriptor_id=ee_descriptor_id, + ) result, result_detail = await self._ns_execute_primitive( - ee_id, primitive_name, + ee_id, + primitive_name, primitive_params, vca_type=vca_type, vca_id=vca_id, ) - self.logger.debug(logging_text + "vnf_config_primitive={} Done with result {} {}".format( - vnf_config_primitive, result, result_detail)) + self.logger.debug( + logging_text + + "vnf_config_primitive={} Done with result {} {}".format( + vnf_config_primitive, result, result_detail + ) + ) # Update operationState = COMPLETED | FAILED self._update_suboperation_status( - db_nslcmop, op_index, result, result_detail) + db_nslcmop, op_index, result, result_detail + ) if result == "FAILED": raise LcmException(result_detail) @@ -4088,41 +5307,60 @@ class NsLcm(LcmBase): scale_process = None # PRE-SCALE END - db_nsr_update["_admin.scaling-group.{}.nb-scale-op".format(admin_scale_index)] = nb_scale_op - db_nsr_update["_admin.scaling-group.{}.time".format(admin_scale_index)] = time() + db_nsr_update[ + "_admin.scaling-group.{}.nb-scale-op".format(admin_scale_index) + ] = nb_scale_op + db_nsr_update[ + "_admin.scaling-group.{}.time".format(admin_scale_index) + ] = time() # SCALE-IN VCA - BEGIN if VCA_scaling_info: - step = db_nslcmop_update["detailed-status"] = \ - "Deleting the execution environments" + step = db_nslcmop_update[ + "detailed-status" + ] = "Deleting the execution environments" scale_process = "VCA" for vdu_info in VCA_scaling_info: if vdu_info["type"] == "delete": member_vnf_index = str(vdu_info["member-vnf-index"]) - self.logger.debug(logging_text + "vdu info: {}".format(vdu_info)) + self.logger.debug( + logging_text + "vdu info: {}".format(vdu_info) + ) vdu_id = vdu_info["osm_vdu_id"] vdu_index = int(vdu_info["vdu_index"]) - stage[1] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format( - member_vnf_index, vdu_id, vdu_index) - stage[2] = step = "Scaling in VCA" - self._write_op_status( - op_id=nslcmop_id, - stage=stage + stage[ + 1 + ] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format( + member_vnf_index, vdu_id, vdu_index ) + stage[2] = step = "Scaling in VCA" + self._write_op_status(op_id=nslcmop_id, stage=stage) vca_update = db_nsr["_admin"]["deployed"]["VCA"] config_update = db_nsr["configurationStatus"] for vca_index, vca in enumerate(vca_update): - if (vca or vca.get("ee_id")) and vca["member-vnf-index"] == member_vnf_index and \ - vca["vdu_count_index"] 
== vdu_index: + if ( + (vca or vca.get("ee_id")) + and vca["member-vnf-index"] == member_vnf_index + and vca["vdu_count_index"] == vdu_index + ): if vca.get("vdu_id"): - config_descriptor = get_configuration(db_vnfd, vca.get("vdu_id")) + config_descriptor = get_configuration( + db_vnfd, vca.get("vdu_id") + ) elif vca.get("kdu_name"): - config_descriptor = get_configuration(db_vnfd, vca.get("kdu_name")) + config_descriptor = get_configuration( + db_vnfd, vca.get("kdu_name") + ) else: - config_descriptor = get_configuration(db_vnfd, db_vnfd["id"]) - operation_params = db_nslcmop.get("operationParams") or {} - exec_terminate_primitives = (not operation_params.get("skip_terminate_primitives") and - vca.get("needed_terminate")) + config_descriptor = get_configuration( + db_vnfd, db_vnfd["id"] + ) + operation_params = ( + db_nslcmop.get("operationParams") or {} + ) + exec_terminate_primitives = not operation_params.get( + "skip_terminate_primitives" + ) and vca.get("needed_terminate") task = asyncio.ensure_future( asyncio.wait_for( self.destroy_N2VC( @@ -4136,29 +5374,42 @@ class NsLcm(LcmBase): scaling_in=True, vca_id=vca_id, ), - timeout=self.timeout_charm_delete + timeout=self.timeout_charm_delete, ) ) - tasks_dict_info[task] = "Terminating VCA {}".format(vca.get("ee_id")) + tasks_dict_info[task] = "Terminating VCA {}".format( + vca.get("ee_id") + ) del vca_update[vca_index] del config_update[vca_index] # wait for pending tasks of terminate primitives if tasks_dict_info: - self.logger.debug(logging_text + - 'Waiting for tasks {}'.format(list(tasks_dict_info.keys()))) - error_list = await self._wait_for_tasks(logging_text, tasks_dict_info, - min(self.timeout_charm_delete, - self.timeout_ns_terminate), - stage, nslcmop_id) + self.logger.debug( + logging_text + + "Waiting for tasks {}".format( + list(tasks_dict_info.keys()) + ) + ) + error_list = await self._wait_for_tasks( + logging_text, + tasks_dict_info, + min( + self.timeout_charm_delete, self.timeout_ns_terminate + ), + stage, + nslcmop_id, + ) tasks_dict_info.clear() if error_list: raise LcmException("; ".join(error_list)) db_vca_and_config_update = { "_admin.deployed.VCA": vca_update, - "configurationStatus": config_update + "configurationStatus": config_update, } - self.update_db_2("nsrs", db_nsr["_id"], db_vca_and_config_update) + self.update_db_2( + "nsrs", db_nsr["_id"], db_vca_and_config_update + ) scale_process = None # SCALE-IN VCA - END @@ -4166,7 +5417,14 @@ class NsLcm(LcmBase): if RO_scaling_info: scale_process = "RO" if self.ro_config.get("ng"): - await self._scale_ng_ro(logging_text, db_nsr, db_nslcmop, db_vnfr, vdu_scaling_info, stage) + await self._scale_ng_ro( + logging_text, + db_nsr, + db_nslcmop, + db_vnfr, + vdu_scaling_info, + stage, + ) vdu_scaling_info.pop("vdu-create", None) vdu_scaling_info.pop("vdu-delete", None) @@ -4177,25 +5435,33 @@ class NsLcm(LcmBase): # SCALE-UP VCA - BEGIN if VCA_scaling_info: - step = db_nslcmop_update["detailed-status"] = \ - "Creating new execution environments" + step = db_nslcmop_update[ + "detailed-status" + ] = "Creating new execution environments" scale_process = "VCA" for vdu_info in VCA_scaling_info: if vdu_info["type"] == "create": member_vnf_index = str(vdu_info["member-vnf-index"]) - self.logger.debug(logging_text + "vdu info: {}".format(vdu_info)) + self.logger.debug( + logging_text + "vdu info: {}".format(vdu_info) + ) vnfd_id = db_vnfr["vnfd-ref"] vdu_index = int(vdu_info["vdu_index"]) deploy_params = {"OSM": get_osm_params(db_vnfr)} if 
db_vnfr.get("additionalParamsForVnf"):
-                            deploy_params.update(parse_yaml_strings(db_vnfr["additionalParamsForVnf"].copy()))
+                            deploy_params.update(
+                                parse_yaml_strings(
+                                    db_vnfr["additionalParamsForVnf"].copy()
+                                )
+                            )
                         descriptor_config = get_configuration(db_vnfd, db_vnfd["id"])
                         if descriptor_config:
                             vdu_id = None
                             vdu_name = None
                             kdu_name = None
                             self._deploy_n2vc(
-                                logging_text=logging_text + "member_vnf_index={} ".format(member_vnf_index),
+                                logging_text=logging_text
+                                + "member_vnf_index={} ".format(member_vnf_index),
                                 db_nsr=db_nsr,
                                 db_vnfr=db_vnfr,
                                 nslcmop_id=nslcmop_id,
@@ -4211,29 +5477,37 @@ class NsLcm(LcmBase):
                                 descriptor_config=descriptor_config,
                                 base_folder=base_folder,
                                 task_instantiation_info=tasks_dict_info,
-                                stage=stage
+                                stage=stage,
                             )
                         vdu_id = vdu_info["osm_vdu_id"]
-                        vdur = find_in_list(db_vnfr["vdur"], lambda vdu: vdu["vdu-id-ref"] == vdu_id)
+                        vdur = find_in_list(
+                            db_vnfr["vdur"], lambda vdu: vdu["vdu-id-ref"] == vdu_id
+                        )
                         descriptor_config = get_configuration(db_vnfd, vdu_id)
                         if vdur.get("additionalParams"):
-                            deploy_params_vdu = parse_yaml_strings(vdur["additionalParams"])
+                            deploy_params_vdu = parse_yaml_strings(
+                                vdur["additionalParams"]
+                            )
                         else:
                             deploy_params_vdu = deploy_params
-                        deploy_params_vdu["OSM"] = get_osm_params(db_vnfr, vdu_id, vdu_count_index=vdu_index)
+                        deploy_params_vdu["OSM"] = get_osm_params(
+                            db_vnfr, vdu_id, vdu_count_index=vdu_index
+                        )
                         if descriptor_config:
                             vdu_name = None
                             kdu_name = None
-                            stage[1] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
-                                member_vnf_index, vdu_id, vdu_index)
-                            stage[2] = step = "Scaling out VCA"
-                            self._write_op_status(
-                                op_id=nslcmop_id,
-                                stage=stage
+                            stage[
+                                1
+                            ] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
+                                member_vnf_index, vdu_id, vdu_index
                             )
+                            stage[2] = step = "Scaling out VCA"
+                            self._write_op_status(op_id=nslcmop_id, stage=stage)
                             self._deploy_n2vc(
-                                logging_text=logging_text + "member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
-                                    member_vnf_index, vdu_id, vdu_index),
+                                logging_text=logging_text
+                                + "member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
+                                    member_vnf_index, vdu_id, vdu_index
+                                ),
                                 db_nsr=db_nsr,
                                 db_vnfr=db_vnfr,
                                 nslcmop_id=nslcmop_id,
@@ -4249,7 +5523,7 @@ class NsLcm(LcmBase):
                                 descriptor_config=descriptor_config,
                                 base_folder=base_folder,
                                 task_instantiation_info=tasks_dict_info,
-                                stage=stage
+                                stage=stage,
                             )
             # SCALE-UP VCA - END
             scale_process = None
@@ -4258,12 +5532,24 @@ class NsLcm(LcmBase):
             # execute primitive service POST-SCALING
             step = "Executing post-scale vnf-config-primitive"
             if scaling_descriptor.get("scaling-config-action"):
-                for scaling_config_action in scaling_descriptor["scaling-config-action"]:
-                    if (scaling_config_action.get("trigger") == "post-scale-in" and scaling_type == "SCALE_IN") \
-                            or (scaling_config_action.get("trigger") == "post-scale-out" and scaling_type == "SCALE_OUT"):
-                        vnf_config_primitive = scaling_config_action["vnf-config-primitive-name-ref"]
-                        step = db_nslcmop_update["detailed-status"] = \
-                            "executing post-scale scaling-config-action '{}'".format(vnf_config_primitive)
+                for scaling_config_action in scaling_descriptor[
+                    "scaling-config-action"
+                ]:
+                    if (
+                        scaling_config_action.get("trigger") == "post-scale-in"
+                        and scaling_type == "SCALE_IN"
+                    ) or (
+                        scaling_config_action.get("trigger") == "post-scale-out"
+                        and scaling_type == "SCALE_OUT"
+                    ):
+                        vnf_config_primitive = scaling_config_action[
+                            "vnf-config-primitive-name-ref"
+                        ]
+                        step = db_nslcmop_update[
+                            "detailed-status"
+                        ] = "executing post-scale scaling-config-action '{}'".format(
+                            vnf_config_primitive
+                        )

                         vnfr_params = {"VDU_SCALE_INFO": vdu_scaling_info}
                         if db_vnfr.get("additionalParamsForVnf"):
@@ -4279,44 +5565,76 @@ class NsLcm(LcmBase):
                             raise LcmException(
                                 "Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:scaling-config-"
                                 "action[vnf-config-primitive-name-ref='{}'] does not match any vnf-configuration:"
-                                "config-primitive".format(scaling_group, vnf_config_primitive))
+                                "config-primitive".format(
+                                    scaling_group, vnf_config_primitive
+                                )
+                            )
                         scale_process = "VCA"
                         db_nsr_update["config-status"] = "configuring post-scaling"
-                        primitive_params = self._map_primitive_params(config_primitive, {}, vnfr_params)
+                        primitive_params = self._map_primitive_params(
+                            config_primitive, {}, vnfr_params
+                        )

                         # Post-scale retry check: Check if this sub-operation has been executed before
                         op_index = self._check_or_add_scale_suboperation(
-                            db_nslcmop, nslcmop_id, vnf_index, vnf_config_primitive, primitive_params, 'POST-SCALE')
+                            db_nslcmop,
+                            nslcmop_id,
+                            vnf_index,
+                            vnf_config_primitive,
+                            primitive_params,
+                            "POST-SCALE",
+                        )
                         if op_index == self.SUBOPERATION_STATUS_SKIP:
                             # Skip sub-operation
-                            result = 'COMPLETED'
-                            result_detail = 'Done'
-                            self.logger.debug(logging_text +
-                                              "vnf_config_primitive={} Skipped sub-operation, result {} {}".
-                                              format(vnf_config_primitive, result, result_detail))
+                            result = "COMPLETED"
+                            result_detail = "Done"
+                            self.logger.debug(
+                                logging_text
+                                + "vnf_config_primitive={} Skipped sub-operation, result {} {}".format(
+                                    vnf_config_primitive, result, result_detail
+                                )
+                            )
                         else:
                             if op_index == self.SUBOPERATION_STATUS_NEW:
                                 # New sub-operation: Get index of this sub-operation
-                                op_index = len(db_nslcmop.get('_admin', {}).get('operations')) - 1
-                                self.logger.debug(logging_text + "vnf_config_primitive={} New sub-operation".
-                                                  format(vnf_config_primitive))
+                                op_index = (
+                                    len(db_nslcmop.get("_admin", {}).get("operations"))
+                                    - 1
+                                )
+                                self.logger.debug(
+                                    logging_text
+                                    + "vnf_config_primitive={} New sub-operation".format(
+                                        vnf_config_primitive
+                                    )
+                                )
                             else:
                                 # retry: Get registered params for this existing sub-operation
-                                op = db_nslcmop.get('_admin', {}).get('operations', [])[op_index]
-                                vnf_index = op.get('member_vnf_index')
-                                vnf_config_primitive = op.get('primitive')
-                                primitive_params = op.get('primitive_params')
-                                self.logger.debug(logging_text + "vnf_config_primitive={} Sub-operation retry".
- format(vnf_config_primitive)) + op = db_nslcmop.get("_admin", {}).get("operations", [])[ + op_index + ] + vnf_index = op.get("member_vnf_index") + vnf_config_primitive = op.get("primitive") + primitive_params = op.get("primitive_params") + self.logger.debug( + logging_text + + "vnf_config_primitive={} Sub-operation retry".format( + vnf_config_primitive + ) + ) # Execute the primitive, either with new (first-time) or registered (reintent) args - ee_descriptor_id = config_primitive.get("execution-environment-ref") - primitive_name = config_primitive.get("execution-environment-primitive", - vnf_config_primitive) - ee_id, vca_type = self._look_for_deployed_vca(nsr_deployed["VCA"], - member_vnf_index=vnf_index, - vdu_id=None, - vdu_count_index=None, - ee_descriptor_id=ee_descriptor_id) + ee_descriptor_id = config_primitive.get( + "execution-environment-ref" + ) + primitive_name = config_primitive.get( + "execution-environment-primitive", vnf_config_primitive + ) + ee_id, vca_type = self._look_for_deployed_vca( + nsr_deployed["VCA"], + member_vnf_index=vnf_index, + vdu_id=None, + vdu_count_index=None, + ee_descriptor_id=ee_descriptor_id, + ) result, result_detail = await self._ns_execute_primitive( ee_id, primitive_name, @@ -4324,11 +5642,16 @@ class NsLcm(LcmBase): vca_type=vca_type, vca_id=vca_id, ) - self.logger.debug(logging_text + "vnf_config_primitive={} Done with result {} {}".format( - vnf_config_primitive, result, result_detail)) + self.logger.debug( + logging_text + + "vnf_config_primitive={} Done with result {} {}".format( + vnf_config_primitive, result, result_detail + ) + ) # Update operationState = COMPLETED | FAILED self._update_suboperation_status( - db_nslcmop, op_index, result, result_detail) + db_nslcmop, op_index, result, result_detail + ) if result == "FAILED": raise LcmException(result_detail) @@ -4336,29 +5659,57 @@ class NsLcm(LcmBase): scale_process = None # POST-SCALE END - db_nsr_update["detailed-status"] = "" # "scaled {} {}".format(scaling_group, scaling_type) - db_nsr_update["operational-status"] = "running" if old_operational_status == "failed" \ + db_nsr_update[ + "detailed-status" + ] = "" # "scaled {} {}".format(scaling_group, scaling_type) + db_nsr_update["operational-status"] = ( + "running" + if old_operational_status == "failed" else old_operational_status + ) db_nsr_update["config-status"] = old_config_status return - except (ROclient.ROClientException, DbException, LcmException, NgRoException) as e: + except ( + ROclient.ROClientException, + DbException, + LcmException, + NgRoException, + ) as e: self.logger.error(logging_text + "Exit Exception {}".format(e)) exc = e except asyncio.CancelledError: - self.logger.error(logging_text + "Cancelled Exception while '{}'".format(step)) + self.logger.error( + logging_text + "Cancelled Exception while '{}'".format(step) + ) exc = "Operation was cancelled" except Exception as e: exc = traceback.format_exc() - self.logger.critical(logging_text + "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True) + self.logger.critical( + logging_text + "Exit Exception {} {}".format(type(e).__name__, e), + exc_info=True, + ) finally: - self._write_ns_status(nsr_id=nsr_id, ns_state=None, current_operation="IDLE", current_operation_id=None) + self._write_ns_status( + nsr_id=nsr_id, + ns_state=None, + current_operation="IDLE", + current_operation_id=None, + ) if tasks_dict_info: stage[1] = "Waiting for instantiate pending tasks." 
self.logger.debug(logging_text + stage[1]) - exc = await self._wait_for_tasks(logging_text, tasks_dict_info, self.timeout_ns_deploy, - stage, nslcmop_id, nsr_id=nsr_id) + exc = await self._wait_for_tasks( + logging_text, + tasks_dict_info, + self.timeout_ns_deploy, + stage, + nslcmop_id, + nsr_id=nsr_id, + ) if exc: - db_nslcmop_update["detailed-status"] = error_description_nslcmop = "FAILED {}: {}".format(step, exc) + db_nslcmop_update[ + "detailed-status" + ] = error_description_nslcmop = "FAILED {}: {}".format(step, exc) nslcmop_operation_state = "FAILED" if db_nsr: db_nsr_update["operational-status"] = old_operational_status @@ -4369,29 +5720,50 @@ class NsLcm(LcmBase): db_nsr_update["config-status"] = "failed" if "RO" in scale_process: db_nsr_update["operational-status"] = "failed" - db_nsr_update["detailed-status"] = "FAILED scaling nslcmop={} {}: {}".format(nslcmop_id, step, - exc) + db_nsr_update[ + "detailed-status" + ] = "FAILED scaling nslcmop={} {}: {}".format( + nslcmop_id, step, exc + ) else: error_description_nslcmop = None nslcmop_operation_state = "COMPLETED" db_nslcmop_update["detailed-status"] = "Done" - self._write_op_status(op_id=nslcmop_id, stage="", error_message=error_description_nslcmop, - operation_state=nslcmop_operation_state, other_update=db_nslcmop_update) + self._write_op_status( + op_id=nslcmop_id, + stage="", + error_message=error_description_nslcmop, + operation_state=nslcmop_operation_state, + other_update=db_nslcmop_update, + ) if db_nsr: - self._write_ns_status(nsr_id=nsr_id, ns_state=None, current_operation="IDLE", - current_operation_id=None, other_update=db_nsr_update) + self._write_ns_status( + nsr_id=nsr_id, + ns_state=None, + current_operation="IDLE", + current_operation_id=None, + other_update=db_nsr_update, + ) if nslcmop_operation_state: try: - msg = {"nsr_id": nsr_id, "nslcmop_id": nslcmop_id, "operationState": nslcmop_operation_state} + msg = { + "nsr_id": nsr_id, + "nslcmop_id": nslcmop_id, + "operationState": nslcmop_operation_state, + } await self.msg.aiowrite("ns", "scaled", msg, loop=self.loop) except Exception as e: - self.logger.error(logging_text + "kafka_write notification Exception {}".format(e)) + self.logger.error( + logging_text + "kafka_write notification Exception {}".format(e) + ) self.logger.debug(logging_text + "Exit") self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_scale") - async def _scale_ng_ro(self, logging_text, db_nsr, db_nslcmop, db_vnfr, vdu_scaling_info, stage): + async def _scale_ng_ro( + self, logging_text, db_nsr, db_nslcmop, db_vnfr, vdu_scaling_info, stage + ): nsr_id = db_nslcmop["nsInstanceId"] db_nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]}) db_vnfrs = {} @@ -4410,29 +5782,54 @@ class NsLcm(LcmBase): db_vnfds.append(vnfd) n2vc_key = self.n2vc.get_public_key() n2vc_key_list = [n2vc_key] - self.scale_vnfr(db_vnfr, vdu_scaling_info.get("vdu-create"), vdu_scaling_info.get("vdu-delete"), - mark_delete=True) + self.scale_vnfr( + db_vnfr, + vdu_scaling_info.get("vdu-create"), + vdu_scaling_info.get("vdu-delete"), + mark_delete=True, + ) # db_vnfr has been updated, update db_vnfrs to use it db_vnfrs[db_vnfr["member-vnf-index-ref"]] = db_vnfr - await self._instantiate_ng_ro(logging_text, nsr_id, db_nsd, db_nsr, db_nslcmop, db_vnfrs, - db_vnfds, n2vc_key_list, stage=stage, start_deploy=time(), - timeout_ns_deploy=self.timeout_ns_deploy) + await self._instantiate_ng_ro( + logging_text, + nsr_id, + db_nsd, + db_nsr, + db_nslcmop, + db_vnfrs, + db_vnfds, + n2vc_key_list, + stage=stage, + 
start_deploy=time(), + timeout_ns_deploy=self.timeout_ns_deploy, + ) if vdu_scaling_info.get("vdu-delete"): - self.scale_vnfr(db_vnfr, None, vdu_scaling_info["vdu-delete"], mark_delete=False) + self.scale_vnfr( + db_vnfr, None, vdu_scaling_info["vdu-delete"], mark_delete=False + ) - async def add_prometheus_metrics(self, ee_id, artifact_path, ee_config_descriptor, vnfr_id, nsr_id, target_ip): + async def add_prometheus_metrics( + self, ee_id, artifact_path, ee_config_descriptor, vnfr_id, nsr_id, target_ip + ): if not self.prometheus: return # look if exist a file called 'prometheus*.j2' and artifact_content = self.fs.dir_ls(artifact_path) - job_file = next((f for f in artifact_content if f.startswith("prometheus") and f.endswith(".j2")), None) + job_file = next( + ( + f + for f in artifact_content + if f.startswith("prometheus") and f.endswith(".j2") + ), + None, + ) if not job_file: return with self.fs.file_open((artifact_path, job_file), "r") as f: job_data = f.read() # TODO get_service - _, _, service = ee_id.partition(".") # remove prefix "namespace." + _, _, service = ee_id.partition(".") # remove prefix "namespace." host_name = "{}-{}".format(service, ee_config_descriptor["metric-service"]) host_port = "80" vnfr_id = vnfr_id.replace("-", "") @@ -4445,7 +5842,10 @@ class NsLcm(LcmBase): job_list = self.prometheus.parse_job(job_data, variables) # ensure job_name is using the vnfr_id. Adding the metadata nsr_id for job in job_list: - if not isinstance(job.get("job_name"), str) or vnfr_id not in job["job_name"]: + if ( + not isinstance(job.get("job_name"), str) + or vnfr_id not in job["job_name"] + ): job["job_name"] = vnfr_id + "_" + str(randint(1, 10000)) job["nsr_id"] = nsr_id job_dict = {jl["job_name"]: jl for jl in job_list} diff --git a/osm_lcm/prometheus.py b/osm_lcm/prometheus.py index 5afa496..6517d0b 100644 --- a/osm_lcm/prometheus.py +++ b/osm_lcm/prometheus.py @@ -36,14 +36,17 @@ initial_prometheus_data = { "locked_by": None, "modified": 1593445184, # 2020-06-29 "created": 1593445184, - "version": "1.0" # to allow future version updates + "version": "1.0", # to allow future version updates }, - 'scrape_configs': { # Dictionary at database. Converted to list before sending to prometheus - 'mon_exporter': {'static_configs': [{'targets': ['mon:8000']}], 'job_name': 'mon_exporter'}, + "scrape_configs": { # Dictionary at database. Converted to list before sending to prometheus + "mon_exporter": { + "static_configs": [{"targets": ["mon:8000"]}], + "job_name": "mon_exporter", + }, }, - 'global': {'evaluation_interval': '15s', 'scrape_interval': '15s'}, - 'rule_files': None, - 'alerting': {'alertmanagers': [{'static_configs': [{'targets': None}]}]} + "global": {"evaluation_interval": "15s", "scrape_interval": "15s"}, + "rule_files": None, + "alerting": {"alertmanagers": [{"static_configs": [{"targets": None}]}]}, } @@ -74,15 +77,20 @@ class Prometheus: return yaml.safe_load(job_parsed) except (TemplateError, TemplateNotFound, TemplateSyntaxError) as e: # TODO yaml exceptions - raise LcmException("Error parsing Jinja2 to prometheus job. job_data={}, variables={}. Error={}".format( - job_data, variables, e)) + raise LcmException( + "Error parsing Jinja2 to prometheus job. job_data={}, variables={}. 
Error={}".format(
+                    job_data, variables, e
+                )
+            )

     async def start(self):
         for retry in range(4):
             try:
                 # self.logger("Starting prometheus ")
                 # read from database
-                prometheus_data = self.db.get_one("admin", {"_id": "prometheus"}, fail_on_empty=False)
+                prometheus_data = self.db.get_one(
+                    "admin", {"_id": "prometheus"}, fail_on_empty=False
+                )
                 if not prometheus_data:
                     self.logger.info("Init db.admin.prometheus content")
                     self.db.create("admin", initial_prometheus_data)
@@ -92,7 +100,11 @@ class Prometheus:
                 return
             except DbException as e:
                 if retry == 3:
-                    raise LcmException("Max retries trying to init prometheus configuration: {}".format(e))
+                    raise LcmException(
+                        "Max retries trying to init prometheus configuration: {}".format(
+                            e
+                        )
+                    )
                 await asyncio.sleep(5, loop=self.loop)

     async def update(self, add_jobs: dict = None, remove_jobs: list = None) -> bool:
@@ -110,33 +122,50 @@ class Prometheus:
             # lock database
             now = time()
             if not self.db.set_one(
-                    "admin",
-                    q_filter={"_id": "prometheus", "_admin.locked_at.lt": now - self.PROMETHEUS_LOCKED_TIME},
-                    update_dict={"_admin.locked_at": now, "_admin.locked_by": self.worker_id},
-                    fail_on_empty=False):
+                "admin",
+                q_filter={
+                    "_id": "prometheus",
+                    "_admin.locked_at.lt": now - self.PROMETHEUS_LOCKED_TIME,
+                },
+                update_dict={
+                    "_admin.locked_at": now,
+                    "_admin.locked_by": self.worker_id,
+                },
+                fail_on_empty=False,
+            ):
                 continue
             # read database
             prometheus_data = self.db.get_one("admin", {"_id": "prometheus"})
-            update_dict = {"_admin.locked_at": 0,
-                           "_admin.locked_by": None}
+            update_dict = {"_admin.locked_at": 0, "_admin.locked_by": None}
             # Make changes from prometheus_incremental
             push_dict = pull_dict = None
             if add_jobs or remove_jobs:
                 log_text_list = []
                 if add_jobs:
-                    log_text_list.append("adding jobs: {}".format(list(add_jobs.keys())))
+                    log_text_list.append(
+                        "adding jobs: {}".format(list(add_jobs.keys()))
+                    )
                     prometheus_data["scrape_configs"].update(add_jobs)
-                    push_dict = {"scrape_configs." + job_id: job_data for job_id, job_data in add_jobs.items()}
+                    push_dict = {
+                        "scrape_configs." + job_id: job_data
+                        for job_id, job_data in add_jobs.items()
+                    }
                 elif remove_jobs:
                     log_text_list.append("removing jobs: {}".format(list(remove_jobs)))
                     for job_id in remove_jobs:
                         prometheus_data["scrape_configs"].pop(job_id, None)
-                    pull_dict = {"scrape_configs." + job_id: None for job_id in remove_jobs}
+                    pull_dict = {
+                        "scrape_configs." + job_id: None for job_id in remove_jobs
+                    }
                 self.logger.debug("Updating. " + ". ".join(log_text_list))
             if not await self.send_data(prometheus_data):
-                self.logger.error("Cannot update add_jobs: {}. remove_jobs: {}".format(add_jobs, remove_jobs))
+                self.logger.error(
+                    "Cannot update add_jobs: {}. remove_jobs: {}".format(
+                        add_jobs, remove_jobs
+                    )
+                )
                 push_dict = pull_dict = None
                 result = False
@@ -146,8 +175,16 @@ class Prometheus:
             if push_dict or pull_dict:
                 update_dict["_admin.modified_at"] = now
             if not self.db.set_one(
-                    "admin", {"_id": "prometheus", "_admin.locked_at": now, "_admin.locked_by": self.worker_id},
-                    update_dict=update_dict, unset=pull_dict, fail_on_empty=False):
+                "admin",
+                {
+                    "_id": "prometheus",
+                    "_admin.locked_at": now,
+                    "_admin.locked_by": self.worker_id,
+                },
+                update_dict=update_dict,
+                unset=pull_dict,
+                fail_on_empty=False,
+            ):
                 continue
             return result
         raise LcmException("Cannot update prometheus database. Reached max retries")
@@ -192,7 +229,9 @@ class Prometheus:
                 restore_backup = False
             return True
         except Exception as e:
-            self.logger.error("Error updating configuration url={}: {}".format(self.server, e))
+            self.logger.error(
+                "Error updating configuration url={}: {}".format(self.server, e)
+            )
             return False
         finally:
             if restore_backup:
@@ -205,17 +244,25 @@ class Prometheus:
         try:
             # self.logger.debug("Comparing current_config='{}' with expected_config='{}'".format(current_config,
             #                   expected_config))
-            current_config_yaml = yaml.safe_load(current_config['data']['yaml'])
-            current_jobs = [j["job_name"] for j in current_config_yaml["scrape_configs"]]
+            current_config_yaml = yaml.safe_load(current_config["data"]["yaml"])
+            current_jobs = [
+                j["job_name"] for j in current_config_yaml["scrape_configs"]
+            ]
             expected_jobs = [j["job_name"] for j in expected_config["scrape_configs"]]
             if current_jobs == expected_jobs:
                 return True
             else:
-                self.logger.error("Not all jobs have been loaded. Target jobs: {} Loaded jobs: {}".format(
-                    expected_jobs, current_jobs))
+                self.logger.error(
+                    "Not all jobs have been loaded. Target jobs: {} Loaded jobs: {}".format(
+                        expected_jobs, current_jobs
+                    )
+                )
                 return False
         except Exception as e:
-            self.logger.error("Invalid obtained status from server. Error: '{}'. Obtained data: '{}'".format(
-                e, current_config))
+            self.logger.error(
+                "Invalid obtained status from server. Error: '{}'. Obtained data: '{}'".format(
+                    e, current_config
+                )
+            )
             # if format is not understood, cannot be compared, assume it is ok
             return True
diff --git a/osm_lcm/tests/test_db_descriptors.py b/osm_lcm/tests/test_db_descriptors.py
index 9decd4e..a42449b 100644
--- a/osm_lcm/tests/test_db_descriptors.py
+++ b/osm_lcm/tests/test_db_descriptors.py
@@ -1791,5 +1791,5 @@ test_ids = {
         "ns": "0bcb701c-ee4d-41ab-8ee6-f4156f7f114d",
         "instantiate": "cf3aa178-7640-4174-b921-2330e6f2aad6",
         "terminate": None,
-    }
+    },
 }
diff --git a/osm_lcm/tests/test_lcm_helm_conn.py b/osm_lcm/tests/test_lcm_helm_conn.py
index 47838b3..89ca891 100644
--- a/osm_lcm/tests/test_lcm_helm_conn.py
+++ b/osm_lcm/tests/test_lcm_helm_conn.py
@@ -34,20 +34,13 @@ class TestLcmHelmConn(asynctest.TestCase):
     async def setUp(self):
         Database.instance = None
-        self.db = Mock(Database({
-            "database": {
-                "driver": "memory"
-            }
-        }).instance.db)
+        self.db = Mock(Database({"database": {"driver": "memory"}}).instance.db)
         Database().instance.db = self.db

         Filesystem.instance = None
-        self.fs = asynctest.Mock(Filesystem({
-            "storage": {
-                "driver": "local",
-                "path": "/"
-            }
-        }).instance.fs)
+        self.fs = asynctest.Mock(
+            Filesystem({"storage": {"driver": "local", "path": "/"}}).instance.fs
+        )
         Filesystem.instance.fs = self.fs
         self.fs.path = "/"
@@ -55,11 +48,15 @@ class TestLcmHelmConn(asynctest.TestCase):
         vca_config = {
             "helmpath": "/usr/local/bin/helm",
             "helm3path": "/usr/local/bin/helm3",
-            "kubectlpath": "/usr/bin/kubectl"
+            "kubectlpath": "/usr/bin/kubectl",
         }
         lcm_helm_conn.K8sHelmConnector = asynctest.Mock(lcm_helm_conn.K8sHelmConnector)
-        lcm_helm_conn.K8sHelm3Connector = asynctest.Mock(lcm_helm_conn.K8sHelm3Connector)
-        self.helm_conn = LCMHelmConn(loop=self.loop, vca_config=vca_config, log=self.logger)
+        lcm_helm_conn.K8sHelm3Connector = asynctest.Mock(
+            lcm_helm_conn.K8sHelm3Connector
+        )
+        self.helm_conn = LCMHelmConn(
+            loop=self.loop, vca_config=vca_config, log=self.logger
+        )

     @asynctest.fail_on(active_handles=True)
     async def test_create_execution_environment(self):
db_dict = {} artifact_path = "helm_sample_charm" helm_chart_id = "helm_sample_charm_0001" - self.helm_conn._k8sclusterhelm3.install = asynctest.CoroutineMock(return_value=None) + self.helm_conn._k8sclusterhelm3.install = asynctest.CoroutineMock( + return_value=None + ) self.helm_conn._k8sclusterhelm3.generate_kdu_instance_name = Mock() - self.helm_conn._k8sclusterhelm3.generate_kdu_instance_name.return_value = helm_chart_id + self.helm_conn._k8sclusterhelm3.generate_kdu_instance_name.return_value = ( + helm_chart_id + ) self.helm_conn._k8sclusterhelm2.generate_kdu_instance_name = Mock() - self.helm_conn._k8sclusterhelm2.generate_kdu_instance_name.return_value = helm_chart_id + self.helm_conn._k8sclusterhelm2.generate_kdu_instance_name.return_value = ( + helm_chart_id + ) self.db.get_one.return_value = {"_admin": {"helm-chart-v3": {"id": "myk8s_id"}}} - ee_id, _ = await self.helm_conn.create_execution_environment(namespace, - db_dict, - artifact_path=artifact_path, - vca_type="helm-v3") - self.assertEqual(ee_id, "{}:{}.{}".format("helm-v3", "osm", helm_chart_id), - "Check ee_id format: :.") - self.helm_conn._k8sclusterhelm3.install.assert_called_once_with("myk8s_id", - kdu_model="/helm_sample_charm", - kdu_instance=helm_chart_id, - namespace="osm", db_dict=db_dict, - params=None, timeout=None) + ee_id, _ = await self.helm_conn.create_execution_environment( + namespace, db_dict, artifact_path=artifact_path, vca_type="helm-v3" + ) + self.assertEqual( + ee_id, + "{}:{}.{}".format("helm-v3", "osm", helm_chart_id), + "Check ee_id format: :.", + ) + self.helm_conn._k8sclusterhelm3.install.assert_called_once_with( + "myk8s_id", + kdu_model="/helm_sample_charm", + kdu_instance=helm_chart_id, + namespace="osm", + db_dict=db_dict, + params=None, + timeout=None, + ) @asynctest.fail_on(active_handles=True) async def test_get_ee_ssh_public__key(self): @@ -94,7 +103,9 @@ class TestLcmHelmConn(asynctest.TestCase): mock_pub_key = "ssh-rsapubkey" self.db.get_one.return_value = {"_admin": {"helm-chart": {"id": "myk8s_id"}}} self.helm_conn._get_ssh_key = asynctest.CoroutineMock(return_value=mock_pub_key) - pub_key = await self.helm_conn.get_ee_ssh_public__key(ee_id=ee_id, db_dict=db_dict) + pub_key = await self.helm_conn.get_ee_ssh_public__key( + ee_id=ee_id, db_dict=db_dict + ) self.assertEqual(pub_key, mock_pub_key) @asynctest.fail_on(active_handles=True) @@ -104,7 +115,9 @@ class TestLcmHelmConn(asynctest.TestCase): primitive_name = "sleep" params = {} self.db.get_one.return_value = {"_admin": {"helm-chart": {"id": "myk8s_id"}}} - self.helm_conn._execute_primitive_internal = asynctest.CoroutineMock(return_value=("OK", "test-ok")) + self.helm_conn._execute_primitive_internal = asynctest.CoroutineMock( + return_value=("OK", "test-ok") + ) message = await self.helm_conn.exec_primitive(ee_id, primitive_name, params) self.assertEqual(message, "test-ok") @@ -116,7 +129,9 @@ class TestLcmHelmConn(asynctest.TestCase): primitive_name = "config" params = {"ssh-host-name": "host1"} self.db.get_one.return_value = {"_admin": {"helm-chart": {"id": "myk8s_id"}}} - self.helm_conn._execute_primitive_internal = asynctest.CoroutineMock(return_value=("OK", "CONFIG OK")) + self.helm_conn._execute_primitive_internal = asynctest.CoroutineMock( + return_value=("OK", "CONFIG OK") + ) message = await self.helm_conn.exec_primitive(ee_id, primitive_name, params) self.assertEqual(message, "CONFIG OK") @@ -124,10 +139,14 @@ class TestLcmHelmConn(asynctest.TestCase): async def test_delete_execution_environment(self): ee_id = 
"helm-v3:osm.helm_sample_charm_0001" self.db.get_one.return_value = {"_admin": {"helm-chart-v3": {"id": "myk8s_id"}}} - self.helm_conn._k8sclusterhelm3.uninstall = asynctest.CoroutineMock(return_value="") + self.helm_conn._k8sclusterhelm3.uninstall = asynctest.CoroutineMock( + return_value="" + ) await self.helm_conn.delete_execution_environment(ee_id) - self.helm_conn._k8sclusterhelm3.uninstall.assert_called_once_with("myk8s_id", "helm_sample_charm_0001") + self.helm_conn._k8sclusterhelm3.uninstall.assert_called_once_with( + "myk8s_id", "helm_sample_charm_0001" + ) -if __name__ == '__main__': +if __name__ == "__main__": asynctest.main() diff --git a/osm_lcm/tests/test_ns.py b/osm_lcm/tests/test_ns.py index 28b8fbe..192f8e0 100644 --- a/osm_lcm/tests/test_ns.py +++ b/osm_lcm/tests/test_ns.py @@ -16,7 +16,7 @@ ## -import asynctest # pip3 install asynctest --user +import asynctest # pip3 install asynctest --user import asyncio import yaml import copy @@ -50,36 +50,46 @@ It allows, if some testing ENV are supplied, testing without mocking some extern """ lcm_config = { - "global": { - "loglevel": "DEBUG" - }, + "global": {"loglevel": "DEBUG"}, "timeout": {}, - "VCA": { # TODO replace with os.get_env to get other configurations + "VCA": { # TODO replace with os.get_env to get other configurations "host": getenv("OSMLCM_VCA_HOST", "vca"), "port": getenv("OSMLCM_VCA_PORT", 17070), "user": getenv("OSMLCM_VCA_USER", "admin"), "secret": getenv("OSMLCM_VCA_SECRET", "vca"), "public_key": getenv("OSMLCM_VCA_PUBKEY", None), - 'ca_cert': getenv("OSMLCM_VCA_CACERT", None), - 'apiproxy': getenv("OSMLCM_VCA_APIPROXY", "192.168.1.1"), + "ca_cert": getenv("OSMLCM_VCA_CACERT", None), + "apiproxy": getenv("OSMLCM_VCA_APIPROXY", "192.168.1.1"), }, "ro_config": { - "uri": "http://{}:{}/openmano".format(getenv("OSMLCM_RO_HOST", "ro"), - getenv("OSMLCM_RO_PORT", "9090")), + "uri": "http://{}:{}/openmano".format( + getenv("OSMLCM_RO_HOST", "ro"), getenv("OSMLCM_RO_PORT", "9090") + ), "tenant": getenv("OSMLCM_RO_TENANT", "osm"), "logger_name": "lcm.ROclient", "loglevel": "DEBUG", - "ng": True - } + "ng": True, + }, } class TestMyNS(asynctest.TestCase): - - async def _n2vc_DeployCharms(self, model_name, application_name, vnfd, charm_path, params={}, machine_spec={}, - callback=None, *callback_args): + async def _n2vc_DeployCharms( + self, + model_name, + application_name, + vnfd, + charm_path, + params={}, + machine_spec={}, + callback=None, + *callback_args + ): if callback: - for status, message in (("maintenance", "installing sofwware"), ("active", "Ready!")): + for status, message in ( + ("maintenance", "installing sofwware"), + ("active", "Ready!"), + ): # call callback after some time asyncio.sleep(5, loop=self.loop) callback(model_name, application_name, status, message, *callback_args) @@ -91,7 +101,9 @@ class TestMyNS(asynctest.TestCase): yield "app_name-{}".format(num_calls) num_calls += 1 - def _n2vc_CreateExecutionEnvironment(self, namespace, reuse_ee_id, db_dict, *args, **kwargs): + def _n2vc_CreateExecutionEnvironment( + self, namespace, reuse_ee_id, db_dict, *args, **kwargs + ): k_list = namespace.split(".") ee_id = k_list[1] + "." 
if len(k_list) >= 2: @@ -105,7 +117,9 @@ class TestMyNS(asynctest.TestCase): print("Args > {}".format(args)) print("kwargs > {}".format(kwargs)) if kwargs.get("delete"): - ro_ns_desc = yaml.load(descriptors.ro_delete_action_text, Loader=yaml.Loader) + ro_ns_desc = yaml.load( + descriptors.ro_delete_action_text, Loader=yaml.Loader + ) while True: yield ro_ns_desc @@ -138,11 +152,7 @@ class TestMyNS(asynctest.TestCase): break def _ro_deploy(self, *args, **kwargs): - return { - 'action_id': args[1]["action_id"], - 'nsr_id': args[0], - 'status': 'ok' - } + return {"action_id": args[1]["action_id"], "nsr_id": args[0], "status": "ok"} def _return_uuid(self, *args, **kwargs): return str(uuid4()) @@ -154,32 +164,45 @@ class TestMyNS(asynctest.TestCase): # Cleanup singleton Database instance Database.instance = None - self.db = Database({ - "database": { - "driver": "memory" - } - }).instance.db - self.db.create_list("vnfds", yaml.load(descriptors.db_vnfds_text, Loader=yaml.Loader)) - self.db.create_list("nsds", yaml.load(descriptors.db_nsds_text, Loader=yaml.Loader)) - self.db.create_list("nsrs", yaml.load(descriptors.db_nsrs_text, Loader=yaml.Loader)) - self.db.create_list("vim_accounts", yaml.load(descriptors.db_vim_accounts_text, Loader=yaml.Loader)) - self.db.create_list("k8sclusters", yaml.load(descriptors.db_k8sclusters_text, Loader=yaml.Loader)) - self.db.create_list("nslcmops", yaml.load(descriptors.db_nslcmops_text, Loader=yaml.Loader)) - self.db.create_list("vnfrs", yaml.load(descriptors.db_vnfrs_text, Loader=yaml.Loader)) - self.db_vim_accounts = yaml.load(descriptors.db_vim_accounts_text, Loader=yaml.Loader) + self.db = Database({"database": {"driver": "memory"}}).instance.db + self.db.create_list( + "vnfds", yaml.load(descriptors.db_vnfds_text, Loader=yaml.Loader) + ) + self.db.create_list( + "nsds", yaml.load(descriptors.db_nsds_text, Loader=yaml.Loader) + ) + self.db.create_list( + "nsrs", yaml.load(descriptors.db_nsrs_text, Loader=yaml.Loader) + ) + self.db.create_list( + "vim_accounts", + yaml.load(descriptors.db_vim_accounts_text, Loader=yaml.Loader), + ) + self.db.create_list( + "k8sclusters", + yaml.load(descriptors.db_k8sclusters_text, Loader=yaml.Loader), + ) + self.db.create_list( + "nslcmops", yaml.load(descriptors.db_nslcmops_text, Loader=yaml.Loader) + ) + self.db.create_list( + "vnfrs", yaml.load(descriptors.db_vnfrs_text, Loader=yaml.Loader) + ) + self.db_vim_accounts = yaml.load( + descriptors.db_vim_accounts_text, Loader=yaml.Loader + ) # Mock kafka self.msg = asynctest.Mock(MsgKafka()) # Mock filesystem if not getenv("OSMLCMTEST_FS_NOMOCK"): - self.fs = asynctest.Mock(Filesystem({ - "storage": { - "driver": "local", - "path": "/" - } - }).instance.fs) - self.fs.get_params.return_value = {"path": getenv("OSMLCMTEST_PACKAGES_PATH", "./test/temp/packages")} + self.fs = asynctest.Mock( + Filesystem({"storage": {"driver": "local", "path": "/"}}).instance.fs + ) + self.fs.get_params.return_value = { + "path": getenv("OSMLCMTEST_PACKAGES_PATH", "./test/temp/packages") + } self.fs.file_open = asynctest.mock_open() # self.fs.file_open.return_value.__enter__.return_value = asynctest.MagicMock() # called on a python "with" # self.fs.file_open.return_value.__enter__.return_value.read.return_value = "" # empty file @@ -214,30 +237,57 @@ class TestMyNS(asynctest.TestCase): if not getenv("OSMLCMTEST_VCA_NOMOCK"): pub_key = getenv("OSMLCMTEST_NS_PUBKEY", "ssh-rsa test-pub-key t@osm.com") # self.my_ns.n2vc = asynctest.Mock(N2VC()) - self.my_ns.n2vc.GetPublicKey.return_value = 
getenv("OSMLCM_VCA_PUBKEY", "public_key") + self.my_ns.n2vc.GetPublicKey.return_value = getenv( + "OSMLCM_VCA_PUBKEY", "public_key" + ) # allow several versions of n2vc - self.my_ns.n2vc.FormatApplicationName = asynctest.Mock(side_effect=self._n2vc_FormatApplicationName()) - self.my_ns.n2vc.DeployCharms = asynctest.CoroutineMock(side_effect=self._n2vc_DeployCharms) + self.my_ns.n2vc.FormatApplicationName = asynctest.Mock( + side_effect=self._n2vc_FormatApplicationName() + ) + self.my_ns.n2vc.DeployCharms = asynctest.CoroutineMock( + side_effect=self._n2vc_DeployCharms + ) self.my_ns.n2vc.create_execution_environment = asynctest.CoroutineMock( - side_effect=self._n2vc_CreateExecutionEnvironment) - self.my_ns.n2vc.install_configuration_sw = asynctest.CoroutineMock(return_value=pub_key) - self.my_ns.n2vc.get_ee_ssh_public__key = asynctest.CoroutineMock(return_value=pub_key) - self.my_ns.n2vc.exec_primitive = asynctest.CoroutineMock(side_effect=self._return_uuid) - self.my_ns.n2vc.exec_primitive = asynctest.CoroutineMock(side_effect=self._return_uuid) - self.my_ns.n2vc.GetPrimitiveStatus = asynctest.CoroutineMock(return_value="completed") - self.my_ns.n2vc.GetPrimitiveOutput = asynctest.CoroutineMock(return_value={"result": "ok", - "pubkey": pub_key}) - self.my_ns.n2vc.delete_execution_environment = asynctest.CoroutineMock(return_value=None) + side_effect=self._n2vc_CreateExecutionEnvironment + ) + self.my_ns.n2vc.install_configuration_sw = asynctest.CoroutineMock( + return_value=pub_key + ) + self.my_ns.n2vc.get_ee_ssh_public__key = asynctest.CoroutineMock( + return_value=pub_key + ) + self.my_ns.n2vc.exec_primitive = asynctest.CoroutineMock( + side_effect=self._return_uuid + ) + self.my_ns.n2vc.exec_primitive = asynctest.CoroutineMock( + side_effect=self._return_uuid + ) + self.my_ns.n2vc.GetPrimitiveStatus = asynctest.CoroutineMock( + return_value="completed" + ) + self.my_ns.n2vc.GetPrimitiveOutput = asynctest.CoroutineMock( + return_value={"result": "ok", "pubkey": pub_key} + ) + self.my_ns.n2vc.delete_execution_environment = asynctest.CoroutineMock( + return_value=None + ) self.my_ns.n2vc.get_public_key = asynctest.CoroutineMock( - return_value=getenv("OSMLCM_VCA_PUBKEY", "public_key")) - self.my_ns.n2vc.delete_namespace = asynctest.CoroutineMock(return_value=None) + return_value=getenv("OSMLCM_VCA_PUBKEY", "public_key") + ) + self.my_ns.n2vc.delete_namespace = asynctest.CoroutineMock( + return_value=None + ) # Mock RO if not getenv("OSMLCMTEST_RO_NOMOCK"): - self.my_ns.RO = asynctest.Mock(NgRoClient(self.loop, **lcm_config["ro_config"])) + self.my_ns.RO = asynctest.Mock( + NgRoClient(self.loop, **lcm_config["ro_config"]) + ) # TODO first time should be empty list, following should return a dict # self.my_ns.RO.get_list = asynctest.CoroutineMock(self.my_ns.RO.get_list, return_value=[]) - self.my_ns.RO.deploy = asynctest.CoroutineMock(self.my_ns.RO.deploy, side_effect=self._ro_deploy) + self.my_ns.RO.deploy = asynctest.CoroutineMock( + self.my_ns.RO.deploy, side_effect=self._ro_deploy + ) # self.my_ns.RO.status = asynctest.CoroutineMock(self.my_ns.RO.status, side_effect=self._ro_status) # self.my_ns.RO.create_action = asynctest.CoroutineMock(self.my_ns.RO.create_action, # return_value={"vm-id": {"vim_result": 200, @@ -326,7 +376,7 @@ class TestMyNS(asynctest.TestCase): # # this will check that the initial-congig-primitive 'not_to_be_called' is not called # Test scale() and related methods - @asynctest.fail_on(active_handles=True) # all async tasks must be completed + 
@asynctest.fail_on(active_handles=True) # all async tasks must be completed async def test_scale(self): # print("Test scale started") @@ -337,8 +387,10 @@ class TestMyNS(asynctest.TestCase): nsr_id = descriptors.test_ids["TEST-A"]["ns"] nslcmop_id = descriptors.test_ids["TEST-A"]["instantiate"] await self.my_ns.scale(nsr_id, nslcmop_id) - expected_value = 'FAILED' - return_value = self.db.get_one("nslcmops", {"_id": nslcmop_id}).get("operationState") + expected_value = "FAILED" + return_value = self.db.get_one("nslcmops", {"_id": nslcmop_id}).get( + "operationState" + ) self.assertEqual(return_value, expected_value) # print("scale_result: {}".format(self.db.get_one("nslcmops", {"_id": nslcmop_id}).get("detailed-status"))) @@ -352,12 +404,24 @@ class TestMyNS(asynctest.TestCase): for i, _ in enumerate(vnf_descriptors): for j, value in enumerate(vnf_descriptors[i]["df"]): if "lcm-operations-configuration" in vnf_descriptors[i]["df"][j]: - if "day1-2" in value["lcm-operations-configuration"]["operate-vnf-op-config"]: - for k, v in enumerate(value["lcm-operations-configuration"]["operate-vnf-op-config"]["day1-2"]): + if ( + "day1-2" + in value["lcm-operations-configuration"][ + "operate-vnf-op-config" + ] + ): + for k, v in enumerate( + value["lcm-operations-configuration"][ + "operate-vnf-op-config" + ]["day1-2"] + ): if "juju" in v["execution-environment-list"][k]: - expected_value = self.db.get_list("nsrs")[i]["vcaStatus"] - await self.my_ns._on_update_n2vc_db("nsrs", {"_id": nsr_id}, - "_admin.deployed.VCA.0", {}) + expected_value = self.db.get_list("nsrs")[i][ + "vcaStatus" + ] + await self.my_ns._on_update_n2vc_db( + "nsrs", {"_id": nsr_id}, "_admin.deployed.VCA.0", {} + ) return_value = self.db.get_list("nsrs")[i]["vcaStatus"] self.assertEqual(return_value, expected_value) @@ -368,15 +432,15 @@ class TestMyNS(asynctest.TestCase): def test_scale_retry_or_skip_suboperation(self): # Load an alternative 'nslcmops' YAML for this test nslcmop_id = descriptors.test_ids["TEST-A"]["instantiate"] - db_nslcmop = self.db.get_one('nslcmops', {"_id": nslcmop_id}) + db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id}) op_index = 2 # Test when 'operationState' is 'COMPLETED' - db_nslcmop['_admin']['operations'][op_index]['operationState'] = 'COMPLETED' + db_nslcmop["_admin"]["operations"][op_index]["operationState"] = "COMPLETED" return_value = self.my_ns._retry_or_skip_suboperation(db_nslcmop, op_index) expected_value = self.my_ns.SUBOPERATION_STATUS_SKIP self.assertEqual(return_value, expected_value) # Test when 'operationState' is not 'COMPLETED' - db_nslcmop['_admin']['operations'][op_index]['operationState'] = None + db_nslcmop["_admin"]["operations"][op_index]["operationState"] = None return_value = self.my_ns._retry_or_skip_suboperation(db_nslcmop, op_index) expected_value = op_index self.assertEqual(return_value, expected_value) @@ -386,24 +450,26 @@ class TestMyNS(asynctest.TestCase): def test_scale_find_suboperation(self): # Load an alternative 'nslcmops' YAML for this test nslcmop_id = descriptors.test_ids["TEST-A"]["instantiate"] - db_nslcmop = self.db.get_one('nslcmops', {"_id": nslcmop_id}) + db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id}) # Find this sub-operation op_index = 2 - vnf_index = db_nslcmop['_admin']['operations'][op_index]['member_vnf_index'] - primitive = db_nslcmop['_admin']['operations'][op_index]['primitive'] - primitive_params = db_nslcmop['_admin']['operations'][op_index]['primitive_params'] + vnf_index = 
db_nslcmop["_admin"]["operations"][op_index]["member_vnf_index"] + primitive = db_nslcmop["_admin"]["operations"][op_index]["primitive"] + primitive_params = db_nslcmop["_admin"]["operations"][op_index][ + "primitive_params" + ] match = { - 'member_vnf_index': vnf_index, - 'primitive': primitive, - 'primitive_params': primitive_params, + "member_vnf_index": vnf_index, + "primitive": primitive, + "primitive_params": primitive_params, } found_op_index = self.my_ns._find_suboperation(db_nslcmop, match) self.assertEqual(found_op_index, op_index) # Test with not-matching params match = { - 'member_vnf_index': vnf_index, - 'primitive': '', - 'primitive_params': primitive_params, + "member_vnf_index": vnf_index, + "primitive": "", + "primitive_params": primitive_params, } found_op_index = self.my_ns._find_suboperation(db_nslcmop, match) self.assertEqual(found_op_index, self.my_ns.SUBOPERATION_STATUS_NOT_FOUND) @@ -416,65 +482,112 @@ class TestMyNS(asynctest.TestCase): def test_scale_update_suboperation_status(self): self.db.set_one = asynctest.Mock() nslcmop_id = descriptors.test_ids["TEST-A"]["instantiate"] - db_nslcmop = self.db.get_one('nslcmops', {"_id": nslcmop_id}) + db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id}) op_index = 0 # Force the initial values to be distinct from the updated ones q_filter = {"_id": db_nslcmop["_id"]} # Test to change 'operationState' and 'detailed-status' - operationState = 'COMPLETED' - detailed_status = 'Done' - expected_update_dict = {'_admin.operations.0.operationState': operationState, - '_admin.operations.0.detailed-status': detailed_status, - } - self.my_ns._update_suboperation_status(db_nslcmop, op_index, operationState, detailed_status) - self.db.set_one.assert_called_once_with("nslcmops", q_filter=q_filter, update_dict=expected_update_dict, - fail_on_empty=False) + operationState = "COMPLETED" + detailed_status = "Done" + expected_update_dict = { + "_admin.operations.0.operationState": operationState, + "_admin.operations.0.detailed-status": detailed_status, + } + self.my_ns._update_suboperation_status( + db_nslcmop, op_index, operationState, detailed_status + ) + self.db.set_one.assert_called_once_with( + "nslcmops", + q_filter=q_filter, + update_dict=expected_update_dict, + fail_on_empty=False, + ) def test_scale_add_suboperation(self): nslcmop_id = descriptors.test_ids["TEST-A"]["instantiate"] - db_nslcmop = self.db.get_one('nslcmops', {"_id": nslcmop_id}) - vnf_index = '1' - num_ops_before = len(db_nslcmop.get('_admin', {}).get('operations', [])) - 1 + db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id}) + vnf_index = "1" + num_ops_before = len(db_nslcmop.get("_admin", {}).get("operations", [])) - 1 vdu_id = None vdu_count_index = None vdu_name = None - primitive = 'touch' - mapped_primitive_params = {'parameter': - [{'data-type': 'STRING', - 'name': 'filename', - 'default-value': ''}], - 'name': 'touch'} - operationState = 'PROCESSING' - detailed_status = 'In progress' - operationType = 'PRE-SCALE' + primitive = "touch" + mapped_primitive_params = { + "parameter": [ + { + "data-type": "STRING", + "name": "filename", + "default-value": "", + } + ], + "name": "touch", + } + operationState = "PROCESSING" + detailed_status = "In progress" + operationType = "PRE-SCALE" # Add a 'pre-scale' suboperation - op_index_after = self.my_ns._add_suboperation(db_nslcmop, vnf_index, vdu_id, vdu_count_index, - vdu_name, primitive, mapped_primitive_params, - operationState, detailed_status, operationType) + op_index_after = 
self.my_ns._add_suboperation( + db_nslcmop, + vnf_index, + vdu_id, + vdu_count_index, + vdu_name, + primitive, + mapped_primitive_params, + operationState, + detailed_status, + operationType, + ) self.assertEqual(op_index_after, num_ops_before + 1) # Delete all suboperations and add the same operation again - del db_nslcmop['_admin']['operations'] - op_index_zero = self.my_ns._add_suboperation(db_nslcmop, vnf_index, vdu_id, vdu_count_index, - vdu_name, primitive, mapped_primitive_params, - operationState, detailed_status, operationType) + del db_nslcmop["_admin"]["operations"] + op_index_zero = self.my_ns._add_suboperation( + db_nslcmop, + vnf_index, + vdu_id, + vdu_count_index, + vdu_name, + primitive, + mapped_primitive_params, + operationState, + detailed_status, + operationType, + ) self.assertEqual(op_index_zero, 0) # Add a 'RO' suboperation - RO_nsr_id = '1234567890' - RO_scaling_info = [{'type': 'create', 'count': 1, 'member-vnf-index': '1', 'osm_vdu_id': 'dataVM'}] - op_index = self.my_ns._add_suboperation(db_nslcmop, vnf_index, vdu_id, vdu_count_index, - vdu_name, primitive, mapped_primitive_params, - operationState, detailed_status, operationType, - RO_nsr_id, RO_scaling_info) - db_RO_nsr_id = db_nslcmop['_admin']['operations'][op_index]['RO_nsr_id'] + RO_nsr_id = "1234567890" + RO_scaling_info = [ + { + "type": "create", + "count": 1, + "member-vnf-index": "1", + "osm_vdu_id": "dataVM", + } + ] + op_index = self.my_ns._add_suboperation( + db_nslcmop, + vnf_index, + vdu_id, + vdu_count_index, + vdu_name, + primitive, + mapped_primitive_params, + operationState, + detailed_status, + operationType, + RO_nsr_id, + RO_scaling_info, + ) + db_RO_nsr_id = db_nslcmop["_admin"]["operations"][op_index]["RO_nsr_id"] self.assertEqual(op_index, 1) self.assertEqual(RO_nsr_id, db_RO_nsr_id) # Try to add an invalid suboperation, should return SUBOPERATION_STATUS_NOT_FOUND - op_index_invalid = self.my_ns._add_suboperation(None, None, None, None, None, - None, None, None, - None, None, None) + op_index_invalid = self.my_ns._add_suboperation( + None, None, None, None, None, None, None, None, None, None, None + ) self.assertEqual(op_index_invalid, self.my_ns.SUBOPERATION_STATUS_NOT_FOUND) # Test _check_or_add_scale_suboperation() and _check_or_add_scale_suboperation_RO() @@ -484,68 +597,92 @@ class TestMyNS(asynctest.TestCase): # - SUBOPERATION_STATUS_SKIP: This is an existing sub-operation, operationState == 'COMPLETED' def test_scale_check_or_add_scale_suboperation(self): nslcmop_id = descriptors.test_ids["TEST-A"]["instantiate"] - db_nslcmop = self.db.get_one('nslcmops', {"_id": nslcmop_id}) - operationType = 'PRE-SCALE' - vnf_index = '1' - primitive = 'touch' - primitive_params = {'parameter': - [{'data-type': 'STRING', - 'name': 'filename', - 'default-value': ''}], - 'name': 'touch'} + db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id}) + operationType = "PRE-SCALE" + vnf_index = "1" + primitive = "touch" + primitive_params = { + "parameter": [ + { + "data-type": "STRING", + "name": "filename", + "default-value": "", + } + ], + "name": "touch", + } # Delete all sub-operations to be sure this is a new sub-operation - del db_nslcmop['_admin']['operations'] + del db_nslcmop["_admin"]["operations"] # Add a new sub-operation # For new sub-operations, operationState is set to 'PROCESSING' by default op_index_new = self.my_ns._check_or_add_scale_suboperation( - db_nslcmop, vnf_index, primitive, primitive_params, operationType) + db_nslcmop, vnf_index, primitive, primitive_params, 
operationType + ) self.assertEqual(op_index_new, self.my_ns.SUBOPERATION_STATUS_NEW) # Use the same parameters again to match the already added sub-operation # which has status 'PROCESSING' (!= 'COMPLETED') by default # The expected return value is a non-negative number op_index_existing = self.my_ns._check_or_add_scale_suboperation( - db_nslcmop, vnf_index, primitive, primitive_params, operationType) + db_nslcmop, vnf_index, primitive, primitive_params, operationType + ) self.assertTrue(op_index_existing >= 0) # Change operationState 'manually' for this sub-operation - db_nslcmop['_admin']['operations'][op_index_existing]['operationState'] = 'COMPLETED' + db_nslcmop["_admin"]["operations"][op_index_existing][ + "operationState" + ] = "COMPLETED" # Then use the same parameters again to match the already added sub-operation, # which now has status 'COMPLETED' # The expected return value is SUBOPERATION_STATUS_SKIP op_index_skip = self.my_ns._check_or_add_scale_suboperation( - db_nslcmop, vnf_index, primitive, primitive_params, operationType) + db_nslcmop, vnf_index, primitive, primitive_params, operationType + ) self.assertEqual(op_index_skip, self.my_ns.SUBOPERATION_STATUS_SKIP) # RO sub-operation test: # Repeat tests for the very similar _check_or_add_scale_suboperation_RO(), - RO_nsr_id = '1234567890' - RO_scaling_info = [{'type': 'create', 'count': 1, 'member-vnf-index': '1', 'osm_vdu_id': 'dataVM'}] + RO_nsr_id = "1234567890" + RO_scaling_info = [ + { + "type": "create", + "count": 1, + "member-vnf-index": "1", + "osm_vdu_id": "dataVM", + } + ] op_index_new_RO = self.my_ns._check_or_add_scale_suboperation( - db_nslcmop, vnf_index, None, None, 'SCALE-RO', RO_nsr_id, RO_scaling_info) + db_nslcmop, vnf_index, None, None, "SCALE-RO", RO_nsr_id, RO_scaling_info + ) self.assertEqual(op_index_new_RO, self.my_ns.SUBOPERATION_STATUS_NEW) # Use the same parameters again to match the already added RO sub-operation op_index_existing_RO = self.my_ns._check_or_add_scale_suboperation( - db_nslcmop, vnf_index, None, None, 'SCALE-RO', RO_nsr_id, RO_scaling_info) + db_nslcmop, vnf_index, None, None, "SCALE-RO", RO_nsr_id, RO_scaling_info + ) self.assertTrue(op_index_existing_RO >= 0) # Change operationState 'manually' for this RO sub-operation - db_nslcmop['_admin']['operations'][op_index_existing_RO]['operationState'] = 'COMPLETED' + db_nslcmop["_admin"]["operations"][op_index_existing_RO][ + "operationState" + ] = "COMPLETED" # Then use the same parameters again to match the already added sub-operation, # which now has status 'COMPLETED' # The expected return value is SUBOPERATION_STATUS_SKIP op_index_skip_RO = self.my_ns._check_or_add_scale_suboperation( - db_nslcmop, vnf_index, None, None, 'SCALE-RO', RO_nsr_id, RO_scaling_info) + db_nslcmop, vnf_index, None, None, "SCALE-RO", RO_nsr_id, RO_scaling_info + ) self.assertEqual(op_index_skip_RO, self.my_ns.SUBOPERATION_STATUS_SKIP) async def test_deploy_kdus(self): nsr_id = descriptors.test_ids["TEST-KDU"]["ns"] nslcmop_id = descriptors.test_ids["TEST-KDU"]["instantiate"] db_nsr = self.db.get_one("nsrs", {"_id": nsr_id}) - db_vnfr = self.db.get_one("vnfrs", {"nsr-id-ref": nsr_id, "member-vnf-index-ref": "multikdu"}) + db_vnfr = self.db.get_one( + "vnfrs", {"nsr-id-ref": nsr_id, "member-vnf-index-ref": "multikdu"} + ) db_vnfrs = {"multikdu": db_vnfr} db_vnfd = self.db.get_one("vnfds", {"_id": db_vnfr["vnfd-id"]}) db_vnfds = [db_vnfd] @@ -554,26 +691,44 @@ class TestMyNS(asynctest.TestCase): self.my_ns.k8sclusterhelm3.generate_kdu_instance_name = 
asynctest.mock.Mock() self.my_ns.k8sclusterhelm3.generate_kdu_instance_name.return_value = "k8s_id" self.my_ns.k8sclusterhelm3.install = asynctest.CoroutineMock() - self.my_ns.k8sclusterhelm3.synchronize_repos = asynctest.CoroutineMock(return_value=("", "")) - self.my_ns.k8sclusterhelm3.get_services = asynctest.CoroutineMock(return_value=([])) - await self.my_ns.deploy_kdus(logging_text, nsr_id, nslcmop_id, db_vnfrs, db_vnfds, task_register) + self.my_ns.k8sclusterhelm3.synchronize_repos = asynctest.CoroutineMock( + return_value=("", "") + ) + self.my_ns.k8sclusterhelm3.get_services = asynctest.CoroutineMock( + return_value=([]) + ) + await self.my_ns.deploy_kdus( + logging_text, nsr_id, nslcmop_id, db_vnfrs, db_vnfds, task_register + ) await asyncio.wait(list(task_register.keys()), timeout=100) db_nsr = self.db.get_list("nsrs")[1] - self.assertIn("K8s", db_nsr["_admin"]["deployed"], "K8s entry not created at '_admin.deployed'") - self.assertIsInstance(db_nsr["_admin"]["deployed"]["K8s"], list, "K8s entry is not of type list") - self.assertEqual(len(db_nsr["_admin"]["deployed"]["K8s"]), 2, "K8s entry is not of type list") - k8s_instace_info = {"kdu-instance": "k8s_id", "k8scluster-uuid": "73d96432-d692-40d2-8440-e0c73aee209c", - "k8scluster-type": "helm-chart-v3", - "kdu-name": "ldap", - "member-vnf-index": "multikdu", - "namespace": None} + self.assertIn( + "K8s", + db_nsr["_admin"]["deployed"], + "K8s entry not created at '_admin.deployed'", + ) + self.assertIsInstance( + db_nsr["_admin"]["deployed"]["K8s"], list, "K8s entry is not of type list" + ) + self.assertEqual( + len(db_nsr["_admin"]["deployed"]["K8s"]), 2, "K8s entry is not of type list" + ) + k8s_instace_info = { + "kdu-instance": "k8s_id", + "k8scluster-uuid": "73d96432-d692-40d2-8440-e0c73aee209c", + "k8scluster-type": "helm-chart-v3", + "kdu-name": "ldap", + "member-vnf-index": "multikdu", + "namespace": None, + } nsr_result = copy.deepcopy(db_nsr["_admin"]["deployed"]["K8s"][0]) nsr_kdu_model_result = nsr_result.pop("kdu-model") expected_kdu_model = "stable/openldap:1.2.1" self.assertEqual(nsr_result, k8s_instace_info) self.assertTrue( - nsr_kdu_model_result in expected_kdu_model or expected_kdu_model in nsr_kdu_model_result + nsr_kdu_model_result in expected_kdu_model + or expected_kdu_model in nsr_kdu_model_result ) nsr_result = copy.deepcopy(db_nsr["_admin"]["deployed"]["K8s"][1]) nsr_kdu_model_result = nsr_result.pop("kdu-model") @@ -581,7 +736,8 @@ class TestMyNS(asynctest.TestCase): expected_kdu_model = "stable/mongodb" self.assertEqual(nsr_result, k8s_instace_info) self.assertTrue( - nsr_kdu_model_result in expected_kdu_model or expected_kdu_model in nsr_kdu_model_result + nsr_kdu_model_result in expected_kdu_model + or expected_kdu_model in nsr_kdu_model_result ) # async def test_instantiate_pdu(self): @@ -665,5 +821,5 @@ class TestMyNS(asynctest.TestCase): # self.assertEqual(db_nsr.get("errorDetail"), None, "errorDetail different than None") -if __name__ == '__main__': +if __name__ == "__main__": asynctest.main() diff --git a/osm_lcm/tests/test_prometheus.py b/osm_lcm/tests/test_prometheus.py index 72785df..2eb81f5 100644 --- a/osm_lcm/tests/test_prometheus.py +++ b/osm_lcm/tests/test_prometheus.py @@ -20,24 +20,18 @@ from osm_lcm.prometheus import Prometheus, initial_prometheus_data from asynctest.mock import Mock from osm_lcm.data_utils.database.database import Database -__author__ = 'Alfonso Tierno ' +__author__ = "Alfonso Tierno " class TestPrometheus(asynctest.TestCase): - async def setUp(self): - config = 
{'uri': 'http:prometheus:9090', - 'path': '/etc/prometheus'} + config = {"uri": "http:prometheus:9090", "path": "/etc/prometheus"} # Cleanup singleton Database instance Database.instance = None - self.db = Mock(Database({ - "database": { - "driver": "memory" - } - }).instance.db) + self.db = Mock(Database({"database": {"driver": "memory"}}).instance.db) Database().instance.db = self.db - self.p = Prometheus(config, worker_id='1', loop=self.loop) + self.p = Prometheus(config, worker_id="1", loop=self.loop) @asynctest.fail_on(active_handles=True) async def test_start(self): @@ -45,7 +39,7 @@ class TestPrometheus(asynctest.TestCase): self.db.get_one.return_value = False self.p.update = asynctest.CoroutineMock() await self.p.start() - self.db.create.assert_called_once_with('admin', initial_prometheus_data) + self.db.create.assert_called_once_with("admin", initial_prometheus_data) self.p.update.assert_called_once_with() # test with database not empty @@ -69,55 +63,80 @@ class TestPrometheus(asynctest.TestCase): if number_call_set_one == 1: return else: - return {'update': 1} + return {"update": 1} def _check_set_one_calls(set_one_calls): # check the three calls to database set_one - self.assertEqual(len(set_one_calls), 3, 'Not called three times to db.set_one, two blocks, one unblock') - self.assertIn('admin', set_one_calls[0][0], 'db.set_one collection should be admin') - first_used_time = set_one_calls[0][1]['update_dict']['_admin.locked_at'] - second_used_time = set_one_calls[1][1]['update_dict']['_admin.locked_at'] - third_used_time = set_one_calls[2][1]['update_dict']['_admin.locked_at'] - self.assertTrue(first_used_time != 0 and second_used_time != 0, 'blocking locked_at time must not be 0') - self.assertGreater(second_used_time, first_used_time, - 'Every blocking try must contain a new locked_at time') - self.assertEqual(third_used_time, 0, 'For unblocking must be set locked_at=0') + self.assertEqual( + len(set_one_calls), + 3, + "Not called three times to db.set_one, two blocks, one unblock", + ) + self.assertIn( + "admin", set_one_calls[0][0], "db.set_one collection should be admin" + ) + first_used_time = set_one_calls[0][1]["update_dict"]["_admin.locked_at"] + second_used_time = set_one_calls[1][1]["update_dict"]["_admin.locked_at"] + third_used_time = set_one_calls[2][1]["update_dict"]["_admin.locked_at"] + self.assertTrue( + first_used_time != 0 and second_used_time != 0, + "blocking locked_at time must not be 0", + ) + self.assertGreater( + second_used_time, + first_used_time, + "Every blocking try must contain a new locked_at time", + ) + self.assertEqual( + third_used_time, 0, "For unblocking must be set locked_at=0" + ) # check add_jobs number_call_set_one = 0 self.db.get_one.return_value = initial_prometheus_data self.db.set_one.side_effect = _db_set_one self.p.send_data = asynctest.CoroutineMock(return_value=True) - add_jobs = {'job1': {'job_name': 'job1', 'nsr_id': 'nsr_id'}} + add_jobs = {"job1": {"job_name": "job1", "nsr_id": "nsr_id"}} await self.p.update(add_jobs=add_jobs) set_one_calls = self.db.set_one.call_args_list _check_set_one_calls(set_one_calls) - update_dict = set_one_calls[2][1]['update_dict'] - unset_dict = set_one_calls[2][1]['unset'] + update_dict = set_one_calls[2][1]["update_dict"] + unset_dict = set_one_calls[2][1]["unset"] expected_final_set = { - '_admin.locked_at': 0, - '_admin.locked_by': None, - '_admin.modified_at': set_one_calls[1][1]['update_dict']['_admin.locked_at'], - 'scrape_configs.job1': add_jobs['job1']} - self.assertEqual(update_dict, 
expected_final_set, 'invalid set and unlock values') - self.assertEqual(unset_dict, None, 'invalid unset and unlock values') + "_admin.locked_at": 0, + "_admin.locked_by": None, + "_admin.modified_at": set_one_calls[1][1]["update_dict"][ + "_admin.locked_at" + ], + "scrape_configs.job1": add_jobs["job1"], + } + self.assertEqual( + update_dict, expected_final_set, "invalid set and unlock values" + ) + self.assertEqual(unset_dict, None, "invalid unset and unlock values") # check remove_jobs number_call_set_one = 0 - remove_jobs = ['job1'] + remove_jobs = ["job1"] self.db.set_one.reset_mock() await self.p.update(remove_jobs=remove_jobs) set_one_calls = self.db.set_one.call_args_list _check_set_one_calls(set_one_calls) - update_dict = set_one_calls[2][1]['update_dict'] - unset_dict = set_one_calls[2][1]['unset'] + update_dict = set_one_calls[2][1]["update_dict"] + unset_dict = set_one_calls[2][1]["unset"] expected_final_set = { - '_admin.locked_at': 0, - '_admin.locked_by': None, - '_admin.modified_at': set_one_calls[1][1]['update_dict']['_admin.locked_at'] + "_admin.locked_at": 0, + "_admin.locked_by": None, + "_admin.modified_at": set_one_calls[1][1]["update_dict"][ + "_admin.locked_at" + ], } - self.assertEqual(update_dict, expected_final_set, 'invalid set and unlock values') - self.assertEqual(unset_dict, {'scrape_configs.job1': None}, 'invalid unset and unlock values') + self.assertEqual( + update_dict, expected_final_set, "invalid set and unlock values" + ) + self.assertEqual( + unset_dict, {"scrape_configs.job1": None}, "invalid unset and unlock values" + ) def test_parse_job(self): text_to_parse = """ @@ -125,14 +144,11 @@ class TestPrometheus(asynctest.TestCase): key1: "parsing var1='{{ var1 }}'" key2: "parsing var2='{{ var2 }}'" """ - vars = {'var1': 'VAR1', 'var2': 'VAR2', 'var3': 'VAR3'} - expected = { - 'key1': "parsing var1='VAR1'", - 'key2': "parsing var2='VAR2'" - } + vars = {"var1": "VAR1", "var2": "VAR2", "var3": "VAR3"} + expected = {"key1": "parsing var1='VAR1'", "key2": "parsing var2='VAR2'"} result = self.p.parse_job(text_to_parse, vars) - self.assertEqual(result, expected, 'Error at jinja2 parse') + self.assertEqual(result, expected, "Error at jinja2 parse") -if __name__ == '__main__': +if __name__ == "__main__": asynctest.main() diff --git a/osm_lcm/vim_sdn.py b/osm_lcm/vim_sdn.py index a1623ba..1641e96 100644 --- a/osm_lcm/vim_sdn.py +++ b/osm_lcm/vim_sdn.py @@ -36,8 +36,15 @@ __author__ = "Alfonso Tierno" class VimLcm(LcmBase): # values that are encrypted at vim config because they are passwords - vim_config_encrypted = {"1.1": ("admin_password", "nsx_password", "vcenter_password"), - "default": ("admin_password", "nsx_password", "vcenter_password", "vrops_password")} + vim_config_encrypted = { + "1.1": ("admin_password", "nsx_password", "vcenter_password"), + "default": ( + "admin_password", + "nsx_password", + "vcenter_password", + "vrops_password", + ), + } def __init__(self, msg, lcm_tasks, config, loop): """ @@ -46,7 +53,7 @@ class VimLcm(LcmBase): :return: None """ - self.logger = logging.getLogger('lcm.vim') + self.logger = logging.getLogger("lcm.vim") self.loop = loop self.lcm_tasks = lcm_tasks self.ro_config = config["ro_config"] @@ -59,8 +66,8 @@ class VimLcm(LcmBase): # If 'vim_content' does not include 'op_id', we a running a legacy NBI version. # In such a case, HA is not supported by NBI, 'op_id' is None, and lock_HA() will do nothing. 
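# Illustrative sketch: the create/edit/delete handlers in this module all share one
# HA choreography: pop 'op_id' from the request, try to take the lock (a no-op for
# legacy NBI, where op_id is None), do the work, and always report the terminal
# state through unlock_HA() in a finally block. A minimal self-contained version of
# that contract, with _FakeLcmTasks standing in for the real task registry:

class _FakeLcmTasks:
    def __init__(self):
        self._locked = set()

    def lock_HA(self, topic, op, op_id):
        if op_id is None:                     # legacy NBI: nothing to lock
            return True
        if (topic, op_id) in self._locked:    # another worker owns this operation
            return False
        self._locked.add((topic, op_id))
        return True

    def unlock_HA(self, topic, op, op_id, operationState, detailed_status):
        self._locked.discard((topic, op_id))

async def ha_create(lcm_tasks, content):
    op_id = content.pop("op_id", None)
    if not lcm_tasks.lock_HA("vim", "create", op_id):
        return
    state, details = "FAILED", "unset"
    try:
        state, details = "COMPLETED", "Done"  # real work goes here
    finally:
        lcm_tasks.unlock_HA("vim", "create", op_id,
                            operationState=state, detailed_status=details)

# import asyncio; asyncio.run(ha_create(_FakeLcmTasks(), {"op_id": "op-1"}))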
# Register 'create' task here for related future HA operations - op_id = vim_content.pop('op_id', None) - if not self.lcm_tasks.lock_HA('vim', 'create', op_id): + op_id = vim_content.pop("op_id", None) + if not self.lcm_tasks.lock_HA("vim", "create", op_id): return vim_id = vim_content["_id"] @@ -74,19 +81,32 @@ class VimLcm(LcmBase): try: step = "Getting vim-id='{}' from db".format(vim_id) db_vim = self.db.get_one("vim_accounts", {"_id": vim_id}) - if vim_content.get("config") and vim_content["config"].get("sdn-controller"): - step = "Getting sdn-controller-id='{}' from db".format(vim_content["config"]["sdn-controller"]) - db_sdn = self.db.get_one("sdns", {"_id": vim_content["config"]["sdn-controller"]}) + if vim_content.get("config") and vim_content["config"].get( + "sdn-controller" + ): + step = "Getting sdn-controller-id='{}' from db".format( + vim_content["config"]["sdn-controller"] + ) + db_sdn = self.db.get_one( + "sdns", {"_id": vim_content["config"]["sdn-controller"]} + ) # If the VIM account has an associated SDN account, also # wait for any previous tasks in process for the SDN - await self.lcm_tasks.waitfor_related_HA('sdn', 'ANY', db_sdn["_id"]) + await self.lcm_tasks.waitfor_related_HA("sdn", "ANY", db_sdn["_id"]) - if db_sdn.get("_admin") and db_sdn["_admin"].get("deployed") and db_sdn["_admin"]["deployed"].get("RO"): + if ( + db_sdn.get("_admin") + and db_sdn["_admin"].get("deployed") + and db_sdn["_admin"]["deployed"].get("RO") + ): RO_sdn_id = db_sdn["_admin"]["deployed"]["RO"] else: - raise LcmException("sdn-controller={} is not available. Not deployed at RO".format( - vim_content["config"]["sdn-controller"])) + raise LcmException( + "sdn-controller={} is not available. Not deployed at RO".format( + vim_content["config"]["sdn-controller"] + ) + ) step = "Creating vim at RO" db_vim_update["_admin.deployed.RO"] = None @@ -107,66 +127,87 @@ class VimLcm(LcmBase): desc = await RO.create("vim", descriptor=vim_RO) RO_vim_id = desc["uuid"] db_vim_update["_admin.deployed.RO"] = RO_vim_id - self.logger.debug(logging_text + "VIM created at RO_vim_id={}".format(RO_vim_id)) + self.logger.debug( + logging_text + "VIM created at RO_vim_id={}".format(RO_vim_id) + ) step = "Creating vim_account at RO" db_vim_update["_admin.detailed-status"] = step self.update_db_2("vim_accounts", vim_id, db_vim_update) if vim_content.get("vim_password"): - vim_content["vim_password"] = self.db.decrypt(vim_content["vim_password"], - schema_version=schema_version, - salt=vim_id) - vim_account_RO = {"vim_tenant_name": vim_content["vim_tenant_name"], - "vim_username": vim_content["vim_user"], - "vim_password": vim_content["vim_password"] - } + vim_content["vim_password"] = self.db.decrypt( + vim_content["vim_password"], + schema_version=schema_version, + salt=vim_id, + ) + vim_account_RO = { + "vim_tenant_name": vim_content["vim_tenant_name"], + "vim_username": vim_content["vim_user"], + "vim_password": vim_content["vim_password"], + } if vim_RO.get("config"): vim_account_RO["config"] = vim_RO["config"] if "sdn-controller" in vim_account_RO["config"]: del vim_account_RO["config"]["sdn-controller"] if "sdn-port-mapping" in vim_account_RO["config"]: del vim_account_RO["config"]["sdn-port-mapping"] - vim_config_encrypted_keys = self.vim_config_encrypted.get(schema_version) or \ - self.vim_config_encrypted.get("default") + vim_config_encrypted_keys = self.vim_config_encrypted.get( + schema_version + ) or self.vim_config_encrypted.get("default") for p in vim_config_encrypted_keys: if 
vim_account_RO["config"].get(p): - vim_account_RO["config"][p] = self.db.decrypt(vim_account_RO["config"][p], - schema_version=schema_version, - salt=vim_id) + vim_account_RO["config"][p] = self.db.decrypt( + vim_account_RO["config"][p], + schema_version=schema_version, + salt=vim_id, + ) desc = await RO.attach("vim_account", RO_vim_id, descriptor=vim_account_RO) db_vim_update["_admin.deployed.RO-account"] = desc["uuid"] db_vim_update["_admin.operationalState"] = "ENABLED" db_vim_update["_admin.detailed-status"] = "Done" # Mark the VIM 'create' HA task as successful - operation_state = 'COMPLETED' - operation_details = 'Done' + operation_state = "COMPLETED" + operation_details = "Done" - self.logger.debug(logging_text + "Exit Ok VIM account created at RO_vim_account_id={}".format(desc["uuid"])) + self.logger.debug( + logging_text + + "Exit Ok VIM account created at RO_vim_account_id={}".format( + desc["uuid"] + ) + ) return except (ROclient.ROClientException, DbException, asyncio.CancelledError) as e: self.logger.error(logging_text + "Exit Exception {}".format(e)) exc = e except Exception as e: - self.logger.critical(logging_text + "Exit Exception {}".format(e), exc_info=True) + self.logger.critical( + logging_text + "Exit Exception {}".format(e), exc_info=True + ) exc = e finally: if exc and db_vim: db_vim_update["_admin.operationalState"] = "ERROR" - db_vim_update["_admin.detailed-status"] = "ERROR {}: {}".format(step, exc) + db_vim_update["_admin.detailed-status"] = "ERROR {}: {}".format( + step, exc + ) # Mark the VIM 'create' HA task as erroneous - operation_state = 'FAILED' + operation_state = "FAILED" operation_details = "ERROR {}: {}".format(step, exc) try: if db_vim_update: self.update_db_2("vim_accounts", vim_id, db_vim_update) # Register the VIM 'create' HA task either # succesful or erroneous, or do nothing (if legacy NBI) - self.lcm_tasks.unlock_HA('vim', 'create', op_id, - operationState=operation_state, - detailed_status=operation_details) + self.lcm_tasks.unlock_HA( + "vim", + "create", + op_id, + operationState=operation_state, + detailed_status=operation_details, + ) except DbException as e: self.logger.error(logging_text + "Cannot update database: {}".format(e)) @@ -177,8 +218,8 @@ class VimLcm(LcmBase): # HA tasks and backward compatibility: # If 'vim_content' does not include 'op_id', we a running a legacy NBI version. 
# In such a case, HA is not supported by NBI, and the HA check always returns True - op_id = vim_content.pop('op_id', None) - if not self.lcm_tasks.lock_HA('vim', 'edit', op_id): + op_id = vim_content.pop("op_id", None) + if not self.lcm_tasks.lock_HA("vim", "edit", op_id): return vim_id = vim_content["_id"] @@ -193,25 +234,41 @@ class VimLcm(LcmBase): step = "Getting vim-id='{}' from db".format(vim_id) try: # wait for any previous tasks in process - await self.lcm_tasks.waitfor_related_HA('vim', 'edit', op_id) + await self.lcm_tasks.waitfor_related_HA("vim", "edit", op_id) db_vim = self.db.get_one("vim_accounts", {"_id": vim_id}) - if db_vim.get("_admin") and db_vim["_admin"].get("deployed") and db_vim["_admin"]["deployed"].get("RO"): - if vim_content.get("config") and vim_content["config"].get("sdn-controller"): - step = "Getting sdn-controller-id='{}' from db".format(vim_content["config"]["sdn-controller"]) - db_sdn = self.db.get_one("sdns", {"_id": vim_content["config"]["sdn-controller"]}) + if ( + db_vim.get("_admin") + and db_vim["_admin"].get("deployed") + and db_vim["_admin"]["deployed"].get("RO") + ): + if vim_content.get("config") and vim_content["config"].get( + "sdn-controller" + ): + step = "Getting sdn-controller-id='{}' from db".format( + vim_content["config"]["sdn-controller"] + ) + db_sdn = self.db.get_one( + "sdns", {"_id": vim_content["config"]["sdn-controller"]} + ) # If the VIM account has an associated SDN account, also # wait for any previous tasks in process for the SDN - await self.lcm_tasks.waitfor_related_HA('sdn', 'ANY', db_sdn["_id"]) + await self.lcm_tasks.waitfor_related_HA("sdn", "ANY", db_sdn["_id"]) - if db_sdn.get("_admin") and db_sdn["_admin"].get("deployed") and db_sdn["_admin"]["deployed"].get( - "RO"): + if ( + db_sdn.get("_admin") + and db_sdn["_admin"].get("deployed") + and db_sdn["_admin"]["deployed"].get("RO") + ): RO_sdn_id = db_sdn["_admin"]["deployed"]["RO"] else: - raise LcmException("sdn-controller={} is not available. Not deployed at RO".format( - vim_content["config"]["sdn-controller"])) + raise LcmException( + "sdn-controller={} is not available. 
Not deployed at RO".format( + vim_content["config"]["sdn-controller"] + ) + ) RO_vim_id = db_vim["_admin"]["deployed"]["RO"] step = "Editing vim at RO" @@ -246,19 +303,24 @@ class VimLcm(LcmBase): if "vim_password" in vim_content: vim_account_RO["vim_password"] = vim_content["vim_password"] if vim_content.get("vim_password"): - vim_account_RO["vim_password"] = self.db.decrypt(vim_content["vim_password"], - schema_version=schema_version, - salt=vim_id) + vim_account_RO["vim_password"] = self.db.decrypt( + vim_content["vim_password"], + schema_version=schema_version, + salt=vim_id, + ) if "config" in vim_content: vim_account_RO["config"] = vim_content["config"] if vim_content.get("config"): - vim_config_encrypted_keys = self.vim_config_encrypted.get(schema_version) or \ - self.vim_config_encrypted.get("default") + vim_config_encrypted_keys = self.vim_config_encrypted.get( + schema_version + ) or self.vim_config_encrypted.get("default") for p in vim_config_encrypted_keys: if vim_content["config"].get(p): - vim_account_RO["config"][p] = self.db.decrypt(vim_content["config"][p], - schema_version=schema_version, - salt=vim_id) + vim_account_RO["config"][p] = self.db.decrypt( + vim_content["config"][p], + schema_version=schema_version, + salt=vim_id, + ) if "vim_user" in vim_content: vim_content["vim_username"] = vim_content["vim_user"] @@ -267,8 +329,8 @@ class VimLcm(LcmBase): await RO.edit("vim_account", RO_vim_id, descriptor=vim_account_RO) db_vim_update["_admin.operationalState"] = "ENABLED" # Mark the VIM 'edit' HA task as successful - operation_state = 'COMPLETED' - operation_details = 'Done' + operation_state = "COMPLETED" + operation_details = "Done" self.logger.debug(logging_text + "Exit Ok RO_vim_id={}".format(RO_vim_id)) return @@ -277,23 +339,31 @@ class VimLcm(LcmBase): self.logger.error(logging_text + "Exit Exception {}".format(e)) exc = e except Exception as e: - self.logger.critical(logging_text + "Exit Exception {}".format(e), exc_info=True) + self.logger.critical( + logging_text + "Exit Exception {}".format(e), exc_info=True + ) exc = e finally: if exc and db_vim: db_vim_update["_admin.operationalState"] = "ERROR" - db_vim_update["_admin.detailed-status"] = "ERROR {}: {}".format(step, exc) + db_vim_update["_admin.detailed-status"] = "ERROR {}: {}".format( + step, exc + ) # Mark the VIM 'edit' HA task as erroneous - operation_state = 'FAILED' + operation_state = "FAILED" operation_details = "ERROR {}: {}".format(step, exc) try: if db_vim_update: self.update_db_2("vim_accounts", vim_id, db_vim_update) # Register the VIM 'edit' HA task either # succesful or erroneous, or do nothing (if legacy NBI) - self.lcm_tasks.unlock_HA('vim', 'edit', op_id, - operationState=operation_state, - detailed_status=operation_details) + self.lcm_tasks.unlock_HA( + "vim", + "edit", + op_id, + operationState=operation_state, + detailed_status=operation_details, + ) except DbException as e: self.logger.error(logging_text + "Cannot update database: {}".format(e)) @@ -304,8 +374,8 @@ class VimLcm(LcmBase): # HA tasks and backward compatibility: # If 'vim_content' does not include 'op_id', we a running a legacy NBI version. 
# In such a case, HA is not supported by NBI, and the HA check always returns True - op_id = vim_content.pop('op_id', None) - if not self.lcm_tasks.lock_HA('vim', 'delete', op_id): + op_id = vim_content.pop("op_id", None) + if not self.lcm_tasks.lock_HA("vim", "delete", op_id): return vim_id = vim_content["_id"] @@ -318,10 +388,14 @@ class VimLcm(LcmBase): step = "Getting vim from db" try: # wait for any previous tasks in process - await self.lcm_tasks.waitfor_related_HA('vim', 'delete', op_id) + await self.lcm_tasks.waitfor_related_HA("vim", "delete", op_id) if not self.ro_config.get("ng"): db_vim = self.db.get_one("vim_accounts", {"_id": vim_id}) - if db_vim.get("_admin") and db_vim["_admin"].get("deployed") and db_vim["_admin"]["deployed"].get("RO"): + if ( + db_vim.get("_admin") + and db_vim["_admin"].get("deployed") + and db_vim["_admin"]["deployed"].get("RO") + ): RO_vim_id = db_vim["_admin"]["deployed"]["RO"] RO = ROclient.ROClient(self.loop, **self.ro_config) step = "Detaching vim from RO tenant" @@ -329,7 +403,10 @@ class VimLcm(LcmBase): await RO.detach("vim_account", RO_vim_id) except ROclient.ROClientException as e: if e.http_code == 404: # not found - self.logger.debug(logging_text + "RO_vim_id={} already detached".format(RO_vim_id)) + self.logger.debug( + logging_text + + "RO_vim_id={} already detached".format(RO_vim_id) + ) else: raise @@ -338,7 +415,10 @@ class VimLcm(LcmBase): await RO.delete("vim", RO_vim_id) except ROclient.ROClientException as e: if e.http_code == 404: # not found - self.logger.debug(logging_text + "RO_vim_id={} already deleted".format(RO_vim_id)) + self.logger.debug( + logging_text + + "RO_vim_id={} already deleted".format(RO_vim_id) + ) else: raise else: @@ -353,19 +433,27 @@ class VimLcm(LcmBase): self.logger.error(logging_text + "Exit Exception {}".format(e)) exc = e except Exception as e: - self.logger.critical(logging_text + "Exit Exception {}".format(e), exc_info=True) + self.logger.critical( + logging_text + "Exit Exception {}".format(e), exc_info=True + ) exc = e finally: self.lcm_tasks.remove("vim_account", vim_id, order_id) if exc and db_vim: db_vim_update["_admin.operationalState"] = "ERROR" - db_vim_update["_admin.detailed-status"] = "ERROR {}: {}".format(step, exc) + db_vim_update["_admin.detailed-status"] = "ERROR {}: {}".format( + step, exc + ) # Mark the VIM 'delete' HA task as erroneous - operation_state = 'FAILED' + operation_state = "FAILED" operation_details = "ERROR {}: {}".format(step, exc) - self.lcm_tasks.unlock_HA('vim', 'delete', op_id, - operationState=operation_state, - detailed_status=operation_details) + self.lcm_tasks.unlock_HA( + "vim", + "delete", + op_id, + operationState=operation_state, + detailed_status=operation_details, + ) try: if db_vim and db_vim_update: self.update_db_2("vim_accounts", vim_id, db_vim_update) @@ -387,7 +475,7 @@ class WimLcm(LcmBase): :return: None """ - self.logger = logging.getLogger('lcm.vim') + self.logger = logging.getLogger("lcm.vim") self.loop = loop self.lcm_tasks = lcm_tasks self.ro_config = config["ro_config"] @@ -400,8 +488,8 @@ class WimLcm(LcmBase): # If 'wim_content' does not include 'op_id', we a running a legacy NBI version. # In such a case, HA is not supported by NBI, 'op_id' is None, and lock_HA() will do nothing. 
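# Illustrative sketch: handler status is persisted through partial updates; each
# method accumulates dotted paths such as "_admin.deployed.RO" in a plain dict and
# flushes them via update_db_2(), so only the touched leaves are rewritten. A small
# interpreter for that dotted-key convention (assumed semantics, for illustration):

def apply_dotted_update(doc, update_dict):
    """Apply {"a.b.c": value} updates in place, creating missing levels."""
    for dotted_key, value in update_dict.items():
        *parents, leaf = dotted_key.split(".")
        node = doc
        for part in parents:
            node = node.setdefault(part, {})
        node[leaf] = value
    return doc

# apply_dotted_update({}, {"_admin.deployed.RO": "uuid-1",
#                          "_admin.operationalState": "ENABLED"})
# -> {"_admin": {"deployed": {"RO": "uuid-1"}, "operationalState": "ENABLED"}}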
# Register 'create' task here for related future HA operations - op_id = wim_content.pop('op_id', None) - self.lcm_tasks.lock_HA('wim', 'create', op_id) + op_id = wim_content.pop("op_id", None) + self.lcm_tasks.lock_HA("wim", "create", op_id) wim_id = wim_content["_id"] logging_text = "Task wim_create={} ".format(wim_id) @@ -431,62 +519,82 @@ class WimLcm(LcmBase): desc = await RO.create("wim", descriptor=wim_RO) RO_wim_id = desc["uuid"] db_wim_update["_admin.deployed.RO"] = RO_wim_id - self.logger.debug(logging_text + "WIM created at RO_wim_id={}".format(RO_wim_id)) + self.logger.debug( + logging_text + "WIM created at RO_wim_id={}".format(RO_wim_id) + ) step = "Creating wim_account at RO" db_wim_update["_admin.detailed-status"] = step self.update_db_2("wim_accounts", wim_id, db_wim_update) if wim_content.get("wim_password"): - wim_content["wim_password"] = self.db.decrypt(wim_content["wim_password"], - schema_version=schema_version, - salt=wim_id) - wim_account_RO = {"name": wim_content["name"], - "user": wim_content["user"], - "password": wim_content["password"] - } + wim_content["wim_password"] = self.db.decrypt( + wim_content["wim_password"], + schema_version=schema_version, + salt=wim_id, + ) + wim_account_RO = { + "name": wim_content["name"], + "user": wim_content["user"], + "password": wim_content["password"], + } if wim_RO.get("config"): wim_account_RO["config"] = wim_RO["config"] if "wim_port_mapping" in wim_account_RO["config"]: del wim_account_RO["config"]["wim_port_mapping"] for p in self.wim_config_encrypted: if wim_account_RO["config"].get(p): - wim_account_RO["config"][p] = self.db.decrypt(wim_account_RO["config"][p], - schema_version=schema_version, - salt=wim_id) + wim_account_RO["config"][p] = self.db.decrypt( + wim_account_RO["config"][p], + schema_version=schema_version, + salt=wim_id, + ) desc = await RO.attach("wim_account", RO_wim_id, descriptor=wim_account_RO) db_wim_update["_admin.deployed.RO-account"] = desc["uuid"] db_wim_update["_admin.operationalState"] = "ENABLED" db_wim_update["_admin.detailed-status"] = "Done" # Mark the WIM 'create' HA task as successful - operation_state = 'COMPLETED' - operation_details = 'Done' + operation_state = "COMPLETED" + operation_details = "Done" - self.logger.debug(logging_text + "Exit Ok WIM account created at RO_wim_account_id={}".format(desc["uuid"])) + self.logger.debug( + logging_text + + "Exit Ok WIM account created at RO_wim_account_id={}".format( + desc["uuid"] + ) + ) return except (ROclient.ROClientException, DbException, asyncio.CancelledError) as e: self.logger.error(logging_text + "Exit Exception {}".format(e)) exc = e except Exception as e: - self.logger.critical(logging_text + "Exit Exception {}".format(e), exc_info=True) + self.logger.critical( + logging_text + "Exit Exception {}".format(e), exc_info=True + ) exc = e finally: if exc and db_wim: db_wim_update["_admin.operationalState"] = "ERROR" - db_wim_update["_admin.detailed-status"] = "ERROR {}: {}".format(step, exc) + db_wim_update["_admin.detailed-status"] = "ERROR {}: {}".format( + step, exc + ) # Mark the WIM 'create' HA task as erroneous - operation_state = 'FAILED' + operation_state = "FAILED" operation_details = "ERROR {}: {}".format(step, exc) try: if db_wim_update: self.update_db_2("wim_accounts", wim_id, db_wim_update) # Register the WIM 'create' HA task either # succesful or erroneous, or do nothing (if legacy NBI) - self.lcm_tasks.unlock_HA('wim', 'create', op_id, - operationState=operation_state, - detailed_status=operation_details) + 
self.lcm_tasks.unlock_HA( + "wim", + "create", + op_id, + operationState=operation_state, + detailed_status=operation_details, + ) except DbException as e: self.logger.error(logging_text + "Cannot update database: {}".format(e)) self.lcm_tasks.remove("wim_account", wim_id, order_id) @@ -496,8 +604,8 @@ class WimLcm(LcmBase): # HA tasks and backward compatibility: # If 'wim_content' does not include 'op_id', we a running a legacy NBI version. # In such a case, HA is not supported by NBI, and the HA check always returns True - op_id = wim_content.pop('op_id', None) - if not self.lcm_tasks.lock_HA('wim', 'edit', op_id): + op_id = wim_content.pop("op_id", None) + if not self.lcm_tasks.lock_HA("wim", "edit", op_id): return wim_id = wim_content["_id"] @@ -511,11 +619,15 @@ class WimLcm(LcmBase): step = "Getting wim-id='{}' from db".format(wim_id) try: # wait for any previous tasks in process - await self.lcm_tasks.waitfor_related_HA('wim', 'edit', op_id) + await self.lcm_tasks.waitfor_related_HA("wim", "edit", op_id) db_wim = self.db.get_one("wim_accounts", {"_id": wim_id}) - if db_wim.get("_admin") and db_wim["_admin"].get("deployed") and db_wim["_admin"]["deployed"].get("RO"): + if ( + db_wim.get("_admin") + and db_wim["_admin"].get("deployed") + and db_wim["_admin"]["deployed"].get("RO") + ): RO_wim_id = db_wim["_admin"]["deployed"]["RO"] step = "Editing wim at RO" @@ -546,17 +658,21 @@ class WimLcm(LcmBase): if "wim_password" in wim_content: wim_account_RO["wim_password"] = wim_content["wim_password"] if wim_content.get("wim_password"): - wim_account_RO["wim_password"] = self.db.decrypt(wim_content["wim_password"], - schema_version=schema_version, - salt=wim_id) + wim_account_RO["wim_password"] = self.db.decrypt( + wim_content["wim_password"], + schema_version=schema_version, + salt=wim_id, + ) if "config" in wim_content: wim_account_RO["config"] = wim_content["config"] if wim_content.get("config"): for p in self.wim_config_encrypted: if wim_content["config"].get(p): - wim_account_RO["config"][p] = self.db.decrypt(wim_content["config"][p], - schema_version=schema_version, - salt=wim_id) + wim_account_RO["config"][p] = self.db.decrypt( + wim_content["config"][p], + schema_version=schema_version, + salt=wim_id, + ) if "wim_user" in wim_content: wim_content["wim_username"] = wim_content["wim_user"] @@ -565,8 +681,8 @@ class WimLcm(LcmBase): await RO.edit("wim_account", RO_wim_id, descriptor=wim_account_RO) db_wim_update["_admin.operationalState"] = "ENABLED" # Mark the WIM 'edit' HA task as successful - operation_state = 'COMPLETED' - operation_details = 'Done' + operation_state = "COMPLETED" + operation_details = "Done" self.logger.debug(logging_text + "Exit Ok RO_wim_id={}".format(RO_wim_id)) return @@ -575,23 +691,31 @@ class WimLcm(LcmBase): self.logger.error(logging_text + "Exit Exception {}".format(e)) exc = e except Exception as e: - self.logger.critical(logging_text + "Exit Exception {}".format(e), exc_info=True) + self.logger.critical( + logging_text + "Exit Exception {}".format(e), exc_info=True + ) exc = e finally: if exc and db_wim: db_wim_update["_admin.operationalState"] = "ERROR" - db_wim_update["_admin.detailed-status"] = "ERROR {}: {}".format(step, exc) + db_wim_update["_admin.detailed-status"] = "ERROR {}: {}".format( + step, exc + ) # Mark the WIM 'edit' HA task as erroneous - operation_state = 'FAILED' + operation_state = "FAILED" operation_details = "ERROR {}: {}".format(step, exc) try: if db_wim_update: self.update_db_2("wim_accounts", wim_id, db_wim_update) # Register the 
WIM 'edit' HA task either # succesful or erroneous, or do nothing (if legacy NBI) - self.lcm_tasks.unlock_HA('wim', 'edit', op_id, - operationState=operation_state, - detailed_status=operation_details) + self.lcm_tasks.unlock_HA( + "wim", + "edit", + op_id, + operationState=operation_state, + detailed_status=operation_details, + ) except DbException as e: self.logger.error(logging_text + "Cannot update database: {}".format(e)) self.lcm_tasks.remove("wim_account", wim_id, order_id) @@ -601,8 +725,8 @@ class WimLcm(LcmBase): # HA tasks and backward compatibility: # If 'vim_content' does not include 'op_id', we a running a legacy NBI version. # In such a case, HA is not supported by NBI, and the HA check always returns True - op_id = wim_content.pop('op_id', None) - if not self.lcm_tasks.lock_HA('wim', 'delete', op_id): + op_id = wim_content.pop("op_id", None) + if not self.lcm_tasks.lock_HA("wim", "delete", op_id): return wim_id = wim_content["_id"] @@ -615,10 +739,14 @@ class WimLcm(LcmBase): step = "Getting wim from db" try: # wait for any previous tasks in process - await self.lcm_tasks.waitfor_related_HA('wim', 'delete', op_id) + await self.lcm_tasks.waitfor_related_HA("wim", "delete", op_id) db_wim = self.db.get_one("wim_accounts", {"_id": wim_id}) - if db_wim.get("_admin") and db_wim["_admin"].get("deployed") and db_wim["_admin"]["deployed"].get("RO"): + if ( + db_wim.get("_admin") + and db_wim["_admin"].get("deployed") + and db_wim["_admin"]["deployed"].get("RO") + ): RO_wim_id = db_wim["_admin"]["deployed"]["RO"] RO = ROclient.ROClient(self.loop, **self.ro_config) step = "Detaching wim from RO tenant" @@ -626,7 +754,10 @@ class WimLcm(LcmBase): await RO.detach("wim_account", RO_wim_id) except ROclient.ROClientException as e: if e.http_code == 404: # not found - self.logger.debug(logging_text + "RO_wim_id={} already detached".format(RO_wim_id)) + self.logger.debug( + logging_text + + "RO_wim_id={} already detached".format(RO_wim_id) + ) else: raise @@ -635,7 +766,10 @@ class WimLcm(LcmBase): await RO.delete("wim", RO_wim_id) except ROclient.ROClientException as e: if e.http_code == 404: # not found - self.logger.debug(logging_text + "RO_wim_id={} already deleted".format(RO_wim_id)) + self.logger.debug( + logging_text + + "RO_wim_id={} already deleted".format(RO_wim_id) + ) else: raise else: @@ -650,19 +784,27 @@ class WimLcm(LcmBase): self.logger.error(logging_text + "Exit Exception {}".format(e)) exc = e except Exception as e: - self.logger.critical(logging_text + "Exit Exception {}".format(e), exc_info=True) + self.logger.critical( + logging_text + "Exit Exception {}".format(e), exc_info=True + ) exc = e finally: self.lcm_tasks.remove("wim_account", wim_id, order_id) if exc and db_wim: db_wim_update["_admin.operationalState"] = "ERROR" - db_wim_update["_admin.detailed-status"] = "ERROR {}: {}".format(step, exc) + db_wim_update["_admin.detailed-status"] = "ERROR {}: {}".format( + step, exc + ) # Mark the WIM 'delete' HA task as erroneous - operation_state = 'FAILED' + operation_state = "FAILED" operation_details = "ERROR {}: {}".format(step, exc) - self.lcm_tasks.unlock_HA('wim', 'delete', op_id, - operationState=operation_state, - detailed_status=operation_details) + self.lcm_tasks.unlock_HA( + "wim", + "delete", + op_id, + operationState=operation_state, + detailed_status=operation_details, + ) try: if db_wim and db_wim_update: self.update_db_2("wim_accounts", wim_id, db_wim_update) @@ -674,7 +816,6 @@ class WimLcm(LcmBase): class SdnLcm(LcmBase): - def __init__(self, msg, 
lcm_tasks, config, loop): """ Init, Connect to database, filesystem storage, and messaging @@ -682,7 +823,7 @@ class SdnLcm(LcmBase): :return: None """ - self.logger = logging.getLogger('lcm.sdn') + self.logger = logging.getLogger("lcm.sdn") self.loop = loop self.lcm_tasks = lcm_tasks self.ro_config = config["ro_config"] @@ -695,8 +836,8 @@ class SdnLcm(LcmBase): # If 'sdn_content' does not include 'op_id', we a running a legacy NBI version. # In such a case, HA is not supported by NBI, 'op_id' is None, and lock_HA() will do nothing. # Register 'create' task here for related future HA operations - op_id = sdn_content.pop('op_id', None) - self.lcm_tasks.lock_HA('sdn', 'create', op_id) + op_id = sdn_content.pop("op_id", None) + self.lcm_tasks.lock_HA("sdn", "create", op_id) sdn_id = sdn_content["_id"] logging_text = "Task sdn_create={} ".format(sdn_id) @@ -723,7 +864,9 @@ class SdnLcm(LcmBase): sdn_RO.pop("schema_type", None) sdn_RO.pop("description", None) if sdn_RO.get("password"): - sdn_RO["password"] = self.db.decrypt(sdn_RO["password"], schema_version=schema_version, salt=sdn_id) + sdn_RO["password"] = self.db.decrypt( + sdn_RO["password"], schema_version=schema_version, salt=sdn_id + ) desc = await RO.create("sdn", descriptor=sdn_RO) RO_sdn_id = desc["uuid"] @@ -731,31 +874,39 @@ class SdnLcm(LcmBase): db_sdn_update["_admin.operationalState"] = "ENABLED" self.logger.debug(logging_text + "Exit Ok RO_sdn_id={}".format(RO_sdn_id)) # Mark the SDN 'create' HA task as successful - operation_state = 'COMPLETED' - operation_details = 'Done' + operation_state = "COMPLETED" + operation_details = "Done" return except (ROclient.ROClientException, DbException, asyncio.CancelledError) as e: self.logger.error(logging_text + "Exit Exception {}".format(e)) exc = e except Exception as e: - self.logger.critical(logging_text + "Exit Exception {}".format(e), exc_info=True) + self.logger.critical( + logging_text + "Exit Exception {}".format(e), exc_info=True + ) exc = e finally: if exc and db_sdn: db_sdn_update["_admin.operationalState"] = "ERROR" - db_sdn_update["_admin.detailed-status"] = "ERROR {}: {}".format(step, exc) + db_sdn_update["_admin.detailed-status"] = "ERROR {}: {}".format( + step, exc + ) # Mark the SDN 'create' HA task as erroneous - operation_state = 'FAILED' + operation_state = "FAILED" operation_details = "ERROR {}: {}".format(step, exc) try: if db_sdn and db_sdn_update: self.update_db_2("sdns", sdn_id, db_sdn_update) # Register the SDN 'create' HA task either # succesful or erroneous, or do nothing (if legacy NBI) - self.lcm_tasks.unlock_HA('sdn', 'create', op_id, - operationState=operation_state, - detailed_status=operation_details) + self.lcm_tasks.unlock_HA( + "sdn", + "create", + op_id, + operationState=operation_state, + detailed_status=operation_details, + ) except DbException as e: self.logger.error(logging_text + "Cannot update database: {}".format(e)) self.lcm_tasks.remove("sdn", sdn_id, order_id) @@ -765,8 +916,8 @@ class SdnLcm(LcmBase): # HA tasks and backward compatibility: # If 'sdn_content' does not include 'op_id', we a running a legacy NBI version. 
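# Illustrative sketch: before calling RO, each handler derives an RO-facing
# descriptor from the NBI document: copy it, strip OSM-internal bookkeeping fields,
# and decrypt secrets using the document id as salt. A representative version (the
# stripped-field list and the "1.1" default are assumptions for this sketch;
# _decrypt stands in for self.db.decrypt):

def prepare_sdn_for_ro(sdn_content, _decrypt):
    sdn_ro = dict(sdn_content)                # never mutate the DB document
    schema_version = sdn_ro.pop("schema_version", "1.1")
    for internal in ("_id", "_admin", "schema_type", "description"):
        sdn_ro.pop(internal, None)            # RO must not see these fields
    if sdn_ro.get("password"):
        sdn_ro["password"] = _decrypt(sdn_ro["password"],
                                      schema_version=schema_version,
                                      salt=sdn_content["_id"])
    return sdn_ro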
# In such a case, HA is not supported by NBI, and the HA check always returns True - op_id = sdn_content.pop('op_id', None) - if not self.lcm_tasks.lock_HA('sdn', 'edit', op_id): + op_id = sdn_content.pop("op_id", None) + if not self.lcm_tasks.lock_HA("sdn", "edit", op_id): return sdn_id = sdn_content["_id"] @@ -779,11 +930,15 @@ class SdnLcm(LcmBase): step = "Getting sdn from db" try: # wait for any previous tasks in process - await self.lcm_tasks.waitfor_related_HA('sdn', 'edit', op_id) + await self.lcm_tasks.waitfor_related_HA("sdn", "edit", op_id) db_sdn = self.db.get_one("sdns", {"_id": sdn_id}) RO_sdn_id = None - if db_sdn.get("_admin") and db_sdn["_admin"].get("deployed") and db_sdn["_admin"]["deployed"].get("RO"): + if ( + db_sdn.get("_admin") + and db_sdn["_admin"].get("deployed") + and db_sdn["_admin"]["deployed"].get("RO") + ): RO_sdn_id = db_sdn["_admin"]["deployed"]["RO"] RO = ROclient.ROClient(self.loop, **self.ro_config) step = "Editing sdn at RO" @@ -794,13 +949,15 @@ class SdnLcm(LcmBase): sdn_RO.pop("schema_type", None) sdn_RO.pop("description", None) if sdn_RO.get("password"): - sdn_RO["password"] = self.db.decrypt(sdn_RO["password"], schema_version=schema_version, salt=sdn_id) + sdn_RO["password"] = self.db.decrypt( + sdn_RO["password"], schema_version=schema_version, salt=sdn_id + ) if sdn_RO: await RO.edit("sdn", RO_sdn_id, descriptor=sdn_RO) db_sdn_update["_admin.operationalState"] = "ENABLED" # Mark the SDN 'edit' HA task as successful - operation_state = 'COMPLETED' - operation_details = 'Done' + operation_state = "COMPLETED" + operation_details = "Done" self.logger.debug(logging_text + "Exit Ok RO_sdn_id={}".format(RO_sdn_id)) return @@ -809,23 +966,29 @@ class SdnLcm(LcmBase): self.logger.error(logging_text + "Exit Exception {}".format(e)) exc = e except Exception as e: - self.logger.critical(logging_text + "Exit Exception {}".format(e), exc_info=True) + self.logger.critical( + logging_text + "Exit Exception {}".format(e), exc_info=True + ) exc = e finally: if exc and db_sdn: db_sdn["_admin.operationalState"] = "ERROR" db_sdn["_admin.detailed-status"] = "ERROR {}: {}".format(step, exc) # Mark the SDN 'edit' HA task as erroneous - operation_state = 'FAILED' + operation_state = "FAILED" operation_details = "ERROR {}: {}".format(step, exc) try: if db_sdn_update: self.update_db_2("sdns", sdn_id, db_sdn_update) # Register the SDN 'edit' HA task either # succesful or erroneous, or do nothing (if legacy NBI) - self.lcm_tasks.unlock_HA('sdn', 'edit', op_id, - operationState=operation_state, - detailed_status=operation_details) + self.lcm_tasks.unlock_HA( + "sdn", + "edit", + op_id, + operationState=operation_state, + detailed_status=operation_details, + ) except DbException as e: self.logger.error(logging_text + "Cannot update database: {}".format(e)) self.lcm_tasks.remove("sdn", sdn_id, order_id) @@ -835,8 +998,8 @@ class SdnLcm(LcmBase): # HA tasks and backward compatibility: # If 'vim_content' does not include 'op_id', we a running a legacy NBI version. 
# In such a case, HA is not supported by NBI, and the HA check always returns True - op_id = sdn_content.pop('op_id', None) - if not self.lcm_tasks.lock_HA('sdn', 'delete', op_id): + op_id = sdn_content.pop("op_id", None) + if not self.lcm_tasks.lock_HA("sdn", "delete", op_id): return sdn_id = sdn_content["_id"] @@ -849,10 +1012,14 @@ class SdnLcm(LcmBase): step = "Getting sdn from db" try: # wait for any previous tasks in process - await self.lcm_tasks.waitfor_related_HA('sdn', 'delete', op_id) + await self.lcm_tasks.waitfor_related_HA("sdn", "delete", op_id) db_sdn = self.db.get_one("sdns", {"_id": sdn_id}) - if db_sdn.get("_admin") and db_sdn["_admin"].get("deployed") and db_sdn["_admin"]["deployed"].get("RO"): + if ( + db_sdn.get("_admin") + and db_sdn["_admin"].get("deployed") + and db_sdn["_admin"]["deployed"].get("RO") + ): RO_sdn_id = db_sdn["_admin"]["deployed"]["RO"] RO = ROclient.ROClient(self.loop, **self.ro_config) step = "Deleting sdn from RO" @@ -860,12 +1027,17 @@ class SdnLcm(LcmBase): await RO.delete("sdn", RO_sdn_id) except ROclient.ROClientException as e: if e.http_code == 404: # not found - self.logger.debug(logging_text + "RO_sdn_id={} already deleted".format(RO_sdn_id)) + self.logger.debug( + logging_text + + "RO_sdn_id={} already deleted".format(RO_sdn_id) + ) else: raise else: # nothing to delete - self.logger.error(logging_text + "Skipping. There is not RO information at database") + self.logger.error( + logging_text + "Skipping. There is not RO information at database" + ) self.db.del_one("sdns", {"_id": sdn_id}) db_sdn = None self.logger.debug("sdn_delete task sdn_id={} Exit Ok".format(sdn_id)) @@ -875,18 +1047,24 @@ class SdnLcm(LcmBase): self.logger.error(logging_text + "Exit Exception {}".format(e)) exc = e except Exception as e: - self.logger.critical(logging_text + "Exit Exception {}".format(e), exc_info=True) + self.logger.critical( + logging_text + "Exit Exception {}".format(e), exc_info=True + ) exc = e finally: if exc and db_sdn: db_sdn["_admin.operationalState"] = "ERROR" db_sdn["_admin.detailed-status"] = "ERROR {}: {}".format(step, exc) # Mark the SDN 'delete' HA task as erroneous - operation_state = 'FAILED' + operation_state = "FAILED" operation_details = "ERROR {}: {}".format(step, exc) - self.lcm_tasks.unlock_HA('sdn', 'delete', op_id, - operationState=operation_state, - detailed_status=operation_details) + self.lcm_tasks.unlock_HA( + "sdn", + "delete", + op_id, + operationState=operation_state, + detailed_status=operation_details, + ) try: if db_sdn and db_sdn_update: self.update_db_2("sdns", sdn_id, db_sdn_update) @@ -907,7 +1085,7 @@ class K8sClusterLcm(LcmBase): :return: None """ - self.logger = logging.getLogger('lcm.k8scluster') + self.logger = logging.getLogger("lcm.k8scluster") self.loop = loop self.lcm_tasks = lcm_tasks self.vca_config = config["VCA"] @@ -920,7 +1098,7 @@ class K8sClusterLcm(LcmBase): log=self.logger, on_update_db=None, db=self.db, - fs=self.fs + fs=self.fs, ) self.helm3_k8scluster = K8sHelm3Connector( @@ -929,7 +1107,7 @@ class K8sClusterLcm(LcmBase): fs=self.fs, log=self.logger, db=self.db, - on_update_db=None + on_update_db=None, ) self.juju_k8scluster = K8sJujuConnector( @@ -939,7 +1117,7 @@ class K8sClusterLcm(LcmBase): loop=self.loop, on_update_db=None, db=self.db, - fs=self.fs + fs=self.fs, ) self.k8s_map = { @@ -950,8 +1128,8 @@ class K8sClusterLcm(LcmBase): async def create(self, k8scluster_content, order_id): - op_id = k8scluster_content.pop('op_id', None) - if not self.lcm_tasks.lock_HA('k8scluster', 'create', 
op_id): + op_id = k8scluster_content.pop("op_id", None) + if not self.lcm_tasks.lock_HA("k8scluster", "create", op_id): return k8scluster_id = k8scluster_content["_id"] @@ -965,8 +1143,13 @@ class K8sClusterLcm(LcmBase): step = "Getting k8scluster-id='{}' from db".format(k8scluster_id) self.logger.debug(logging_text + step) db_k8scluster = self.db.get_one("k8sclusters", {"_id": k8scluster_id}) - self.db.encrypt_decrypt_fields(db_k8scluster.get("credentials"), 'decrypt', ['password', 'secret'], - schema_version=db_k8scluster["schema_version"], salt=db_k8scluster["_id"]) + self.db.encrypt_decrypt_fields( + db_k8scluster.get("credentials"), + "decrypt", + ["password", "secret"], + schema_version=db_k8scluster["schema_version"], + salt=db_k8scluster["_id"], + ) k8s_credentials = yaml.safe_dump(db_k8scluster.get("credentials")) pending_tasks = [] task2name = {} @@ -991,10 +1174,13 @@ class K8sClusterLcm(LcmBase): now = time() while pending_tasks: - _timeout = max(1, self.timeout_create - (time() - now)) # ensure not negative with max + _timeout = max( + 1, self.timeout_create - (time() - now) + ) # ensure not negative with max step = "Waiting for k8scluster init tasks" - done, pending_tasks = await asyncio.wait(pending_tasks, timeout=_timeout, - return_when=asyncio.FIRST_COMPLETED) + done, pending_tasks = await asyncio.wait( + pending_tasks, timeout=_timeout, return_when=asyncio.FIRST_COMPLETED + ) if not done: # timeout. Set timeout is reached and process pending as if they hase been finished done = pending_tasks @@ -1010,28 +1196,48 @@ class K8sClusterLcm(LcmBase): exc = task.exception() if exc: - error_text_list.append("Failing init {}: {}".format(task_name, exc)) - db_k8scluster_update["_admin.{}.error_msg".format(task_name)] = str(exc) + error_text_list.append( + "Failing init {}: {}".format(task_name, exc) + ) + db_k8scluster_update[ + "_admin.{}.error_msg".format(task_name) + ] = str(exc) db_k8scluster_update["_admin.{}.id".format(task_name)] = None - db_k8scluster_update["_admin.{}.operationalState".format(task_name)] = "ERROR" - self.logger.error(logging_text + "{} init fail: {}".format(task_name, exc), - exc_info=not isinstance(exc, (N2VCException, str))) + db_k8scluster_update[ + "_admin.{}.operationalState".format(task_name) + ] = "ERROR" + self.logger.error( + logging_text + "{} init fail: {}".format(task_name, exc), + exc_info=not isinstance(exc, (N2VCException, str)), + ) else: k8s_id, uninstall_sw = task.result() tasks_name_ok.append(task_name) - self.logger.debug(logging_text + "{} init success. id={} created={}".format( - task_name, k8s_id, uninstall_sw)) - db_k8scluster_update["_admin.{}.error_msg".format(task_name)] = None + self.logger.debug( + logging_text + + "{} init success. 
id={} created={}".format( + task_name, k8s_id, uninstall_sw + ) + ) + db_k8scluster_update[ + "_admin.{}.error_msg".format(task_name) + ] = None db_k8scluster_update["_admin.{}.id".format(task_name)] = k8s_id - db_k8scluster_update["_admin.{}.created".format(task_name)] = uninstall_sw - db_k8scluster_update["_admin.{}.operationalState".format(task_name)] = "ENABLED" + db_k8scluster_update[ + "_admin.{}.created".format(task_name) + ] = uninstall_sw + db_k8scluster_update[ + "_admin.{}.operationalState".format(task_name) + ] = "ENABLED" # update database step = "Updating database for " + task_name self.update_db_2("k8sclusters", k8scluster_id, db_k8scluster_update) if tasks_name_ok: operation_details = "ready for " + ", ".join(tasks_name_ok) operation_state = "COMPLETED" - db_k8scluster_update["_admin.operationalState"] = "ENABLED" if not error_text_list else "DEGRADED" + db_k8scluster_update["_admin.operationalState"] = ( + "ENABLED" if not error_text_list else "DEGRADED" + ) operation_details += "; " + ";".join(error_text_list) else: db_k8scluster_update["_admin.operationalState"] = "ERROR" @@ -1042,25 +1248,42 @@ class K8sClusterLcm(LcmBase): exc = None except Exception as e: - if isinstance(e, (LcmException, DbException, K8sException, N2VCException, asyncio.CancelledError)): + if isinstance( + e, + ( + LcmException, + DbException, + K8sException, + N2VCException, + asyncio.CancelledError, + ), + ): self.logger.error(logging_text + "Exit Exception {}".format(e)) else: - self.logger.critical(logging_text + "Exit Exception {}".format(e), exc_info=True) + self.logger.critical( + logging_text + "Exit Exception {}".format(e), exc_info=True + ) exc = e finally: if exc and db_k8scluster: db_k8scluster_update["_admin.operationalState"] = "ERROR" - db_k8scluster_update["_admin.detailed-status"] = "ERROR {}: {}".format(step, exc) - operation_state = 'FAILED' + db_k8scluster_update["_admin.detailed-status"] = "ERROR {}: {}".format( + step, exc + ) + operation_state = "FAILED" operation_details = "ERROR {}: {}".format(step, exc) try: if db_k8scluster and db_k8scluster_update: self.update_db_2("k8sclusters", k8scluster_id, db_k8scluster_update) # Register the operation and unlock - self.lcm_tasks.unlock_HA('k8scluster', 'create', op_id, - operationState=operation_state, - detailed_status=operation_details) + self.lcm_tasks.unlock_HA( + "k8scluster", + "create", + op_id, + operationState=operation_state, + detailed_status=operation_details, + ) except DbException as e: self.logger.error(logging_text + "Cannot update database: {}".format(e)) self.lcm_tasks.remove("k8scluster", k8scluster_id, order_id) @@ -1071,8 +1294,8 @@ class K8sClusterLcm(LcmBase): # If 'vim_content' does not include 'op_id', we a running a legacy NBI version. # In such a case, HA is not supported by NBI, 'op_id' is None, and lock_HA() will do nothing. 
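# Illustrative sketch: the cluster-init fan-out a few hunks above drains one task
# per connector under a single overall deadline; every asyncio.wait() round
# recomputes the remaining budget, and max(1, ...) keeps the timeout positive even
# once the deadline has passed, so the loop always gets a final collection pass.
# Self-contained version of that loop:

import asyncio
from time import time

async def drain_with_deadline(pending, total_timeout):
    done_all, start = [], time()
    while pending:
        _timeout = max(1, total_timeout - (time() - start))  # never zero/negative
        done, pending = await asyncio.wait(
            pending, timeout=_timeout, return_when=asyncio.FIRST_COMPLETED
        )
        if not done:            # deadline reached: process leftovers as finished
            done, pending = pending, set()
        done_all.extend(done)
    return done_all

# async def main():
#     tasks = {asyncio.create_task(asyncio.sleep(0.1))}
#     print(await drain_with_deadline(tasks, total_timeout=5))
# asyncio.run(main())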
# Register 'delete' task here for related future HA operations - op_id = k8scluster_content.pop('op_id', None) - if not self.lcm_tasks.lock_HA('k8scluster', 'delete', op_id): + op_id = k8scluster_content.pop("op_id", None) + if not self.lcm_tasks.lock_HA("k8scluster", "delete", op_id): return k8scluster_id = k8scluster_content["_id"] @@ -1093,7 +1316,10 @@ class K8sClusterLcm(LcmBase): cluster_removed = True if k8s_jb_id: # delete in reverse order of creation step = "Removing juju-bundle '{}'".format(k8s_jb_id) - uninstall_sw = deep_get(db_k8scluster, ("_admin", "juju-bundle", "created")) or False + uninstall_sw = ( + deep_get(db_k8scluster, ("_admin", "juju-bundle", "created")) + or False + ) cluster_removed = await self.juju_k8scluster.reset( cluster_uuid=k8s_jb_id, uninstall_sw=uninstall_sw, @@ -1104,28 +1330,46 @@ class K8sClusterLcm(LcmBase): if k8s_hc_id: step = "Removing helm-chart '{}'".format(k8s_hc_id) - uninstall_sw = deep_get(db_k8scluster, ("_admin", "helm-chart", "created")) or False - cluster_removed = await self.helm2_k8scluster.reset(cluster_uuid=k8s_hc_id, uninstall_sw=uninstall_sw) + uninstall_sw = ( + deep_get(db_k8scluster, ("_admin", "helm-chart", "created")) + or False + ) + cluster_removed = await self.helm2_k8scluster.reset( + cluster_uuid=k8s_hc_id, uninstall_sw=uninstall_sw + ) db_k8scluster_update["_admin.helm-chart.id"] = None db_k8scluster_update["_admin.helm-chart.operationalState"] = "DISABLED" if k8s_h3c_id: step = "Removing helm-chart-v3 '{}'".format(k8s_hc_id) - uninstall_sw = deep_get(db_k8scluster, ("_admin", "helm-chart-v3", "created")) or False - cluster_removed = await self.helm3_k8scluster.reset(cluster_uuid=k8s_h3c_id, uninstall_sw=uninstall_sw) + uninstall_sw = ( + deep_get(db_k8scluster, ("_admin", "helm-chart-v3", "created")) + or False + ) + cluster_removed = await self.helm3_k8scluster.reset( + cluster_uuid=k8s_h3c_id, uninstall_sw=uninstall_sw + ) db_k8scluster_update["_admin.helm-chart-v3.id"] = None - db_k8scluster_update["_admin.helm-chart-v3.operationalState"] = "DISABLED" + db_k8scluster_update[ + "_admin.helm-chart-v3.operationalState" + ] = "DISABLED" # Try to remove from cluster_inserted to clean old versions if k8s_hc_id and cluster_removed: step = "Removing k8scluster='{}' from k8srepos".format(k8scluster_id) self.logger.debug(logging_text + step) - db_k8srepo_list = self.db.get_list("k8srepos", {"_admin.cluster-inserted": k8s_hc_id}) + db_k8srepo_list = self.db.get_list( + "k8srepos", {"_admin.cluster-inserted": k8s_hc_id} + ) for k8srepo in db_k8srepo_list: try: cluster_list = k8srepo["_admin"]["cluster-inserted"] cluster_list.remove(k8s_hc_id) - self.update_db_2("k8srepos", k8srepo["_id"], {"_admin.cluster-inserted": cluster_list}) + self.update_db_2( + "k8srepos", + k8srepo["_id"], + {"_admin.cluster-inserted": cluster_list}, + ) except Exception as e: self.logger.error("{}: {}".format(step, e)) self.db.del_one("k8sclusters", {"_id": k8scluster_id}) @@ -1133,20 +1377,33 @@ class K8sClusterLcm(LcmBase): self.logger.debug(logging_text + "Done") except Exception as e: - if isinstance(e, (LcmException, DbException, K8sException, N2VCException, asyncio.CancelledError)): + if isinstance( + e, + ( + LcmException, + DbException, + K8sException, + N2VCException, + asyncio.CancelledError, + ), + ): self.logger.error(logging_text + "Exit Exception {}".format(e)) else: - self.logger.critical(logging_text + "Exit Exception {}".format(e), exc_info=True) + self.logger.critical( + logging_text + "Exit Exception {}".format(e), exc_info=True + ) 
exc = e finally: if exc and db_k8scluster: db_k8scluster_update["_admin.operationalState"] = "ERROR" - db_k8scluster_update["_admin.detailed-status"] = "ERROR {}: {}".format(step, exc) + db_k8scluster_update["_admin.detailed-status"] = "ERROR {}: {}".format( + step, exc + ) # Mark the WIM 'create' HA task as erroneous - operation_state = 'FAILED' + operation_state = "FAILED" operation_details = "ERROR {}: {}".format(step, exc) else: - operation_state = 'COMPLETED' + operation_state = "COMPLETED" operation_details = "deleted" try: @@ -1154,9 +1411,13 @@ class K8sClusterLcm(LcmBase): self.update_db_2("k8sclusters", k8scluster_id, db_k8scluster_update) # Register the K8scluster 'delete' HA task either # succesful or erroneous, or do nothing (if legacy NBI) - self.lcm_tasks.unlock_HA('k8scluster', 'delete', op_id, - operationState=operation_state, - detailed_status=operation_details) + self.lcm_tasks.unlock_HA( + "k8scluster", + "delete", + op_id, + operationState=operation_state, + detailed_status=operation_details, + ) except DbException as e: self.logger.error(logging_text + "Cannot update database: {}".format(e)) self.lcm_tasks.remove("k8scluster", k8scluster_id, order_id) @@ -1180,10 +1441,7 @@ class VcaLcm(LcmBase): # create N2VC connector self.n2vc = N2VCJujuConnector( - log=self.logger, - loop=self.loop, - fs=self.fs, - db=self.db + log=self.logger, loop=self.loop, fs=self.fs, db=self.db ) def _get_vca_by_id(self, vca_id: str) -> dict: @@ -1192,7 +1450,8 @@ class VcaLcm(LcmBase): db_vca, "decrypt", ["secret", "cacert"], - schema_version=db_vca["schema_version"], salt=db_vca["_id"] + schema_version=db_vca["schema_version"], + salt=db_vca["_id"], ) return db_vca @@ -1208,7 +1467,9 @@ class VcaLcm(LcmBase): db_vca_update = {} try: - self.logger.debug("Task vca_create={} {}".format(vca_id, "Getting vca from db")) + self.logger.debug( + "Task vca_create={} {}".format(vca_id, "Getting vca from db") + ) db_vca = self._get_vca_by_id(vca_id) task = asyncio.ensure_future( @@ -1221,13 +1482,21 @@ class VcaLcm(LcmBase): await asyncio.wait([task], return_when=asyncio.FIRST_COMPLETED) if task.exception(): raise task.exception() - self.logger.debug("Task vca_create={} {}".format(vca_id, "vca registered and validated successfully")) + self.logger.debug( + "Task vca_create={} {}".format( + vca_id, "vca registered and validated successfully" + ) + ) db_vca_update["_admin.operationalState"] = "ENABLED" db_vca_update["_admin.detailed-status"] = "Connectivity: ok" operation_details = "VCA validated" operation_state = "COMPLETED" - self.logger.debug("Task vca_create={} {}".format(vca_id, "Done. Result: {}".format(operation_state))) + self.logger.debug( + "Task vca_create={} {}".format( + vca_id, "Done. 
Result: {}".format(operation_state) + ) + ) except Exception as e: error_msg = "Failed with exception: {}".format(e) @@ -1246,10 +1515,14 @@ class VcaLcm(LcmBase): "create", op_id, operationState=operation_state, - detailed_status=operation_details + detailed_status=operation_details, ) except DbException as e: - self.logger.error("Task vca_create={} {}".format(vca_id, "Cannot update database: {}".format(e))) + self.logger.error( + "Task vca_create={} {}".format( + vca_id, "Cannot update database: {}".format(e) + ) + ) self.lcm_tasks.remove("vca", vca_id, order_id) async def delete(self, vca_content, order_id): @@ -1266,13 +1539,19 @@ class VcaLcm(LcmBase): vca_id = vca_content["_id"] try: - self.logger.debug("Task vca_delete={} {}".format(vca_id, "Deleting vca from db")) + self.logger.debug( + "Task vca_delete={} {}".format(vca_id, "Deleting vca from db") + ) self.db.del_one("vca", {"_id": vca_id}) db_vca_update = None operation_details = "deleted" operation_state = "COMPLETED" - self.logger.debug("Task vca_delete={} {}".format(vca_id, "Done. Result: {}".format(operation_state))) + self.logger.debug( + "Task vca_delete={} {}".format( + vca_id, "Done. Result: {}".format(operation_state) + ) + ) except Exception as e: error_msg = "Failed with exception: {}".format(e) self.logger.error("Task vca_delete={} {}".format(vca_id, error_msg)) @@ -1291,12 +1570,15 @@ class VcaLcm(LcmBase): detailed_status=operation_details, ) except DbException as e: - self.logger.error("Task vca_delete={} {}".format(vca_id, "Cannot update database: {}".format(e))) + self.logger.error( + "Task vca_delete={} {}".format( + vca_id, "Cannot update database: {}".format(e) + ) + ) self.lcm_tasks.remove("vca", vca_id, order_id) class K8sRepoLcm(LcmBase): - def __init__(self, msg, lcm_tasks, config, loop): """ Init, Connect to database, filesystem storage, and messaging @@ -1304,7 +1586,7 @@ class K8sRepoLcm(LcmBase): :return: None """ - self.logger = logging.getLogger('lcm.k8srepo') + self.logger = logging.getLogger("lcm.k8srepo") self.loop = loop self.lcm_tasks = lcm_tasks self.vca_config = config["VCA"] @@ -1317,7 +1599,7 @@ class K8sRepoLcm(LcmBase): fs=self.fs, log=self.logger, db=self.db, - on_update_db=None + on_update_db=None, ) async def create(self, k8srepo_content, order_id): @@ -1327,8 +1609,8 @@ class K8sRepoLcm(LcmBase): # In such a case, HA is not supported by NBI, 'op_id' is None, and lock_HA() will do nothing. 
# Register 'create' task here for related future HA operations - op_id = k8srepo_content.pop('op_id', None) - if not self.lcm_tasks.lock_HA('k8srepo', 'create', op_id): + op_id = k8srepo_content.pop("op_id", None) + if not self.lcm_tasks.lock_HA("k8srepo", "create", op_id): return k8srepo_id = k8srepo_content.get("_id") @@ -1338,33 +1620,49 @@ class K8sRepoLcm(LcmBase): db_k8srepo = None db_k8srepo_update = {} exc = None - operation_state = 'COMPLETED' - operation_details = '' + operation_state = "COMPLETED" + operation_details = "" try: step = "Getting k8srepo-id='{}' from db".format(k8srepo_id) self.logger.debug(logging_text + step) db_k8srepo = self.db.get_one("k8srepos", {"_id": k8srepo_id}) db_k8srepo_update["_admin.operationalState"] = "ENABLED" except Exception as e: - self.logger.error(logging_text + "Exit Exception {}".format(e), - exc_info=not isinstance(e, (LcmException, DbException, K8sException, N2VCException, - asyncio.CancelledError))) + self.logger.error( + logging_text + "Exit Exception {}".format(e), + exc_info=not isinstance( + e, + ( + LcmException, + DbException, + K8sException, + N2VCException, + asyncio.CancelledError, + ), + ), + ) exc = e finally: if exc and db_k8srepo: db_k8srepo_update["_admin.operationalState"] = "ERROR" - db_k8srepo_update["_admin.detailed-status"] = "ERROR {}: {}".format(step, exc) + db_k8srepo_update["_admin.detailed-status"] = "ERROR {}: {}".format( + step, exc + ) # Mark the WIM 'create' HA task as erroneous - operation_state = 'FAILED' + operation_state = "FAILED" operation_details = "ERROR {}: {}".format(step, exc) try: if db_k8srepo_update: self.update_db_2("k8srepos", k8srepo_id, db_k8srepo_update) # Register the K8srepo 'create' HA task either # succesful or erroneous, or do nothing (if legacy NBI) - self.lcm_tasks.unlock_HA('k8srepo', 'create', op_id, - operationState=operation_state, - detailed_status=operation_details) + self.lcm_tasks.unlock_HA( + "k8srepo", + "create", + op_id, + operationState=operation_state, + detailed_status=operation_details, + ) except DbException as e: self.logger.error(logging_text + "Cannot update database: {}".format(e)) self.lcm_tasks.remove("k8srepo", k8srepo_id, order_id) @@ -1375,8 +1673,8 @@ class K8sRepoLcm(LcmBase): # If 'vim_content' does not include 'op_id', we a running a legacy NBI version. # In such a case, HA is not supported by NBI, 'op_id' is None, and lock_HA() will do nothing. 
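The *_update dictionaries built in these handlers use dotted key paths; update_db_2 is assumed to apply them as a partial update, so unrelated fields in the stored record stay untouched. An illustrative fragment with the field names from the create handler above:

db_k8srepo_update = {
    "_admin.operationalState": "ERROR",
    "_admin.detailed-status": "ERROR {}: {}".format(step, exc),
}
# Only the two _admin.* fields change in the k8srepos document; the rest of
# the record is preserved by the partial update.
self.update_db_2("k8srepos", k8srepo_id, db_k8srepo_update)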
# Register 'delete' task here for related future HA operations - op_id = k8srepo_content.pop('op_id', None) - if not self.lcm_tasks.lock_HA('k8srepo', 'delete', op_id): + op_id = k8srepo_content.pop("op_id", None) + if not self.lcm_tasks.lock_HA("k8srepo", "delete", op_id): return k8srepo_id = k8srepo_content.get("_id") @@ -1387,33 +1685,49 @@ class K8sRepoLcm(LcmBase): db_k8srepo_update = {} exc = None - operation_state = 'COMPLETED' - operation_details = '' + operation_state = "COMPLETED" + operation_details = "" try: step = "Getting k8srepo-id='{}' from db".format(k8srepo_id) self.logger.debug(logging_text + step) db_k8srepo = self.db.get_one("k8srepos", {"_id": k8srepo_id}) except Exception as e: - self.logger.error(logging_text + "Exit Exception {}".format(e), - exc_info=not isinstance(e, (LcmException, DbException, K8sException, N2VCException, - asyncio.CancelledError))) + self.logger.error( + logging_text + "Exit Exception {}".format(e), + exc_info=not isinstance( + e, + ( + LcmException, + DbException, + K8sException, + N2VCException, + asyncio.CancelledError, + ), + ), + ) exc = e finally: if exc and db_k8srepo: db_k8srepo_update["_admin.operationalState"] = "ERROR" - db_k8srepo_update["_admin.detailed-status"] = "ERROR {}: {}".format(step, exc) + db_k8srepo_update["_admin.detailed-status"] = "ERROR {}: {}".format( + step, exc + ) # Mark the WIM 'create' HA task as erroneous - operation_state = 'FAILED' + operation_state = "FAILED" operation_details = "ERROR {}: {}".format(step, exc) try: if db_k8srepo_update: self.update_db_2("k8srepos", k8srepo_id, db_k8srepo_update) # Register the K8srepo 'delete' HA task either # succesful or erroneous, or do nothing (if legacy NBI) - self.lcm_tasks.unlock_HA('k8srepo', 'delete', op_id, - operationState=operation_state, - detailed_status=operation_details) + self.lcm_tasks.unlock_HA( + "k8srepo", + "delete", + op_id, + operationState=operation_state, + detailed_status=operation_details, + ) self.db.del_one("k8srepos", {"_id": k8srepo_id}) except DbException as e: self.logger.error(logging_text + "Cannot update database: {}".format(e)) diff --git a/setup.py b/setup.py index c1ae182..664b32d 100644 --- a/setup.py +++ b/setup.py @@ -23,24 +23,26 @@ _name = "osm_lcm" # version is at first line of osm_lcm/html_public/version here = os.path.abspath(os.path.dirname(__file__)) # VERSION = "4.0.1rc1" -with open(os.path.join(here, 'README.rst')) as readme_file: +with open(os.path.join(here, "README.rst")) as readme_file: README = readme_file.read() setup( name=_name, - description='OSM Life Cycle Management module', + description="OSM Life Cycle Management module", long_description=README, - version_command=('git describe --match v* --tags --long --dirty', 'pep440-git-full'), + version_command=( + "git describe --match v* --tags --long --dirty", + "pep440-git-full", + ), # version=VERSION, # python_requires='>3.5.0', - author='ETSI OSM', - author_email='osmsupport@etsi.org', - maintainer='ETSI OSM', - maintainer_email='osmsupport@etsi.org', - url='https://osm.etsi.org/gitweb/?p=osm/LCM.git;a=summary', - license='Apache 2.0', - + author="ETSI OSM", + author_email="osmsupport@etsi.org", + maintainer="ETSI OSM", + maintainer_email="osmsupport@etsi.org", + url="https://osm.etsi.org/gitweb/?p=osm/LCM.git;a=summary", + license="Apache 2.0", packages=[_name], include_package_data=True, - setup_requires=['setuptools-version-command'], + setup_requires=["setuptools-version-command"], ) diff --git a/tox.ini b/tox.ini index 630fbca..e9816cd 100644 --- a/tox.ini 
+++ b/tox.ini @@ -34,6 +34,7 @@ deps = black skip_install = true commands = - black --check --diff osm_lcm/ + - black --check --diff setup.py ####################################################################################### -- GitLab From 5f75f10f2308aa6a8cf36cfdeeb20dc95316c5ce Mon Sep 17 00:00:00 2001 From: aktas Date: Mon, 15 Mar 2021 11:26:10 +0300 Subject: [PATCH 30/35] Feature 10509 manual scaling for native k8s charm Also includes improvements for scale function Change-Id: I23f51b8c1b219681841d0b1f7f4db3a0d9ed4c7b Signed-off-by: aktas --- osm_lcm/data_utils/nsr.py | 14 + osm_lcm/data_utils/vnfd.py | 7 + osm_lcm/data_utils/vnfr.py | 8 + osm_lcm/ns.py | 624 ++++++++++++++++++++------- osm_lcm/tests/test_db_descriptors.py | 257 +++++++++++ osm_lcm/tests/test_ns.py | 36 +- 6 files changed, 786 insertions(+), 160 deletions(-) diff --git a/osm_lcm/data_utils/nsr.py b/osm_lcm/data_utils/nsr.py index f62b0b4..006713c 100644 --- a/osm_lcm/data_utils/nsr.py +++ b/osm_lcm/data_utils/nsr.py @@ -22,6 +22,20 @@ # contact: fbravo@whitestack.com ## +from osm_lcm.lcm_utils import get_iterable + def get_vlds(nsr): return nsr.get("vld", ()) + + +def get_deployed_kdu(nsr_deployed, kdu_name, member_vnf_index): + deployed_kdu = None + index = None + for index, deployed_kdu in enumerate(get_iterable(nsr_deployed, "K8s")): + if ( + kdu_name == deployed_kdu["kdu-name"] + and deployed_kdu["member-vnf-index"] == member_vnf_index + ): + break + return deployed_kdu, index diff --git a/osm_lcm/data_utils/vnfd.py b/osm_lcm/data_utils/vnfd.py index 5351c41..17a98a9 100644 --- a/osm_lcm/data_utils/vnfd.py +++ b/osm_lcm/data_utils/vnfd.py @@ -101,6 +101,13 @@ def get_vdu_profile(vnfd, vdu_profile_id): ) +def get_kdu_profile(vnfd, kdu_profile_id): + return list_utils.find_in_list( + vnfd.get("df", ())[0]["kdu-resource-profile"], + lambda kdu_profile: kdu_profile["id"] == kdu_profile_id, + ) + + def get_configuration(vnfd, entity_id): lcm_ops_config = vnfd.get("df")[0].get("lcm-operations-configuration") if not lcm_ops_config: diff --git a/osm_lcm/data_utils/vnfr.py b/osm_lcm/data_utils/vnfr.py index 7e4d164..fe98102 100644 --- a/osm_lcm/data_utils/vnfr.py +++ b/osm_lcm/data_utils/vnfr.py @@ -69,3 +69,11 @@ def get_vdur_index(db_vnfr, vdu_delta): return len([x for x in vdur_list if x.get("vdu-id-ref") == vdu_delta["id"]]) else: return 0 + + +def get_kdur(db_vnfr, kdu_name): + kdur_list = get_iterable(db_vnfr, "kdur") + if kdur_list: + return next(x for x in kdur_list if x.get("kdu-name") == kdu_name) + else: + return None diff --git a/osm_lcm/ns.py b/osm_lcm/ns.py index 8b2be1e..ddd827e 100644 --- a/osm_lcm/ns.py +++ b/osm_lcm/ns.py @@ -31,6 +31,7 @@ from jinja2 import ( ) from osm_lcm import ROclient +from osm_lcm.data_utils.nsr import get_deployed_kdu from osm_lcm.ng_ro import NgRoClient, NgRoException from osm_lcm.lcm_utils import ( LcmException, @@ -54,9 +55,10 @@ from osm_lcm.data_utils.vnfd import ( get_scaling_aspect, get_number_of_instances, get_juju_ee_ref, + get_kdu_profile, ) from osm_lcm.data_utils.list_utils import find_in_list -from osm_lcm.data_utils.vnfr import get_osm_params, get_vdur_index +from osm_lcm.data_utils.vnfr import get_osm_params, get_vdur_index, get_kdur from osm_lcm.data_utils.dict_utils import parse_yaml_strings from osm_lcm.data_utils.database.vim_account import VimAccountDB from n2vc.k8s_helm_conn import K8sHelmConnector @@ -4947,14 +4949,6 @@ class NsLcm(LcmBase): self.update_db_2("nsrs", nsr_id, db_nsr_update) nsr_deployed = db_nsr["_admin"].get("deployed") - ####### - 
nsr_deployed = db_nsr["_admin"].get("deployed") - vnf_index = db_nslcmop["operationParams"].get("member_vnf_index") - # vdu_id = db_nslcmop["operationParams"].get("vdu_id") - # vdu_count_index = db_nslcmop["operationParams"].get("vdu_count_index") - # vdu_name = db_nslcmop["operationParams"].get("vdu_name") - ####### - vnf_index = db_nslcmop["operationParams"]["scaleVnfData"][ "scaleByStepData" ]["member-vnf-index"] @@ -5017,9 +5011,9 @@ class NsLcm(LcmBase): db_nsr_update[ "_admin.scaling-group.{}.name".format(admin_scale_index) ] = scaling_group - RO_scaling_info = [] - VCA_scaling_info = [] - vdu_scaling_info = {"scaling_group_name": scaling_group, "vdu": []} + + vca_scaling_info = [] + scaling_info = {"scaling_group_name": scaling_group, "vdu": [], "kdu": []} if scaling_type == "SCALE_OUT": if "aspect-delta-details" not in scaling_descriptor: raise LcmException( @@ -5030,12 +5024,14 @@ class NsLcm(LcmBase): # count if max-instance-count is reached deltas = scaling_descriptor.get("aspect-delta-details")["deltas"] - vdu_scaling_info["scaling_direction"] = "OUT" - vdu_scaling_info["vdu-create"] = {} + scaling_info["scaling_direction"] = "OUT" + scaling_info["vdu-create"] = {} + scaling_info["kdu-create"] = {} for delta in deltas: - for vdu_delta in delta["vdu-delta"]: + for vdu_delta in delta.get("vdu-delta", {}): vdud = get_vdu(db_vnfd, vdu_delta["id"]) - vdu_index = get_vdur_index(db_vnfr, vdu_delta) + # vdu_index also provides the number of instance of the targeted vdu + vdu_count = vdu_index = get_vdur_index(db_vnfr, vdu_delta) cloud_init_text = self._get_vdu_cloud_init_content( vdud, db_vnfd ) @@ -5056,10 +5052,18 @@ class NsLcm(LcmBase): default_instance_num = get_number_of_instances( db_vnfd, vdud["id"] ) + instances_number = vdu_delta.get("number-of-instances", 1) + nb_scale_op += instances_number + + new_instance_count = nb_scale_op + default_instance_num + # Control if new count is over max and vdu count is less than max. 
+ # Then assign new instance count + if new_instance_count > max_instance_count > vdu_count: + instances_number = new_instance_count - max_instance_count + else: + instances_number = instances_number - nb_scale_op += vdu_delta.get("number-of-instances", 1) - - if nb_scale_op + default_instance_num > max_instance_count: + if new_instance_count > max_instance_count: raise LcmException( "reached the limit of {} (max-instance-count) " "scaling-out operations for the " @@ -5081,7 +5085,7 @@ class NsLcm(LcmBase): vdud["id"], ) ) - VCA_scaling_info.append( + vca_scaling_info.append( { "osm_vdu_id": vdu_delta["id"], "member-vnf-index": vnf_index, @@ -5089,33 +5093,102 @@ class NsLcm(LcmBase): "vdu_index": vdu_index + x, } ) - RO_scaling_info.append( + scaling_info["vdu-create"][vdu_delta["id"]] = instances_number + for kdu_delta in delta.get("kdu-resource-delta", {}): + kdu_profile = get_kdu_profile(db_vnfd, kdu_delta["id"]) + kdu_name = kdu_profile["kdu-name"] + resource_name = kdu_profile["resource-name"] + + # Might have different kdus in the same delta + # Should have list for each kdu + if not scaling_info["kdu-create"].get(kdu_name, None): + scaling_info["kdu-create"][kdu_name] = [] + + kdur = get_kdur(db_vnfr, kdu_name) + if kdur.get("helm-chart"): + k8s_cluster_type = "helm-chart-v3" + self.logger.debug("kdur: {}".format(kdur)) + if ( + kdur.get("helm-version") + and kdur.get("helm-version") == "v2" + ): + k8s_cluster_type = "helm-chart" + raise NotImplementedError + elif kdur.get("juju-bundle"): + k8s_cluster_type = "juju-bundle" + else: + raise LcmException( + "kdu type for kdu='{}.{}' is neither helm-chart nor " + "juju-bundle. Maybe an old NBI version is running".format( + db_vnfr["member-vnf-index-ref"], kdu_name + ) + ) + + max_instance_count = 10 + if kdu_profile and "max-number-of-instances" in kdu_profile: + max_instance_count = kdu_profile.get( + "max-number-of-instances", 10 + ) + + nb_scale_op += kdu_delta.get("number-of-instances", 1) + deployed_kdu, _ = get_deployed_kdu( + nsr_deployed, kdu_name, vnf_index + ) + if deployed_kdu is None: + raise LcmException( + "KDU '{}' for vnf '{}' not deployed".format( + kdu_name, vnf_index + ) + ) + kdu_instance = deployed_kdu.get("kdu-instance") + instance_num = await self.k8scluster_map[ + k8s_cluster_type + ].get_scale_count(resource_name, kdu_instance, vca_id=vca_id) + kdu_replica_count = instance_num + kdu_delta.get( + "number-of-instances", 1 + ) + + # Control if new count is over max and instance_num is less than max. 
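A worked example of the KDU replica clamp that follows, with assumed values of 9 running replicas, a requested delta of 3, and a max-number-of-instances of 10:

instance_num = 9          # current replicas, as reported by get_scale_count (assumed)
number_of_instances = 3   # kdu_delta["number-of-instances"] (assumed)
max_instance_count = 10   # kdu_profile["max-number-of-instances"]

kdu_replica_count = instance_num + number_of_instances   # 12
if kdu_replica_count > max_instance_count > instance_num:
    kdu_replica_count = max_instance_count               # clamped down to 10
# 10 > 10 is False, so the subsequent limit check does not raise.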
+ # Then assign max instance number to kdu replica count + if kdu_replica_count > max_instance_count > instance_num: + kdu_replica_count = max_instance_count + if kdu_replica_count > max_instance_count: + raise LcmException( + "reached the limit of {} (max-instance-count) " + "scaling-out operations for the " + "scaling-group-descriptor '{}'".format( + instance_num, scaling_group + ) + ) + + for x in range(kdu_delta.get("number-of-instances", 1)): + vca_scaling_info.append( + { + "osm_kdu_id": kdu_name, + "member-vnf-index": vnf_index, + "type": "create", + "kdu_index": instance_num + x - 1, + } + ) + scaling_info["kdu-create"][kdu_name].append( { - "osm_vdu_id": vdu_delta["id"], "member-vnf-index": vnf_index, "type": "create", - "count": vdu_delta.get("number-of-instances", 1), + "k8s-cluster-type": k8s_cluster_type, + "resource-name": resource_name, + "scale": kdu_replica_count, } ) - if cloud_init_list: - RO_scaling_info[-1]["cloud_init"] = cloud_init_list - vdu_scaling_info["vdu-create"][vdu_delta["id"]] = vdu_delta.get( - "number-of-instances", 1 - ) - elif scaling_type == "SCALE_IN": - if ( - "min-instance-count" in scaling_descriptor - and scaling_descriptor["min-instance-count"] is not None - ): - min_instance_count = int(scaling_descriptor["min-instance-count"]) - - vdu_scaling_info["scaling_direction"] = "IN" - vdu_scaling_info["vdu-delete"] = {} deltas = scaling_descriptor.get("aspect-delta-details")["deltas"] + + scaling_info["scaling_direction"] = "IN" + scaling_info["vdu-delete"] = {} + scaling_info["kdu-delete"] = {} + for delta in deltas: - for vdu_delta in delta["vdu-delta"]: - vdu_index = get_vdur_index(db_vnfr, vdu_delta) + for vdu_delta in delta.get("vdu-delta", {}): + vdu_count = vdu_index = get_vdur_index(db_vnfr, vdu_delta) min_instance_count = 0 vdu_profile = get_vdu_profile(db_vnfd, vdu_delta["id"]) if vdu_profile and "min-number-of-instances" in vdu_profile: @@ -5124,26 +5197,25 @@ class NsLcm(LcmBase): default_instance_num = get_number_of_instances( db_vnfd, vdu_delta["id"] ) + instance_num = vdu_delta.get("number-of-instances", 1) + nb_scale_op -= instance_num - nb_scale_op -= vdu_delta.get("number-of-instances", 1) - if nb_scale_op + default_instance_num < min_instance_count: + new_instance_count = nb_scale_op + default_instance_num + + if new_instance_count < min_instance_count < vdu_count: + instances_number = min_instance_count - new_instance_count + else: + instances_number = instance_num + + if new_instance_count < min_instance_count: raise LcmException( "reached the limit of {} (min-instance-count) scaling-in operations for the " "scaling-group-descriptor '{}'".format( nb_scale_op, scaling_group ) ) - RO_scaling_info.append( - { - "osm_vdu_id": vdu_delta["id"], - "member-vnf-index": vnf_index, - "type": "delete", - "count": vdu_delta.get("number-of-instances", 1), - "vdu_index": vdu_index - 1, - } - ) for x in range(vdu_delta.get("number-of-instances", 1)): - VCA_scaling_info.append( + vca_scaling_info.append( { "osm_vdu_id": vdu_delta["id"], "member-vnf-index": vnf_index, @@ -5151,17 +5223,93 @@ class NsLcm(LcmBase): "vdu_index": vdu_index - 1 - x, } ) - vdu_scaling_info["vdu-delete"][vdu_delta["id"]] = vdu_delta.get( + scaling_info["vdu-delete"][vdu_delta["id"]] = instances_number + for kdu_delta in delta.get("kdu-resource-delta", {}): + kdu_profile = get_kdu_profile(db_vnfd, kdu_delta["id"]) + kdu_name = kdu_profile["kdu-name"] + resource_name = kdu_profile["resource-name"] + + if not scaling_info["kdu-delete"].get(kdu_name, None): + 
scaling_info["kdu-delete"][kdu_name] = [] + + kdur = get_kdur(db_vnfr, kdu_name) + if kdur.get("helm-chart"): + k8s_cluster_type = "helm-chart-v3" + self.logger.debug("kdur: {}".format(kdur)) + if ( + kdur.get("helm-version") + and kdur.get("helm-version") == "v2" + ): + k8s_cluster_type = "helm-chart" + raise NotImplementedError + elif kdur.get("juju-bundle"): + k8s_cluster_type = "juju-bundle" + else: + raise LcmException( + "kdu type for kdu='{}.{}' is neither helm-chart nor " + "juju-bundle. Maybe an old NBI version is running".format( + db_vnfr["member-vnf-index-ref"], kdur["kdu-name"] + ) + ) + + min_instance_count = 0 + if kdu_profile and "min-number-of-instances" in kdu_profile: + min_instance_count = kdu_profile["min-number-of-instances"] + + nb_scale_op -= kdu_delta.get("number-of-instances", 1) + deployed_kdu, _ = get_deployed_kdu( + nsr_deployed, kdu_name, vnf_index + ) + if deployed_kdu is None: + raise LcmException( + "KDU '{}' for vnf '{}' not deployed".format( + kdu_name, vnf_index + ) + ) + kdu_instance = deployed_kdu.get("kdu-instance") + instance_num = await self.k8scluster_map[ + k8s_cluster_type + ].get_scale_count(resource_name, kdu_instance, vca_id=vca_id) + kdu_replica_count = instance_num - kdu_delta.get( "number-of-instances", 1 ) + if kdu_replica_count < min_instance_count < instance_num: + kdu_replica_count = min_instance_count + if kdu_replica_count < min_instance_count: + raise LcmException( + "reached the limit of {} (min-instance-count) scaling-in operations for the " + "scaling-group-descriptor '{}'".format( + instance_num, scaling_group + ) + ) + + for x in range(kdu_delta.get("number-of-instances", 1)): + vca_scaling_info.append( + { + "osm_kdu_id": kdu_name, + "member-vnf-index": vnf_index, + "type": "delete", + "kdu_index": instance_num - x - 1, + } + ) + scaling_info["kdu-delete"][kdu_name].append( + { + "member-vnf-index": vnf_index, + "type": "delete", + "k8s-cluster-type": k8s_cluster_type, + "resource-name": resource_name, + "scale": kdu_replica_count, + } + ) + # update VDU_SCALING_INFO with the VDUs to delete ip_addresses - vdu_delete = copy(vdu_scaling_info.get("vdu-delete")) - if vdu_scaling_info["scaling_direction"] == "IN": + vdu_delete = copy(scaling_info.get("vdu-delete")) + if scaling_info["scaling_direction"] == "IN": for vdur in reversed(db_vnfr["vdur"]): if vdu_delete.get(vdur["vdu-id-ref"]): vdu_delete[vdur["vdu-id-ref"]] -= 1 - vdu_scaling_info["vdu"].append( + scaling_info["vdu"].append( { "name": vdur.get("name") or vdur.get("vdu-name"), "vdu_id": vdur["vdu-id-ref"], @@ -5169,7 +5317,7 @@ class NsLcm(LcmBase): } ) for interface in vdur["interfaces"]: - vdu_scaling_info["vdu"][-1]["interface"].append( + scaling_info["vdu"][-1]["interface"].append( { "name": interface["name"], "ip_address": interface["ip-address"], @@ -5213,7 +5361,7 @@ class NsLcm(LcmBase): "primitive".format(scaling_group, vnf_config_primitive) ) - vnfr_params = {"VDU_SCALE_INFO": vdu_scaling_info} + vnfr_params = {"VDU_SCALE_INFO": scaling_info} if db_vnfr.get("additionalParamsForVnf"): vnfr_params.update(db_vnfr["additionalParamsForVnf"]) @@ -5315,24 +5463,33 @@ class NsLcm(LcmBase): ] = time() # SCALE-IN VCA - BEGIN - if VCA_scaling_info: + if vca_scaling_info: step = db_nslcmop_update[ "detailed-status" ] = "Deleting the execution environments" scale_process = "VCA" - for vdu_info in VCA_scaling_info: - if vdu_info["type"] == "delete": - member_vnf_index = str(vdu_info["member-vnf-index"]) + for vca_info in vca_scaling_info: + if vca_info["type"] == "delete": + 
member_vnf_index = str(vca_info["member-vnf-index"]) self.logger.debug( - logging_text + "vdu info: {}".format(vdu_info) - ) - vdu_id = vdu_info["osm_vdu_id"] - vdu_index = int(vdu_info["vdu_index"]) - stage[ - 1 - ] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format( - member_vnf_index, vdu_id, vdu_index + logging_text + "vdu info: {}".format(vca_info) ) + if vca_info.get("osm_vdu_id"): + vdu_id = vca_info["osm_vdu_id"] + vdu_index = int(vca_info["vdu_index"]) + stage[ + 1 + ] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format( + member_vnf_index, vdu_id, vdu_index + ) + else: + vdu_index = 0 + kdu_id = vca_info["osm_kdu_id"] + stage[ + 1 + ] = "Scaling member_vnf_index={}, kdu_id={}, vdu_index={} ".format( + member_vnf_index, kdu_id, vdu_index + ) stage[2] = step = "Scaling in VCA" self._write_op_status(op_id=nslcmop_id, stage=stage) vca_update = db_nsr["_admin"]["deployed"]["VCA"] @@ -5414,117 +5571,165 @@ class NsLcm(LcmBase): # SCALE-IN VCA - END # SCALE RO - BEGIN - if RO_scaling_info: + if scaling_info.get("vdu-create") or scaling_info.get("vdu-delete"): scale_process = "RO" if self.ro_config.get("ng"): await self._scale_ng_ro( - logging_text, - db_nsr, - db_nslcmop, - db_vnfr, - vdu_scaling_info, - stage, + logging_text, db_nsr, db_nslcmop, db_vnfr, scaling_info, stage ) - vdu_scaling_info.pop("vdu-create", None) - vdu_scaling_info.pop("vdu-delete", None) + scaling_info.pop("vdu-create", None) + scaling_info.pop("vdu-delete", None) scale_process = None + # SCALE RO - END + + # SCALE KDU - BEGIN + if scaling_info.get("kdu-create") or scaling_info.get("kdu-delete"): + scale_process = "KDU" + await self._scale_kdu( + logging_text, nsr_id, nsr_deployed, db_vnfd, vca_id, scaling_info + ) + scaling_info.pop("kdu-create", None) + scaling_info.pop("kdu-delete", None) + + scale_process = None + # SCALE KDU - END + if db_nsr_update: self.update_db_2("nsrs", nsr_id, db_nsr_update) - # SCALE RO - END # SCALE-UP VCA - BEGIN - if VCA_scaling_info: + if vca_scaling_info: step = db_nslcmop_update[ "detailed-status" ] = "Creating new execution environments" scale_process = "VCA" - for vdu_info in VCA_scaling_info: - if vdu_info["type"] == "create": - member_vnf_index = str(vdu_info["member-vnf-index"]) + for vca_info in vca_scaling_info: + if vca_info["type"] == "create": + member_vnf_index = str(vca_info["member-vnf-index"]) self.logger.debug( - logging_text + "vdu info: {}".format(vdu_info) + logging_text + "vdu info: {}".format(vca_info) ) vnfd_id = db_vnfr["vnfd-ref"] - vdu_index = int(vdu_info["vdu_index"]) - deploy_params = {"OSM": get_osm_params(db_vnfr)} - if db_vnfr.get("additionalParamsForVnf"): - deploy_params.update( - parse_yaml_strings( - db_vnfr["additionalParamsForVnf"].copy() + if vca_info.get("osm_vdu_id"): + vdu_index = int(vca_info["vdu_index"]) + deploy_params = {"OSM": get_osm_params(db_vnfr)} + if db_vnfr.get("additionalParamsForVnf"): + deploy_params.update( + parse_yaml_strings( + db_vnfr["additionalParamsForVnf"].copy() + ) ) + descriptor_config = get_configuration( + db_vnfd, db_vnfd["id"] ) - descriptor_config = get_configuration(db_vnfd, db_vnfd["id"]) - if descriptor_config: - vdu_id = None - vdu_name = None - kdu_name = None - self._deploy_n2vc( - logging_text=logging_text - + "member_vnf_index={} ".format(member_vnf_index), - db_nsr=db_nsr, - db_vnfr=db_vnfr, - nslcmop_id=nslcmop_id, - nsr_id=nsr_id, - nsi_id=nsi_id, - vnfd_id=vnfd_id, - vdu_id=vdu_id, - kdu_name=kdu_name, - member_vnf_index=member_vnf_index, - vdu_index=vdu_index, - 
vdu_name=vdu_name, - deploy_params=deploy_params, - descriptor_config=descriptor_config, - base_folder=base_folder, - task_instantiation_info=tasks_dict_info, - stage=stage, - ) - vdu_id = vdu_info["osm_vdu_id"] - vdur = find_in_list( - db_vnfr["vdur"], lambda vdu: vdu["vdu-id-ref"] == vdu_id - ) - descriptor_config = get_configuration(db_vnfd, vdu_id) - if vdur.get("additionalParams"): - deploy_params_vdu = parse_yaml_strings( - vdur["additionalParams"] + if descriptor_config: + vdu_id = None + vdu_name = None + kdu_name = None + self._deploy_n2vc( + logging_text=logging_text + + "member_vnf_index={} ".format(member_vnf_index), + db_nsr=db_nsr, + db_vnfr=db_vnfr, + nslcmop_id=nslcmop_id, + nsr_id=nsr_id, + nsi_id=nsi_id, + vnfd_id=vnfd_id, + vdu_id=vdu_id, + kdu_name=kdu_name, + member_vnf_index=member_vnf_index, + vdu_index=vdu_index, + vdu_name=vdu_name, + deploy_params=deploy_params, + descriptor_config=descriptor_config, + base_folder=base_folder, + task_instantiation_info=tasks_dict_info, + stage=stage, + ) + vdu_id = vca_info["osm_vdu_id"] + vdur = find_in_list( + db_vnfr["vdur"], lambda vdu: vdu["vdu-id-ref"] == vdu_id ) - else: - deploy_params_vdu = deploy_params - deploy_params_vdu["OSM"] = get_osm_params( - db_vnfr, vdu_id, vdu_count_index=vdu_index - ) - if descriptor_config: - vdu_name = None - kdu_name = None - stage[ - 1 - ] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format( - member_vnf_index, vdu_id, vdu_index + descriptor_config = get_configuration(db_vnfd, vdu_id) + if vdur.get("additionalParams"): + deploy_params_vdu = parse_yaml_strings( + vdur["additionalParams"] + ) + else: + deploy_params_vdu = deploy_params + deploy_params_vdu["OSM"] = get_osm_params( + db_vnfr, vdu_id, vdu_count_index=vdu_index ) - stage[2] = step = "Scaling out VCA" - self._write_op_status(op_id=nslcmop_id, stage=stage) - self._deploy_n2vc( - logging_text=logging_text - + "member_vnf_index={}, vdu_id={}, vdu_index={} ".format( + if descriptor_config: + vdu_name = None + kdu_name = None + stage[ + 1 + ] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format( member_vnf_index, vdu_id, vdu_index - ), - db_nsr=db_nsr, - db_vnfr=db_vnfr, - nslcmop_id=nslcmop_id, - nsr_id=nsr_id, - nsi_id=nsi_id, - vnfd_id=vnfd_id, - vdu_id=vdu_id, - kdu_name=kdu_name, - member_vnf_index=member_vnf_index, - vdu_index=vdu_index, - vdu_name=vdu_name, - deploy_params=deploy_params_vdu, - descriptor_config=descriptor_config, - base_folder=base_folder, - task_instantiation_info=tasks_dict_info, - stage=stage, - ) + ) + stage[2] = step = "Scaling out VCA" + self._write_op_status(op_id=nslcmop_id, stage=stage) + self._deploy_n2vc( + logging_text=logging_text + + "member_vnf_index={}, vdu_id={}, vdu_index={} ".format( + member_vnf_index, vdu_id, vdu_index + ), + db_nsr=db_nsr, + db_vnfr=db_vnfr, + nslcmop_id=nslcmop_id, + nsr_id=nsr_id, + nsi_id=nsi_id, + vnfd_id=vnfd_id, + vdu_id=vdu_id, + kdu_name=kdu_name, + member_vnf_index=member_vnf_index, + vdu_index=vdu_index, + vdu_name=vdu_name, + deploy_params=deploy_params_vdu, + descriptor_config=descriptor_config, + base_folder=base_folder, + task_instantiation_info=tasks_dict_info, + stage=stage, + ) + else: + kdu_name = vca_info["osm_kdu_id"] + descriptor_config = get_configuration(db_vnfd, kdu_name) + if descriptor_config: + vdu_id = None + kdu_index = int(vca_info["kdu_index"]) + vdu_name = None + kdur = next( + x + for x in db_vnfr["kdur"] + if x["kdu-name"] == kdu_name + ) + deploy_params_kdu = {"OSM": get_osm_params(db_vnfr)} + if 
kdur.get("additionalParams"): + deploy_params_kdu = parse_yaml_strings( + kdur["additionalParams"] + ) + + self._deploy_n2vc( + logging_text=logging_text, + db_nsr=db_nsr, + db_vnfr=db_vnfr, + nslcmop_id=nslcmop_id, + nsr_id=nsr_id, + nsi_id=nsi_id, + vnfd_id=vnfd_id, + vdu_id=vdu_id, + kdu_name=kdu_name, + member_vnf_index=member_vnf_index, + vdu_index=kdu_index, + vdu_name=vdu_name, + deploy_params=deploy_params_kdu, + descriptor_config=descriptor_config, + base_folder=base_folder, + task_instantiation_info=tasks_dict_info, + stage=stage, + ) # SCALE-UP VCA - END scale_process = None @@ -5551,7 +5756,7 @@ class NsLcm(LcmBase): vnf_config_primitive ) - vnfr_params = {"VDU_SCALE_INFO": vdu_scaling_info} + vnfr_params = {"VDU_SCALE_INFO": scaling_info} if db_vnfr.get("additionalParamsForVnf"): vnfr_params.update(db_vnfr["additionalParamsForVnf"]) @@ -5761,6 +5966,107 @@ class NsLcm(LcmBase): self.logger.debug(logging_text + "Exit") self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_scale") + async def _scale_kdu( + self, logging_text, nsr_id, nsr_deployed, db_vnfd, vca_id, scaling_info + ): + _scaling_info = scaling_info.get("kdu-create") or scaling_info.get("kdu-delete") + for kdu_name in _scaling_info: + for kdu_scaling_info in _scaling_info[kdu_name]: + deployed_kdu, index = get_deployed_kdu( + nsr_deployed, kdu_name, kdu_scaling_info["member-vnf-index"] + ) + cluster_uuid = deployed_kdu["k8scluster-uuid"] + kdu_instance = deployed_kdu["kdu-instance"] + scale = int(kdu_scaling_info["scale"]) + k8s_cluster_type = kdu_scaling_info["k8s-cluster-type"] + + db_dict = { + "collection": "nsrs", + "filter": {"_id": nsr_id}, + "path": "_admin.deployed.K8s.{}".format(index), + } + + step = "scaling application {}".format( + kdu_scaling_info["resource-name"] + ) + self.logger.debug(logging_text + step) + + if kdu_scaling_info["type"] == "delete": + kdu_config = get_configuration(db_vnfd, kdu_name) + if ( + kdu_config + and kdu_config.get("terminate-config-primitive") + and get_juju_ee_ref(db_vnfd, kdu_name) is None + ): + terminate_config_primitive_list = kdu_config.get( + "terminate-config-primitive" + ) + terminate_config_primitive_list.sort( + key=lambda val: int(val["seq"]) + ) + + for ( + terminate_config_primitive + ) in terminate_config_primitive_list: + primitive_params_ = self._map_primitive_params( + terminate_config_primitive, {}, {} + ) + step = "execute terminate config primitive" + self.logger.debug(logging_text + step) + await asyncio.wait_for( + self.k8scluster_map[k8s_cluster_type].exec_primitive( + cluster_uuid=cluster_uuid, + kdu_instance=kdu_instance, + primitive_name=terminate_config_primitive["name"], + params=primitive_params_, + db_dict=db_dict, + vca_id=vca_id, + ), + timeout=600, + ) + + await asyncio.wait_for( + self.k8scluster_map[k8s_cluster_type].scale( + kdu_instance, + scale, + kdu_scaling_info["resource-name"], + vca_id=vca_id, + ), + timeout=self.timeout_vca_on_error, + ) + + if kdu_scaling_info["type"] == "create": + kdu_config = get_configuration(db_vnfd, kdu_name) + if ( + kdu_config + and kdu_config.get("initial-config-primitive") + and get_juju_ee_ref(db_vnfd, kdu_name) is None + ): + initial_config_primitive_list = kdu_config.get( + "initial-config-primitive" + ) + initial_config_primitive_list.sort( + key=lambda val: int(val["seq"]) + ) + + for initial_config_primitive in initial_config_primitive_list: + primitive_params_ = self._map_primitive_params( + initial_config_primitive, {}, {} + ) + step = "execute initial config primitive" + 
self.logger.debug(logging_text + step) + await asyncio.wait_for( + self.k8scluster_map[k8s_cluster_type].exec_primitive( + cluster_uuid=cluster_uuid, + kdu_instance=kdu_instance, + primitive_name=initial_config_primitive["name"], + params=primitive_params_, + db_dict=db_dict, + vca_id=vca_id, + ), + timeout=600, + ) + async def _scale_ng_ro( self, logging_text, db_nsr, db_nslcmop, db_vnfr, vdu_scaling_info, stage ): diff --git a/osm_lcm/tests/test_db_descriptors.py b/osm_lcm/tests/test_db_descriptors.py index a42449b..cc4f828 100644 --- a/osm_lcm/tests/test_db_descriptors.py +++ b/osm_lcm/tests/test_db_descriptors.py @@ -224,6 +224,66 @@ db_nslcmops_text = """ operationState: COMPLETED startTime: 1575034637.0445576 statusEnteredTime: 1575034663.8484545 + +- _admin: + created: 1575034637.044651 + modified: 1575034637.044651 + projects_read: + - 25b5aebf-3da1-49ed-99de-1d2b4a86d6e4 + projects_write: + - 25b5aebf-3da1-49ed-99de-1d2b4a86d6e4 + _id: 52770491-a765-40ce-97a1-c6e200bba7b3 + detailed-status: done + id: 52770491-a765-40ce-97a1-c6e200bba7b3 + isAutomaticInvocation: false + isCancelPending: false + lcmOperationType: instantiate + links: + nsInstance: /osm/nslcm/v1/ns_instances/c54b14cb-69a8-45bc-b011-d6bea187dc0a + self: /osm/nslcm/v1/ns_lcm_op_occs/52770491-a765-40ce-97a1-c6e200bba7b3 + nsInstanceId: 0bcb701c-ee4d-41ab-8ee6-f4156f7f114d + operationParams: + lcmOperationType: scale + nsInstanceId: c54b14cb-69a8-45bc-b011-d6bea187dc0a + scaleVnfData: + scaleByStepData: + member-vnf-index: native-kdu + scaling-group-descriptor: kdu_scaling_group + scaleVnfType: SCALE_OUT + scaleType: SCALE_VNF + operationState: COMPLETED + startTime: 1575034637.0445576 + statusEnteredTime: 1575034663.8484545 + +- _admin: + created: 1575034637.044651 + modified: 1575034637.044651 + projects_read: + - 25b5aebf-3da1-49ed-99de-1d2b4a86d6e4 + projects_write: + - 25b5aebf-3da1-49ed-99de-1d2b4a86d6e4 + _id: 4013bbd2-b151-40ee-bcef-7e24ce5432f6 + detailed-status: done + id: 4013bbd2-b151-40ee-bcef-7e24ce5432f6 + isAutomaticInvocation: false + isCancelPending: false + lcmOperationType: instantiate + links: + nsInstance: /osm/nslcm/v1/ns_instances/c54b14cb-69a8-45bc-b011-d6bea187dc0a + self: /osm/nslcm/v1/ns_lcm_op_occs/4013bbd2-b151-40ee-bcef-7e24ce5432f6 + nsInstanceId: 0bcb701c-ee4d-41ab-8ee6-f4156f7f114d + operationParams: + lcmOperationType: scale + nsInstanceId: c54b14cb-69a8-45bc-b011-d6bea187dc0a + scaleVnfData: + scaleByStepData: + member-vnf-index: native-kdu + scaling-group-descriptor: kdu_scaling_group_2 + scaleVnfType: SCALE_OUT + scaleType: SCALE_VNF + operationState: COMPLETED + startTime: 1575034637.0445576 + statusEnteredTime: 1575034663.8484545 """ db_nsrs_text = """ @@ -893,6 +953,73 @@ db_nsrs_text = """ vim-network-name: mgmt vnfd-id: - 7ab0d10d-8ce2-4c68-aef6-cc5a437a9c62 + +- _admin: + created: 1575034637.011233 + current-operation: null + deployed: + K8s: + - k8scluster-uuid: 73d96432-d692-40d2-8440-e0c73aee209c + kdu-instance: native-kdu-0 + kdu-model: native-kdu-0 + kdu-name: native-kdu + member-vnf-index: native-kdu + vnfr-id: 5ac34899-a23a-4b3c-918a-cd77acadbea6 + RO: + detailed-status: Deployed at VIM + nsd_id: b03a8de8-1898-4142-bc6d-3b0787df567d + nsr_id: b5ce3e00-8647-415d-afaa-d5a612cf3074 + nsr_status: ACTIVE + operational-status: running + vnfd: + - id: b9493dae-a4c9-4b96-8965-329581efb0a1 + member-vnf-index: native-kdu + VCA: [] + modified: 1575034637.011233 + nsState: INSTANTIATED + nslcmop: null + operation-type: null + projects_read: + - 25b5aebf-3da1-49ed-99de-1d2b4a86d6e4 
+ projects_write: + - 25b5aebf-3da1-49ed-99de-1d2b4a86d6e4 + _id: c54b14cb-69a8-45bc-b011-d6bea187dc0a + additionalParamsForNs: null + admin-status: ENABLED + config-status: configured + constituent-vnfr-ref: + - 5ac34899-a23a-4b3c-918a-cd77acadbea6 + create-time: 1575034636.9990137 + datacenter: ea958ba5-4e58-4405-bf42-6e3be15d4c3a + description: default description + detailed-status: done + id: c54b14cb-69a8-45bc-b011-d6bea187dc0a + instantiate_params: + nsDescription: default description + nsName: native-kdu + nsdId: d0f63683-9032-4c6f-8928-ffd4674b9f69 + vimAccountId: 74337dcb-ef54-41e7-bd2d-8c0d7fcd326f + name: native-kdu + name-ref: native-kdu + ns-instance-config-ref: c54b14cb-69a8-45bc-b011-d6bea187dc0a + nsd-id: d0f63683-9032-4c6f-8928-ffd4674b9f69 + nsd-name-ref: native-kdu_ns + nsd-ref: native-kdu_ns + operational-events: [] + operational-status: init + orchestration-progress: {} + resource-orchestrator: osmopenmano + short-name: native-kdu + ssh-authorized-key: null + vld: + - id: mgmtnet + name: null + status: ACTIVE + status-detailed: null + vim-id: 9b6a2ac4-767e-4ec9-9497-8ba63084c77f + vim-network-name: mgmt + vnfd-id: + - d96b1cdf-5ad6-49f7-bf65-907ada989293 """ ro_ns_text = """ @@ -1511,6 +1638,90 @@ db_vnfds_text = """ short-name: multikdu_knf vendor: Telefonica version: '1.0' + +- _admin: + created: 1575031727.5383403 + modified: 1575031727.5383403 + onboardingState: ONBOARDED + operationalState: ENABLED + projects_read: + - 25b5aebf-3da1-49ed-99de-1d2b4a86d6e4 + projects_write: + - 25b5aebf-3da1-49ed-99de-1d2b4a86d6e4 + storage: + descriptor: native-kdu_knf/native-kdu_vnfd.yaml + folder: d96b1cdf-5ad6-49f7-bf65-907ada989293 + fs: local + path: /app/storage/ + pkg-dir: native-kdu_knf + zipfile: native-kdu_knf.tar.gz + usageState: NOT_IN_USE + userDefinedData: {} + _id: d96b1cdf-5ad6-49f7-bf65-907ada989293 + connection-point: + - name: mgmt + description: KNF with two KDU using juju-bundle + df: + - id: native-kdu + kdu-resource-profile: + - id: scale-app + kdu-name: native-kdu + min-number-of-instances: 1 + resource-name: app + - id: scale-app2 + kdu-name: native-kdu + min-number-of-instances: 1 + max-number-of-instances: 10 + resource-name: app2 + scaling-aspect: + - id: kdu_scaling_group + name: kdu_scaling_group + max-scale-level: 10 + aspect-delta-details: + deltas: + - id: native-kdu-delta + kdu-resource-delta: + - id: scale-app + number-of-instances: 1 + - id: kdu_scaling_group_2 + name: kdu_scaling_group_2 + max-scale-level: 10 + aspect-delta-details: + deltas: + - id: native-kdu-delta + kdu-resource-delta: + - id: scale-app + number-of-instances: 1 + - id: scale-app2 + number-of-instances: 2 + lcm-operations-configuration: + operate-vnf-op-config: + day1-2: + - id: native-kdu + initial-config-primitive: + - name: changecontent + parameter: + - data-type: STRING + name: application-name + value: nginx + - data-type: STRING + name: customtitle + value: Initial Config Primitive + seq: '1' + id: native-kdu_knf + k8s-cluster: + nets: + - external-connection-point-ref: mgmt + id: mgmtnet + kdu: + - juju-bundle: stable/native-kdu + name: native-kdu + mgmt-interface: + cp: mgmt + name: native-kdu_knf + short-name: native-kdu_knf + vendor: Ulak Haberlesme A.S. 
+ version: '1.0' """ db_vnfrs_text = """ @@ -1717,6 +1928,42 @@ db_vnfrs_text = """ vim-account-id: 74337dcb-ef54-41e7-bd2d-8c0d7fcd326f vnfd-id: 7ab0d10d-8ce2-4c68-aef6-cc5a437a9c62 vnfd-ref: multikdu_knf + +- _admin: + created: 1575034637.009597 + modified: 1575034637.009597 + nsState: NOT_INSTANTIATED + projects_read: + - 25b5aebf-3da1-49ed-99de-1d2b4a86d6e4 + projects_write: + - 25b5aebf-3da1-49ed-99de-1d2b4a86d6e4 + _id: 5ac34899-a23a-4b3c-918a-cd77acadbea6 + additionalParamsForVnf: null + connection-point: + - connection-point-id: null + id: null + name: mgmt + created-time: 1575034636.9990137 + id: 5ac34899-a23a-4b3c-918a-cd77acadbea6 + ip-address: null + k8s-cluster: + nets: + - external-connection-point-ref: mgmt + id: mgmtnet + ns-vld-id: mgmtnet + vim_net: internal + kdur: + - ip-address: null + juju-bundle: app-bundle + k8s-cluster: + id: e7169dab-f71a-4f1f-b82b-432605e8c4b3 + kdu-name: native-kdu + member-vnf-index-ref: native-kdu + nsr-id-ref: c54b14cb-69a8-45bc-b011-d6bea187dc0a + vdur: [] + vim-account-id: 74337dcb-ef54-41e7-bd2d-8c0d7fcd326f + vnfd-id: d96b1cdf-5ad6-49f7-bf65-907ada989293 + vnfd-ref: native-kdu_knf """ db_nslcmops_scale_text = """ @@ -1792,4 +2039,14 @@ test_ids = { "instantiate": "cf3aa178-7640-4174-b921-2330e6f2aad6", "terminate": None, }, + "TEST-NATIVE-KDU": { + "ns": "c54b14cb-69a8-45bc-b011-d6bea187dc0a", + "instantiate": "52770491-a765-40ce-97a1-c6e200bba7b3", + "terminate": None, + }, + "TEST-NATIVE-KDU-2": { + "ns": "c54b14cb-69a8-45bc-b011-d6bea187dc0a", + "instantiate": "4013bbd2-b151-40ee-bcef-7e24ce5432f6", + "terminate": None, + }, } diff --git a/osm_lcm/tests/test_ns.py b/osm_lcm/tests/test_ns.py index 192f8e0..62de111 100644 --- a/osm_lcm/tests/test_ns.py +++ b/osm_lcm/tests/test_ns.py @@ -394,6 +394,37 @@ class TestMyNS(asynctest.TestCase): self.assertEqual(return_value, expected_value) # print("scale_result: {}".format(self.db.get_one("nslcmops", {"_id": nslcmop_id}).get("detailed-status"))) + # Test scale() for native kdu + # this also includes testing _scale_kdu() + nsr_id = descriptors.test_ids["TEST-NATIVE-KDU"]["ns"] + nslcmop_id = descriptors.test_ids["TEST-NATIVE-KDU"]["instantiate"] + + self.my_ns.k8sclusterjuju.scale = asynctest.mock.CoroutineMock() + self.my_ns.k8sclusterjuju.exec_primitive = asynctest.mock.CoroutineMock() + self.my_ns.k8sclusterjuju.get_scale_count = asynctest.mock.CoroutineMock( + return_value=1 + ) + await self.my_ns.scale(nsr_id, nslcmop_id) + expected_value = "COMPLETED" + return_value = self.db.get_one("nslcmops", {"_id": nslcmop_id}).get( + "operationState" + ) + self.assertEqual(return_value, expected_value) + self.my_ns.k8sclusterjuju.scale.assert_called_once() + + # Test scale() for native kdu with 2 resource + nsr_id = descriptors.test_ids["TEST-NATIVE-KDU-2"]["ns"] + nslcmop_id = descriptors.test_ids["TEST-NATIVE-KDU-2"]["instantiate"] + + self.my_ns.k8sclusterjuju.get_scale_count.return_value = 2 + await self.my_ns.scale(nsr_id, nslcmop_id) + expected_value = "COMPLETED" + return_value = self.db.get_one("nslcmops", {"_id": nslcmop_id}).get( + "operationState" + ) + self.assertEqual(return_value, expected_value) + self.my_ns.k8sclusterjuju.scale.assert_called() + async def test_vca_status_refresh(self): nsr_id = descriptors.test_ids["TEST-A"]["ns"] nslcmop_id = descriptors.test_ids["TEST-A"]["instantiate"] @@ -415,7 +446,10 @@ class TestMyNS(asynctest.TestCase): "operate-vnf-op-config" ]["day1-2"] ): - if "juju" in v["execution-environment-list"][k]: + if ( + v.get("execution-environment-list") + and 
"juju" in v["execution-environment-list"][k] + ): expected_value = self.db.get_list("nsrs")[i][ "vcaStatus" ] -- GitLab From 9ad54a4e89136b65a6ae8aad7c5f4ae494256f68 Mon Sep 17 00:00:00 2001 From: David Garcia Date: Fri, 28 May 2021 12:08:18 +0200 Subject: [PATCH 31/35] Fix 1533 (nscharms): read juju from descriptor In the VNFD, the juju section has been removed and included inside the execution environment list. Since then basic_12 (nscharms) has not been properly working because we are only looking for the execution-environment-liist key inside the descriptor, but that does not apply to the NSD. Additionally, I fixed the deletion of the nscharm, that was failing due to the member-vnf-index key not existing in the deployed VCA. Change-Id: Icb3220f28e373cf8ac3f978c9a3a83b179540512 Signed-off-by: David Garcia --- osm_lcm/ns.py | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/osm_lcm/ns.py b/osm_lcm/ns.py index ddd827e..0d15378 100644 --- a/osm_lcm/ns.py +++ b/osm_lcm/ns.py @@ -3282,6 +3282,8 @@ class NsLcm(LcmBase): ) if "execution-environment-list" in descriptor_config: ee_list = descriptor_config.get("execution-environment-list", []) + elif "juju" in descriptor_config: + ee_list = [descriptor_config] # ns charms else: # other types as script are not supported ee_list = [] @@ -4035,8 +4037,13 @@ class NsLcm(LcmBase): for vca_index, vca in enumerate(get_iterable(nsr_deployed, "VCA")): config_descriptor = None - - vca_id = self.get_vca_id(db_vnfrs_dict[vca["member-vnf-index"]], db_nsr) + vca_member_vnf_index = vca.get("member-vnf-index") + vca_id = self.get_vca_id( + db_vnfrs_dict.get(vca_member_vnf_index) + if vca_member_vnf_index + else None, + db_nsr, + ) if not vca or not vca.get("ee_id"): continue if not vca.get("member-vnf-index"): -- GitLab From 4554a700d861c477360b6360920a5cb8c1f61718 Mon Sep 17 00:00:00 2001 From: romeromonser Date: Fri, 28 May 2021 12:00:08 +0200 Subject: [PATCH 32/35] Bugfix 1550: Setting a custom release name for Helm based kdus Change-Id: I38a04094d6b2327f0758451ed6d8619f8a121687 Signed-off-by: romeromonser --- osm_lcm/ns.py | 19 ++++++++++++------- osm_lcm/tests/test_ns.py | 1 + 2 files changed, 13 insertions(+), 7 deletions(-) diff --git a/osm_lcm/ns.py b/osm_lcm/ns.py index 0d15378..78c58f8 100644 --- a/osm_lcm/ns.py +++ b/osm_lcm/ns.py @@ -2848,13 +2848,16 @@ class NsLcm(LcmBase): "path": nsr_db_path, } - kdu_instance = self.k8scluster_map[ - k8sclustertype - ].generate_kdu_instance_name( - db_dict=db_dict_install, - kdu_model=k8s_instance_info["kdu-model"], - kdu_name=k8s_instance_info["kdu-name"], - ) + if k8s_instance_info.get("kdu-deployment-name"): + kdu_instance = k8s_instance_info.get("kdu-deployment-name") + else: + kdu_instance = self.k8scluster_map[ + k8sclustertype + ].generate_kdu_instance_name( + db_dict=db_dict_install, + kdu_model=k8s_instance_info["kdu-model"], + kdu_name=k8s_instance_info["kdu-name"], + ) self.update_db_2( "nsrs", nsr_id, {nsr_db_path + ".kdu-instance": kdu_instance} ) @@ -3093,6 +3096,7 @@ class NsLcm(LcmBase): if kdud["name"] == kdur["kdu-name"] ) namespace = kdur.get("k8s-namespace") + kdu_deployment_name = kdur.get("kdu-deployment-name") if kdur.get("helm-chart"): kdumodel = kdur["helm-chart"] # Default version: helm3, if helm-version is v2 assign v2 @@ -3205,6 +3209,7 @@ class NsLcm(LcmBase): "kdu-name": kdur["kdu-name"], "kdu-model": kdumodel, "namespace": namespace, + "kdu-deployment-name": kdu_deployment_name, } db_path = "_admin.deployed.K8s.{}".format(index) db_nsr_update[db_path] = 
k8s_instance_info diff --git a/osm_lcm/tests/test_ns.py b/osm_lcm/tests/test_ns.py index 62de111..ef87a4c 100644 --- a/osm_lcm/tests/test_ns.py +++ b/osm_lcm/tests/test_ns.py @@ -754,6 +754,7 @@ class TestMyNS(asynctest.TestCase): "kdu-name": "ldap", "member-vnf-index": "multikdu", "namespace": None, + "kdu-deployment-name": None, } nsr_result = copy.deepcopy(db_nsr["_admin"]["deployed"]["K8s"][0]) -- GitLab From f31b66d93cf8183a02f3a4e0e92ea1a8c6b4a9b4 Mon Sep 17 00:00:00 2001 From: garciadeblas Date: Mon, 31 May 2021 15:39:04 +0200 Subject: [PATCH 33/35] Update Dockerfile.local to work with cloned common and N2VC Change-Id: I7bc5c2b551c52c2c0d3455deabe02412872ff162 Signed-off-by: garciadeblas --- Dockerfile.local | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/Dockerfile.local b/Dockerfile.local index 737caad..d57235b 100644 --- a/Dockerfile.local +++ b/Dockerfile.local @@ -54,6 +54,10 @@ RUN git -C /app clone https://osm.etsi.org/gerrit/osm/common.git \ && python3 -m pip install -e /app/common # python3-pymongo python3-yaml pycrypto aiokafka +RUN python3 -m pip install \ + -r /app/common/requirements.txt \ + -r /app/N2VC/requirements.txt + RUN python3 -m pip install grpcio-tools grpclib RUN mkdir -p /app/storage/kafka && mkdir -p /app/log @@ -125,6 +129,9 @@ ENV OSMLCM_GLOBAL_LOGLEVEL DEBUG # Copy the current directory contents into the container at /app/LCM ADD . /app/LCM +RUN python3 -m pip install \ + -r requirements.txt + # Run app.py when the container launches CMD python3 -m osm_lcm.lcm -- GitLab From 586fe542e77b9d225ccfdc46d2fc9c96d8d2cd2b Mon Sep 17 00:00:00 2001 From: Dat Le Date: Thu, 3 Jun 2021 09:50:21 +0700 Subject: [PATCH 34/35] Fix Bug 1556 - redundant input param in calling _check_or_add_scale_suboperation Change-Id: I19e58252df4ba2dd18aa372d6043713866cae17b Signed-off-by: Dat Le --- osm_lcm/ns.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/osm_lcm/ns.py b/osm_lcm/ns.py index 78c58f8..cb281e4 100644 --- a/osm_lcm/ns.py +++ b/osm_lcm/ns.py @@ -5386,7 +5386,6 @@ class NsLcm(LcmBase): # Pre-scale retry check: Check if this sub-operation has been executed before op_index = self._check_or_add_scale_suboperation( db_nslcmop, - nslcmop_id, vnf_index, vnf_config_primitive, primitive_params, @@ -5795,7 +5794,6 @@ class NsLcm(LcmBase): # Post-scale retry check: Check if this sub-operation has been executed before op_index = self._check_or_add_scale_suboperation( db_nslcmop, - nslcmop_id, vnf_index, vnf_config_primitive, primitive_params, -- GitLab From 7b6d7f124aafc1cff25428bc1303e133a349ecf4 Mon Sep 17 00:00:00 2001 From: cuongmax Date: Sun, 20 Jun 2021 01:00:36 +0700 Subject: [PATCH 35/35] Fix bug read ns record: not found any nsr with filter Change-Id: I4fc642080dcefe18a6465ef5536e0cab4f04de75 Signed-off-by: cuongmax --- osm_lcm/ns.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/osm_lcm/ns.py b/osm_lcm/ns.py index cb281e4..cc0937f 100644 --- a/osm_lcm/ns.py +++ b/osm_lcm/ns.py @@ -237,7 +237,7 @@ class NsLcm(LcmBase): nsr_id = filter.get("_id") # read ns record from database - nsr = self.db.get_one(table="nsrs", q_filter=filter) + nsr = self.db.get_one(table="nslcmops", q_filter=filter) current_ns_status = nsr.get("nsState") # get vca status for NS @@ -320,7 +320,7 @@ class NsLcm(LcmBase): db_dict["nsState"] = "READY" # write to database - self.update_db_2("nsrs", nsr_id, db_dict) + self.update_db_2("nslcmops", nsr_id, db_dict) except (asyncio.CancelledError, asyncio.TimeoutError): raise -- GitLab
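For reference, the custom release-name handling added by patch 32 reduces to a simple fallback. A condensed sketch using the names from that diff; this is an illustration of the control flow, not a drop-in replacement for the hunk:

kdu_instance = k8s_instance_info.get("kdu-deployment-name")
if not kdu_instance:
    # No custom release name in the KDU record: derive one from the model/name.
    kdu_instance = self.k8scluster_map[k8sclustertype].generate_kdu_instance_name(
        db_dict=db_dict_install,
        kdu_model=k8s_instance_info["kdu-model"],
        kdu_name=k8s_instance_info["kdu-name"],
    )
self.update_db_2("nsrs", nsr_id, {nsr_db_path + ".kdu-instance": kdu_instance})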