X-Git-Url: https://osm.etsi.org/gitweb/?a=blobdiff_plain;f=osm_lcm%2Fns.py;h=763d2390acf8fbae9b780dd06a7b4882ce53db20;hb=3149f26911d1c239f674d5918f67ae802b0e671c;hp=5ca6354c6f78c78b85de83c29750b05070d37a04;hpb=9f9c6f2e8b8e978deaa4fb6e1482c0656ce4bd45;p=osm%2FLCM.git diff --git a/osm_lcm/ns.py b/osm_lcm/ns.py index 5ca6354..763d239 100644 --- a/osm_lcm/ns.py +++ b/osm_lcm/ns.py @@ -24,7 +24,7 @@ import traceback from jinja2 import Environment, Template, meta, TemplateError, TemplateNotFound, TemplateSyntaxError from osm_lcm import ROclient -from osm_lcm.lcm_utils import LcmException, LcmExceptionNoMgmtIP, LcmBase +from osm_lcm.lcm_utils import LcmException, LcmExceptionNoMgmtIP, LcmBase, deep_get from n2vc.k8s_helm_conn import K8sHelmConnector from osm_common.dbbase import DbException @@ -68,21 +68,6 @@ def populate_dict(target_dict, key_list, value): target_dict[key_list[-1]] = value -def deep_get(target_dict, key_list): - """ - Get a value from target_dict entering in the nested keys. If keys does not exist, it returns None - Example target_dict={a: {b: 5}}; key_list=[a,b] returns 5; both key_list=[a,b,c] and key_list=[f,h] return None - :param target_dict: dictionary to be read - :param key_list: list of keys to read from target_dict - :return: The wanted value if exist, None otherwise - """ - for key in key_list: - if not isinstance(target_dict, dict) or key not in target_dict: - return None - target_dict = target_dict[key] - return target_dict - - class NsLcm(LcmBase): timeout_vca_on_error = 5 * 60 # Time for charm from first time at blocked,error status to mark as failed total_deploy_timeout = 2 * 3600 # global timeout for deployment @@ -114,6 +99,8 @@ class NsLcm(LcmBase): self.vca_config['public_key'] = self.vca_config['pubkey'] if 'cacert' in self.vca_config: self.vca_config['ca_cert'] = self.vca_config['cacert'] + if 'apiproxy' in self.vca_config: + self.vca_config['api_proxy'] = self.vca_config['apiproxy'] # create N2VC connector self.n2vc = N2VCJujuConnector( @@ -124,10 +111,9 @@ class NsLcm(LcmBase): url='{}:{}'.format(self.vca_config['host'], self.vca_config['port']), username=self.vca_config.get('user', None), vca_config=self.vca_config, - on_update_db=self._on_update_n2vc_db - # TODO - # New N2VC argument - # api_proxy=vca_config.get('apiproxy') + on_update_db=self._on_update_n2vc_db, + # ca_cert=self.vca_config.get('cacert'), + # api_proxy=self.vca_config.get('apiproxy'), ) self.k8sclusterhelm = K8sHelmConnector( @@ -383,6 +369,11 @@ class NsLcm(LcmBase): populate_dict(RO_ns_params, ("vnfs", vnf_params["member-vnf-index"], "networks", internal_vld_params["name"], "ip-profile"), ip_profile_2_RO(internal_vld_params["ip-profile"])) + if internal_vld_params.get("provider-network"): + + populate_dict(RO_ns_params, ("vnfs", vnf_params["member-vnf-index"], "networks", + internal_vld_params["name"], "provider-network"), + internal_vld_params["provider-network"].copy()) for icp_params in get_iterable(internal_vld_params, "internal-connection-point"): # look for interface @@ -416,6 +407,11 @@ class NsLcm(LcmBase): populate_dict(RO_ns_params, ("networks", vld_params["name"], "ip-profile"), ip_profile_2_RO(vld_params["ip-profile"])) + if vld_params.get("provider-network"): + + populate_dict(RO_ns_params, ("networks", vld_params["name"], "provider-network"), + vld_params["provider-network"].copy()) + if "wimAccountId" in vld_params and vld_params["wimAccountId"] is not None: populate_dict(RO_ns_params, ("networks", vld_params["name"], "wim_account"), 
wim_account_2_RO(vld_params["wimAccountId"])), @@ -431,6 +427,7 @@ class NsLcm(LcmBase): RO_vld_sites.append({"netmap-use": vld_params["vim-network-name"]}) if RO_vld_sites: populate_dict(RO_ns_params, ("networks", vld_params["name"], "sites"), RO_vld_sites) + if vld_params.get("vim-network-id"): RO_vld_sites = [] if isinstance(vld_params["vim-network-id"], dict): @@ -621,6 +618,57 @@ class NsLcm(LcmBase): else: raise LcmException("ns_update_vnfr: Not found member_vnf_index={} from VIM info".format(vnf_index)) + @staticmethod + def _get_ns_config_info(vca_deployed_list): + """ + Generates a mapping between vnf,vdu elements and the N2VC id + :param vca_deployed_list: List of database _admin.deploy.VCA that contains this list + :return: a dictionary with {osm-config-mapping: {}} where its element contains: + "": for a vnf configuration, or + "..": for a vdu configuration + """ + mapping = {} + ns_config_info = {"osm-config-mapping": mapping} + for vca in vca_deployed_list: + if not vca["member-vnf-index"]: + continue + if not vca["vdu_id"]: + mapping[vca["member-vnf-index"]] = vca["application"] + else: + mapping["{}.{}.{}".format(vca["member-vnf-index"], vca["vdu_id"], vca["vdu_count_index"])] =\ + vca["application"] + return ns_config_info + + @staticmethod + def _get_initial_config_primitive_list(desc_primitive_list, vca_deployed): + """ + Generates a list of initial-config-primitive based on the list provided by the descriptor. It includes internal + primitives as verify-ssh-credentials, or config when needed + :param desc_primitive_list: information of the descriptor + :param vca_deployed: information of the deployed, needed for known if it is related to an NS, VNF, VDU and if + this element contains a ssh public key + :return: The modified list. Can ba an empty list, but always a list + """ + if desc_primitive_list: + primitive_list = desc_primitive_list.copy() + else: + primitive_list = [] + # look for primitive config, and get the position. 
None if not present + config_position = None + for index, primitive in enumerate(primitive_list): + if primitive["name"] == "config": + config_position = index + break + + # for NS, add always a config primitive if not present (bug 874) + if not vca_deployed["member-vnf-index"] and config_position is None: + primitive_list.insert(0, {"name": "config", "parameter": []}) + config_position = 0 + # for VNF/VDU add verify-ssh-credentials after config + if vca_deployed["member-vnf-index"] and config_position is not None and vca_deployed.get("ssh-public-key"): + primitive_list.insert(config_position + 1, {"name": "verify-ssh-credentials", "parameter": []}) + return primitive_list + async def instantiate_RO(self, logging_text, nsr_id, nsd, db_nsr, db_nslcmop, db_vnfrs, db_vnfds_ref, n2vc_key_list): @@ -805,19 +853,32 @@ class NsLcm(LcmBase): step = "Deployed at VIM" self.logger.debug(logging_text + step) - # wait for ip addres at RO, and optionally, insert public key in virtual machine - # returns IP address - async def insert_key_ro(self, logging_text, nsr_id, vnfr_id, vdu_id, vdu_index, pub_key=None, user=None): - - self.logger.debug(logging_text + "Starting insert_key_ro") + async def wait_vm_up_insert_key_ro(self, logging_text, nsr_id, vnfr_id, vdu_id, vdu_index, pub_key=None, user=None): + """ + Wait for ip addres at RO, and optionally, insert public key in virtual machine + :param logging_text: prefix use for logging + :param nsr_id: + :param vnfr_id: + :param vdu_id: + :param vdu_index: + :param pub_key: public ssh key to inject, None to skip + :param user: user to apply the public ssh key + :return: IP address + """ + # self.logger.debug(logging_text + "Starting wait_vm_up_insert_key_ro") ro_nsr_id = None ip_address = None nb_tries = 0 target_vdu_id = None + ro_retries = 0 while True: + ro_retries += 1 + if ro_retries >= 360: # 1 hour + raise LcmException("Not found _admin.deployed.RO.nsr_id for nsr_id: {}".format(nsr_id)) + await asyncio.sleep(10, loop=self.loop) # wait until NS is deployed at RO if not ro_nsr_id: @@ -829,31 +890,37 @@ class NsLcm(LcmBase): # get ip address if not target_vdu_id: db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id}) - if not vdu_id: + + if not vdu_id: # for the VNF case ip_address = db_vnfr.get("ip-address") if not ip_address: continue - for vdur in get_iterable(db_vnfr, "vdur"): - if (vdur["vdu-id-ref"] == vdu_id and vdur["count-index"] == vdu_index) or \ - (ip_address and vdur.get("ip-address") == ip_address): - if vdur["status"] == "ACTIVE": - target_vdu_id = vdur["vdu-id-ref"] - elif vdur["status"] == "ERROR": - raise LcmException("Cannot inject ssh-key because target VM is in error state") - break - else: + vdur = next((x for x in get_iterable(db_vnfr, "vdur") if x.get("ip-address") == ip_address), None) + else: # VDU case + vdur = next((x for x in get_iterable(db_vnfr, "vdur") + if x.get("vdu-id-ref") == vdu_id and x.get("count-index") == vdu_index), None) + + if not vdur: raise LcmException("Not found vnfr_id={}, vdu_index={}, vdu_index={}".format( vnfr_id, vdu_id, vdu_index )) + if vdur.get("status") == "ACTIVE": + ip_address = vdur.get("ip-address") + if not ip_address: + continue + target_vdu_id = vdur["vdu-id-ref"] + elif vdur.get("status") == "ERROR": + raise LcmException("Cannot inject ssh-key because target VM is in error state") + if not target_vdu_id: continue - self.logger.debug(logging_text + "IP address={}".format(ip_address)) + # self.logger.debug(logging_text + "IP address={}".format(ip_address)) # inject public key into machine if 
pub_key and user: - self.logger.debug(logging_text + "Inserting RO key") + # self.logger.debug(logging_text + "Inserting RO key") try: ro_vm_id = "{}-{}".format(db_vnfr["member-vnf-index-ref"], target_vdu_id) # TODO add vdu_index result_dict = await self.RO.create_action( @@ -872,10 +939,12 @@ class NsLcm(LcmBase): result.get("description"))) break except ROclient.ROClientException as e: + if not nb_tries: + self.logger.debug(logging_text + "error injecting key: {}. Retrying until {} seconds". + format(e, 20*10)) nb_tries += 1 - if nb_tries >= 10: + if nb_tries >= 20: raise LcmException("Reaching max tries injecting key. Error: {}".format(e)) - self.logger.debug(logging_text + "error injecting key: {}".format(e)) else: break @@ -885,16 +954,15 @@ class NsLcm(LcmBase): kdu_name, vdu_index, config_descriptor, deploy_params, base_folder): nsr_id = db_nsr["_id"] db_update_entry = "_admin.deployed.VCA.{}.".format(vca_index) + vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"] vca_deployed = db_nsr["_admin"]["deployed"]["VCA"][vca_index] db_dict = { 'collection': 'nsrs', 'filter': {'_id': nsr_id}, 'path': db_update_entry } - - - logging_text += "member_vnf_index={} vdu_id={}, vdu_index={} "\ - .format(db_vnfr["member-vnf-index-ref"], vdu_id, vdu_index) + logging_text += "member_vnf_index={} vdu_id={}, vdu_index={} ".format(db_vnfr["member-vnf-index-ref"], + vdu_id, vdu_index) step = "" try: @@ -930,35 +998,35 @@ class NsLcm(LcmBase): if is_proxy_charm: step = "create execution environment" self.logger.debug(logging_text + step) - ee_id, credentials = await self.n2vc.create_execution_environment( - namespace=namespace, - reuse_ee_id=ee_id, - db_dict=db_dict - ) - + ee_id, credentials = await self.n2vc.create_execution_environment(namespace=namespace, + reuse_ee_id=ee_id, + db_dict=db_dict) else: - step = "register execution envioronment" - # TODO wait until deployed by RO, when IP address has been filled. By pooling???? - credentials = {} # TODO db_credentials["ip_address"] + step = "Waiting to VM being up and getting IP address" + self.logger.debug(logging_text + step) + rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(logging_text, nsr_id, vnfr_id, vdu_id, vdu_index, + user=None, pub_key=None) + credentials = {"hostname": rw_mgmt_ip} # get username + username = deep_get(config_descriptor, ("config-access", "ssh-access", "default-user")) # TODO remove this when changes on IM regarding config-access:ssh-access:default-user were # merged. 
Meanwhile let's get username from initial-config-primitive - if config_descriptor.get("initial-config-primitive"): - for param in config_descriptor["initial-config-primitive"][0].get("parameter", ()): - if param["name"] == "ssh-username": - credentials["username"] = param["value"] - if config_descriptor.get("config-access") and config_descriptor["config-access"].get("ssh-access"): - if config_descriptor["config-access"]["ssh-access"].get("required"): - credentials["username"] = \ - config_descriptor["config-access"]["ssh-access"].get("default-user") - + if not username and config_descriptor.get("initial-config-primitive"): + for config_primitive in config_descriptor["initial-config-primitive"]: + for param in config_primitive.get("parameter", ()): + if param["name"] == "ssh-username": + username = param["value"] + break + if not username: + raise LcmException("Cannot determine the username neither with 'initial-config-promitive' nor with " + "'config-access.ssh-access.default-user'") + credentials["username"] = username # n2vc_redesign STEP 3.2 + + step = "register execution environment {}".format(credentials) self.logger.debug(logging_text + step) - ee_id = await self.n2vc.register_execution_environment( - credentials=credentials, - namespace=namespace, - db_dict=db_dict - ) + ee_id = await self.n2vc.register_execution_environment(credentials=credentials, namespace=namespace, + db_dict=db_dict) # for compatibility with MON/POL modules, the need model and application name at database # TODO ask to N2VC instead of assuming the format "model_name.application_name" @@ -970,59 +1038,58 @@ class NsLcm(LcmBase): db_update_entry + "ee_id": ee_id}) # n2vc_redesign STEP 3.3 - # TODO check if already done + step = "Install configuration Software" + # TODO check if already done self.logger.debug(logging_text + step) - await self.n2vc.install_configuration_sw( - ee_id=ee_id, - artifact_path=artifact_path, - db_dict=db_dict - ) + await self.n2vc.install_configuration_sw(ee_id=ee_id, artifact_path=artifact_path, db_dict=db_dict) # if SSH access is required, then get execution environment SSH public - required = deep_get(config_descriptor, ("config-access", "ssh-access", "required")) - if is_proxy_charm and required: - + if is_proxy_charm: # if native charm we have waited already to VM be UP pub_key = None - pub_key = await self.n2vc.get_ee_ssh_public__key( - ee_id=ee_id, - db_dict=db_dict - ) + user = None + if deep_get(config_descriptor, ("config-access", "ssh-access", "required")): + # Needed to inject a ssh key + user = deep_get(config_descriptor, ("config-access", "ssh-access", "default-user")) + step = "Install configuration Software, getting public ssh key" + pub_key = await self.n2vc.get_ee_ssh_public__key(ee_id=ee_id, db_dict=db_dict) + + step = "Insert public key into VM" + else: + step = "Waiting to VM being up and getting IP address" + self.logger.debug(logging_text + step) - user = deep_get(config_descriptor, ("config-access", "ssh-access", "default-user")) - # insert pub_key into VM # n2vc_redesign STEP 5.1 - step = "Insert public key into VM" - self.logger.debug(logging_text + step) + # wait for RO (ip-address) Insert pub_key into VM + rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(logging_text, nsr_id, vnfr_id, vdu_id, vdu_index, + user=user, pub_key=pub_key) - # wait for RO (ip-address) - rw_mgmt_ip = await self.insert_key_ro( - logging_text=logging_text, - nsr_id=nsr_id, - vnfr_id=vnfr_id, - vdu_id=vdu_id, - vdu_index=vdu_index, - user=user, - pub_key=pub_key - ) + 
self.logger.debug(logging_text + ' VM_ip_address={}'.format(rw_mgmt_ip)) - # store rw_mgmt_ip in deploy params for later substitution - self.logger.debug('rw_mgmt_ip={}'.format(rw_mgmt_ip)) + # store rw_mgmt_ip in deploy params for later replacement deploy_params["rw_mgmt_ip"] = rw_mgmt_ip # n2vc_redesign STEP 6 Execute initial config primitive - initial_config_primitive_list = config_descriptor.get('initial-config-primitive', []) step = 'execute initial config primitive' + initial_config_primitive_list = config_descriptor.get('initial-config-primitive') # sort initial config primitives by 'seq' try: initial_config_primitive_list.sort(key=lambda val: int(val['seq'])) - except Exception: - self.logger.warn(logging_text + 'Cannot sort by "seq" field' + step) + except Exception as e: + self.logger.error(logging_text + step + ": " + str(e)) + + # add config if not present for NS charm + initial_config_primitive_list = self._get_initial_config_primitive_list(initial_config_primitive_list, + vca_deployed) for initial_config_primitive in initial_config_primitive_list: + # adding information on the vca_deployed if it is a NS execution environment + if not vca_deployed["member-vnf-index"]: + deploy_params["ns_config_info"] = self._get_ns_config_info(vca_deployed_list) # TODO check if already done primitive_params_ = self._map_primitive_params(initial_config_primitive, {}, deploy_params) + step = "execute primitive '{}' params '{}'".format(initial_config_primitive["name"], primitive_params_) self.logger.debug(logging_text + step) await self.n2vc.exec_primitive( @@ -1077,13 +1144,11 @@ class NsLcm(LcmBase): nslcmop_operation_state = None db_vnfrs = {} # vnf's info indexed by member-index # n2vc_info = {} - # n2vc_key_list = [] # list of public keys to be injected as authorized to VMs task_instantiation_list = [] exc = None try: # wait for any previous tasks in process - step = "Waiting for previous tasks" - self.logger.debug(logging_text + step) + step = "Waiting for previous operations to terminate" await self.lcm_tasks.waitfor_related_HA('ns', 'nslcmops', nslcmop_id) # STEP 0: Reading database (nslcmops, nsrs, nsds, vnfrs, vnfds) @@ -1152,25 +1217,23 @@ class NsLcm(LcmBase): db_nsr_update["_admin.nsState"] = "INSTANTIATED" self.update_db_2("nsrs", nsr_id, db_nsr_update) self.logger.debug(logging_text + "Before deploy_kdus") - db_k8scluster_list = self.db.get_list("k8sclusters", {}) # Call to deploy_kdus in case exists the "vdu:kdu" param task_kdu = asyncio.ensure_future( self.deploy_kdus( logging_text=logging_text, nsr_id=nsr_id, - nsd=nsd, db_nsr=db_nsr, - db_nslcmop=db_nslcmop, db_vnfrs=db_vnfrs, - db_vnfds_ref=db_vnfds_ref, - db_k8scluster=db_k8scluster_list ) ) self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "instantiate_KDUs", task_kdu) task_instantiation_list.append(task_kdu) # n2vc_redesign STEP 1 Get VCA public ssh-key # feature 1429. 
Add n2vc public key to needed VMs - n2vc_key = await self.n2vc.get_public_key() + n2vc_key = self.n2vc.get_public_key() + n2vc_key_list = [n2vc_key] + if self.vca_config.get("public_key"): + n2vc_key_list.append(self.vca_config["public_key"]) # n2vc_redesign STEP 2 Deploy Network Scenario task_ro = asyncio.ensure_future( @@ -1182,7 +1245,7 @@ class NsLcm(LcmBase): db_nslcmop=db_nslcmop, db_vnfrs=db_vnfrs, db_vnfds_ref=db_vnfds_ref, - n2vc_key_list=[n2vc_key] + n2vc_key_list=n2vc_key_list ) ) self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "instantiate_RO", task_ro) @@ -1208,10 +1271,7 @@ class NsLcm(LcmBase): # Get additional parameters deploy_params = {} if db_vnfr.get("additionalParamsForVnf"): - deploy_params = db_vnfr["additionalParamsForVnf"].copy() - for k, v in deploy_params.items(): - if isinstance(v, str) and v.startswith("!!yaml "): - deploy_params[k] = yaml.safe_load(v[7:]) + deploy_params = self._format_additional_params(db_vnfr["additionalParamsForVnf"].copy()) descriptor_config = vnfd.get("vnf-configuration") if descriptor_config and descriptor_config.get("juju"): @@ -1238,6 +1298,11 @@ class NsLcm(LcmBase): for vdud in get_iterable(vnfd, 'vdu'): vdu_id = vdud["id"] descriptor_config = vdud.get('vdu-configuration') + vdur = next((x for x in db_vnfr["vdur"] if x["vdu-id-ref"] == vdu_id), None) + if vdur.get("additionalParams"): + deploy_params_vdu = self._format_additional_params(vdur["additionalParams"]) + else: + deploy_params_vdu = deploy_params if descriptor_config and descriptor_config.get("juju"): # look for vdu index in the db_vnfr["vdu"] section # for vdur_index, vdur in enumerate(db_vnfr["vdur"]): @@ -1264,7 +1329,7 @@ class NsLcm(LcmBase): member_vnf_index=member_vnf_index, vdu_index=vdu_index, vdu_name=vdu_name, - deploy_params=deploy_params, + deploy_params=deploy_params_vdu, descriptor_config=descriptor_config, base_folder=base_folder, task_instantiation_list=task_instantiation_list @@ -1315,13 +1380,11 @@ class NsLcm(LcmBase): kdu_name = None vdu_index = 0 vdu_name = None + # Get additional parameters deploy_params = {} if db_nsr.get("additionalParamsForNs"): - deploy_params = db_nsr["additionalParamsForNs"].copy() - for k, v in deploy_params.items(): - if isinstance(v, str) and v.startswith("!!yaml "): - deploy_params[k] = yaml.safe_load(v[7:]) + deploy_params = self._format_additional_params(db_nsr["additionalParamsForNs"].copy()) base_folder = nsd["_admin"]["storage"] self._deploy_n2vc( logging_text=logging_text, @@ -1418,97 +1481,98 @@ class NsLcm(LcmBase): self.logger.debug(logging_text + "Exit") self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_instantiate") - async def deploy_kdus(self, logging_text, nsr_id, nsd, db_nsr, db_nslcmop, db_vnfrs, db_vnfds_ref, db_k8scluster): + async def deploy_kdus(self, logging_text, nsr_id, db_nsr, db_vnfrs): # Launch kdus if present in the descriptor - logging_text = "Deploy kdus: " - db_nsr_update = {} - db_nsr_update["_admin.deployed.K8s"] = [] - try: - # Look for all vnfds - # db_nsr_update["_admin.deployed.K8s"] = [] - vnf_update = [] - task_list = [] - for c_vnf in nsd.get("constituent-vnfd", ()): - vnfr = db_vnfrs[c_vnf["member-vnf-index"]] - member_vnf_index = c_vnf["member-vnf-index"] - vnfd = db_vnfds_ref[c_vnf['vnfd-id-ref']] - vnfd_ref = vnfd["id"] - desc_params = {} - step = "Checking kdu from vnf: {} - member-vnf-index: {}".format(vnfd_ref, member_vnf_index) - self.logger.debug(logging_text + step) - if vnfd.get("kdu"): - step = "vnf: {} has kdus".format(vnfd_ref) - self.logger.debug(logging_text + step) - 
for vnfr_name, vnfr_data in db_vnfrs.items(): - if vnfr_data["vnfd-ref"] == vnfd["id"]: - if vnfr_data.get("additionalParamsForVnf"): - desc_params = self._format_additional_params(vnfr_data["additionalParamsForVnf"]) - break - else: - raise LcmException("VNF descriptor not found with id: {}".format(vnfr_data["vnfd-ref"])) - self.logger.debug(logging_text + step) + k8scluster_id_2_uuic = {"helm-chart": {}, "juju-bundle": {}} - for kdur in vnfr.get("kdur"): - index = 0 - for k8scluster in db_k8scluster: - if kdur["k8s-cluster"]["id"] == k8scluster["_id"]: - cluster_uuid = k8scluster["cluster-uuid"] - break - else: - raise LcmException("K8scluster not found with id: {}".format(kdur["k8s-cluster"]["id"])) - self.logger.debug(logging_text + step) + def _get_cluster_id(cluster_id, cluster_type): + nonlocal k8scluster_id_2_uuic + if cluster_id in k8scluster_id_2_uuic[cluster_type]: + return k8scluster_id_2_uuic[cluster_type][cluster_id] - step = "Instantiate KDU {} in k8s cluster {}".format(kdur["kdu-name"], cluster_uuid) - self.logger.debug(logging_text + step) - for kdu in vnfd.get("kdu"): - if kdu.get("name") == kdur["kdu-name"]: - break - else: - raise LcmException("KDU not found with name: {} in VNFD {}".format(kdur["kdu-name"], - vnfd["name"])) - self.logger.debug(logging_text + step) - kdumodel = None - k8sclustertype = None - if kdu.get("helm-chart"): - kdumodel = kdu["helm-chart"] - k8sclustertype = "chart" - elif kdu.get("juju-bundle"): - kdumodel = kdu["juju-bundle"] - k8sclustertype = "juju" - k8s_instace_info = {"kdu-instance": None, "k8scluster-uuid": cluster_uuid, - "vnfr-id": vnfr["id"], "k8scluster-type": k8sclustertype, - "kdu-name": kdur["kdu-name"], "kdu-model": kdumodel} - db_nsr_update["_admin.deployed.K8s"].append(k8s_instace_info) - db_dict = {"collection": "nsrs", "filter": {"_id": nsr_id}, "path": "_admin.deployed.K8s." 
- "{}".format(index)} - if k8sclustertype == "chart": - task = self.k8sclusterhelm.install(cluster_uuid=cluster_uuid, - kdu_model=kdumodel, atomic=True, params=desc_params, - db_dict=db_dict, timeout=300) - else: - # TODO I need the juju connector in place - pass - task_list.append(task) - index += 1 + db_k8scluster = self.db.get_one("k8sclusters", {"_id": cluster_id}, fail_on_empty=False) + if not db_k8scluster: + raise LcmException("K8s cluster {} cannot be found".format(cluster_id)) + k8s_id = deep_get(db_k8scluster, ("_admin", cluster_type, "id")) + if not k8s_id: + raise LcmException("K8s cluster '{}' has not been initilized for '{}'".format(cluster_id, cluster_type)) + k8scluster_id_2_uuic[cluster_type][cluster_id] = k8s_id + return k8s_id + + logging_text += "Deploy kdus: " + try: + db_nsr_update = {"_admin.deployed.K8s": []} self.update_db_2("nsrs", nsr_id, db_nsr_update) - done = None - pending = None - if len(task_list) > 0: - self.logger.debug('Waiting for terminate pending tasks...') - done, pending = await asyncio.wait(task_list, timeout=3600) - if not pending: - for fut in done: - k8s_instance = fut.result() - k8s_instace_info = {"kdu-instance": k8s_instance, "k8scluster-uuid": cluster_uuid, - "vnfr-id": vnfr["id"], "k8scluster-type": k8sclustertype, - "kdu-name": kdur["kdu-name"], "kdu-model": kdumodel} - vnf_update.append(k8s_instace_info) - self.logger.debug('All tasks finished...') - else: - self.logger.info('There are pending tasks: {}'.format(pending)) - db_nsr_update["_admin.deployed.K8s"] = vnf_update + # Look for all vnfds + pending_tasks = {} + index = 0 + for vnfr_data in db_vnfrs.values(): + for kdur in get_iterable(vnfr_data, "kdur"): + desc_params = self._format_additional_params(kdur.get("additionalParams")) + kdumodel = None + k8sclustertype = None + error_text = None + cluster_uuid = None + if kdur.get("helm-chart"): + kdumodel = kdur["helm-chart"] + k8sclustertype = "chart" + k8sclustertype_full = "helm-chart" + elif kdur.get("juju-bundle"): + kdumodel = kdur["juju-bundle"] + k8sclustertype = "juju" + k8sclustertype_full = "juju-bundle" + else: + error_text = "kdu type is neither helm-chart not juju-bundle. Maybe an old NBI version is" \ + " running" + try: + if not error_text: + cluster_uuid = _get_cluster_id(kdur["k8s-cluster"]["id"], k8sclustertype_full) + except LcmException as e: + error_text = str(e) + step = "Instantiate KDU {} in k8s cluster {}".format(kdur["kdu-name"], cluster_uuid) + + k8s_instace_info = {"kdu-instance": None, "k8scluster-uuid": cluster_uuid, + "k8scluster-type": k8sclustertype, + "kdu-name": kdur["kdu-name"], "kdu-model": kdumodel} + if error_text: + k8s_instace_info["detailed-status"] = error_text + db_nsr_update["_admin.deployed.K8s.{}".format(index)] = k8s_instace_info + self.update_db_2("nsrs", nsr_id, db_nsr_update) + if error_text: + continue + + db_dict = {"collection": "nsrs", "filter": {"_id": nsr_id}, "path": "_admin.deployed.K8s." 
+ "{}".format(index)} + if k8sclustertype == "chart": + task = asyncio.ensure_future( + self.k8sclusterhelm.install(cluster_uuid=cluster_uuid, kdu_model=kdumodel, atomic=True, + params=desc_params, db_dict=db_dict, timeout=3600) + ) + else: + # TODO juju-bundle connector in place + pass + pending_tasks[task] = "_admin.deployed.K8s.{}.".format(index) + index += 1 + if not pending_tasks: + return + self.logger.debug(logging_text + 'Waiting for terminate pending tasks...') + pending_list = list(pending_tasks.keys()) + while pending_list: + done_list, pending_list = await asyncio.wait(pending_list, timeout=30*60, + return_when=asyncio.FIRST_COMPLETED) + if not done_list: # timeout + for task in pending_list: + db_nsr_update[pending_tasks(task) + "detailed-status"] = "Timeout" + break + for task in done_list: + exc = task.exception() + if exc: + db_nsr_update[pending_tasks[task] + "detailed-status"] = "{}".format(exc) + else: + db_nsr_update[pending_tasks[task] + "kdu-instance"] = task.result() + except Exception as e: self.logger.critical(logging_text + "Exit Exception {} while '{}': {}".format(type(e).__name__, step, e)) raise LcmException("{} Exit Exception {} while '{}': {}".format(logging_text, type(e).__name__, step, e)) @@ -1516,6 +1580,7 @@ class NsLcm(LcmBase): # TODO Write in data base if db_nsr_update: self.update_db_2("nsrs", nsr_id, db_nsr_update) + def _deploy_n2vc(self, logging_text, db_nsr, db_vnfr, nslcmop_id, nsr_id, nsi_id, vnfd_id, vdu_id, kdu_name, member_vnf_index, vdu_index, vdu_name, deploy_params, descriptor_config, base_folder, task_instantiation_list): @@ -1622,11 +1687,10 @@ class NsLcm(LcmBase): return nslcmop def _format_additional_params(self, params): - + params = params or {} for key, value in params.items(): if str(value).startswith("!!yaml "): params[key] = yaml.safe_load(value[7:]) - return params def _get_terminate_primitive_params(self, seq, vnf_index): @@ -1883,6 +1947,7 @@ class NsLcm(LcmBase): pending_tasks = [] try: # wait for any previous tasks in process + step = "Waiting for previous operations to terminate" await self.lcm_tasks.waitfor_related_HA("ns", 'nslcmops', nslcmop_id) step = "Getting nslcmop={} from db".format(nslcmop_id) @@ -1919,19 +1984,23 @@ class NsLcm(LcmBase): # Delete from k8scluster step = "delete kdus" self.logger.debug(logging_text + step) - print(nsr_deployed) + # print(nsr_deployed) if nsr_deployed: - for kdu in nsr_deployed.get("K8s"): + for kdu in nsr_deployed.get("K8s", ()): + kdu_instance = kdu.get("kdu-instance") + if not kdu_instance: + continue if kdu.get("k8scluster-type") == "chart": - task_delete_kdu_instance = asyncio.ensure_future(self.k8sclusterhelm.uninstall( - cluster_uuid=kdu.get("k8scluster-uuid"), kdu_instance=kdu.get("kdu-instance"))) + task_delete_kdu_instance = asyncio.ensure_future( + self.k8sclusterhelm.uninstall(cluster_uuid=kdu.get("k8scluster-uuid"), + kdu_instance=kdu_instance)) elif kdu.get("k8scluster-type") == "juju": # TODO Juju connector needed - pass + continue else: - msg = "k8scluster-type not defined" - raise LcmException(msg) - + self.error(logging_text + "Unknown k8s deployment type {}". 
+ format(kdu.get("k8scluster-type"))) + continue pending_tasks.append(task_delete_kdu_instance) except LcmException as e: msg = "Failed while deleting KDUs from NS: {}".format(e) @@ -2153,6 +2222,11 @@ class NsLcm(LcmBase): width=256) elif isinstance(calculated_params[param_name], str) and calculated_params[param_name].startswith("!!yaml "): calculated_params[param_name] = calculated_params[param_name][7:] + + # add always ns_config_info if primitive name is config + if primitive_desc["name"] == "config": + if "ns_config_info" in instantiation_params: + calculated_params["ns_config_info"] = instantiation_params["ns_config_info"] return calculated_params async def _ns_execute_primitive(self, db_deployed, member_vnf_index, vdu_id, vdu_name, vdu_count_index, @@ -2230,6 +2304,7 @@ class NsLcm(LcmBase): exc = None try: # wait for any previous tasks in process + step = "Waiting for previous operations to terminate" await self.lcm_tasks.waitfor_related_HA('ns', 'nslcmops', nslcmop_id) step = "Getting information from database" @@ -2275,14 +2350,10 @@ class NsLcm(LcmBase): break elif kdu_name: self.logger.debug(logging_text + "Checking actions in KDUs") - desc_params = {} - if vnf_index: - if db_vnfr.get("additionalParamsForVnf") and db_vnfr["additionalParamsForVnf"].\ - get("member-vnf-index") == vnf_index: - desc_params = self._format_additional_params(db_vnfr["additionalParamsForVnf"]. - get("additionalParams")) - if primitive_params: - desc_params.update(primitive_params) + kdur = next((x for x in db_vnfr["kdur"] if x["kdu_name"] == kdu_name), None) + desc_params = self._format_additional_params(kdur.get("additionalParams")) or {} + if primitive_params: + desc_params.update(primitive_params) # TODO Check if we will need something at vnf level index = 0 for kdu in get_iterable(nsr_deployed, "K8s"): @@ -2301,9 +2372,10 @@ class NsLcm(LcmBase): if kdu.get("k8scluster-type") == "chart": output = await self.k8sclusterhelm.upgrade(cluster_uuid=kdu.get("k8scluster-uuid"), - kdu_instance=kdu.get("kdu-instance"), - atomic=True, kdu_model=kdu_model, - params=desc_params, db_dict=db_dict, timeout=300) + kdu_instance=kdu.get("kdu-instance"), + atomic=True, kdu_model=kdu_model, + params=desc_params, db_dict=db_dict, + timeout=300) elif kdu.get("k8scluster-type") == "juju": # TODO Juju connector needed pass @@ -2367,10 +2439,14 @@ class NsLcm(LcmBase): desc_params = {} if vnf_index: if db_vnfr.get("additionalParamsForVnf"): - desc_params.update(db_vnfr["additionalParamsForVnf"]) + desc_params = self._format_additional_params(db_vnfr["additionalParamsForVnf"]) + if vdu_id: + vdur = next((x for x in db_vnfr["vdur"] if x["vdu-id-ref"] == vdu_id), None) + if vdur.get("additionalParams"): + desc_params = self._format_additional_params(vdur["additionalParams"]) else: if db_nsr.get("additionalParamsForNs"): - desc_params.update(db_nsr["additionalParamsForNs"]) + desc_params.update(self._format_additional_params(db_nsr["additionalParamsForNs"])) # TODO check if ns is in a proper status output, detail = await self._ns_execute_primitive( @@ -2458,6 +2534,7 @@ class NsLcm(LcmBase): vnfr_scaled = False try: # wait for any previous tasks in process + step = "Waiting for previous operations to terminate" await self.lcm_tasks.waitfor_related_HA('ns', 'nslcmops', nslcmop_id) step = "Getting nslcmop from database" @@ -2477,9 +2554,9 @@ class NsLcm(LcmBase): ####### nsr_deployed = db_nsr["_admin"].get("deployed") vnf_index = db_nslcmop["operationParams"].get("member_vnf_index") - vdu_id = 
db_nslcmop["operationParams"].get("vdu_id") - vdu_count_index = db_nslcmop["operationParams"].get("vdu_count_index") - vdu_name = db_nslcmop["operationParams"].get("vdu_name") + # vdu_id = db_nslcmop["operationParams"].get("vdu_id") + # vdu_count_index = db_nslcmop["operationParams"].get("vdu_count_index") + # vdu_name = db_nslcmop["operationParams"].get("vdu_name") ####### RO_nsr_id = nsr_deployed["RO"]["nsr_id"]