X-Git-Url: https://osm.etsi.org/gitweb/?a=blobdiff_plain;f=osm_lcm%2Fns.py;h=193cab60876b52d5836947254d7f4d4ed58a912c;hb=c61e7813da16b2dc42d7ca4f625efe720096ab54;hp=5ca6354c6f78c78b85de83c29750b05070d37a04;hpb=9f9c6f2e8b8e978deaa4fb6e1482c0656ce4bd45;p=osm%2FLCM.git diff --git a/osm_lcm/ns.py b/osm_lcm/ns.py index 5ca6354..193cab6 100644 --- a/osm_lcm/ns.py +++ b/osm_lcm/ns.py @@ -114,6 +114,8 @@ class NsLcm(LcmBase): self.vca_config['public_key'] = self.vca_config['pubkey'] if 'cacert' in self.vca_config: self.vca_config['ca_cert'] = self.vca_config['cacert'] + if 'apiproxy' in self.vca_config: + self.vca_config['api_proxy'] = self.vca_config['apiproxy'] # create N2VC connector self.n2vc = N2VCJujuConnector( @@ -124,10 +126,9 @@ class NsLcm(LcmBase): url='{}:{}'.format(self.vca_config['host'], self.vca_config['port']), username=self.vca_config.get('user', None), vca_config=self.vca_config, - on_update_db=self._on_update_n2vc_db - # TODO - # New N2VC argument - # api_proxy=vca_config.get('apiproxy') + on_update_db=self._on_update_n2vc_db, + # ca_cert=self.vca_config.get('cacert'), + # api_proxy=self.vca_config.get('apiproxy'), ) self.k8sclusterhelm = K8sHelmConnector( @@ -383,6 +384,11 @@ class NsLcm(LcmBase): populate_dict(RO_ns_params, ("vnfs", vnf_params["member-vnf-index"], "networks", internal_vld_params["name"], "ip-profile"), ip_profile_2_RO(internal_vld_params["ip-profile"])) + if internal_vld_params.get("provider-network"): + + populate_dict(RO_ns_params, ("vnfs", vnf_params["member-vnf-index"], "networks", + internal_vld_params["name"], "provider-network"), + internal_vld_params["provider-network"].copy()) for icp_params in get_iterable(internal_vld_params, "internal-connection-point"): # look for interface @@ -416,6 +422,11 @@ class NsLcm(LcmBase): populate_dict(RO_ns_params, ("networks", vld_params["name"], "ip-profile"), ip_profile_2_RO(vld_params["ip-profile"])) + if vld_params.get("provider-network"): + + populate_dict(RO_ns_params, ("networks", vld_params["name"], "provider-network"), + vld_params["provider-network"].copy()) + if "wimAccountId" in vld_params and vld_params["wimAccountId"] is not None: populate_dict(RO_ns_params, ("networks", vld_params["name"], "wim_account"), wim_account_2_RO(vld_params["wimAccountId"])), @@ -431,6 +442,7 @@ class NsLcm(LcmBase): RO_vld_sites.append({"netmap-use": vld_params["vim-network-name"]}) if RO_vld_sites: populate_dict(RO_ns_params, ("networks", vld_params["name"], "sites"), RO_vld_sites) + if vld_params.get("vim-network-id"): RO_vld_sites = [] if isinstance(vld_params["vim-network-id"], dict): @@ -621,6 +633,57 @@ class NsLcm(LcmBase): else: raise LcmException("ns_update_vnfr: Not found member_vnf_index={} from VIM info".format(vnf_index)) + @staticmethod + def _get_ns_config_info(vca_deployed_list): + """ + Generates a mapping between vnf,vdu elements and the N2VC id + :param vca_deployed_list: List of database _admin.deploy.VCA that contains this list + :return: a dictionary with {osm-config-mapping: {}} where its element contains: + "": for a vnf configuration, or + "..": for a vdu configuration + """ + mapping = {} + ns_config_info = {"osm-config-mapping": mapping} + for vca in vca_deployed_list: + if not vca["member-vnf-index"]: + continue + if not vca["vdu_id"]: + mapping[vca["member-vnf-index"]] = vca["application"] + else: + mapping["{}.{}.{}".format(vca["member-vnf-index"], vca["vdu_id"], vca["vdu_count_index"])] =\ + vca["application"] + return ns_config_info + + @staticmethod + def 
_get_initial_config_primitive_list(desc_primitive_list, vca_deployed): + """ + Generates a list of initial-config-primitive based on the list provided by the descriptor. It includes internal + primitives as verify-ssh-credentials, or config when needed + :param desc_primitive_list: information of the descriptor + :param vca_deployed: information of the deployed, needed for known if it is related to an NS, VNF, VDU and if + this element contains a ssh public key + :return: The modified list. Can ba an empty list, but always a list + """ + if desc_primitive_list: + primitive_list = desc_primitive_list.copy() + else: + primitive_list = [] + # look for primitive config, and get the position. None if not present + config_position = None + for index, primitive in enumerate(primitive_list): + if primitive["name"] == "config": + config_position = index + break + + # for NS, add always a config primitive if not present (bug 874) + if not vca_deployed["member-vnf-index"] and config_position is None: + primitive_list.insert(0, {"name": "config", "parameter": []}) + config_position = 0 + # for VNF/VDU add verify-ssh-credentials after config + if vca_deployed["member-vnf-index"] and config_position is not None and vca_deployed.get("ssh-public-key"): + primitive_list.insert(config_position + 1, {"name": "verify-ssh-credentials", "parameter": []}) + return primitive_list + async def instantiate_RO(self, logging_text, nsr_id, nsd, db_nsr, db_nslcmop, db_vnfrs, db_vnfds_ref, n2vc_key_list): @@ -805,12 +868,20 @@ class NsLcm(LcmBase): step = "Deployed at VIM" self.logger.debug(logging_text + step) - # wait for ip addres at RO, and optionally, insert public key in virtual machine - # returns IP address - async def insert_key_ro(self, logging_text, nsr_id, vnfr_id, vdu_id, vdu_index, pub_key=None, user=None): - - self.logger.debug(logging_text + "Starting insert_key_ro") + async def wait_vm_up_insert_key_ro(self, logging_text, nsr_id, vnfr_id, vdu_id, vdu_index, pub_key=None, user=None): + """ + Wait for ip addres at RO, and optionally, insert public key in virtual machine + :param logging_text: prefix use for logging + :param nsr_id: + :param vnfr_id: + :param vdu_id: + :param vdu_index: + :param pub_key: public ssh key to inject, None to skip + :param user: user to apply the public ssh key + :return: IP address + """ + # self.logger.debug(logging_text + "Starting wait_vm_up_insert_key_ro") ro_nsr_id = None ip_address = None nb_tries = 0 @@ -836,9 +907,9 @@ class NsLcm(LcmBase): for vdur in get_iterable(db_vnfr, "vdur"): if (vdur["vdu-id-ref"] == vdu_id and vdur["count-index"] == vdu_index) or \ (ip_address and vdur.get("ip-address") == ip_address): - if vdur["status"] == "ACTIVE": + if vdur.get("status") == "ACTIVE": target_vdu_id = vdur["vdu-id-ref"] - elif vdur["status"] == "ERROR": + elif vdur.get("status") == "ERROR": raise LcmException("Cannot inject ssh-key because target VM is in error state") break else: @@ -849,11 +920,11 @@ class NsLcm(LcmBase): if not target_vdu_id: continue - self.logger.debug(logging_text + "IP address={}".format(ip_address)) + # self.logger.debug(logging_text + "IP address={}".format(ip_address)) # inject public key into machine if pub_key and user: - self.logger.debug(logging_text + "Inserting RO key") + # self.logger.debug(logging_text + "Inserting RO key") try: ro_vm_id = "{}-{}".format(db_vnfr["member-vnf-index-ref"], target_vdu_id) # TODO add vdu_index result_dict = await self.RO.create_action( @@ -872,10 +943,12 @@ class NsLcm(LcmBase): result.get("description"))) break 
except ROclient.ROClientException as e: + if not nb_tries: + self.logger.debug(logging_text + "error injecting key: {}. Retrying until {} seconds". + format(e, 20*10)) nb_tries += 1 - if nb_tries >= 10: + if nb_tries >= 20: raise LcmException("Reaching max tries injecting key. Error: {}".format(e)) - self.logger.debug(logging_text + "error injecting key: {}".format(e)) else: break @@ -885,16 +958,15 @@ class NsLcm(LcmBase): kdu_name, vdu_index, config_descriptor, deploy_params, base_folder): nsr_id = db_nsr["_id"] db_update_entry = "_admin.deployed.VCA.{}.".format(vca_index) + vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"] vca_deployed = db_nsr["_admin"]["deployed"]["VCA"][vca_index] db_dict = { 'collection': 'nsrs', 'filter': {'_id': nsr_id}, 'path': db_update_entry } - - - logging_text += "member_vnf_index={} vdu_id={}, vdu_index={} "\ - .format(db_vnfr["member-vnf-index-ref"], vdu_id, vdu_index) + logging_text += "member_vnf_index={} vdu_id={}, vdu_index={} ".format(db_vnfr["member-vnf-index-ref"], + vdu_id, vdu_index) step = "" try: @@ -930,35 +1002,35 @@ class NsLcm(LcmBase): if is_proxy_charm: step = "create execution environment" self.logger.debug(logging_text + step) - ee_id, credentials = await self.n2vc.create_execution_environment( - namespace=namespace, - reuse_ee_id=ee_id, - db_dict=db_dict - ) - + ee_id, credentials = await self.n2vc.create_execution_environment(namespace=namespace, + reuse_ee_id=ee_id, + db_dict=db_dict) else: - step = "register execution envioronment" - # TODO wait until deployed by RO, when IP address has been filled. By pooling???? - credentials = {} # TODO db_credentials["ip_address"] + step = "Waiting to VM being up and getting IP address" + self.logger.debug(logging_text + step) + rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(logging_text, nsr_id, vnfr_id, vdu_id, vdu_index, + user=None, pub_key=None) + credentials = {"hostname": rw_mgmt_ip} # get username + username = deep_get(config_descriptor, ("config-access", "ssh-access", "default-user")) # TODO remove this when changes on IM regarding config-access:ssh-access:default-user were # merged. 
Meanwhile let's get username from initial-config-primitive - if config_descriptor.get("initial-config-primitive"): - for param in config_descriptor["initial-config-primitive"][0].get("parameter", ()): - if param["name"] == "ssh-username": - credentials["username"] = param["value"] - if config_descriptor.get("config-access") and config_descriptor["config-access"].get("ssh-access"): - if config_descriptor["config-access"]["ssh-access"].get("required"): - credentials["username"] = \ - config_descriptor["config-access"]["ssh-access"].get("default-user") - + if not username and config_descriptor.get("initial-config-primitive"): + for config_primitive in config_descriptor["initial-config-primitive"]: + for param in config_primitive.get("parameter", ()): + if param["name"] == "ssh-username": + username = param["value"] + break + if not username: + raise LcmException("Cannot determine the username neither with 'initial-config-promitive' nor with " + "'config-access.ssh-access.default-user'") + credentials["username"] = username # n2vc_redesign STEP 3.2 + + step = "register execution environment {}".format(credentials) self.logger.debug(logging_text + step) - ee_id = await self.n2vc.register_execution_environment( - credentials=credentials, - namespace=namespace, - db_dict=db_dict - ) + ee_id = await self.n2vc.register_execution_environment(credentials=credentials, namespace=namespace, + db_dict=db_dict) # for compatibility with MON/POL modules, the need model and application name at database # TODO ask to N2VC instead of assuming the format "model_name.application_name" @@ -970,59 +1042,58 @@ class NsLcm(LcmBase): db_update_entry + "ee_id": ee_id}) # n2vc_redesign STEP 3.3 - # TODO check if already done + step = "Install configuration Software" + # TODO check if already done self.logger.debug(logging_text + step) - await self.n2vc.install_configuration_sw( - ee_id=ee_id, - artifact_path=artifact_path, - db_dict=db_dict - ) + await self.n2vc.install_configuration_sw(ee_id=ee_id, artifact_path=artifact_path, db_dict=db_dict) # if SSH access is required, then get execution environment SSH public - required = deep_get(config_descriptor, ("config-access", "ssh-access", "required")) - if is_proxy_charm and required: - + if is_proxy_charm: # if native charm we have waited already to VM be UP pub_key = None - pub_key = await self.n2vc.get_ee_ssh_public__key( - ee_id=ee_id, - db_dict=db_dict - ) + user = None + if deep_get(config_descriptor, ("config-access", "ssh-access", "required")): + # Needed to inject a ssh key + user = deep_get(config_descriptor, ("config-access", "ssh-access", "default-user")) + step = "Install configuration Software, getting public ssh key" + pub_key = await self.n2vc.get_ee_ssh_public__key(ee_id=ee_id, db_dict=db_dict) + + step = "Insert public key into VM" + else: + step = "Waiting to VM being up and getting IP address" + self.logger.debug(logging_text + step) - user = deep_get(config_descriptor, ("config-access", "ssh-access", "default-user")) - # insert pub_key into VM # n2vc_redesign STEP 5.1 - step = "Insert public key into VM" - self.logger.debug(logging_text + step) + # wait for RO (ip-address) Insert pub_key into VM + rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(logging_text, nsr_id, vnfr_id, vdu_id, vdu_index, + user=user, pub_key=pub_key) - # wait for RO (ip-address) - rw_mgmt_ip = await self.insert_key_ro( - logging_text=logging_text, - nsr_id=nsr_id, - vnfr_id=vnfr_id, - vdu_id=vdu_id, - vdu_index=vdu_index, - user=user, - pub_key=pub_key - ) + 
self.logger.debug(logging_text + ' VM_ip_address={}'.format(rw_mgmt_ip)) - # store rw_mgmt_ip in deploy params for later substitution - self.logger.debug('rw_mgmt_ip={}'.format(rw_mgmt_ip)) + # store rw_mgmt_ip in deploy params for later replacement deploy_params["rw_mgmt_ip"] = rw_mgmt_ip # n2vc_redesign STEP 6 Execute initial config primitive - initial_config_primitive_list = config_descriptor.get('initial-config-primitive', []) step = 'execute initial config primitive' + initial_config_primitive_list = config_descriptor.get('initial-config-primitive') # sort initial config primitives by 'seq' try: initial_config_primitive_list.sort(key=lambda val: int(val['seq'])) - except Exception: - self.logger.warn(logging_text + 'Cannot sort by "seq" field' + step) + except Exception as e: + self.logger.error(logging_text + step + ": " + str(e)) + + # add config if not present for NS charm + initial_config_primitive_list = self._get_initial_config_primitive_list(initial_config_primitive_list, + vca_deployed) for initial_config_primitive in initial_config_primitive_list: + # adding information on the vca_deployed if it is a NS execution environment + if not vca_deployed["member-vnf-index"]: + deploy_params["ns_config_info"] = self._get_ns_config_info(vca_deployed_list) # TODO check if already done primitive_params_ = self._map_primitive_params(initial_config_primitive, {}, deploy_params) + step = "execute primitive '{}' params '{}'".format(initial_config_primitive["name"], primitive_params_) self.logger.debug(logging_text + step) await self.n2vc.exec_primitive( @@ -1077,13 +1148,11 @@ class NsLcm(LcmBase): nslcmop_operation_state = None db_vnfrs = {} # vnf's info indexed by member-index # n2vc_info = {} - # n2vc_key_list = [] # list of public keys to be injected as authorized to VMs task_instantiation_list = [] exc = None try: # wait for any previous tasks in process - step = "Waiting for previous tasks" - self.logger.debug(logging_text + step) + step = "Waiting for previous operations to terminate" await self.lcm_tasks.waitfor_related_HA('ns', 'nslcmops', nslcmop_id) # STEP 0: Reading database (nslcmops, nsrs, nsds, vnfrs, vnfds) @@ -1170,7 +1239,10 @@ class NsLcm(LcmBase): task_instantiation_list.append(task_kdu) # n2vc_redesign STEP 1 Get VCA public ssh-key # feature 1429. Add n2vc public key to needed VMs - n2vc_key = await self.n2vc.get_public_key() + n2vc_key = self.n2vc.get_public_key() + n2vc_key_list = [n2vc_key] + if self.vca_config.get("public_key"): + n2vc_key_list.append(self.vca_config["public_key"]) # n2vc_redesign STEP 2 Deploy Network Scenario task_ro = asyncio.ensure_future( @@ -1182,7 +1254,7 @@ class NsLcm(LcmBase): db_nslcmop=db_nslcmop, db_vnfrs=db_vnfrs, db_vnfds_ref=db_vnfds_ref, - n2vc_key_list=[n2vc_key] + n2vc_key_list=n2vc_key_list ) ) self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "instantiate_RO", task_ro) @@ -1315,6 +1387,7 @@ class NsLcm(LcmBase): kdu_name = None vdu_index = 0 vdu_name = None + # Get additional parameters deploy_params = {} if db_nsr.get("additionalParamsForNs"): @@ -1483,9 +1556,9 @@ class NsLcm(LcmBase): db_dict = {"collection": "nsrs", "filter": {"_id": nsr_id}, "path": "_admin.deployed.K8s." 
"{}".format(index)} if k8sclustertype == "chart": - task = self.k8sclusterhelm.install(cluster_uuid=cluster_uuid, - kdu_model=kdumodel, atomic=True, params=desc_params, - db_dict=db_dict, timeout=300) + task = self.k8sclusterhelm.install(cluster_uuid=cluster_uuid, kdu_model=kdumodel, + atomic=True, params=desc_params, + db_dict=db_dict, timeout=300) else: # TODO I need the juju connector in place pass @@ -1516,6 +1589,7 @@ class NsLcm(LcmBase): # TODO Write in data base if db_nsr_update: self.update_db_2("nsrs", nsr_id, db_nsr_update) + def _deploy_n2vc(self, logging_text, db_nsr, db_vnfr, nslcmop_id, nsr_id, nsi_id, vnfd_id, vdu_id, kdu_name, member_vnf_index, vdu_index, vdu_name, deploy_params, descriptor_config, base_folder, task_instantiation_list): @@ -1883,6 +1957,7 @@ class NsLcm(LcmBase): pending_tasks = [] try: # wait for any previous tasks in process + step = "Waiting for previous operations to terminate" await self.lcm_tasks.waitfor_related_HA("ns", 'nslcmops', nslcmop_id) step = "Getting nslcmop={} from db".format(nslcmop_id) @@ -2153,6 +2228,11 @@ class NsLcm(LcmBase): width=256) elif isinstance(calculated_params[param_name], str) and calculated_params[param_name].startswith("!!yaml "): calculated_params[param_name] = calculated_params[param_name][7:] + + # add always ns_config_info if primitive name is config + if primitive_desc["name"] == "config": + if "ns_config_info" in instantiation_params: + calculated_params["ns_config_info"] = instantiation_params["ns_config_info"] return calculated_params async def _ns_execute_primitive(self, db_deployed, member_vnf_index, vdu_id, vdu_name, vdu_count_index, @@ -2230,6 +2310,7 @@ class NsLcm(LcmBase): exc = None try: # wait for any previous tasks in process + step = "Waiting for previous operations to terminate" await self.lcm_tasks.waitfor_related_HA('ns', 'nslcmops', nslcmop_id) step = "Getting information from database" @@ -2301,9 +2382,10 @@ class NsLcm(LcmBase): if kdu.get("k8scluster-type") == "chart": output = await self.k8sclusterhelm.upgrade(cluster_uuid=kdu.get("k8scluster-uuid"), - kdu_instance=kdu.get("kdu-instance"), - atomic=True, kdu_model=kdu_model, - params=desc_params, db_dict=db_dict, timeout=300) + kdu_instance=kdu.get("kdu-instance"), + atomic=True, kdu_model=kdu_model, + params=desc_params, db_dict=db_dict, + timeout=300) elif kdu.get("k8scluster-type") == "juju": # TODO Juju connector needed pass @@ -2458,6 +2540,7 @@ class NsLcm(LcmBase): vnfr_scaled = False try: # wait for any previous tasks in process + step = "Waiting for previous operations to terminate" await self.lcm_tasks.waitfor_related_HA('ns', 'nslcmops', nslcmop_id) step = "Getting nslcmop from database" @@ -2477,9 +2560,9 @@ class NsLcm(LcmBase): ####### nsr_deployed = db_nsr["_admin"].get("deployed") vnf_index = db_nslcmop["operationParams"].get("member_vnf_index") - vdu_id = db_nslcmop["operationParams"].get("vdu_id") - vdu_count_index = db_nslcmop["operationParams"].get("vdu_count_index") - vdu_name = db_nslcmop["operationParams"].get("vdu_name") + # vdu_id = db_nslcmop["operationParams"].get("vdu_id") + # vdu_count_index = db_nslcmop["operationParams"].get("vdu_count_index") + # vdu_name = db_nslcmop["operationParams"].get("vdu_name") ####### RO_nsr_id = nsr_deployed["RO"]["nsr_id"]