+ else: # total_deploy_timeout
+ raise ROclient.ROClientException("Timeout waiting ns to be ready")
+
+ step = "Updating NSR"
+ self.ns_update_nsr(db_nsr_update, db_nsr, desc)
+
+ db_nsr_update["_admin.deployed.RO.operational-status"] = "running"
+ db_nsr["_admin.deployed.RO.detailed-status"] = "Deployed at VIM"
+ db_nsr_update["_admin.deployed.RO.detailed-status"] = "Deployed at VIM"
+ self.update_db_2("nsrs", nsr_id, db_nsr_update)
+ # await self._on_update_n2vc_db("nsrs", {"_id": nsr_id}, "_admin.deployed", db_nsr_update)
+
+ step = "Deployed at VIM"
+ self.logger.debug(logging_text + step)
+
async def wait_vm_up_insert_key_ro(self, logging_text, nsr_id, vnfr_id, vdu_id, vdu_index, pub_key=None, user=None):
    """
    Wait until the target VM has an IP address reported by RO and, optionally, inject a public
    ssh key into the VM via an RO "add_public_key" ns action.
    :param logging_text: prefix used for logging
    :param nsr_id: NS instance id (nsrs collection)
    :param vnfr_id: VNF instance id (vnfrs collection)
    :param vdu_id: vdu-id-ref of the target VDU; None for the VNF management-address case
    :param vdu_index: count-index of the VDU (used only together with vdu_id)
    :param pub_key: public ssh key to inject, None to skip injection
    :param user: user the public ssh key is applied to (injection needs both pub_key and user)
    :return: IP address of the target VM/VNF
    :raises LcmException: on timeout, target vdur not found, VM in ERROR state, or after too
        many RO errors while injecting the key
    """
    ro_nsr_id = None
    ip_address = None
    nb_tries = 0          # retries of the RO key-injection action only
    target_vdu_id = None
    ro_retries = 0        # iterations of the whole wait loop; each iteration sleeps 10s

    while True:

        ro_retries += 1
        if ro_retries >= 360:  # 1 hour (360 iterations x 10s sleep)
            # covers the whole wait: RO nsr_id not published, VM not ACTIVE, or no IP address
            raise LcmException("Timeout (1 hour) waiting for VM to be active with an IP address "
                               "at RO for nsr_id: {}".format(nsr_id))

        await asyncio.sleep(10, loop=self.loop)
        # wait until NS is deployed at RO
        if not ro_nsr_id:
            db_nsrs = self.db.get_one("nsrs", {"_id": nsr_id})
            ro_nsr_id = deep_get(db_nsrs, ("_admin", "deployed", "RO", "nsr_id"))
        if not ro_nsr_id:
            continue

        # get ip address
        if not target_vdu_id:
            db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})

            if not vdu_id:  # for the VNF case
                ip_address = db_vnfr.get("ip-address")
                if not ip_address:
                    continue
                # locate the vdur that owns the VNF management ip-address
                vdur = next((x for x in get_iterable(db_vnfr, "vdur") if x.get("ip-address") == ip_address), None)
            else:  # VDU case
                vdur = next((x for x in get_iterable(db_vnfr, "vdur")
                             if x.get("vdu-id-ref") == vdu_id and x.get("count-index") == vdu_index), None)

            if not vdur:
                # second placeholder is the vdu_id (label was previously duplicated as vdu_index)
                raise LcmException("Not found vnfr_id={}, vdu_id={}, vdu_index={}".format(
                    vnfr_id, vdu_id, vdu_index
                ))

            if vdur.get("status") == "ACTIVE":
                ip_address = vdur.get("ip-address")
                if not ip_address:
                    continue
                target_vdu_id = vdur["vdu-id-ref"]
            elif vdur.get("status") == "ERROR":
                raise LcmException("Cannot inject ssh-key because target VM is in error state")

        if not target_vdu_id:
            continue

        # inject public key into machine
        if pub_key and user:
            try:
                ro_vm_id = "{}-{}".format(db_vnfr["member-vnf-index-ref"], target_vdu_id)  # TODO add vdu_index
                result_dict = await self.RO.create_action(
                    item="ns",
                    item_id_name=ro_nsr_id,
                    descriptor={"add_public_key": pub_key, "vms": [ro_vm_id], "user": user}
                )
                # result_dict contains the format {VM-id: {vim_result: 200, description: text}}
                if not result_dict or not isinstance(result_dict, dict):
                    raise LcmException("Unknown response from RO when injecting key")
                # succeed if any VM reports vim_result 200; otherwise retry with the last description
                for result in result_dict.values():
                    if result.get("vim_result") == 200:
                        break
                else:
                    raise ROclient.ROClientException("error injecting key: {}".format(
                        result.get("description")))
                break
            except ROclient.ROClientException as e:
                if not nb_tries:
                    # log only on the first failure; 20 tries x 10s sleep = 200 seconds
                    self.logger.debug(logging_text + "error injecting key: {}. Retrying until {} seconds".
                                      format(e, 20*10))
                nb_tries += 1
                if nb_tries >= 20:
                    raise LcmException("Reaching max tries injecting key. Error: {}".format(e))
        else:
            # no key to inject: the IP address is enough
            break

    return ip_address
+
async def _wait_dependent_n2vc(self, nsr_id, vca_deployed_list, vca_index):
    """
    Wait until dependent VCA deployments have been finished. NS wait for VNFs and VDUs. VNFs for VDUs
    :param nsr_id: nsrs record id; the record is re-read on each poll to get fresh status
    :param vca_deployed_list: _admin.deployed.VCA list; the entry at vca_index describes "me"
    :param vca_index: index of the VCA whose dependencies are awaited
    :raises LcmException: if a dependent charm is BROKEN, or on timeout
    """
    my_vca = vca_deployed_list[vca_index]
    if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
        # vdu or kdu: no dependencies
        return
    # NOTE(review): counter counts 10s poll iterations, not seconds -> ~50 minutes total
    timeout = 300
    while timeout >= 0:
        db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
        vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
        configuration_status_list = db_nsr["configurationStatus"]
        for index, vca_deployed in enumerate(configuration_status_list):
            if index == vca_index:
                # myself
                continue
            # NS (no member-vnf-index) waits for every other VCA; a VNF waits only for
            # entries of its own member-vnf-index.
            # NOTE(review): assumes configurationStatus entries carry "member-vnf-index";
            # confirm against the writer of that list
            if not my_vca.get("member-vnf-index") or \
                    (vca_deployed.get("member-vnf-index") == my_vca.get("member-vnf-index")):
                internal_status = configuration_status_list[index].get("status")
                if internal_status == 'READY':
                    continue
                elif internal_status == 'BROKEN':
                    raise LcmException("Configuration aborted because dependent charm/s has failed")
                else:
                    # still in progress: stop scanning and poll again after a sleep
                    break
        else:
            # no dependencies, return
            return
        await asyncio.sleep(10)
        timeout -= 1

    raise LcmException("Configuration aborted because dependent charm/s timeout")
+
async def instantiate_N2VC(self, logging_text, vca_index, nsi_id, db_nsr, db_vnfr, vdu_id, kdu_name,
                           vdu_index, config_descriptor, deploy_params, base_folder, nslcmop_id):
    """
    Deploy and configure one VCA (execution environment) for a NS, VNF or VDU element:
    create (proxy charm) or register (native charm) the execution environment at N2VC,
    install the charm software, optionally inject an ssh key into the target VM, and run
    the Day-1 initial config primitives.

    :param logging_text: prefix used for logging
    :param vca_index: position of this VCA inside _admin.deployed.VCA of the nsrs record
    :param nsi_id: network slice instance id; falsy when not part of a slice
    :param db_nsr: nsrs database record (must contain _admin.deployed.VCA)
    :param db_vnfr: vnfrs database record; None when configuring the NS itself
    :param vdu_id: vdu-id-ref when configuring a VDU; None otherwise
    :param kdu_name: KDU name; not read in this body, kept for interface symmetry
    :param vdu_index: VDU count-index (used together with vdu_id)
    :param config_descriptor: descriptor section with juju / config-access /
        initial-config-primitive information
    :param deploy_params: primitive parameters; extended here with "rw_mgmt_ip" (and
        "ns_config_info" for the NS charm case)
    :param base_folder: dict with 'folder' and 'pkg-dir' used to locate the charm artifact
    :param nslcmop_id: current operation id, used to report the stage
    :raises Exception: re-raised with the failing 'step' text prepended; the
        configurationStatus entry is set to BROKEN before raising
    """
    nsr_id = db_nsr["_id"]
    # dotted db path of this VCA entry, e.g. "_admin.deployed.VCA.3."
    db_update_entry = "_admin.deployed.VCA.{}.".format(vca_index)
    vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
    vca_deployed = db_nsr["_admin"]["deployed"]["VCA"][vca_index]
    # db reference handed to N2VC so it can report progress into the nsrs record
    db_dict = {
        'collection': 'nsrs',
        'filter': {'_id': nsr_id},
        'path': db_update_entry
    }
    # 'step' tracks the current stage for the error message of the except clause below
    step = ""
    try:

        element_type = 'NS'
        element_under_configuration = nsr_id

        vnfr_id = None
        if db_vnfr:
            vnfr_id = db_vnfr["_id"]

        # hierarchical namespace "<nsi>.<ns>[.<vnf>[.<vdu>-<index>]]" identifying the EE
        namespace = "{nsi}.{ns}".format(
            nsi=nsi_id if nsi_id else "",
            ns=nsr_id)

        if vnfr_id:
            element_type = 'VNF'
            element_under_configuration = vnfr_id
            namespace += ".{}".format(vnfr_id)
            if vdu_id:
                namespace += ".{}-{}".format(vdu_id, vdu_index or 0)
                element_type = 'VDU'
                element_under_configuration = "{}-{}".format(vdu_id, vdu_index or 0)

        # Get artifact path
        artifact_path = "{}/{}/charms/{}".format(
            base_folder["folder"],
            base_folder["pkg-dir"],
            config_descriptor["juju"]["charm"]
        )

        # proxy charm unless the descriptor explicitly sets juju.proxy to False
        is_proxy_charm = deep_get(config_descriptor, ('juju', 'charm')) is not None
        if deep_get(config_descriptor, ('juju', 'proxy')) is False:
            is_proxy_charm = False

        # n2vc_redesign STEP 3.1

        # find old ee_id if exists (reused on re-instantiation)
        ee_id = vca_deployed.get("ee_id")

        # create or register execution environment in VCA
        if is_proxy_charm:

            await self._write_configuration_status(
                nsr_id=nsr_id,
                vca_index=vca_index,
                status='CREATING',
                element_under_configuration=element_under_configuration,
                element_type=element_type
            )

            step = "create execution environment"
            self.logger.debug(logging_text + step)
            ee_id, credentials = await self.n2vc.create_execution_environment(namespace=namespace,
                                                                              reuse_ee_id=ee_id,
                                                                              db_dict=db_dict)

        else:
            # native charm: the EE is the VM itself, so wait for it and collect credentials
            step = "Waiting to VM being up and getting IP address"
            self.logger.debug(logging_text + step)
            rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(logging_text, nsr_id, vnfr_id, vdu_id, vdu_index,
                                                             user=None, pub_key=None)
            credentials = {"hostname": rw_mgmt_ip}
            # get username
            username = deep_get(config_descriptor, ("config-access", "ssh-access", "default-user"))
            # TODO remove this when changes on IM regarding config-access:ssh-access:default-user were
            # merged. Meanwhile let's get username from initial-config-primitive
            # NOTE(review): the inner 'break' only leaves the parameter loop; the scan keeps
            # going over the remaining primitives - confirm this is intended
            if not username and config_descriptor.get("initial-config-primitive"):
                for config_primitive in config_descriptor["initial-config-primitive"]:
                    for param in config_primitive.get("parameter", ()):
                        if param["name"] == "ssh-username":
                            username = param["value"]
                            break
            if not username:
                raise LcmException("Cannot determine the username neither with 'initial-config-promitive' nor with "
                                   "'config-access.ssh-access.default-user'")
            credentials["username"] = username
            # n2vc_redesign STEP 3.2

            await self._write_configuration_status(
                nsr_id=nsr_id,
                vca_index=vca_index,
                status='REGISTERING',
                element_under_configuration=element_under_configuration,
                element_type=element_type
            )

            step = "register execution environment {}".format(credentials)
            self.logger.debug(logging_text + step)
            ee_id = await self.n2vc.register_execution_environment(credentials=credentials, namespace=namespace,
                                                                   db_dict=db_dict)

        # for compatibility with MON/POL modules, the need model and application name at database
        # TODO ask to N2VC instead of assuming the format "model_name.application_name"
        ee_id_parts = ee_id.split('.')
        model_name = ee_id_parts[0]
        application_name = ee_id_parts[1]
        self.update_db_2("nsrs", nsr_id, {db_update_entry + "model": model_name,
                                          db_update_entry + "application": application_name,
                                          db_update_entry + "ee_id": ee_id})

        # n2vc_redesign STEP 3.3

        step = "Install configuration Software"

        await self._write_configuration_status(
            nsr_id=nsr_id,
            vca_index=vca_index,
            status='INSTALLING SW',
            element_under_configuration=element_under_configuration,
            element_type=element_type
        )

        # TODO check if already done
        self.logger.debug(logging_text + step)
        await self.n2vc.install_configuration_sw(ee_id=ee_id, artifact_path=artifact_path, db_dict=db_dict)

        # if SSH access is required, then get execution environment SSH public
        if is_proxy_charm:  # if native charm we have waited already to VM be UP
            pub_key = None
            user = None
            if deep_get(config_descriptor, ("config-access", "ssh-access", "required")):
                # Needed to inject a ssh key
                user = deep_get(config_descriptor, ("config-access", "ssh-access", "default-user"))
                step = "Install configuration Software, getting public ssh key"
                pub_key = await self.n2vc.get_ee_ssh_public__key(ee_id=ee_id, db_dict=db_dict)

                step = "Insert public key into VM user={} ssh_key={}".format(user, pub_key)
            else:
                step = "Waiting to VM being up and getting IP address"
            self.logger.debug(logging_text + step)

            # n2vc_redesign STEP 5.1
            # wait for RO (ip-address) Insert pub_key into VM
            if vnfr_id:
                rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(logging_text, nsr_id, vnfr_id, vdu_id, vdu_index,
                                                                 user=user, pub_key=pub_key)
            else:
                rw_mgmt_ip = None  # This is for a NS configuration

            self.logger.debug(logging_text + ' VM_ip_address={}'.format(rw_mgmt_ip))

        # store rw_mgmt_ip in deploy params for later replacement
        deploy_params["rw_mgmt_ip"] = rw_mgmt_ip

        # n2vc_redesign STEP 6 Execute initial config primitive
        step = 'execute initial config primitive'
        initial_config_primitive_list = config_descriptor.get('initial-config-primitive')

        # sort initial config primitives by 'seq'
        # best-effort: a missing/invalid 'seq' (or a None list) is logged, not fatal
        try:
            initial_config_primitive_list.sort(key=lambda val: int(val['seq']))
        except Exception as e:
            self.logger.error(logging_text + step + ": " + str(e))

        # add config if not present for NS charm
        initial_config_primitive_list = self._get_initial_config_primitive_list(initial_config_primitive_list,
                                                                                vca_deployed)

        # wait for dependent primitives execution (NS -> VNF -> VDU)
        if initial_config_primitive_list:
            await self._wait_dependent_n2vc(nsr_id, vca_deployed_list, vca_index)

        # stage, in function of element type: vdu, kdu, vnf or ns
        my_vca = vca_deployed_list[vca_index]
        if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
            # VDU or KDU
            stage = 'Stage 3/5: running Day-1 primitives for VDU'
        elif my_vca.get("member-vnf-index"):
            # VNF
            stage = 'Stage 4/5: running Day-1 primitives for VNF'
        else:
            # NS
            stage = 'Stage 5/5: running Day-1 primitives for NS'

        await self._write_configuration_status(
            nsr_id=nsr_id,
            vca_index=vca_index,
            status='EXECUTING PRIMITIVE'
        )

        self._write_op_status(
            op_id=nslcmop_id,
            stage=stage
        )

        for initial_config_primitive in initial_config_primitive_list:
            # adding information on the vca_deployed if it is a NS execution environment
            if not vca_deployed["member-vnf-index"]:
                deploy_params["ns_config_info"] = json.dumps(self._get_ns_config_info(nsr_id))
            # TODO check if already done
            primitive_params_ = self._map_primitive_params(initial_config_primitive, {}, deploy_params)

            step = "execute primitive '{}' params '{}'".format(initial_config_primitive["name"], primitive_params_)
            self.logger.debug(logging_text + step)
            await self.n2vc.exec_primitive(
                ee_id=ee_id,
                primitive_name=initial_config_primitive["name"],
                params_dict=primitive_params_,
                db_dict=db_dict
            )

            # TODO register in database that primitive is done

        step = "instantiated at VCA"
        self.logger.debug(logging_text + step)

        await self._write_configuration_status(
            nsr_id=nsr_id,
            vca_index=vca_index,
            status='READY'
        )

    except Exception as e:  # TODO not use Exception but N2VC exception
        # self.update_db_2("nsrs", nsr_id, {db_update_entry + "instantiation": "FAILED"})
        await self._write_configuration_status(
            nsr_id=nsr_id,
            vca_index=vca_index,
            status='BROKEN'
        )
        raise Exception("{} {}".format(step, e)) from e
        # TODO raise N2VC exception with 'step' extra information
+
def _write_ns_status(self, nsr_id: str, ns_state: str, current_operation: str, current_operation_id: str,
                     error_description: str = None):
    """Persist NS status fields (nsState, currentOperation*, errorDescription) in the nsrs record.

    Best-effort: any failure is logged as a warning and swallowed, never propagated.
    """
    try:
        status_update = {
            "currentOperation": current_operation,
            "currentOperationID": current_operation_id,
            "errorDescription": error_description,
        }
        # nsState is written only when a truthy value is supplied
        if ns_state:
            status_update["nsState"] = ns_state
        self.update_db_2("nsrs", nsr_id, status_update)
    except Exception as e:
        self.logger.warn('Error writing NS status, ns={}: {}'.format(nsr_id, e))
+
def _write_op_status(self, op_id: str, stage: str = None, error_message: str = None, queuePosition: int = 0):
    """Persist operation progress (queuePosition, stage, optional errorMessage) in the nslcmops record.

    Best-effort: any failure is logged as a warning and swallowed.
    """
    try:
        op_update = {'queuePosition': queuePosition, 'stage': stage}
        # errorMessage is only stored when non-empty
        if error_message:
            op_update['errorMessage'] = error_message
        self.update_db_2("nslcmops", op_id, op_update)
    except Exception as e:
        self.logger.warn('Error writing OPERATION status for op_id: {} -> {}'.format(op_id, e))
+
def _write_all_config_status(self, nsr_id: str, status: str):
    """Set every entry of the NS 'configurationStatus' list to the given status.

    Reads the nsrs record, rewrites each element's 'status' field and stores the whole
    list back. A missing/empty configurationStatus writes nothing. Best-effort: any
    failure is logged as a warning and swallowed.
    """
    try:
        db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
        config_status = db_nsr.get('configurationStatus')
        if config_status:
            # overwrite status in place, then store a fresh list of the same entries
            for entry in config_status:
                entry['status'] = status
            self.update_db_2("nsrs", nsr_id, {'configurationStatus': list(config_status)})

    except Exception as e:
        self.logger.warn('Error writing all configuration status, ns={}: {}'.format(nsr_id, e))
+
async def _write_configuration_status(self, nsr_id: str, vca_index: int, status: str,
                                      element_under_configuration: str = None, element_type: str = None):
    """Update the configurationStatus entry at position vca_index of the nsrs record.

    The optional element fields are written only when provided. Best-effort: any failure
    is logged as a warning and swallowed. (Declared async for caller symmetry; the body
    performs no awaits.)
    """
    try:
        prefix = 'configurationStatus.{}.'.format(vca_index)
        entry_update = {prefix + 'status': status}
        if element_under_configuration:
            entry_update[prefix + 'elementUnderConfiguration'] = element_under_configuration
        if element_type:
            entry_update[prefix + 'elementType'] = element_type
        self.update_db_2("nsrs", nsr_id, entry_update)
    except Exception as e:
        self.logger.warn('Error writing configuration status={}, ns={}, vca_index={}: {}'
                         .format(status, nsr_id, vca_index, e))
+
+ async def instantiate(self, nsr_id, nslcmop_id):
+ """
+
+ :param nsr_id: ns instance to deploy
+ :param nslcmop_id: operation to run
+ :return:
+ """
+
+ # Try to lock HA task here
+ task_is_locked_by_me = self.lcm_tasks.lock_HA('ns', 'nslcmops', nslcmop_id)
+ if not task_is_locked_by_me:
+ self.logger.debug('instantiate() task is not locked by me, ns={}'.format(nsr_id))
+ return
+
+ logging_text = "Task ns={} instantiate={} ".format(nsr_id, nslcmop_id)
+ self.logger.debug(logging_text + "Enter")
+
+ # get all needed from database
+
+ # database nsrs record
+ db_nsr = None
+
+ # database nslcmops record
+ db_nslcmop = None
+
+ # update operation on nsrs
+ db_nsr_update = {"_admin.nslcmop": nslcmop_id,
+ "_admin.current-operation": nslcmop_id,
+ "_admin.operation-type": "instantiate"}
+ self.update_db_2("nsrs", nsr_id, db_nsr_update)
+
+ # update operation on nslcmops
+ db_nslcmop_update = {}
+
+ nslcmop_operation_state = None
+ db_vnfrs = {} # vnf's info indexed by member-index
+ # n2vc_info = {}
+ task_instantiation_list = []
+ task_instantiation_info = {} # from task to info text
+ exc = None
+ try:
+ # wait for any previous tasks in process
+ step = "Waiting for previous operations to terminate"
+ await self.lcm_tasks.waitfor_related_HA('ns', 'nslcmops', nslcmop_id)
+
+ # STEP 0: Reading database (nslcmops, nsrs, nsds, vnfrs, vnfds)
+
+ # nsState="BUILDING", currentOperation="INSTANTIATING", currentOperationID=nslcmop_id
+ self._write_ns_status(
+ nsr_id=nsr_id,
+ ns_state="BUILDING",
+ current_operation="INSTANTIATING",
+ current_operation_id=nslcmop_id
+ )
+
+ # read from db: operation
+ step = "Getting nslcmop={} from db".format(nslcmop_id)
+ db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
+
+ # read from db: ns
+ step = "Getting nsr={} from db".format(nsr_id)
+ db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
+ # nsd is replicated into ns (no db read)
+ nsd = db_nsr["nsd"]
+ # nsr_name = db_nsr["name"] # TODO short-name??
+
+ # read from db: vnf's of this ns
+ step = "Getting vnfrs from db"
+ self.logger.debug(logging_text + step)
+ db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
+
+ # read from db: vnfd's for every vnf
+ db_vnfds_ref = {} # every vnfd data indexed by vnf name
+ db_vnfds = {} # every vnfd data indexed by vnf id
+ db_vnfds_index = {} # every vnfd data indexed by vnf member-index
+
+ self._write_op_status(
+ op_id=nslcmop_id,
+ stage='Stage 1/5: preparation of the environment',
+ queuePosition=0
+ )
+
+ # for each vnf in ns, read vnfd
+ for vnfr in db_vnfrs_list:
+ db_vnfrs[vnfr["member-vnf-index-ref"]] = vnfr # vnf's dict indexed by member-index: '1', '2', etc
+ vnfd_id = vnfr["vnfd-id"] # vnfd uuid for this vnf
+ vnfd_ref = vnfr["vnfd-ref"] # vnfd name for this vnf
+ # if we haven't this vnfd, read it from db
+ if vnfd_id not in db_vnfds:
+ # read from cb
+ step = "Getting vnfd={} id='{}' from db".format(vnfd_id, vnfd_ref)
+ self.logger.debug(logging_text + step)
+ vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
+
+ # store vnfd
+ db_vnfds_ref[vnfd_ref] = vnfd # vnfd's indexed by name
+ db_vnfds[vnfd_id] = vnfd # vnfd's indexed by id
+ db_vnfds_index[vnfr["member-vnf-index-ref"]] = db_vnfds[vnfd_id] # vnfd's indexed by member-index
+
+ # Get or generates the _admin.deployed.VCA list
+ vca_deployed_list = None
+ if db_nsr["_admin"].get("deployed"):
+ vca_deployed_list = db_nsr["_admin"]["deployed"].get("VCA")
+ if vca_deployed_list is None:
+ vca_deployed_list = []
+ configuration_status_list = []
+ db_nsr_update["_admin.deployed.VCA"] = vca_deployed_list
+ db_nsr_update["configurationStatus"] = configuration_status_list
+ # add _admin.deployed.VCA to db_nsr dictionary, value=vca_deployed_list
+ populate_dict(db_nsr, ("_admin", "deployed", "VCA"), vca_deployed_list)
+ elif isinstance(vca_deployed_list, dict):
+ # maintain backward compatibility. Change a dict to list at database
+ vca_deployed_list = list(vca_deployed_list.values())
+ db_nsr_update["_admin.deployed.VCA"] = vca_deployed_list
+ populate_dict(db_nsr, ("_admin", "deployed", "VCA"), vca_deployed_list)
+
+ db_nsr_update["detailed-status"] = "creating"
+ db_nsr_update["operational-status"] = "init"