check_juju_bundle_existence,
get_charm_artifact_path,
get_ee_id_parts,
+ vld_to_ro_ip_profile,
)
from osm_lcm.data_utils.nsd import (
get_ns_configuration_relation_list,
return None
def _on_update_ro_db(self, nsrs_id, ro_descriptor):
-
# self.logger.debug('_on_update_ro_db(nsrs_id={}'.format(nsrs_id))
try:
)
async def _on_update_n2vc_db(self, table, filter, path, updated_data, vca_id=None):
-
# remove last dot from path (if exists)
if path.endswith("."):
path = path[:-1]
# self.logger.debug('_on_update_n2vc_db(table={}, filter={}, path={}, updated_data={}'
# .format(table, filter, path, updated_data))
try:
-
nsr_id = filter.get("_id")
# read ns record from database
return wim_account
def scale_vnfr(self, db_vnfr, vdu_create=None, vdu_delete=None, mark_delete=False):
-
db_vdu_push_list = []
template_vdur = []
db_update = {"_admin.modified": time()}
start_deploy,
timeout_ns_deploy,
):
-
db_vims = {}
def get_vim_account(vim_account_id):
target_vim, target_vld, vld_params, target_sdn
):
if vld_params.get("ip-profile"):
- target_vld["vim_info"][target_vim]["ip_profile"] = vld_params[
- "ip-profile"
- ]
+ target_vld["vim_info"][target_vim]["ip_profile"] = vld_to_ro_ip_profile(
+ vld_params["ip-profile"]
+ )
if vld_params.get("provider-network"):
target_vld["vim_info"][target_vim]["provider_network"] = vld_params[
"provider-network"
lambda v_vld: v_vld["name"] in (a_vld["name"], a_vld["id"]),
)
if target_vld:
-
if vnf_params.get("vimAccountId") not in a_vld.get(
"vim_info", {}
):
# check if this network needs SDN assist
if vld.get("pci-interfaces"):
db_vim = get_vim_account(ns_params["vimAccountId"])
- sdnc_id = db_vim["config"].get("sdn-controller")
- if sdnc_id:
- sdn_vld = "nsrs:{}:vld.{}".format(nsr_id, vld["id"])
- target_sdn = "sdn:{}".format(sdnc_id)
- target_vld["vim_info"][target_sdn] = {
- "sdn": True,
- "target_vim": target_vim,
- "vlds": [sdn_vld],
- "type": vld.get("type"),
- }
+ if vim_config := db_vim.get("config"):
+ if sdnc_id := vim_config.get("sdn-controller"):
+ sdn_vld = "nsrs:{}:vld.{}".format(nsr_id, vld["id"])
+ target_sdn = "sdn:{}".format(sdnc_id)
+ target_vld["vim_info"][target_sdn] = {
+ "sdn": True,
+ "target_vim": target_vim,
+ "vlds": [sdn_vld],
+ "type": vld.get("type"),
+ }
nsd_vnf_profiles = get_vnf_profiles(nsd)
for nsd_vnf_profile in nsd_vnf_profiles:
and nsd_vlp.get("virtual-link-protocol-data")
and nsd_vlp["virtual-link-protocol-data"].get("l3-protocol-data")
):
- ip_profile_source_data = nsd_vlp["virtual-link-protocol-data"][
+ vld_params["ip-profile"] = nsd_vlp["virtual-link-protocol-data"][
"l3-protocol-data"
]
- ip_profile_dest_data = {}
- if "ip-version" in ip_profile_source_data:
- ip_profile_dest_data["ip-version"] = ip_profile_source_data[
- "ip-version"
- ]
- if "cidr" in ip_profile_source_data:
- ip_profile_dest_data["subnet-address"] = ip_profile_source_data[
- "cidr"
- ]
- if "gateway-ip" in ip_profile_source_data:
- ip_profile_dest_data["gateway-address"] = ip_profile_source_data[
- "gateway-ip"
- ]
- if "dhcp-enabled" in ip_profile_source_data:
- ip_profile_dest_data["dhcp-params"] = {
- "enabled": ip_profile_source_data["dhcp-enabled"]
- }
- vld_params["ip-profile"] = ip_profile_dest_data
# update vld_params with instantiation params
vld_instantiation_params = find_in_list(
and vnfd_vlp.get("virtual-link-protocol-data")
and vnfd_vlp["virtual-link-protocol-data"].get("l3-protocol-data")
):
- ip_profile_source_data = vnfd_vlp["virtual-link-protocol-data"][
+ vld_params["ip-profile"] = vnfd_vlp["virtual-link-protocol-data"][
"l3-protocol-data"
]
- ip_profile_dest_data = {}
- if "ip-version" in ip_profile_source_data:
- ip_profile_dest_data["ip-version"] = ip_profile_source_data[
- "ip-version"
- ]
- if "cidr" in ip_profile_source_data:
- ip_profile_dest_data["subnet-address"] = ip_profile_source_data[
- "cidr"
- ]
- if "gateway-ip" in ip_profile_source_data:
- ip_profile_dest_data[
- "gateway-address"
- ] = ip_profile_source_data["gateway-ip"]
- if "dhcp-enabled" in ip_profile_source_data:
- ip_profile_dest_data["dhcp-params"] = {
- "enabled": ip_profile_source_data["dhcp-enabled"]
- }
-
- vld_params["ip-profile"] = ip_profile_dest_data
# update vld_params with instantiation params
if vnf_params:
vld_instantiation_params = find_in_list(
}
desc = await self.RO.deploy(nsr_id, target)
action_id = desc["action_id"]
- db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = action_id
db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETING"
self.logger.debug(
logging_text
stage,
operation="termination",
)
-
- db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
# delete all nsr
await self.RO.delete(nsr_id)
- except Exception as e:
- if isinstance(e, NgRoException) and e.http_code == 404: # not found
+ except NgRoException as e:
+ if e.http_code == 404: # not found
db_nsr_update["_admin.deployed.RO.nsr_id"] = None
db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
- db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
self.logger.debug(
logging_text + "RO_action_id={} already deleted".format(action_id)
)
- elif isinstance(e, NgRoException) and e.http_code == 409: # conflict
+ elif e.http_code == 409: # conflict
failed_detail.append("delete conflict: {}".format(e))
self.logger.debug(
logging_text
logging_text
+ "RO_action_id={} delete error: {}".format(action_id, e)
)
+ except Exception as e:
+ failed_detail.append("delete error: {}".format(e))
+ self.logger.error(
+ logging_text + "RO_action_id={} delete error: {}".format(action_id, e)
+ )
if failed_detail:
stage[2] = "Error deleting from VIM"
"""
self.logger.debug(logging_text + "Starting wait_vm_up_insert_key_ro")
- ro_nsr_id = None
ip_address = None
- nb_tries = 0
target_vdu_id = None
ro_retries = 0
while True:
-
ro_retries += 1
if ro_retries >= 360: # 1 hour
raise LcmException(
self.logger.error(logging_text + "Cannot inject ssh-ky to a PDU")
return ip_address
try:
- ro_vm_id = "{}-{}".format(
- db_vnfr["member-vnf-index-ref"], target_vdu_id
- ) # TODO add vdu_index
- if self.ro_config.ng:
- target = {
- "action": {
- "action": "inject_ssh_key",
- "key": pub_key,
- "user": user,
- },
- "vnf": [{"_id": vnfr_id, "vdur": [{"id": vdur["id"]}]}],
- }
- desc = await self.RO.deploy(nsr_id, target)
- action_id = desc["action_id"]
- await self._wait_ng_ro(
- nsr_id, action_id, timeout=600, operation="instantiation"
- )
- break
- else:
- # wait until NS is deployed at RO
- if not ro_nsr_id:
- db_nsrs = self.db.get_one("nsrs", {"_id": nsr_id})
- ro_nsr_id = deep_get(
- db_nsrs, ("_admin", "deployed", "RO", "nsr_id")
- )
- if not ro_nsr_id:
- continue
- result_dict = await self.RO.create_action(
- item="ns",
- item_id_name=ro_nsr_id,
- descriptor={
- "add_public_key": pub_key,
- "vms": [ro_vm_id],
- "user": user,
- },
- )
- # result_dict contains the format {VM-id: {vim_result: 200, description: text}}
- if not result_dict or not isinstance(result_dict, dict):
- raise LcmException(
- "Unknown response from RO when injecting key"
- )
- for result in result_dict.values():
- if result.get("vim_result") == 200:
- break
- else:
- raise ROclient.ROClientException(
- "error injecting key: {}".format(
- result.get("description")
- )
- )
- break
+ target = {
+ "action": {
+ "action": "inject_ssh_key",
+ "key": pub_key,
+ "user": user,
+ },
+ "vnf": [{"_id": vnfr_id, "vdur": [{"id": vdur["id"]}]}],
+ }
+ desc = await self.RO.deploy(nsr_id, target)
+ action_id = desc["action_id"]
+ await self._wait_ng_ro(
+ nsr_id, action_id, timeout=600, operation="instantiation"
+ )
+ break
except NgRoException as e:
raise LcmException(
"Reaching max tries injecting key. Error: {}".format(e)
)
- except ROclient.ROClientException as e:
- if not nb_tries:
- self.logger.debug(
- logging_text
- + "error injecting key: {}. Retrying until {} seconds".format(
- e, 20 * 10
- )
- )
- nb_tries += 1
- if nb_tries >= 20:
- raise LcmException(
- "Reaching max tries injecting key. Error: {}".format(e)
- )
else:
break
}
step = ""
try:
-
element_type = "NS"
element_under_configuration = nsr_id
vca_id = self.get_vca_id(db_vnfr, db_nsr)
# create or register execution environment in VCA
if vca_type in ("lxc_proxy_charm", "k8s_proxy_charm", "helm", "helm-v3"):
-
self._write_configuration_status(
nsr_id=nsr_id,
vca_index=vca_index,
)
# add relations for this VCA (wait for other peers related with this VCA)
- await self._add_vca_relations(
+ is_relation_added = await self._add_vca_relations(
logging_text=logging_text,
nsr_id=nsr_id,
vca_type=vca_type,
vca_index=vca_index,
)
+ if not is_relation_added:
+ raise LcmException("Relations could not be added to VCA.")
+
# if SSH access is required, then get execution environment SSH public
# if native charm we have waited already to VM be UP
if vca_type in ("k8s_proxy_charm", "lxc_proxy_charm", "helm", "helm-v3"):
vnfr_id=vnfr_id,
nsr_id=nsr_id,
target_ip=rw_mgmt_ip,
+ element_type=element_type,
vnf_member_index=db_vnfr.get("member-vnf-index-ref", ""),
vdu_id=vdu_id,
vdu_index=vdu_index,
self._write_configuration_status(
nsr_id=nsr_id, vca_index=vca_index, status="BROKEN"
)
- raise LcmException("{} {}".format(step, e)) from e
+ raise LcmException("{}. {}".format(step, e)) from e
def _write_ns_status(
self,
element_type: str = None,
other_update: dict = None,
):
-
# self.logger.debug('_write_configuration_status(): vca_index={}, status={}'
# .format(vca_index, status))
# update operation on nslcmops
db_nslcmop_update = {}
+ timeout_ns_deploy = self.timeout.ns_deploy
+
nslcmop_operation_state = None
db_vnfrs = {} # vnf's info indexed by member-index
# n2vc_info = {}
ns_params = db_nslcmop.get("operationParams")
if ns_params and ns_params.get("timeout_ns_deploy"):
timeout_ns_deploy = ns_params["timeout_ns_deploy"]
- else:
- timeout_ns_deploy = self.timeout.ns_deploy
# read from db: ns
stage[1] = "Getting nsr={} from db.".format(nsr_id)
cached_vnfds: Dict[str, Any],
) -> List[Relation]:
relations = []
+ if vca.target_element == "ns":
+ self.logger.debug("VCA is a NS charm, not a VNF.")
+ return relations
vnf_profile = get_vnf_profile(nsd, vca.vnf_profile_id)
vnf_profile_id = vnf_profile["id"]
vnfd_id = vnf_profile["vnfd-id"]
requirer_vca_id,
relation.requirer.endpoint,
)
- await self.vca_map[vca_type].add_relation(
- provider=provider_relation_endpoint,
- requirer=requirer_relation_endpoint,
- )
- # remove entry from relations list
+ try:
+ await self.vca_map[vca_type].add_relation(
+ provider=provider_relation_endpoint,
+ requirer=requirer_relation_endpoint,
+ )
+ except N2VCException as exception:
+ self.logger.error(exception)
+ raise LcmException(exception)
return True
return False
vca_index: int,
timeout: int = 3600,
) -> bool:
-
# steps:
# 1. find all relations for this VCA
# 2. wait for other peers related
timeout: int = 600,
vca_id: str = None,
):
-
try:
k8sclustertype = k8s_instance_info["k8scluster-type"]
# Instantiate kdu
pass
self._write_all_config_status(db_nsr=db_nsr, status="DELETED")
- async def _terminate_RO(
- self, logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
- ):
- """
- Terminates a deployment from RO
- :param logging_text:
- :param nsr_deployed: db_nsr._admin.deployed
- :param nsr_id:
- :param nslcmop_id:
- :param stage: list of string with the content to write on db_nslcmop.detailed-status.
- this method will update only the index 2, but it will write on database the concatenated content of the list
- :return:
- """
- db_nsr_update = {}
- failed_detail = []
- ro_nsr_id = ro_delete_action = None
- if nsr_deployed and nsr_deployed.get("RO"):
- ro_nsr_id = nsr_deployed["RO"].get("nsr_id")
- ro_delete_action = nsr_deployed["RO"].get("nsr_delete_action_id")
- try:
- if ro_nsr_id:
- stage[2] = "Deleting ns from VIM."
- db_nsr_update["detailed-status"] = " ".join(stage)
- self._write_op_status(nslcmop_id, stage)
- self.logger.debug(logging_text + stage[2])
- self.update_db_2("nsrs", nsr_id, db_nsr_update)
- self._write_op_status(nslcmop_id, stage)
- desc = await self.RO.delete("ns", ro_nsr_id)
- ro_delete_action = desc["action_id"]
- db_nsr_update[
- "_admin.deployed.RO.nsr_delete_action_id"
- ] = ro_delete_action
- db_nsr_update["_admin.deployed.RO.nsr_id"] = None
- db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
- if ro_delete_action:
- # wait until NS is deleted from VIM
- stage[2] = "Waiting ns deleted from VIM."
- detailed_status_old = None
- self.logger.debug(
- logging_text
- + stage[2]
- + " RO_id={} ro_delete_action={}".format(
- ro_nsr_id, ro_delete_action
- )
- )
- self.update_db_2("nsrs", nsr_id, db_nsr_update)
- self._write_op_status(nslcmop_id, stage)
-
- delete_timeout = 20 * 60 # 20 minutes
- while delete_timeout > 0:
- desc = await self.RO.show(
- "ns",
- item_id_name=ro_nsr_id,
- extra_item="action",
- extra_item_id=ro_delete_action,
- )
-
- # deploymentStatus
- self._on_update_ro_db(nsrs_id=nsr_id, ro_descriptor=desc)
-
- ns_status, ns_status_info = self.RO.check_action_status(desc)
- if ns_status == "ERROR":
- raise ROclient.ROClientException(ns_status_info)
- elif ns_status == "BUILD":
- stage[2] = "Deleting from VIM {}".format(ns_status_info)
- elif ns_status == "ACTIVE":
- db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
- db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
- break
- else:
- assert (
- False
- ), "ROclient.check_action_status returns unknown {}".format(
- ns_status
- )
- if stage[2] != detailed_status_old:
- detailed_status_old = stage[2]
- db_nsr_update["detailed-status"] = " ".join(stage)
- self._write_op_status(nslcmop_id, stage)
- self.update_db_2("nsrs", nsr_id, db_nsr_update)
- await asyncio.sleep(5, loop=self.loop)
- delete_timeout -= 5
- else: # delete_timeout <= 0:
- raise ROclient.ROClientException(
- "Timeout waiting ns deleted from VIM"
- )
-
- except Exception as e:
- self.update_db_2("nsrs", nsr_id, db_nsr_update)
- if (
- isinstance(e, ROclient.ROClientException) and e.http_code == 404
- ): # not found
- db_nsr_update["_admin.deployed.RO.nsr_id"] = None
- db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
- db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
- self.logger.debug(
- logging_text + "RO_ns_id={} already deleted".format(ro_nsr_id)
- )
- elif (
- isinstance(e, ROclient.ROClientException) and e.http_code == 409
- ): # conflict
- failed_detail.append("delete conflict: {}".format(e))
- self.logger.debug(
- logging_text
- + "RO_ns_id={} delete conflict: {}".format(ro_nsr_id, e)
- )
- else:
- failed_detail.append("delete error: {}".format(e))
- self.logger.error(
- logging_text + "RO_ns_id={} delete error: {}".format(ro_nsr_id, e)
- )
-
- # Delete nsd
- if not failed_detail and deep_get(nsr_deployed, ("RO", "nsd_id")):
- ro_nsd_id = nsr_deployed["RO"]["nsd_id"]
- try:
- stage[2] = "Deleting nsd from RO."
- db_nsr_update["detailed-status"] = " ".join(stage)
- self.update_db_2("nsrs", nsr_id, db_nsr_update)
- self._write_op_status(nslcmop_id, stage)
- await self.RO.delete("nsd", ro_nsd_id)
- self.logger.debug(
- logging_text + "ro_nsd_id={} deleted".format(ro_nsd_id)
- )
- db_nsr_update["_admin.deployed.RO.nsd_id"] = None
- except Exception as e:
- if (
- isinstance(e, ROclient.ROClientException) and e.http_code == 404
- ): # not found
- db_nsr_update["_admin.deployed.RO.nsd_id"] = None
- self.logger.debug(
- logging_text + "ro_nsd_id={} already deleted".format(ro_nsd_id)
- )
- elif (
- isinstance(e, ROclient.ROClientException) and e.http_code == 409
- ): # conflict
- failed_detail.append(
- "ro_nsd_id={} delete conflict: {}".format(ro_nsd_id, e)
- )
- self.logger.debug(logging_text + failed_detail[-1])
- else:
- failed_detail.append(
- "ro_nsd_id={} delete error: {}".format(ro_nsd_id, e)
- )
- self.logger.error(logging_text + failed_detail[-1])
-
- if not failed_detail and deep_get(nsr_deployed, ("RO", "vnfd")):
- for index, vnf_deployed in enumerate(nsr_deployed["RO"]["vnfd"]):
- if not vnf_deployed or not vnf_deployed["id"]:
- continue
- try:
- ro_vnfd_id = vnf_deployed["id"]
- stage[
- 2
- ] = "Deleting member_vnf_index={} ro_vnfd_id={} from RO.".format(
- vnf_deployed["member-vnf-index"], ro_vnfd_id
- )
- db_nsr_update["detailed-status"] = " ".join(stage)
- self.update_db_2("nsrs", nsr_id, db_nsr_update)
- self._write_op_status(nslcmop_id, stage)
- await self.RO.delete("vnfd", ro_vnfd_id)
- self.logger.debug(
- logging_text + "ro_vnfd_id={} deleted".format(ro_vnfd_id)
- )
- db_nsr_update["_admin.deployed.RO.vnfd.{}.id".format(index)] = None
- except Exception as e:
- if (
- isinstance(e, ROclient.ROClientException) and e.http_code == 404
- ): # not found
- db_nsr_update[
- "_admin.deployed.RO.vnfd.{}.id".format(index)
- ] = None
- self.logger.debug(
- logging_text
- + "ro_vnfd_id={} already deleted ".format(ro_vnfd_id)
- )
- elif (
- isinstance(e, ROclient.ROClientException) and e.http_code == 409
- ): # conflict
- failed_detail.append(
- "ro_vnfd_id={} delete conflict: {}".format(ro_vnfd_id, e)
- )
- self.logger.debug(logging_text + failed_detail[-1])
- else:
- failed_detail.append(
- "ro_vnfd_id={} delete error: {}".format(ro_vnfd_id, e)
- )
- self.logger.error(logging_text + failed_detail[-1])
-
- if failed_detail:
- stage[2] = "Error deleting from VIM"
- else:
- stage[2] = "Deleted from VIM"
- db_nsr_update["detailed-status"] = " ".join(stage)
- self.update_db_2("nsrs", nsr_id, db_nsr_update)
- self._write_op_status(nslcmop_id, stage)
-
- if failed_detail:
- raise LcmException("; ".join(failed_detail))
-
async def terminate(self, nsr_id, nslcmop_id):
# Try to lock HA task here
task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
)
)
- else:
- task_delete_ro = asyncio.ensure_future(
- self._terminate_RO(
- logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
- )
- )
- tasks_dict_info[task_delete_ro] = "Removing deployment from VIM"
+ tasks_dict_info[task_delete_ro] = "Removing deployment from VIM"
# rest of staff will be done at finally
nslcmop_operation_state = None
error_description_nslcmop = None
exc = None
+ step = ""
try:
# wait for any previous tasks in process
step = "Waiting for previous operations to terminate"
del desc_params["kdu_model"]
else:
kdu_model = kdu.get("kdu-model")
- parts = kdu_model.split(sep=":")
- if len(parts) == 2:
- kdu_model = parts[0]
+ if kdu_model.count("/") < 2: # helm chart is not embedded
+ parts = kdu_model.split(sep=":")
+ if len(parts) == 2:
+ kdu_model = parts[0]
if desc_params.get("kdu_atomic_upgrade"):
atomic_upgrade = desc_params.get(
"kdu_atomic_upgrade"
raise
except Exception as e:
-
self.logger.debug("Error upgrading charm {}".format(path))
return "FAILED", "Error upgrading charm {}: {}".format(path, e)
exc = None
change_type = "updated"
detailed_status = ""
+ member_vnf_index = None
try:
# wait for any previous tasks in process
nsr_deployed = db_nsr["_admin"].get("deployed")
if update_type == "CHANGE_VNFPKG":
-
# Get the input parameters given through update request
vnf_instance_id = db_nslcmop["operationParams"][
"changeVnfPackageData"
step = "Checking if revision has changed in VNFD"
if current_vnf_revision != latest_vnfd_revision:
-
change_type = "policy_updated"
# There is new revision of VNFD, update operation is required
step = "Getting descriptor config"
if current_vnfd.get("kdu"):
-
search_key = "kdu_name"
else:
search_key = "vnfd_id"
# There could be several charm used in the same VNF
for ee_item in ee_list:
if ee_item.get("juju"):
-
step = "Getting charm name"
charm_name = ee_item["juju"].get("charm")
if find_software_version(current_vnfd) != find_software_version(
latest_vnfd
):
-
step = "Checking if existing VNF has charm"
for current_charm_path, target_charm_path in list(
charm_artifact_paths
current_charm_path, target_charm_path
)
):
-
step = "Checking whether VNF uses juju bundle"
if check_juju_bundle_existence(current_vnfd):
-
raise LcmException(
"Charm upgrade is not supported for the instance which"
" uses juju-bundle: {}".format(
"nslcmop_id": nslcmop_id,
"operationState": nslcmop_operation_state,
}
- if change_type in ("vnf_terminated", "policy_updated"):
+ if (
+ change_type in ("vnf_terminated", "policy_updated")
+ and member_vnf_index
+ ):
msg.update({"vnf_member_index": member_vnf_index})
await self.msg.aiowrite("ns", change_type, msg, loop=self.loop)
except Exception as e:
vnfr_id: str,
nsr_id: str,
target_ip: str,
+ element_type: str,
vnf_member_index: str = "",
vdu_id: str = "",
vdu_index: int = None,
vnfr_id (str): VNFR ID where this EE applies
nsr_id (str): NSR ID where this EE applies
target_ip (str): VDU/KDU instance IP address
+ element_type (str): NS or VNF or VDU or KDU
vnf_member_index (str, optional): VNF index where this EE applies. Defaults to "".
vdu_id (str, optional): VDU ID where this EE applies. Defaults to "".
vdu_index (int, optional): VDU index where this EE applies. Defaults to None.
Returns:
_type_: Prometheus jobs
"""
- self.logger.debug(f"KDU: {kdu_name}; KDU INDEX: {kdu_index}")
+ # default the vdur and kdur names to an empty string, to avoid any later
+ # problem with Prometheus when the element type is not VDU or KDU
+ vdur_name = ""
+ kdur_name = ""
+
# look if exist a file called 'prometheus*.j2' and
artifact_content = self.fs.dir_ls(artifact_path)
job_file = next(
with self.fs.file_open((artifact_path, job_file), "r") as f:
job_data = f.read()
- vdur_name = ""
- kdur_name = ""
- for r in range(360):
- db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
- if vdu_id and vdu_index is not None:
- vdur = next(
- (
- x
- for x in get_iterable(db_vnfr, "vdur")
- if (
- x.get("vdu-id-ref") == vdu_id
- and x.get("count-index") == vdu_index
- )
- ),
- {},
- )
- if vdur.get("name"):
- vdur_name = vdur.get("name")
- break
- if kdu_name and kdu_index is not None:
- kdur = next(
- (
- x
- for x in get_iterable(db_vnfr, "kdur")
- if (
- x.get("kdu-name") == kdu_name
- and x.get("count-index") == kdu_index
- )
- ),
- {},
- )
- if kdur.get("name"):
- kdur_name = kdur.get("name")
- break
+ # obtain the VDUR or KDUR, if the element type is VDU or KDU
+ if element_type in ("VDU", "KDU"):
+ for _ in range(360):
+ db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
+ if vdu_id and vdu_index is not None:
+ vdur = next(
+ (
+ x
+ for x in get_iterable(db_vnfr, "vdur")
+ if (
+ x.get("vdu-id-ref") == vdu_id
+ and x.get("count-index") == vdu_index
+ )
+ ),
+ {},
+ )
+ if vdur.get("name"):
+ vdur_name = vdur.get("name")
+ break
+ if kdu_name and kdu_index is not None:
+ kdur = next(
+ (
+ x
+ for x in get_iterable(db_vnfr, "kdur")
+ if (
+ x.get("kdu-name") == kdu_name
+ and x.get("count-index") == kdu_index
+ )
+ ),
+ {},
+ )
+ if kdur.get("name"):
+ kdur_name = kdur.get("name")
+ break
- await asyncio.sleep(10, loop=self.loop)
- else:
- if vdu_id and vdu_index is not None:
- raise LcmException(
- f"Timeout waiting VDU with name={vdu_id} and index={vdu_index} to be intantiated"
- )
- if kdu_name and kdu_index is not None:
- raise LcmException(
- f"Timeout waiting KDU with name={kdu_name} and index={kdu_index} to be intantiated"
- )
+ await asyncio.sleep(10, loop=self.loop)
+ else:
+ if vdu_id and vdu_index is not None:
+ raise LcmException(
+                        f"Timeout waiting VDU with name={vdu_id} and index={vdu_index} to be instantiated"
+ )
+ if kdu_name and kdu_index is not None:
+ raise LcmException(
+                        f"Timeout waiting KDU with name={kdu_name} and index={kdu_index} to be instantiated"
+ )
# TODO get_service
_, _, service = ee_id.partition(".") # remove prefix "namespace."
"VNF_MEMBER_INDEX": vnf_member_index,
"VDUR_NAME": vdur_name,
"KDUR_NAME": kdur_name,
+ "ELEMENT_TYPE": element_type,
}
job_list = parse_job(job_data, variables)
# ensure job_name is using the vnfr_id. Adding the metadata nsr_id
}
step = ""
try:
-
element_type = "NS"
element_under_configuration = nsr_id