self.n2vc = N2VCJujuConnector(
log=self.logger,
loop=self.loop,
- url='{}:{}'.format(self.vca_config['host'], self.vca_config['port']),
- username=self.vca_config.get('user', None),
- vca_config=self.vca_config,
on_update_db=self._on_update_n2vc_db,
fs=self.fs,
db=self.db
self.conn_helm_ee = LCMHelmConn(
log=self.logger,
loop=self.loop,
- url=None,
- username=None,
vca_config=self.vca_config,
on_update_db=self._on_update_n2vc_db
)
juju_command=self.vca_config.get("jujupath"),
log=self.logger,
loop=self.loop,
- on_update_db=None,
- vca_config=self.vca_config,
+ on_update_db=self._on_update_k8s_db,
fs=self.fs,
db=self.db
)
except Exception as e:
self.logger.warn('Cannot write database RO deployment for ns={} -> {}'.format(nsrs_id, e))
- async def _on_update_n2vc_db(self, table, filter, path, updated_data):
+ async def _on_update_n2vc_db(self, table, filter, path, updated_data, vca_id=None):
# remove last dot from path (if exists)
if path.endswith('.'):
# self.logger.debug('_on_update_n2vc_db(table={}, filter={}, path={}, updated_data={}'
# .format(table, filter, path, updated_data))
-
try:
nsr_id = filter.get('_id')
current_ns_status = nsr.get('nsState')
# get vca status for NS
- status_dict = await self.n2vc.get_status(namespace='.' + nsr_id, yaml_format=False)
+ status_dict = await self.n2vc.get_status(namespace='.' + nsr_id, yaml_format=False, vca_id=vca_id)
# vcaStatus
db_dict = dict()
db_dict['vcaStatus'] = status_dict
+ await self.n2vc.update_vca_status(db_dict['vcaStatus'], vca_id=vca_id)
# update configurationStatus for this VCA
try:
except Exception as e:
self.logger.warn('Error updating NS state for ns={}: {}'.format(nsr_id, e))
async def _on_update_k8s_db(self, cluster_uuid, kdu_instance, filter=None, vca_id=None):
    """
    Update the vca status of a KDU instance in the NSR record.

    :param cluster_uuid: UUID of a k8s cluster
    :param kdu_instance: The unique name of the KDU instance
    :param filter: DB filter used to obtain nsr_id (expects key '_id')
    :param vca_id: VCA account id to query status against
    :return: none
    """

    # self.logger.debug("_on_update_k8s_db(cluster_uuid={}, kdu_instance={}, filter={}"
    #                   .format(cluster_uuid, kdu_instance, filter))

    # initialize before the try so the except handler can always log it;
    # guard against filter=None (default) which would raise AttributeError
    nsr_id = None
    try:
        nsr_id = (filter or {}).get('_id')

        # get vca status for NS
        vca_status = await self.k8sclusterjuju.status_kdu(
            cluster_uuid,
            kdu_instance,
            complete_status=True,
            yaml_format=False,
            vca_id=vca_id,
        )
        # vcaStatus
        db_dict = dict()
        db_dict['vcaStatus'] = {nsr_id: vca_status}

        await self.k8sclusterjuju.update_vca_status(
            db_dict['vcaStatus'],
            kdu_instance,
            vca_id=vca_id,
        )

        # write to database
        self.update_db_2("nsrs", nsr_id, db_dict)

    except (asyncio.CancelledError, asyncio.TimeoutError):
        # cancellation/timeout must propagate to the caller untouched
        raise
    except Exception as e:
        self.logger.warn('Error updating NS state for ns={}: {}'.format(nsr_id, e))
+
@staticmethod
def _parse_cloud_init(cloud_init_text, additional_params, vnfd_id, vdu_id):
try:
for param in ("vim-network-name", "vim-network-id"):
if vld_params.get(param):
if isinstance(vld_params[param], dict):
- for vim, vim_net in vld_params[param]:
+ for vim, vim_net in vld_params[param].items():
other_target_vim = "vim:" + vim
populate_dict(target_vld["vim_info"], (other_target_vim, param.replace("-", "_")), vim_net)
else: # isinstance str
# check at nsd descriptor, if there is an ip-profile
vld_params = {}
- virtual_link_profiles = get_virtual_link_profiles(nsd)
+ nsd_vlp = find_in_list(
+ get_virtual_link_profiles(nsd),
+ lambda a_link_profile: a_link_profile["virtual-link-desc-id"] == vld["id"])
+ if nsd_vlp and nsd_vlp.get("virtual-link-protocol-data") and \
+ nsd_vlp["virtual-link-protocol-data"].get("l3-protocol-data"):
+ ip_profile_source_data = nsd_vlp["virtual-link-protocol-data"]["l3-protocol-data"]
+ ip_profile_dest_data = {}
+ if "ip-version" in ip_profile_source_data:
+ ip_profile_dest_data["ip-version"] = ip_profile_source_data["ip-version"]
+ if "cidr" in ip_profile_source_data:
+ ip_profile_dest_data["subnet-address"] = ip_profile_source_data["cidr"]
+ if "gateway-ip" in ip_profile_source_data:
+ ip_profile_dest_data["gateway-address"] = ip_profile_source_data["gateway-ip"]
+ if "dhcp-enabled" in ip_profile_source_data:
+ ip_profile_dest_data["dhcp-params"] = {
+ "enabled": ip_profile_source_data["dhcp-enabled"]
+ }
+ vld_params["ip-profile"] = ip_profile_dest_data
- for vlp in virtual_link_profiles:
- ip_profile = find_in_list(nsd["ip-profiles"],
- lambda profile: profile["name"] == vlp["ip-profile-ref"])
- vld_params["ip-profile"] = ip_profile["ip-profile-params"]
# update vld_params with instantiation params
vld_instantiation_params = find_in_list(get_iterable(ns_params, "vld"),
lambda a_vld: a_vld["name"] in (vld["name"], vld["id"]))
raise LcmException("Configuration aborted because dependent charm/s timeout")
def get_vca_id(self, db_vnfr: dict, db_nsr: dict):
    """
    Return the VCA account id for a record pair.

    The vnfr's own "vca-id" wins; otherwise fall back to the NS
    instantiation parameter "vcaId". Returns None when neither is set.

    :param db_vnfr: vnfr record (may be empty)
    :param db_nsr: nsr record (may be empty)
    :return: the VCA id, or None
    """
    vca_id = None
    if isinstance(db_vnfr, dict):
        vca_id = db_vnfr.get("vca-id")
    if not vca_id and isinstance(db_nsr, dict):
        instantiate_params = db_nsr.get("instantiate_params")
        if isinstance(instantiate_params, dict):
            vca_id = instantiate_params.get("vcaId")
    return vca_id
+
async def instantiate_N2VC(self, logging_text, vca_index, nsi_id, db_nsr, db_vnfr, vdu_id, kdu_name, vdu_index,
config_descriptor, deploy_params, base_folder, nslcmop_id, stage, vca_type, vca_name,
ee_config_descriptor):
# find old ee_id if exists
ee_id = vca_deployed.get("ee_id")
- vim_account_id = (
- deep_get(db_vnfr, ("vim-account-id",)) or
- deep_get(deploy_params, ("OSM", "vim_account_id"))
- )
- vca_cloud, vca_cloud_credential = self.get_vca_cloud_and_credentials(vim_account_id)
- vca_k8s_cloud, vca_k8s_cloud_credential = self.get_vca_k8s_cloud_and_credentials(vim_account_id)
+ vca_id = self.get_vca_id(db_vnfr, db_nsr)
# create or register execution environment in VCA
if vca_type in ("lxc_proxy_charm", "k8s_proxy_charm", "helm", "helm-v3"):
namespace=namespace,
artifact_path=artifact_path,
db_dict=db_dict,
- cloud_name=vca_k8s_cloud,
- credential_name=vca_k8s_cloud_credential,
+ vca_id=vca_id,
)
- elif vca_type == "helm" or vca_type == "helm-v3":
+ elif vca_type == "helm" or vca_type == "helm-v3":
ee_id, credentials = await self.vca_map[vca_type].create_execution_environment(
namespace=namespace,
reuse_ee_id=ee_id,
namespace=namespace,
reuse_ee_id=ee_id,
db_dict=db_dict,
- cloud_name=vca_cloud,
- credential_name=vca_cloud_credential,
+ vca_id=vca_id,
)
elif vca_type == "native_charm":
credentials=credentials,
namespace=namespace,
db_dict=db_dict,
- cloud_name=vca_cloud,
- credential_name=vca_cloud_credential,
+ vca_id=vca_id,
)
# for compatibility with MON/POL modules, the need model and application name at database
db_dict=db_dict,
config=config,
num_units=num_units,
+ vca_id=vca_id,
)
# write in db flag of configuration_sw already installed
# add relations for this VCA (wait for other peers related with this VCA)
await self._add_vca_relations(logging_text=logging_text, nsr_id=nsr_id,
- vca_index=vca_index, vca_type=vca_type)
+ vca_index=vca_index, vca_id=vca_id, vca_type=vca_type)
# if SSH access is required, then get execution environment SSH public
# if native charm we have waited already to VM be UP
# Needed to inject a ssh key
user = deep_get(config_descriptor, ("config-access", "ssh-access", "default-user"))
step = "Install configuration Software, getting public ssh key"
- pub_key = await self.vca_map[vca_type].get_ee_ssh_public__key(ee_id=ee_id, db_dict=db_dict)
+ pub_key = await self.vca_map[vca_type].get_ee_ssh_public__key(
+ ee_id=ee_id,
+ db_dict=db_dict,
+ vca_id=vca_id
+ )
step = "Insert public key into VM user={} ssh_key={}".format(user, pub_key)
else:
ee_id=ee_id,
primitive_name=initial_config_primitive["name"],
params_dict=primitive_params_,
- db_dict=db_dict
+ db_dict=db_dict,
+ vca_id=vca_id,
)
# Once some primitive has been exec, check and write at db if it needs to exec terminated primitives
if check_if_terminated_needed:
# wait for any previous tasks in process
await self.lcm_tasks.waitfor_related_HA('ns', 'nslcmops', nslcmop_id)
- stage[1] = "Sync filesystem from database."
- self.fs.sync() # TODO, make use of partial sync, only for the needed packages
-
# STEP 0: Reading database (nslcmops, nsrs, nsds, vnfrs, vnfds)
stage[1] = "Reading from database."
# nsState="BUILDING", currentOperation="INSTANTIATING", currentOperationID=nslcmop_id
db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
stage[1] = "Getting nsd={} from db.".format(db_nsr["nsd-id"])
nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
+ self.fs.sync(db_nsr["nsd-id"])
db_nsr["nsd"] = nsd
# nsr_name = db_nsr["name"] # TODO short-name??
db_vnfrs[vnfr["member-vnf-index-ref"]] = vnfr
vnfd_id = vnfr["vnfd-id"]
vnfd_ref = vnfr["vnfd-ref"]
+ self.fs.sync(vnfd_id)
# if we haven't this vnfd, read it from db
if vnfd_id not in db_vnfds:
self.logger.debug(logging_text + "Exit")
self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_instantiate")
- async def _add_vca_relations(self, logging_text, nsr_id, vca_index: int,
- timeout: int = 3600, vca_type: str = None) -> bool:
+ async def _add_vca_relations(
+ self,
+ logging_text,
+ nsr_id,
+ vca_index: int,
+ timeout: int = 3600,
+ vca_type: str = None,
+ vca_id: str = None,
+ ) -> bool:
# steps:
# 1. find all relations for this VCA
db_vnfd_list = db_nsr.get('vnfd-id')
if db_vnfd_list:
for vnfd in db_vnfd_list:
+ db_vnf_relations = None
db_vnfd = self.db.get_one("vnfds", {"_id": vnfd})
- db_vnf_relations = get_configuration(db_vnfd, db_vnfd["id"]).get("relation", [])
+ db_vnf_configuration = get_configuration(db_vnfd, db_vnfd["id"])
+ if db_vnf_configuration:
+ db_vnf_relations = db_vnf_configuration.get("relation", [])
if db_vnf_relations:
for r in db_vnf_relations:
# check if this VCA is in the relation
ee_id_1=from_vca_ee_id,
ee_id_2=to_vca_ee_id,
endpoint_1=from_vca_endpoint,
- endpoint_2=to_vca_endpoint)
+ endpoint_2=to_vca_endpoint,
+ vca_id=vca_id,
+ )
# remove entry from relations list
ns_relations.remove(r)
else:
ee_id_1=from_vca_ee_id,
ee_id_2=to_vca_ee_id,
endpoint_1=from_vca_endpoint,
- endpoint_2=to_vca_endpoint)
+ endpoint_2=to_vca_endpoint,
+ vca_id=vca_id,
+ )
# remove entry from relations list
vnf_relations.remove(r)
else:
return False
async def _install_kdu(self, nsr_id: str, nsr_db_path: str, vnfr_data: dict, kdu_index: int, kdud: dict,
- vnfd: dict, k8s_instance_info: dict, k8params: dict = None, timeout: int = 600):
+ vnfd: dict, k8s_instance_info: dict, k8params: dict = None, timeout: int = 600,
+ vca_id: str = None):
try:
k8sclustertype = k8s_instance_info["k8scluster-type"]
"filter": {"_id": nsr_id},
"path": nsr_db_path}
- kdu_instance = await self.k8scluster_map[k8sclustertype].install(
+ kdu_instance = self.k8scluster_map[k8sclustertype].generate_kdu_instance_name(
+ db_dict=db_dict_install,
+ kdu_model=k8s_instance_info["kdu-model"],
+ kdu_name=k8s_instance_info["kdu-name"],
+ )
+ self.update_db_2("nsrs", nsr_id, {nsr_db_path + ".kdu-instance": kdu_instance})
+ await self.k8scluster_map[k8sclustertype].install(
cluster_uuid=k8s_instance_info["k8scluster-uuid"],
kdu_model=k8s_instance_info["kdu-model"],
atomic=True,
db_dict=db_dict_install,
timeout=timeout,
kdu_name=k8s_instance_info["kdu-name"],
- namespace=k8s_instance_info["namespace"])
+ namespace=k8s_instance_info["namespace"],
+ kdu_instance=kdu_instance,
+ vca_id=vca_id,
+ )
self.update_db_2("nsrs", nsr_id, {nsr_db_path + ".kdu-instance": kdu_instance})
# Obtain services to obtain management service ip
# Obtain management service info (if exists)
vnfr_update_dict = {}
+ kdu_config = get_configuration(vnfd, kdud["name"])
+ if kdu_config:
+ target_ee_list = kdu_config.get("execution-environment-list", [])
+ else:
+ target_ee_list = []
+
if services:
vnfr_update_dict["kdur.{}.services".format(kdu_index)] = services
mgmt_services = [service for service in kdud.get("service", []) if service.get("mgmt-service")]
if deep_get(vnfd, ("mgmt-interface", "cp")) == service_external_cp:
vnfr_update_dict["ip-address"] = ip
+ if find_in_list(
+ target_ee_list,
+ lambda ee: ee.get("external-connection-point-ref", "") == service_external_cp
+ ):
+ vnfr_update_dict["kdur.{}.ip-address".format(kdu_index)] = ip
break
else:
self.logger.warn("Mgmt service name: {} not found".format(mgmt_service["name"]))
cluster_uuid=k8s_instance_info["k8scluster-uuid"],
kdu_instance=kdu_instance,
primitive_name=initial_config_primitive["name"],
- params=primitive_params_, db_dict={}),
- timeout=timeout)
+ params=primitive_params_, db_dict=db_dict_install,
+ vca_id=vca_id,
+ ),
+ timeout=timeout
+ )
except Exception as e:
# Prepare update db with error and raise exception
updated_v3_cluster_list = []
for vnfr_data in db_vnfrs.values():
+ vca_id = self.get_vca_id(vnfr_data, {})
for kdu_index, kdur in enumerate(get_iterable(vnfr_data, "kdur")):
# Step 0: Prepare and set parameters
desc_params = parse_yaml_strings(kdur.get("additionalParams"))
vnfd_with_id = find_in_list(db_vnfds, lambda vnf: vnf["_id"] == vnfd_id)
task = asyncio.ensure_future(
self._install_kdu(nsr_id, db_path, vnfr_data, kdu_index, kdud, vnfd_with_id,
- k8s_instance_info, k8params=desc_params, timeout=600))
+ k8s_instance_info, k8params=desc_params, timeout=600, vca_id=vca_id))
self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "instantiate_KDU-{}".format(index), task)
task_instantiation_info[task] = "Deploying KDU {}".format(kdur["kdu-name"])
if vca["member-vnf-index"] == vnf_index and vca["vdu_id"] == vdu_id:
return vca["ee_id"]
- async def destroy_N2VC(self, logging_text, db_nslcmop, vca_deployed, config_descriptor,
- vca_index, destroy_ee=True, exec_primitives=True, scaling_in=False):
+ async def destroy_N2VC(
+ self,
+ logging_text,
+ db_nslcmop,
+ vca_deployed,
+ config_descriptor,
+ vca_index,
+ destroy_ee=True,
+ exec_primitives=True,
+ scaling_in=False,
+ vca_id: str = None,
+ ):
"""
Execute the terminate primitives and destroy the execution environment (if destroy_ee=False
:param logging_text:
mapped_primitive_params)
# Sub-operations: Call _ns_execute_primitive() instead of action()
try:
- result, result_detail = await self._ns_execute_primitive(vca_deployed["ee_id"], primitive,
- mapped_primitive_params,
- vca_type=vca_type)
+ result, result_detail = await self._ns_execute_primitive(
+ vca_deployed["ee_id"], primitive,
+ mapped_primitive_params,
+ vca_type=vca_type,
+ vca_id=vca_id,
+ )
except LcmException:
# this happens when VCA is not deployed. In this case it is not needed to terminate
continue
await self.prometheus.update(remove_jobs=vca_deployed["prometheus_jobs"])
if destroy_ee:
- await self.vca_map[vca_type].delete_execution_environment(vca_deployed["ee_id"], scaling_in=scaling_in)
+ await self.vca_map[vca_type].delete_execution_environment(
+ vca_deployed["ee_id"],
+ scaling_in=scaling_in,
+ vca_id=vca_id,
+ )
- async def _delete_all_N2VC(self, db_nsr: dict):
+ async def _delete_all_N2VC(self, db_nsr: dict, vca_id: str = None):
self._write_all_config_status(db_nsr=db_nsr, status='TERMINATING')
namespace = "." + db_nsr["_id"]
try:
- await self.n2vc.delete_namespace(namespace=namespace, total_timeout=self.timeout_charm_delete)
+ await self.n2vc.delete_namespace(
+ namespace=namespace,
+ total_timeout=self.timeout_charm_delete,
+ vca_id=vca_id,
+ )
except N2VCNotFound: # already deleted. Skip
pass
self._write_all_config_status(db_nsr=db_nsr, status='DELETED')
stage[1] = "Getting vnf descriptors from db."
db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
+ db_vnfrs_dict = {db_vnfr["member-vnf-index-ref"]: db_vnfr for db_vnfr in db_vnfrs_list}
db_vnfds_from_id = {}
db_vnfds_from_member_index = {}
# Loop over VNFRs
for vca_index, vca in enumerate(get_iterable(nsr_deployed, "VCA")):
config_descriptor = None
+
+ vca_id = self.get_vca_id(db_vnfrs_dict[vca["member-vnf-index"]], db_nsr)
if not vca or not vca.get("ee_id"):
continue
if not vca.get("member-vnf-index"):
# self.logger.debug(logging_text + "vca_index: {}, ee_id: {}, vca_type: {} destroy_ee: {}".format(
# vca_index, vca.get("ee_id"), vca_type, destroy_ee))
task = asyncio.ensure_future(
- self.destroy_N2VC(logging_text, db_nslcmop, vca, config_descriptor, vca_index,
- destroy_ee, exec_terminate_primitives))
+ self.destroy_N2VC(
+ logging_text,
+ db_nslcmop,
+ vca,
+ config_descriptor,
+ vca_index,
+ destroy_ee,
+ exec_terminate_primitives,
+ vca_id=vca_id,
+ )
+ )
tasks_dict_info[task] = "Terminating VCA {}".format(vca.get("ee_id"))
# wait for pending tasks of terminate primitives
if nsr_deployed.get("VCA"):
stage[1] = "Deleting all execution environments."
self.logger.debug(logging_text + stage[1])
- task_delete_ee = asyncio.ensure_future(asyncio.wait_for(self._delete_all_N2VC(db_nsr=db_nsr),
- timeout=self.timeout_charm_delete))
+ vca_id = self.get_vca_id({}, db_nsr)
+ task_delete_ee = asyncio.ensure_future(
+ asyncio.wait_for(
+ self._delete_all_N2VC(db_nsr=db_nsr, vca_id=vca_id),
+ timeout=self.timeout_charm_delete
+ )
+ )
# task_delete_ee = asyncio.ensure_future(self.n2vc.delete_namespace(namespace="." + nsr_id))
tasks_dict_info[task_delete_ee] = "Terminating all VCA"
continue
kdu_instance = kdu.get("kdu-instance")
if kdu.get("k8scluster-type") in self.k8scluster_map:
+ # TODO: Uninstall kdu instances taking into account they could be deployed in different VIMs
+ vca_id = self.get_vca_id({}, db_nsr)
task_delete_kdu_instance = asyncio.ensure_future(
self.k8scluster_map[kdu["k8scluster-type"]].uninstall(
cluster_uuid=kdu.get("k8scluster-uuid"),
- kdu_instance=kdu_instance))
+ kdu_instance=kdu_instance,
+ vca_id=vca_id,
+ )
+ )
else:
self.logger.error(logging_text + "Unknown k8s deployment type {}".
format(kdu.get("k8scluster-type")))
.format(member_vnf_index, vdu_id, kdu_name, vdu_count_index))
return ee_id, vca_type
- async def _ns_execute_primitive(self, ee_id, primitive, primitive_params, retries=0, retries_interval=30,
- timeout=None, vca_type=None, db_dict=None) -> (str, str):
+ async def _ns_execute_primitive(
+ self,
+ ee_id,
+ primitive,
+ primitive_params,
+ retries=0,
+ retries_interval=30,
+ timeout=None,
+ vca_type=None,
+ db_dict=None,
+ vca_id: str = None,
+ ) -> (str, str):
try:
if primitive == "config":
primitive_params = {"params": primitive_params}
params_dict=primitive_params,
progress_timeout=self.timeout_progress_primitive,
total_timeout=self.timeout_primitive,
- db_dict=db_dict),
+ db_dict=db_dict,
+ vca_id=vca_id,
+ ),
timeout=timeout or self.timeout_primitive)
# execution was OK
break
except Exception as e:
return 'FAIL', 'Error executing action {}: {}'.format(primitive, e)
async def vca_status_refresh(self, nsr_id, nslcmop_id):
    """
    Update vcaStatus in the nsrs record with the latest juju information.

    :param nsr_id: Id of the nsr
    :param nslcmop_id: Id of the nslcmop
    :return: None
    """

    self.logger.debug("Task ns={} action={} Enter".format(nsr_id, nslcmop_id))
    db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
    # one VCA account id applies to every element deployed for this NS
    vca_id = self.get_vca_id({}, db_nsr)
    if db_nsr['_admin']['deployed']['K8s']:
        for k8s_index, k8s in enumerate(db_nsr['_admin']['deployed']['K8s']):
            cluster_uuid, kdu_instance = k8s["k8scluster-uuid"], k8s["kdu-instance"]
            await self._on_update_k8s_db(cluster_uuid, kdu_instance, filter={'_id': nsr_id}, vca_id=vca_id)
    else:
        for vca_index, _ in enumerate(db_nsr['_admin']['deployed']['VCA']):
            table, filter = "nsrs", {"_id": nsr_id}
            path = "_admin.deployed.VCA.{}.".format(vca_index)
            # pass vca_id so the status query targets the NS's own VCA
            # account instead of the default one
            await self._on_update_n2vc_db(table, filter, path, {}, vca_id=vca_id)

    self.logger.debug("Task ns={} action={} Exit".format(nsr_id, nslcmop_id))
    self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_vca_status_refresh")
+
async def action(self, nsr_id, nslcmop_id):
# Try to lock HA task here
task_is_locked_by_me = self.lcm_tasks.lock_HA('ns', 'nslcmops', nslcmop_id)
step = "Getting nsd from database"
db_nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
+ vca_id = self.get_vca_id(db_vnfr, db_nsr)
# for backward compatibility
if nsr_deployed and isinstance(nsr_deployed.get("VCA"), dict):
nsr_deployed["VCA"] = list(nsr_deployed["VCA"].values())
detailed_status = await asyncio.wait_for(
self.k8scluster_map[kdu["k8scluster-type"]].status_kdu(
cluster_uuid=kdu.get("k8scluster-uuid"),
- kdu_instance=kdu.get("kdu-instance")),
- timeout=timeout_ns_action)
+ kdu_instance=kdu.get("kdu-instance"),
+ vca_id=vca_id,
+ ),
+ timeout=timeout_ns_action
+ )
else:
kdu_instance = kdu.get("kdu-instance") or "{}-{}".format(kdu["kdu-name"], nsr_id)
params = self._map_primitive_params(config_primitive_desc, primitive_params, desc_params)
kdu_instance=kdu_instance,
primitive_name=primitive_name,
params=params, db_dict=db_dict,
- timeout=timeout_ns_action),
- timeout=timeout_ns_action)
+ timeout=timeout_ns_action,
+ vca_id=vca_id,
+ ),
+ timeout=timeout_ns_action
+ )
if detailed_status:
nslcmop_operation_state = 'COMPLETED'
ee_id, vca_type = self._look_for_deployed_vca(nsr_deployed["VCA"], member_vnf_index=vnf_index,
vdu_id=vdu_id, vdu_count_index=vdu_count_index,
ee_descriptor_id=ee_descriptor_id)
- db_nslcmop_notif = {"collection": "nslcmops",
- "filter": {"_id": nslcmop_id},
- "path": "admin.VCA"}
+ for vca_index, vca_deployed in enumerate(db_nsr['_admin']['deployed']['VCA']):
+ if vca_deployed.get("member-vnf-index") == vnf_index:
+ db_dict = {"collection": "nsrs",
+ "filter": {"_id": nsr_id},
+ "path": "_admin.deployed.VCA.{}.".format(vca_index)}
+ break
nslcmop_operation_state, detailed_status = await self._ns_execute_primitive(
ee_id,
primitive=primitive_name,
primitive_params=self._map_primitive_params(config_primitive_desc, primitive_params, desc_params),
timeout=timeout_ns_action,
vca_type=vca_type,
- db_dict=db_nslcmop_notif)
+ db_dict=db_dict,
+ vca_id=vca_id,
+ )
db_nslcmop_update["detailed-status"] = detailed_status
error_description_nslcmop = detailed_status if nslcmop_operation_state == "FAILED" else ""
step = "Getting vnfr from database"
db_vnfr = self.db.get_one("vnfrs", {"member-vnf-index-ref": vnf_index, "nsr-id-ref": nsr_id})
+ vca_id = self.get_vca_id(db_vnfr, db_nsr)
+
step = "Getting vnfd from database"
db_vnfd = self.db.get_one("vnfds", {"_id": db_vnfr["vnfd-id"]})
vdu_count_index=None,
ee_descriptor_id=ee_descriptor_id)
result, result_detail = await self._ns_execute_primitive(
- ee_id, primitive_name, primitive_params, vca_type)
+ ee_id, primitive_name,
+ primitive_params,
+ vca_type=vca_type,
+ vca_id=vca_id,
+ )
self.logger.debug(logging_text + "vnf_config_primitive={} Done with result {} {}".format(
vnf_config_primitive, result, result_detail))
# Update operationState = COMPLETED | FAILED
operation_params = db_nslcmop.get("operationParams") or {}
exec_terminate_primitives = (not operation_params.get("skip_terminate_primitives") and
vca.get("needed_terminate"))
- task = asyncio.ensure_future(asyncio.wait_for(
- self.destroy_N2VC(logging_text, db_nslcmop, vca, config_descriptor,
- vca_index, destroy_ee=True,
- exec_primitives=exec_terminate_primitives,
- scaling_in=True), timeout=self.timeout_charm_delete))
- # wait before next removal
- await asyncio.sleep(30)
+ task = asyncio.ensure_future(
+ asyncio.wait_for(
+ self.destroy_N2VC(
+ logging_text,
+ db_nslcmop,
+ vca,
+ config_descriptor,
+ vca_index,
+ destroy_ee=True,
+ exec_primitives=exec_terminate_primitives,
+ scaling_in=True,
+ vca_id=vca_id,
+ ),
+ timeout=self.timeout_charm_delete
+ )
+ )
tasks_dict_info[task] = "Terminating VCA {}".format(vca.get("ee_id"))
del vca_update[vca_index]
del config_update[vca_index]
task_instantiation_info=tasks_dict_info,
stage=stage
)
- # TODO: scaling for kdu is not implemented yet.
- kdu_name = vdu_info["osm_vdu_id"]
- descriptor_config = get_configuration(db_vnfd, kdu_name)
- if descriptor_config:
- vdu_id = None
- vdu_index = vdu_index
- vdu_name = None
- kdur = next(x for x in db_vnfr["kdur"] if x["kdu-name"] == kdu_name)
- deploy_params_kdu = {"OSM": get_osm_params(db_vnfr)}
- if kdur.get("additionalParams"):
- deploy_params_kdu = parse_yaml_strings(kdur["additionalParams"])
-
- self._deploy_n2vc(
- logging_text=logging_text,
- db_nsr=db_nsr,
- db_vnfr=db_vnfr,
- nslcmop_id=nslcmop_id,
- nsr_id=nsr_id,
- nsi_id=nsi_id,
- vnfd_id=vnfd_id,
- vdu_id=vdu_id,
- kdu_name=kdu_name,
- member_vnf_index=member_vnf_index,
- vdu_index=vdu_index,
- vdu_name=vdu_name,
- deploy_params=deploy_params_kdu,
- descriptor_config=descriptor_config,
- base_folder=base_folder,
- task_instantiation_info=tasks_dict_info,
- stage=stage
- )
# SCALE-UP VCA - END
scale_process = None
vdu_count_index=None,
ee_descriptor_id=ee_descriptor_id)
result, result_detail = await self._ns_execute_primitive(
- ee_id, primitive_name, primitive_params, vca_type)
+ ee_id,
+ primitive_name,
+ primitive_params,
+ vca_type=vca_type,
+ vca_id=vca_id,
+ )
self.logger.debug(logging_text + "vnf_config_primitive={} Done with result {} {}".format(
vnf_config_primitive, result, result_detail))
# Update operationState = COMPLETED | FAILED