X-Git-Url: https://osm.etsi.org/gitweb/?a=blobdiff_plain;f=osm_lcm%2Fns.py;h=aa1802efa2ebdbaa425549d75768bdcfa7355224;hb=17cd492d2220d51830ba0c139c33fda5208e3313;hp=d267065928f53731030f5bc6c07a50d7ff03d3fe;hpb=28b0bf87c3fe2bd99cd3d5665eae484df8c3b44c;p=osm%2FLCM.git

diff --git a/osm_lcm/ns.py b/osm_lcm/ns.py
index d267065..aa1802e 100644
--- a/osm_lcm/ns.py
+++ b/osm_lcm/ns.py
@@ -103,6 +103,11 @@ from osm_common.fsbase import FsException
 from osm_lcm.data_utils.database.database import Database
 from osm_lcm.data_utils.filesystem.filesystem import Filesystem
 
+from osm_lcm.data_utils.wim import (
+    get_sdn_ports,
+    get_target_wim_attrs,
+    select_feasible_wim_account,
+)
 from n2vc.n2vc_juju_conn import N2VCJujuConnector
 from n2vc.exceptions import N2VCException, N2VCNotFound, K8sException
 
@@ -121,14 +126,16 @@ __author__ = "Alfonso Tierno "
 
 
 class NsLcm(LcmBase):
-    timeout_vca_on_error = (
+    timeout_scale_on_error = (
         5 * 60
     )  # Time for charm from first time at blocked,error status to mark as failed
+    timeout_scale_on_error_outer_factor = 1.05  # Factor in relation to timeout_scale_on_error related to the timeout to be applied within the asyncio.wait_for coroutine
     timeout_ns_deploy = 2 * 3600  # default global timeout for deployment a ns
     timeout_ns_terminate = 1800  # default global timeout for un deployment a ns
     timeout_ns_heal = 1800  # default global timeout for un deployment a ns
     timeout_charm_delete = 10 * 60
-    timeout_primitive = 30 * 60  # timeout for primitive execution
+    timeout_primitive = 30 * 60  # Timeout for primitive execution
+    timeout_primitive_outer_factor = 1.05  # Factor in relation to timeout_primitive related to the timeout to be applied within the asyncio.wait_for coroutine
     timeout_ns_update = 30 * 60  # timeout for ns update
     timeout_progress_primitive = (
         10 * 60
@@ -855,9 +862,30 @@ class NsLcm(LcmBase):
                         target_vld["vim_info"][target_sdn]["sdn-ports"] = vld_params[
                             "provider-network"
                         ]["sdn-ports"]
-                if vld_params.get("wimAccountId"):
-                    target_wim = "wim:{}".format(vld_params["wimAccountId"])
-                    target_vld["vim_info"][target_wim] = {}
+
+                # check if WIM is needed; if needed, choose a feasible WIM able to connect VIMs
+                # if wim_account_id is specified in vld_params, validate if it is feasible.
+                wim_account_id, db_wim = select_feasible_wim_account(
+                    db_nsr, db_vnfrs, target_vld, vld_params, self.logger
+                )
+
+                if wim_account_id:
+                    # WIM is needed and a feasible one was found, populate WIM target and SDN ports
+                    self.logger.info("WIM selected: {:s}".format(str(wim_account_id)))
+                    # update vld_params with correct WIM account Id
+                    vld_params["wimAccountId"] = wim_account_id
+
+                    target_wim = "wim:{}".format(wim_account_id)
+                    target_wim_attrs = get_target_wim_attrs(nsr_id, target_vld, vld_params)
+                    sdn_ports = get_sdn_ports(vld_params, db_wim)
+                    if len(sdn_ports) > 0:
+                        target_vld["vim_info"][target_wim] = target_wim_attrs
+                        target_vld["vim_info"][target_wim]["sdn-ports"] = sdn_ports
+
+                    self.logger.debug(
+                        "Target VLD with WIM data: {:s}".format(str(target_vld))
+                    )
+
                 for param in ("vim-network-name", "vim-network-id"):
                     if vld_params.get(param):
                         if isinstance(vld_params[param], dict):
@@ -889,6 +917,8 @@ class NsLcm(LcmBase):
                 None,
             )
             vdur = next((vdur for vdur in target_vnf.get("vdur", ())), None)
+            if not vdur:
+                return
             for a_index, a_vld in enumerate(target["ns"]["vld"]):
                 target_vld = find_in_list(
                     get_iterable(vdur, "interfaces"),
@@ -1905,6 +1935,7 @@ class NsLcm(LcmBase):
                     db_dict=db_dict,
                     config=osm_config,
                     artifact_path=artifact_path,
+                    chart_model=vca_name,
                     vca_type=vca_type,
                 )
             else:
@@ -2181,6 +2212,8 @@ class NsLcm(LcmBase):
 
         # STEP 7 Configure metrics
         if vca_type == "helm" or vca_type == "helm-v3":
+            # TODO: review for those cases where the helm chart is a reference and
+            # is not part of the NF package
            prometheus_jobs = await self.extract_prometheus_scrape_jobs(
                ee_id=ee_id,
                artifact_path=artifact_path,
@@ -5253,12 +5286,17 @@ class NsLcm(LcmBase):
                 parts = kdu_model.split(sep=":")
                 if len(parts) == 2:
                     kdu_model = parts[0]
+            if desc_params.get("kdu_atomic_upgrade"):
+                atomic_upgrade = desc_params.get("kdu_atomic_upgrade").lower() in ("yes", "true", "1")
+                del desc_params["kdu_atomic_upgrade"]
+            else:
+                atomic_upgrade = True
 
             detailed_status = await asyncio.wait_for(
                 self.k8scluster_map[kdu["k8scluster-type"]].upgrade(
                     cluster_uuid=kdu.get("k8scluster-uuid"),
                     kdu_instance=kdu.get("kdu-instance"),
-                    atomic=True,
+                    atomic=atomic_upgrade,
                     kdu_model=kdu_model,
                     params=desc_params,
                     db_dict=db_dict,
@@ -7183,23 +7221,27 @@ class NsLcm(LcmBase):
                             primitive_name=terminate_config_primitive["name"],
                             params=primitive_params_,
                             db_dict=db_dict,
+                            total_timeout=self.timeout_primitive,
                             vca_id=vca_id,
                         ),
-                        timeout=600,
+                        timeout=self.timeout_primitive
+                        * self.timeout_primitive_outer_factor,
                     )
 
                 await asyncio.wait_for(
                     self.k8scluster_map[k8s_cluster_type].scale(
-                        kdu_instance,
-                        scale,
-                        kdu_scaling_info["resource-name"],
+                        kdu_instance=kdu_instance,
+                        scale=scale,
+                        resource_name=kdu_scaling_info["resource-name"],
+                        total_timeout=self.timeout_scale_on_error,
                         vca_id=vca_id,
                         cluster_uuid=cluster_uuid,
                         kdu_model=kdu_model,
                         atomic=True,
                         db_dict=db_dict,
                     ),
-                    timeout=self.timeout_vca_on_error,
+                    timeout=self.timeout_scale_on_error
+                    * self.timeout_scale_on_error_outer_factor,
                 )
 
             if kdu_scaling_info["type"] == "create":
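Note on the timeout changes: the new timeout_primitive_outer_factor and timeout_scale_on_error_outer_factor constants make the outer asyncio.wait_for deadline about 5% larger than the total_timeout handed to the wrapped K8s operation, so the inner call can hit its own timeout and report a meaningful error before the outer coroutine is cancelled. A minimal, self-contained sketch of that pattern follows; exec_primitive_stub is a hypothetical placeholder for the real k8scluster exec_primitive call, not code from ns.py:

    import asyncio

    timeout_primitive = 30 * 60            # budget given to the operation itself
    timeout_primitive_outer_factor = 1.05  # outer wait_for gets ~5% extra headroom

    async def exec_primitive_stub(total_timeout):
        # stands in for self.k8scluster_map[...].exec_primitive(..., total_timeout=...)
        await asyncio.sleep(1)
        return "COMPLETED"

    async def run():
        # the inner call owns the real timeout; the outer wait_for is only a safety net
        result = await asyncio.wait_for(
            exec_primitive_stub(total_timeout=timeout_primitive),
            timeout=timeout_primitive * timeout_primitive_outer_factor,
        )
        print(result)

    asyncio.run(run())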
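Note on the kdu_atomic_upgrade change: when the key is present in the KDU upgrade params, the helm upgrade is atomic only if its value is "yes", "true" or "1" (case-insensitive); the key is then removed so it is not forwarded to helm as a value override, and when the key is absent the upgrade stays atomic by default. A small sketch of the same decision logic, assuming desc_params is a plain dict; resolve_atomic_upgrade is a hypothetical helper, not part of ns.py:

    def resolve_atomic_upgrade(desc_params):
        # consume the flag so it is not passed through to the helm upgrade itself
        if desc_params.get("kdu_atomic_upgrade"):
            return desc_params.pop("kdu_atomic_upgrade").lower() in ("yes", "true", "1")
        return True  # default: keep the helm upgrade atomic

    print(resolve_atomic_upgrade({"kdu_atomic_upgrade": "no"}))   # False
    print(resolve_atomic_upgrade({"kdu_atomic_upgrade": "yes"}))  # True
    print(resolve_atomic_upgrade({}))                             # True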