X-Git-Url: https://osm.etsi.org/gitweb/?a=blobdiff_plain;f=osm_lcm%2Fns.py;h=8ef5f2acc05525cd78d41896dbe5331a9f0089c8;hb=refs%2Ftags%2Fv10.1.1rc2;hp=6df1c0af241a45d488faae88fc724b798703a7c1;hpb=1b9c6ab4759c4fe341a4ed5f103c5ec68e14e619;p=osm%2FLCM.git

diff --git a/osm_lcm/ns.py b/osm_lcm/ns.py
index 6df1c0a..8ef5f2a 100644
--- a/osm_lcm/ns.py
+++ b/osm_lcm/ns.py
@@ -70,11 +70,6 @@ from osm_common.fsbase import FsException
 from osm_lcm.data_utils.database.database import Database
 from osm_lcm.data_utils.filesystem.filesystem import Filesystem
 
-from osm_lcm.data_utils.wim import (
-    get_sdn_ports,
-    get_target_wim_attrs,
-    select_feasible_wim_account,
-)
 from n2vc.n2vc_juju_conn import N2VCJujuConnector
 from n2vc.exceptions import N2VCException, N2VCNotFound, K8sException
 
@@ -811,30 +806,9 @@ class NsLcm(LcmBase):
                     target_vld["vim_info"][target_sdn]["sdn-ports"] = vld_params[
                         "provider-network"
                     ]["sdn-ports"]
-
-            # check if WIM is needed; if needed, choose a feasible WIM able to connect VIMs
-            # if wim_account_id is specified in vld_params, validate if it is feasible.
-            wim_account_id, db_wim = select_feasible_wim_account(
-                db_nsr, db_vnfrs, target_vld, vld_params, self.logger
-            )
-
-            if wim_account_id:
-                # WIM is needed and a feasible one was found, populate WIM target and SDN ports
-                self.logger.info("WIM selected: {:s}".format(str(wim_account_id)))
-                # update vld_params with correct WIM account Id
-                vld_params["wimAccountId"] = wim_account_id
-
-                target_wim = "wim:{}".format(wim_account_id)
-                target_wim_attrs = get_target_wim_attrs(nsr_id, target_vld, vld_params)
-                sdn_ports = get_sdn_ports(vld_params, db_wim)
-                if len(sdn_ports) > 0:
-                    target_vld["vim_info"][target_wim] = target_wim_attrs
-                    target_vld["vim_info"][target_wim]["sdn-ports"] = sdn_ports
-
-                self.logger.debug(
-                    "Target VLD with WIM data: {:s}".format(str(target_vld))
-                )
-
+            if vld_params.get("wimAccountId"):
+                target_wim = "wim:{}".format(vld_params["wimAccountId"])
+                target_vld["vim_info"][target_wim] = {}
             for param in ("vim-network-name", "vim-network-id"):
                 if vld_params.get(param):
                     if isinstance(vld_params[param], dict):
@@ -2981,9 +2955,45 @@ class NsLcm(LcmBase):
                 kdu_model=k8s_instance_info["kdu-model"],
                 kdu_name=k8s_instance_info["kdu-name"],
             )
+
+            # Update the nsrs table with the kdu-instance value
             self.update_db_2(
-                "nsrs", nsr_id, {nsr_db_path + ".kdu-instance": kdu_instance}
+                item="nsrs",
+                _id=nsr_id,
+                _desc={nsr_db_path + ".kdu-instance": kdu_instance},
             )
+
+            # Update the nsrs table with the actual namespace being used, if the k8scluster-type is `juju` or
+            # `juju-bundle`. This verification is needed because there is not a standard/homogeneous namespace
+            # between the Helm Charts and Juju Bundles-based KNFs. If we found a way of having an homogeneous
+            # namespace, this first verification could be removed, and the next step would be done for any kind
+            # of KNF.
+            # TODO -> find a way to have an homogeneous namespace between the Helm Charts and Juju Bundles-based
+            # KNFs (Bug 2027: https://osm.etsi.org/bugzilla/show_bug.cgi?id=2027)
+            if k8sclustertype in ("juju", "juju-bundle"):
+                # First, verify if the current namespace is present in the `_admin.projects_read` (if not, it means
+                # that the user passed a namespace which he wants its KDU to be deployed in)
+                if (
+                    self.db.count(
+                        table="nsrs",
+                        q_filter={
+                            "_id": nsr_id,
+                            "_admin.projects_write": k8s_instance_info["namespace"],
+                            "_admin.projects_read": k8s_instance_info["namespace"],
+                        },
+                    )
+                    > 0
+                ):
+                    self.logger.debug(
+                        f"Updating namespace/model for Juju Bundle from {k8s_instance_info['namespace']} to {kdu_instance}"
+                    )
+                    self.update_db_2(
+                        item="nsrs",
+                        _id=nsr_id,
+                        _desc={f"{nsr_db_path}.namespace": kdu_instance},
+                    )
+                    k8s_instance_info["namespace"] = kdu_instance
+
             await self.k8scluster_map[k8sclustertype].install(
                 cluster_uuid=k8s_instance_info["k8scluster-uuid"],
                 kdu_model=k8s_instance_info["kdu-model"],
@@ -2996,9 +3006,6 @@ class NsLcm(LcmBase):
                 kdu_instance=kdu_instance,
                 vca_id=vca_id,
             )
-            self.update_db_2(
-                "nsrs", nsr_id, {nsr_db_path + ".kdu-instance": kdu_instance}
-            )
 
             # Obtain services to obtain management service ip
             services = await self.k8scluster_map[k8sclustertype].get_services(
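Reviewer note: the last two hunks implement one rule for Juju-based KDUs: when the stored namespace is just the project id found in the nsr's `_admin.projects_read`/`_admin.projects_write` (i.e. the user did not request a specific namespace), the deployment switches to a model named after the generated `kdu_instance` and that choice is persisted back into the `nsrs` record. The snippet below is a minimal, self-contained sketch of that rule for illustration only; `resolve_kdu_namespace` and `project_ids` are hypothetical stand-ins for the `self.db.count()` / `self.update_db_2()` calls in the patch and are not OSM APIs.

# Illustrative sketch of the namespace-override rule in the diff above (not OSM code).
# Assumption: `project_ids` plays the role of the nsr's _admin.projects_read /
# _admin.projects_write entries that the patch checks via self.db.count().

from typing import Iterable


def resolve_kdu_namespace(
    k8scluster_type: str,
    requested_namespace: str,
    kdu_instance: str,
    project_ids: Iterable[str],
) -> str:
    """Return the namespace/model a KDU should be deployed in.

    Juju-based KNFs get a model named after the kdu-instance unless the user
    explicitly asked for a namespace (i.e. the stored namespace is something
    other than the project id copied from _admin.projects_read/_admin.projects_write).
    """
    if k8scluster_type in ("juju", "juju-bundle") and requested_namespace in set(project_ids):
        # No user-provided namespace: follow the patch and reuse the kdu-instance name.
        return kdu_instance
    # Helm charts, or an explicit user namespace, keep the requested value.
    return requested_namespace


if __name__ == "__main__":
    # Default case: namespace equals the project id, so it is replaced.
    print(resolve_kdu_namespace("juju-bundle", "proj-1", "stable-openldap-0123", ["proj-1"]))
    # Explicit namespace requested by the user: kept as-is.
    print(resolve_kdu_namespace("juju-bundle", "my-namespace", "stable-openldap-0123", ["proj-1"]))
    # Helm-based KDU: namespace untouched.
    print(resolve_kdu_namespace("helm-chart-v3", "proj-1", "stable-openldap-0123", ["proj-1"]))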