find_software_version,
)
from osm_lcm.data_utils.list_utils import find_in_list
-from osm_lcm.data_utils.vnfr import get_osm_params, get_vdur_index, get_kdur
+from osm_lcm.data_utils.vnfr import (
+ get_osm_params,
+ get_vdur_index,
+ get_kdur,
+ get_volumes_from_instantiation_params,
+)
from osm_lcm.data_utils.dict_utils import parse_yaml_strings
from osm_lcm.data_utils.database.vim_account import VimAccountDB
from n2vc.definitions import RelationEndpoint
def _get_vdu_additional_params(self, db_vnfr, vdu_id):
vdur = next(
- (vdur for vdur in db_vnfr.get("vdur") if vdu_id == vdur["vdu-id-ref"]),
- {}
+ (vdur for vdur in db_vnfr.get("vdur") if vdu_id == vdur["vdu-id-ref"]), {}
)
additional_params = vdur.get("additionalParams")
return parse_yaml_strings(additional_params)
)
if not vdur:
# Read the template saved in the db:
- self.logger.debug(f"No vdur in the database. Using the vdur-template to scale")
+ self.logger.debug(
+ "No vdur in the database. Using the vdur-template to scale"
+ )
vdur_template = db_vnfr.get("vdur-template")
if not vdur_template:
raise LcmException(
- "Error scaling OUT VNFR for {}. No vnfr or template exists".format(
- vdu_id
+ "Error scaling OUT VNFR for {}. No vnfr or template exists".format(
+ vdu_id
)
)
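+        # Use the saved template entry as the base vdur for the new instances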
vdur = vdur_template[0]
- #Delete a template from the database after using it
- self.db.set_one("vnfrs",
- {"_id": db_vnfr["_id"]},
- None,
- pull={"vdur-template": {"_id": vdur['_id']}}
- )
+ # Delete a template from the database after using it
+ self.db.set_one(
+ "vnfrs",
+ {"_id": db_vnfr["_id"]},
+ None,
+ pull={"vdur-template": {"_id": vdur["_id"]}},
+ )
for count in range(vdu_count):
vdur_copy = deepcopy(vdur)
vdur_copy["status"] = "BUILD"
if vdu_delete:
if len(db_vnfr["vdur"]) == 1:
# The scale will move to 0 instances
- self.logger.debug(f"Scaling to 0 !, creating the template with the last vdur")
+ self.logger.debug(
+ "Scaling to 0 !, creating the template with the last vdur"
+ )
template_vdur = [db_vnfr["vdur"][0]]
for vdu_id, vdu_count in vdu_delete.items():
if mark_delete:
vdur["vim_info"] = {target_vim: {}}
# instantiation parameters
- # if vnf_params:
- # vdu_instantiation_params = next((v for v in get_iterable(vnf_params, "vdu") if v["id"] ==
- # vdud["id"]), None)
+ if vnf_params:
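+                # Find the instantiation parameters entry that matches this VDU id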
+ vdu_instantiation_params = find_in_list(
+ get_iterable(vnf_params, "vdu"),
+ lambda i_vdu: i_vdu["id"] == vdud["id"],
+ )
+ if vdu_instantiation_params:
+ # Parse the vdu_volumes from the instantiation params
+ vdu_volumes = get_volumes_from_instantiation_params(
+ vdu_instantiation_params, vdud
+ )
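+                        # Make the volumes available to the VDU through its OSM additional params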
+ vdur["additionalParams"]["OSM"]["vdu_volumes"] = vdu_volumes
vdur_list.append(vdur)
target_vnf["vdur"] = vdur_list
target["vnf"].append(target_vnf)
step = "Waiting to VM being up and getting IP address"
self.logger.debug(logging_text + step)
+        # Default rw_mgmt_ip to None so the variable is always defined
+ rw_mgmt_ip = None
+
# n2vc_redesign STEP 5.1
# wait for RO (ip-address) Insert pub_key into VM
if vnfr_id:
},
vca_id=vca_id,
)
- else:
+
+            # This verification is needed to avoid trying to add a public key to a VM
+            # when the VNF is a KNF (in the edge case where the user creates a VCA for a
+            # KNF and not for its KDUs, the previous verification gives False and the code
+            # jumps to this block, so we need to check whether the VNF is actually a VNF
+            # or a KNF)
+            elif db_vnfr.get("vdur"):
rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
logging_text,
nsr_id,
pub_key=pub_key,
)
- else:
- rw_mgmt_ip = None # This is for a NS configuration
-
self.logger.debug(logging_text + " VM_ip_address={}".format(rw_mgmt_ip))
# store rw_mgmt_ip in deploy params for later replacement
kdu_model=k8s_instance_info["kdu-model"],
kdu_name=k8s_instance_info["kdu-name"],
)
+
+ # Update the nsrs table with the kdu-instance value
self.update_db_2(
- "nsrs", nsr_id, {nsr_db_path + ".kdu-instance": kdu_instance}
+ item="nsrs",
+ _id=nsr_id,
+ _desc={nsr_db_path + ".kdu-instance": kdu_instance},
)
+
+            # Update the nsrs table with the actual namespace being used, if the k8scluster-type is `juju` or
+            # `juju-bundle`. This verification is needed because there is no standard/homogeneous namespace
+            # between Helm Chart and Juju Bundle-based KNFs. If we find a way of having a homogeneous
+            # namespace, this first verification could be removed, and the next step would be done for any kind
+            # of KNF.
+            # TODO -> find a way to have a homogeneous namespace between Helm Chart and Juju Bundle-based
+            # KNFs (Bug 2027: https://osm.etsi.org/bugzilla/show_bug.cgi?id=2027)
+ if k8sclustertype in ("juju", "juju-bundle"):
+                # First, verify if the current namespace is present in `_admin.projects_read` (if not, it means
+                # that the user passed a namespace in which they want the KDU to be deployed)
+ if (
+ self.db.count(
+ table="nsrs",
+ q_filter={
+ "_id": nsr_id,
+ "_admin.projects_write": k8s_instance_info["namespace"],
+ "_admin.projects_read": k8s_instance_info["namespace"],
+ },
+ )
+ > 0
+ ):
+ self.logger.debug(
+ f"Updating namespace/model for Juju Bundle from {k8s_instance_info['namespace']} to {kdu_instance}"
+ )
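+                    # Point the nsr record at the Juju model name (kdu_instance) actually used as namespace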
+ self.update_db_2(
+ item="nsrs",
+ _id=nsr_id,
+ _desc={f"{nsr_db_path}.namespace": kdu_instance},
+ )
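+                    # Keep the local instance info consistent with the database update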
+ k8s_instance_info["namespace"] = kdu_instance
+
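+            # Install the KDU on the target K8s cluster through the mapped connector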
await self.k8scluster_map[k8sclustertype].install(
cluster_uuid=k8s_instance_info["k8scluster-uuid"],
kdu_model=k8s_instance_info["kdu-model"],
kdu_instance=kdu_instance,
vca_id=vca_id,
)
- self.update_db_2(
- "nsrs", nsr_id, {nsr_db_path + ".kdu-instance": kdu_instance}
- )
# Obtain services to obtain management service ip
services = await self.k8scluster_map[k8sclustertype].get_services(
# There is new revision of VNFD, update operation is required
current_vnfd_path = vnfd_id + ":" + str(current_vnf_revision)
- latest_vnfd_path = vnfd_id
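+            # Build the latest VNFD path including its revision, matching the current path format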
+ latest_vnfd_path = vnfd_id + ":" + str(latest_vnfd_revision)
step = "Removing the VNFD packages if they exist in the local path"
shutil.rmtree(self.fs.path + current_vnfd_path, ignore_errors=True)
base_folder,
charm_name,
charm_type,
+ latest_vnfd_revision,
)
)
try:
# wait for any previous tasks in process
step = "Waiting for previous operations to terminate"
- await self.lcm_tasks.waitfor_related_HA('ns', 'nslcmops', nslcmop_id)
+ await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
self._write_ns_status(
nsr_id=nsr_id,
ns_state=None,
current_operation="MIGRATING",
- current_operation_id=nslcmop_id
+ current_operation_id=nslcmop_id,
)
step = "Getting nslcmop from database"
- self.logger.debug(step + " after having waited for previous tasks to be completed")
+ self.logger.debug(
+ step + " after having waited for previous tasks to be completed"
+ )
db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
migrate_params = db_nslcmop.get("operationParams")
exc = "Operation was cancelled"
except Exception as e:
exc = traceback.format_exc()
- self.logger.critical("Exit Exception {} {}".format(type(e).__name__, e), exc_info=True)
+ self.logger.critical(
+ "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True
+ )
finally:
self._write_ns_status(
nsr_id=nsr_id,
current_operation_id=None,
)
if exc:
- db_nslcmop_update[
- "detailed-status"
- ] = "FAILED {}: {}".format(step, exc)
+ db_nslcmop_update["detailed-status"] = "FAILED {}: {}".format(step, exc)
nslcmop_operation_state = "FAILED"
else:
nslcmop_operation_state = "COMPLETED"