populate_dict,
check_juju_bundle_existence,
get_charm_artifact_path,
+ get_ee_id_parts,
)
from osm_lcm.data_utils.nsd import (
get_ns_configuration_relation_list,
get_juju_ee_ref,
get_kdu_resource_profile,
find_software_version,
+ check_helm_ee_in_ns,
)
from osm_lcm.data_utils.list_utils import find_in_list
from osm_lcm.data_utils.vnfr import (
from osm_lcm.data_utils.database.database import Database
from osm_lcm.data_utils.filesystem.filesystem import Filesystem
+from osm_lcm.data_utils.wim import (
+ get_sdn_ports,
+ get_target_wim_attrs,
+ select_feasible_wim_account,
+)
from n2vc.n2vc_juju_conn import N2VCJujuConnector
from n2vc.exceptions import N2VCException, N2VCNotFound, K8sException
class NsLcm(LcmBase):
- timeout_vca_on_error = (
+ timeout_scale_on_error = (
5 * 60
    )  # Time from when a charm first reaches blocked/error status until it is marked as failed
+    timeout_scale_on_error_outer_factor = 1.05  # Multiplier applied to timeout_scale_on_error for the outer asyncio.wait_for timeout
    timeout_ns_deploy = 2 * 3600  # default global timeout for deploying a ns
    timeout_ns_terminate = 1800  # default global timeout for undeploying a ns
    timeout_ns_heal = 1800  # default global timeout for healing a ns
timeout_charm_delete = 10 * 60
- timeout_primitive = 30 * 60 # timeout for primitive execution
+ timeout_primitive = 30 * 60 # Timeout for primitive execution
+    timeout_primitive_outer_factor = 1.05  # Multiplier applied to timeout_primitive for the outer asyncio.wait_for timeout
timeout_ns_update = 30 * 60 # timeout for ns update
timeout_progress_primitive = (
10 * 60
) # timeout for some progress in a primitive execution
timeout_migrate = 1800 # default global timeout for migrating vnfs
    timeout_operate = 1800  # default global timeout for operating (start/stop/rebuild) vnfs
- timeout_verticalscale = 1800 # default global timeout for Vertical Sclaing
+    timeout_verticalscale = 1800  # default global timeout for Vertical Scaling
SUBOPERATION_STATUS_NOT_FOUND = -1
SUBOPERATION_STATUS_NEW = -2
SUBOPERATION_STATUS_SKIP = -3
def _parse_cloud_init(cloud_init_text, additional_params, vnfd_id, vdu_id):
try:
env = Environment(
- undefined=StrictUndefined,
- autoescape=select_autoescape(default_for_string=True, default=True),
- )
+ undefined=StrictUndefined,
+ autoescape=select_autoescape(default_for_string=True, default=True),
+ )
template = env.from_string(cloud_init_text)
return template.render(additional_params or {})
except UndefinedError as e:
target_vld["vim_info"][target_sdn]["sdn-ports"] = vld_params[
"provider-network"
]["sdn-ports"]
- if vld_params.get("wimAccountId"):
- target_wim = "wim:{}".format(vld_params["wimAccountId"])
- target_vld["vim_info"][target_wim] = {}
+
+ # check if WIM is needed; if needed, choose a feasible WIM able to connect VIMs
+ # if wim_account_id is specified in vld_params, validate if it is feasible.
+ wim_account_id, db_wim = select_feasible_wim_account(
+ db_nsr, db_vnfrs, target_vld, vld_params, self.logger
+ )
+
+ if wim_account_id:
+ # WIM is needed and a feasible one was found, populate WIM target and SDN ports
+ self.logger.info("WIM selected: {:s}".format(str(wim_account_id)))
+ # update vld_params with correct WIM account Id
+ vld_params["wimAccountId"] = wim_account_id
+
+ target_wim = "wim:{}".format(wim_account_id)
+ target_wim_attrs = get_target_wim_attrs(nsr_id, target_vld, vld_params)
+ sdn_ports = get_sdn_ports(vld_params, db_wim)
+ if len(sdn_ports) > 0:
+ target_vld["vim_info"][target_wim] = target_wim_attrs
+ target_vld["vim_info"][target_wim]["sdn-ports"] = sdn_ports
+
+ self.logger.debug(
+ "Target VLD with WIM data: {:s}".format(str(target_vld))
+ )
+
for param in ("vim-network-name", "vim-network-id"):
if vld_params.get(param):
if isinstance(vld_params[param], dict):
None,
)
vdur = next((vdur for vdur in target_vnf.get("vdur", ())), None)
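+            # nothing to do if the target VNF has no VDU records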
+ if not vdur:
+ return
for a_index, a_vld in enumerate(target["ns"]["vld"]):
target_vld = find_in_list(
get_iterable(vdur, "interfaces"),
self.logger.debug("RO return > {}".format(desc))
action_id = desc["action_id"]
await self._wait_ng_ro(
- nsr_id, action_id, nslcmop_id, start_deploy, timeout_ns_deploy, stage,
- operation="instantiation"
+ nsr_id,
+ action_id,
+ nslcmop_id,
+ start_deploy,
+ timeout_ns_deploy,
+ stage,
+ operation="instantiation",
)
# Updating NSR
# wait until done
delete_timeout = 20 * 60 # 20 minutes
await self._wait_ng_ro(
- nsr_id, action_id, nslcmop_id, start_deploy, delete_timeout, stage,
- operation="termination"
+ nsr_id,
+ action_id,
+ nslcmop_id,
+ start_deploy,
+ delete_timeout,
+ stage,
+ operation="termination",
)
db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
}
desc = await self.RO.deploy(nsr_id, target)
action_id = desc["action_id"]
- await self._wait_ng_ro(nsr_id, action_id, timeout=600, operation="instantiation")
+ await self._wait_ng_ro(
+ nsr_id, action_id, timeout=600, operation="instantiation"
+ )
break
else:
# wait until NS is deployed at RO
db_dict=db_dict,
config=osm_config,
artifact_path=artifact_path,
+ chart_model=vca_name,
vca_type=vca_type,
)
else:
            # for a KNF (and not for its KDUs) the previous verification gives False, so the code
            # jumps to this block; it is then necessary to verify whether the descriptor is
            # actually a VNF or a KNF
- elif db_vnfr.get('vdur'):
+ elif db_vnfr.get("vdur"):
rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
logging_text,
nsr_id,
# STEP 7 Configure metrics
if vca_type == "helm" or vca_type == "helm-v3":
+ # TODO: review for those cases where the helm chart is a reference and
+ # is not part of the NF package
prometheus_jobs = await self.extract_prometheus_scrape_jobs(
ee_id=ee_id,
artifact_path=artifact_path,
stage[1] = "Deploying Execution Environments."
self.logger.debug(logging_text + stage[1])
+ # create namespace and certificate if any helm based EE is present in the NS
+ if check_helm_ee_in_ns(db_vnfds):
+ # TODO: create EE namespace
+ # create TLS certificates
+ await self.vca_map["helm-v3"].create_tls_certificate(
+ secret_name="ee-tls-{}".format(nsr_id),
+ dns_prefix="*",
+ nsr_id=nsr_id,
+ usage="server auth",
+ )
+
nsi_id = None # TODO put nsi_id when this nsr belongs to a NSI
for vnf_profile in get_vnf_profiles(nsd):
vnfd_id = vnf_profile["vnfd-id"]
# task_delete_ee = asyncio.ensure_future(self.n2vc.delete_namespace(namespace="." + nsr_id))
tasks_dict_info[task_delete_ee] = "Terminating all VCA"
+ # Delete Namespace and Certificates if necessary
+ if check_helm_ee_in_ns(list(db_vnfds_from_member_index.values())):
+ await self.vca_map["helm-v3"].delete_tls_certificate(
+ certificate_name=db_nslcmop["nsInstanceId"],
+ )
+ # TODO: Delete namespace
+
# Delete from k8scluster
stage[1] = "Deleting KDUs."
self.logger.debug(logging_text + stage[1])
await asyncio.sleep(retries_interval, loop=self.loop)
else:
if isinstance(e, asyncio.TimeoutError):
- e = N2VCException(message="Timed out waiting for action to complete")
- return "FAILED", getattr(e, 'message', repr(e))
+ e = N2VCException(
+ message="Timed out waiting for action to complete"
+ )
+ return "FAILED", getattr(e, "message", repr(e))
return "COMPLETED", output
parts = kdu_model.split(sep=":")
if len(parts) == 2:
kdu_model = parts[0]
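+                # optional "kdu_atomic_upgrade" parameter controls whether the KDU upgrade runs atomically (defaults to True)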
+ if desc_params.get("kdu_atomic_upgrade"):
+ atomic_upgrade = desc_params.get(
+ "kdu_atomic_upgrade"
+ ).lower() in ("yes", "true", "1")
+ del desc_params["kdu_atomic_upgrade"]
+ else:
+ atomic_upgrade = True
detailed_status = await asyncio.wait_for(
self.k8scluster_map[kdu["k8scluster-type"]].upgrade(
cluster_uuid=kdu.get("k8scluster-uuid"),
kdu_instance=kdu.get("kdu-instance"),
- atomic=True,
+ atomic=atomic_upgrade,
kdu_model=kdu_model,
params=desc_params,
db_dict=db_dict,
"member-vnf-index": member_vnf_index,
"type": "delete",
"vdu_index": count_index,
- })
+ }
+ )
scaling_info["vdu-delete"][vdu["vdu-id-ref"]] = count_index
scaling_info["vdu"].append(
{
"name": vdu.get("name") or vdu.get("vdu-name"),
"vdu_id": vdu["vdu-id-ref"],
"interface": [],
- })
+ }
+ )
for interface in vdu["interfaces"]:
scaling_info["vdu"][index]["interface"].append(
{
"name": interface["name"],
"ip_address": interface["ip-address"],
"mac_address": interface.get("mac-address"),
- })
+ }
+ )
self.logger.info("NS update scaling info{}".format(scaling_info))
stage[2] = "Terminating VDUs"
if scaling_info.get("vdu-delete"):
# scale_process = "RO"
if self.ro_config.get("ng"):
await self._scale_ng_ro(
- logging_text, db_nsr, update_db_nslcmops, db_vnfr, scaling_info, stage
+ logging_text,
+ db_nsr,
+ update_db_nslcmops,
+ db_vnfr,
+ scaling_info,
+ stage,
)
- async def remove_vnf(
- self, nsr_id, nslcmop_id, vnf_instance_id
- ):
+ async def remove_vnf(self, nsr_id, nslcmop_id, vnf_instance_id):
"""This method is to Remove VNF instances from NS.
Args:
if check_vnfr_count > 1:
stage = ["", "", ""]
step = "Getting nslcmop from database"
- self.logger.debug(step + " after having waited for previous tasks to be completed")
+ self.logger.debug(
+ step + " after having waited for previous tasks to be completed"
+ )
# db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_instance_id})
"vnfrs", {"member-vnf-index-ref": member_vnf_index, "nsr-id-ref": nsr_id}) """
update_db_nslcmops = self.db.get_one("nslcmops", {"_id": nslcmop_id})
- await self.terminate_vdus(db_vnfr, member_vnf_index, db_nsr, update_db_nslcmops, stage, logging_text)
+ await self.terminate_vdus(
+ db_vnfr,
+ member_vnf_index,
+ db_nsr,
+ update_db_nslcmops,
+ stage,
+ logging_text,
+ )
constituent_vnfr = db_nsr.get("constituent-vnfr-ref")
constituent_vnfr.remove(db_vnfr.get("_id"))
- db_nsr_update["constituent-vnfr-ref"] = db_nsr.get("constituent-vnfr-ref")
+ db_nsr_update["constituent-vnfr-ref"] = db_nsr.get(
+ "constituent-vnfr-ref"
+ )
self.update_db_2("nsrs", nsr_id, db_nsr_update)
self.db.del_one("vnfrs", {"_id": db_vnfr.get("_id")})
self.update_db_2("nsrs", nsr_id, db_nsr_update)
return "COMPLETED", "Done"
else:
step = "Terminate VNF Failed with"
- raise LcmException("{} Cannot terminate the last VNF in this NS.".format(
- vnf_instance_id))
+ raise LcmException(
+ "{} Cannot terminate the last VNF in this NS.".format(
+ vnf_instance_id
+ )
+ )
except (LcmException, asyncio.CancelledError):
raise
except Exception as e:
return "FAILED", "Error removing VNF {}".format(e)
async def _ns_redeploy_vnf(
- self, nsr_id, nslcmop_id, db_vnfd, db_vnfr, db_nsr,
+ self,
+ nsr_id,
+ nslcmop_id,
+ db_vnfd,
+ db_vnfr,
+ db_nsr,
):
"""This method updates and redeploys VNF instances
# Terminate old VNF resources
update_db_nslcmops = self.db.get_one("nslcmops", {"_id": nslcmop_id})
- await self.terminate_vdus(db_vnfr, member_vnf_index, db_nsr, update_db_nslcmops, stage, logging_text)
+ await self.terminate_vdus(
+ db_vnfr,
+ member_vnf_index,
+ db_nsr,
+ update_db_nslcmops,
+ stage,
+ logging_text,
+ )
# old_vnfd_id = db_vnfr["vnfd-id"]
# new_db_vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
new_vdur = update_db_nslcmops["operationParams"]["newVdur"]
# new_vdur = self._create_vdur_descriptor_from_vnfd(db_nsd, db_vnfd, old_db_vnfd, vnfd_id, db_nsr, member_vnf_index)
# new_vnfr_update = {"vnfd-ref": new_vnfd_ref, "vnfd-id": new_vnfd_id, "connection-point": new_vnfr_cp, "vdur": new_vdur, "ip-address": ""}
- new_vnfr_update = {"revision": latest_vnfd_revision, "connection-point": new_vnfr_cp, "vdur": new_vdur, "ip-address": ""}
+ new_vnfr_update = {
+ "revision": latest_vnfd_revision,
+ "connection-point": new_vnfr_cp,
+ "vdur": new_vdur,
+ "ip-address": "",
+ }
self.update_db_2("vnfrs", db_vnfr["_id"], new_vnfr_update)
updated_db_vnfr = self.db.get_one(
- "vnfrs", {"member-vnf-index-ref": member_vnf_index, "nsr-id-ref": nsr_id}
+ "vnfrs",
+ {"member-vnf-index-ref": member_vnf_index, "nsr-id-ref": nsr_id},
)
# Instantiate new VNF resources
scaling_info["kdu-create"] = {}
vdud_instantiate_list = db_vnfd["vdu"]
for index, vdud in enumerate(vdud_instantiate_list):
- cloud_init_text = self._get_vdu_cloud_init_content(
- vdud, db_vnfd
- )
+ cloud_init_text = self._get_vdu_cloud_init_content(vdud, db_vnfd)
if cloud_init_text:
additional_params = (
self._get_vdu_additional_params(updated_db_vnfr, vdud["id"])
scaling_info["vdu-create"][vdud["id"]] = count_index
if self.ro_config.get("ng"):
self.logger.debug(
- "New Resources to be deployed: {}".format(scaling_info))
+ "New Resources to be deployed: {}".format(scaling_info)
+ )
await self._scale_ng_ro(
- logging_text, db_nsr, update_db_nslcmops, updated_db_vnfr, scaling_info, stage
+ logging_text,
+ db_nsr,
+ update_db_nslcmops,
+ updated_db_vnfr,
+ scaling_info,
+ stage,
)
return "COMPLETED", "Done"
except (LcmException, asyncio.CancelledError):
current_charm_artifact_path,
target_charm_artifact_path,
charm_artifact_paths,
- ) = ([], [], [])
+ helm_artifacts,
+ ) = ([], [], [], [])
step = "Checking if revision has changed in VNFD"
if current_vnf_revision != latest_vnfd_revision:
step = (
"Get the charm-type, charm-id, ee-id if there is deployed VCA"
)
- base_folder = latest_vnfd["_admin"]["storage"]
+ current_base_folder = current_vnfd["_admin"]["storage"]
+ latest_base_folder = latest_vnfd["_admin"]["storage"]
- for charm_index, charm_deployed in enumerate(
+ for vca_index, vca_deployed in enumerate(
get_iterable(nsr_deployed, "VCA")
):
vnf_index = db_vnfr.get("member-vnf-index-ref")
# Getting charm-id and charm-type
- if charm_deployed.get("member-vnf-index") == vnf_index:
- charm_id = self.get_vca_id(db_vnfr, db_nsr)
- charm_type = charm_deployed.get("type")
+ if vca_deployed.get("member-vnf-index") == vnf_index:
+ vca_id = self.get_vca_id(db_vnfr, db_nsr)
+ vca_type = vca_deployed.get("type")
+ vdu_count_index = vca_deployed.get("vdu_count_index")
# Getting ee-id
- ee_id = charm_deployed.get("ee_id")
+ ee_id = vca_deployed.get("ee_id")
step = "Getting descriptor config"
descriptor_config = get_configuration(
step = "Setting Charm artifact paths"
current_charm_artifact_path.append(
get_charm_artifact_path(
- base_folder,
+ current_base_folder,
charm_name,
- charm_type,
+ vca_type,
current_vnf_revision,
)
)
target_charm_artifact_path.append(
get_charm_artifact_path(
- base_folder,
+ latest_base_folder,
charm_name,
- charm_type,
+ vca_type,
latest_vnfd_revision,
)
)
+ elif ee_item.get("helm-chart"):
+ # add chart to list and all parameters
+ step = "Getting helm chart name"
+ chart_name = ee_item.get("helm-chart")
+ if (
+ ee_item.get("helm-version")
+ and ee_item.get("helm-version") == "v2"
+ ):
+ vca_type = "helm"
+ else:
+ vca_type = "helm-v3"
+ step = "Setting Helm chart artifact paths"
+
+ helm_artifacts.append(
+ {
+ "current_artifact_path": get_charm_artifact_path(
+ current_base_folder,
+ chart_name,
+ vca_type,
+ current_vnf_revision,
+ ),
+ "target_artifact_path": get_charm_artifact_path(
+ latest_base_folder,
+ chart_name,
+ vca_type,
+ latest_vnfd_revision,
+ ),
+ "ee_id": ee_id,
+ "vca_index": vca_index,
+ "vdu_index": vdu_count_index,
+ }
+ )
charm_artifact_paths = zip(
current_charm_artifact_path, target_charm_artifact_path
# based on new descriptor
step = "Redeploying VNF"
member_vnf_index = db_vnfr["member-vnf-index-ref"]
- (
- result,
- detailed_status
- ) = await self._ns_redeploy_vnf(
- nsr_id,
- nslcmop_id,
- latest_vnfd,
- db_vnfr,
- db_nsr
+ (result, detailed_status) = await self._ns_redeploy_vnf(
+ nsr_id, nslcmop_id, latest_vnfd, db_vnfr, db_nsr
)
if result == "FAILED":
nslcmop_operation_state = result
detailed_status,
) = await self._ns_charm_upgrade(
ee_id=ee_id,
- charm_id=charm_id,
- charm_type=charm_type,
+ charm_id=vca_id,
+ charm_type=vca_type,
path=self.fs.path + target_charm_path,
timeout=timeout_seconds,
)
detailed_status = "Done"
db_nslcmop_update["detailed-status"] = "Done"
+ # helm base EE
+ for item in helm_artifacts:
+ if not (
+ item["current_artifact_path"]
+ and item["target_artifact_path"]
+ and self.check_charm_hash_changed(
+ item["current_artifact_path"],
+ item["target_artifact_path"],
+ )
+ ):
+ continue
+ db_update_entry = "_admin.deployed.VCA.{}.".format(
+ item["vca_index"]
+ )
+ vnfr_id = db_vnfr["_id"]
+ osm_config = {"osm": {"ns_id": nsr_id, "vnf_id": vnfr_id}}
+ db_dict = {
+ "collection": "nsrs",
+ "filter": {"_id": nsr_id},
+ "path": db_update_entry,
+ }
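+                        # split the stored ee_id into its (vca_type, namespace, helm_id) components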
+ vca_type, namespace, helm_id = get_ee_id_parts(item["ee_id"])
+ await self.vca_map[vca_type].upgrade_execution_environment(
+ namespace=namespace,
+ helm_id=helm_id,
+ db_dict=db_dict,
+ config=osm_config,
+ artifact_path=item["target_artifact_path"],
+ vca_type=vca_type,
+ )
+ vnf_id = db_vnfr.get("vnfd-ref")
+ config_descriptor = get_configuration(latest_vnfd, vnf_id)
+ self.logger.debug("get ssh key block")
+ rw_mgmt_ip = None
+ if deep_get(
+ config_descriptor,
+ ("config-access", "ssh-access", "required"),
+ ):
+ # Needed to inject a ssh key
+ user = deep_get(
+ config_descriptor,
+ ("config-access", "ssh-access", "default-user"),
+ )
+ step = (
+ "Install configuration Software, getting public ssh key"
+ )
+ pub_key = await self.vca_map[
+ vca_type
+ ].get_ee_ssh_public__key(
+ ee_id=ee_id, db_dict=db_dict, vca_id=vca_id
+ )
+
+ step = (
+ "Insert public key into VM user={} ssh_key={}".format(
+ user, pub_key
+ )
+ )
+ self.logger.debug(logging_text + step)
+
+ # wait for RO (ip-address) Insert pub_key into VM
+ rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
+ logging_text,
+ nsr_id,
+ vnfr_id,
+ None,
+ item["vdu_index"],
+ user=user,
+ pub_key=pub_key,
+ )
+
+ initial_config_primitive_list = config_descriptor.get(
+ "initial-config-primitive"
+ )
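+                    # look up the initial "config" primitive so it can be re-run against the upgraded EE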
+ config_primitive = next(
+ (
+ p
+ for p in initial_config_primitive_list
+ if p["name"] == "config"
+ ),
+ None,
+ )
+ if not config_primitive:
+ continue
+
+ deploy_params = {"OSM": get_osm_params(db_vnfr)}
+ if rw_mgmt_ip:
+ deploy_params["rw_mgmt_ip"] = rw_mgmt_ip
+ if db_vnfr.get("additionalParamsForVnf"):
+ deploy_params.update(
+ parse_yaml_strings(
+ db_vnfr["additionalParamsForVnf"].copy()
+ )
+ )
+ primitive_params_ = self._map_primitive_params(
+ config_primitive, {}, deploy_params
+ )
+
+ step = "execute primitive '{}' params '{}'".format(
+ config_primitive["name"], primitive_params_
+ )
+ self.logger.debug(logging_text + step)
+ await self.vca_map[vca_type].exec_primitive(
+ ee_id=ee_id,
+ primitive_name=config_primitive["name"],
+ params_dict=primitive_params_,
+ db_dict=db_dict,
+ vca_id=vca_id,
+ vca_type=vca_type,
+ )
+
+ step = "Updating policies"
+ member_vnf_index = db_vnfr["member-vnf-index-ref"]
+ detailed_status = "Done"
+ db_nslcmop_update["detailed-status"] = "Done"
+
        # If nslcmop_operation_state is None, no operation has failed.
if not nslcmop_operation_state:
nslcmop_operation_state = "COMPLETED"
db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_instance_id})
member_vnf_index = db_vnfr["member-vnf-index-ref"]
step = "Removing VNF"
- (result, detailed_status) = await self.remove_vnf(nsr_id, nslcmop_id, vnf_instance_id)
+ (result, detailed_status) = await self.remove_vnf(
+ nsr_id, nslcmop_id, vnf_instance_id
+ )
if result == "FAILED":
nslcmop_operation_state = result
error_description_nslcmop = detailed_status
)
elif update_type == "OPERATE_VNF":
- vnf_id = db_nslcmop["operationParams"]["operateVnfData"]["vnfInstanceId"]
- operation_type = db_nslcmop["operationParams"]["operateVnfData"]["changeStateTo"]
- additional_param = db_nslcmop["operationParams"]["operateVnfData"]["additionalParam"]
+ vnf_id = db_nslcmop["operationParams"]["operateVnfData"][
+ "vnfInstanceId"
+ ]
+ operation_type = db_nslcmop["operationParams"]["operateVnfData"][
+ "changeStateTo"
+ ]
+ additional_param = db_nslcmop["operationParams"]["operateVnfData"][
+ "additionalParam"
+ ]
(result, detailed_status) = await self.rebuild_start_stop(
nsr_id, nslcmop_id, vnf_id, additional_param, operation_type
- )
+ )
if result == "FAILED":
nslcmop_operation_state = result
error_description_nslcmop = detailed_status
primitive_name=terminate_config_primitive["name"],
params=primitive_params_,
db_dict=db_dict,
+ total_timeout=self.timeout_primitive,
vca_id=vca_id,
),
- timeout=600,
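+                    # give the outer asyncio.wait_for slightly more time than the EE-side total_timeout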
+ timeout=self.timeout_primitive
+ * self.timeout_primitive_outer_factor,
)
await asyncio.wait_for(
self.k8scluster_map[k8s_cluster_type].scale(
- kdu_instance,
- scale,
- kdu_scaling_info["resource-name"],
+ kdu_instance=kdu_instance,
+ scale=scale,
+ resource_name=kdu_scaling_info["resource-name"],
+ total_timeout=self.timeout_scale_on_error,
vca_id=vca_id,
cluster_uuid=cluster_uuid,
kdu_model=kdu_model,
atomic=True,
db_dict=db_dict,
),
- timeout=self.timeout_vca_on_error,
+ timeout=self.timeout_scale_on_error
+ * self.timeout_scale_on_error_outer_factor,
)
if kdu_scaling_info["type"] == "create":
job["vnfr_id"] = vnfr_id
return job_list
- async def rebuild_start_stop(self, nsr_id, nslcmop_id, vnf_id, additional_param, operation_type):
+ async def rebuild_start_stop(
+ self, nsr_id, nslcmop_id, vnf_id, additional_param, operation_type
+ ):
logging_text = "Task ns={} {}={} ".format(nsr_id, operation_type, nslcmop_id)
self.logger.info(logging_text + "Enter")
stage = ["Preparing the environment", ""]
vdurs = [item for item in db_vnfr["vdur"] if item["vdu-id-ref"] == vdu_id]
vdur = find_in_list(
vdurs, lambda vdu: vdu["count-index"] == additional_param["count-index"]
- )
+ )
if vdur:
vdu_vim_name = vdur["name"]
vim_vm_id = vdur["vim_info"][vim_info_key]["vim_id"]
# wait for any previous tasks in process
stage[1] = "Waiting for previous operations to terminate"
self.logger.info(stage[1])
- await self.lcm_tasks.waitfor_related_HA('ns', 'nslcmops', nslcmop_id)
+ await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
stage[1] = "Reading from database."
self.logger.info(stage[1])
nsr_id=nsr_id,
ns_state=None,
current_operation=operation_type.upper(),
- current_operation_id=nslcmop_id
+ current_operation_id=nslcmop_id,
)
self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)
"vdu_index": additional_param["count-index"],
"vdu_id": vdur["id"],
"target_vim": target_vim,
- "vim_account_id": vim_account_id
+ "vim_account_id": vim_account_id,
}
}
stage[1] = "Sending rebuild request to RO... {}".format(desc)
self.logger.info("response from RO: {}".format(result_dict))
action_id = result_dict["action_id"]
await self._wait_ng_ro(
- nsr_id, action_id, nslcmop_id, start_deploy,
- self.timeout_operate, None, "start_stop_rebuild",
+ nsr_id,
+ action_id,
+ nslcmop_id,
+ start_deploy,
+ self.timeout_operate,
+ None,
+ "start_stop_rebuild",
)
return "COMPLETED", "Done"
except (ROclient.ROClientException, DbException, LcmException) as e:
exc = "Operation was cancelled"
except Exception as e:
exc = traceback.format_exc()
- self.logger.critical("Exit Exception {} {}".format(type(e).__name__, e), exc_info=True)
+ self.logger.critical(
+ "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True
+ )
return "FAILED", "Error in operate VNF {}".format(exc)
def get_vca_cloud_and_credentials(self, vim_account_id: str) -> (str, str):
self.logger.debug("RO return > {}".format(desc))
action_id = desc["action_id"]
await self._wait_ng_ro(
- nsr_id, action_id, nslcmop_id, start_deploy, self.timeout_migrate,
- operation="migrate"
+ nsr_id,
+ action_id,
+ nslcmop_id,
+ start_deploy,
+ self.timeout_migrate,
+ operation="migrate",
)
except (ROclient.ROClientException, DbException, LcmException) as e:
self.logger.error("Exit Exception {}".format(e))
self.logger.debug(logging_text + "Exit")
self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_migrate")
-
async def heal(self, nsr_id, nslcmop_id):
"""
Heal NS
self.update_db_2("nsrs", nsr_id, db_nsr_update)
step = "Sending heal order to VIM"
- task_ro = asyncio.ensure_future(
- self.heal_RO(
- logging_text=logging_text,
- nsr_id=nsr_id,
- db_nslcmop=db_nslcmop,
- stage=stage,
- )
+ await self.heal_RO(
+ logging_text=logging_text,
+ nsr_id=nsr_id,
+ db_nslcmop=db_nslcmop,
+ stage=stage,
)
- self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "heal_RO", task_ro)
- tasks_dict_info[task_ro] = "Healing at VIM"
-
# VCA tasks
# read from db: nsd
stage[1] = "Getting nsd={} from db.".format(db_nsr["nsd-id"])
member_vnf_index = db_vnfr.get("member-vnf-index-ref")
# Check each target VDU and deploy N2VC
- for target_vdu in target_vnf["additionalParams"].get("vdu", None):
+ target_vdu_list = target_vnf.get("additionalParams", {}).get(
+ "vdu", []
+ )
+ if not target_vdu_list:
+                    # build the list of VDUs to heal from the existing VDU records
+ target_vdu_list = []
+                    for existing_vdu in db_vnfr.get("vdur", []):
+ vdu_name = existing_vdu.get("vdu-name", None)
+ vdu_index = existing_vdu.get("count-index", 0)
+ vdu_run_day1 = target_vnf.get("additionalParams", {}).get(
+ "run-day1", False
+ )
+ vdu_to_be_healed = {
+ "vdu-id": vdu_name,
+ "count-index": vdu_index,
+ "run-day1": vdu_run_day1,
+ }
+ target_vdu_list.append(vdu_to_be_healed)
+ for target_vdu in target_vdu_list:
deploy_params_vdu = target_vdu
                    # Set the vnf-level run-day1 value if no vdu-level value exists
- if not deploy_params_vdu.get("run-day1") and target_vnf["additionalParams"].get("run-day1"):
- deploy_params_vdu["run-day1"] = target_vnf["additionalParams"].get("run-day1")
+                    if not deploy_params_vdu.get("run-day1") and target_vnf.get(
+                        "additionalParams", {}
+                    ).get("run-day1"):
+                        deploy_params_vdu["run-day1"] = target_vnf.get(
+                            "additionalParams", {}
+                        ).get("run-day1")
vdu_name = target_vdu.get("vdu-id", None)
# TODO: Get vdu_id from vdud.
vdu_id = vdu_name
                            # For multi-instance VDUs count-index is mandatory
                            # For single-instance VDUs count-index defaults to 0
- vdu_index = target_vdu.get("count-index",0)
+ vdu_index = target_vdu.get("count-index", 0)
# n2vc_redesign STEP 3 to 6 Deploy N2VC
stage[1] = "Deploying Execution Environments."
vnf_ip_address = db_vnfr.get("ip-address")
target_instance = None
for instance in db_vnfr.get("vdur", None):
- if ( instance["vdu-name"] == vdu_name and instance["count-index"] == vdu_index ):
+ if (
+ instance["vdu-name"] == vdu_name
+ and instance["count-index"] == vdu_index
+ ):
target_instance = instance
break
if vnf_ip_address == target_instance.get("ip-address"):
self._heal_n2vc(
- logging_text=logging_text
- + "member_vnf_index={}, vdu_name={}, vdu_index={} ".format(
- member_vnf_index, vdu_name, vdu_index
- ),
- db_nsr=db_nsr,
- db_vnfr=db_vnfr,
- nslcmop_id=nslcmop_id,
- nsr_id=nsr_id,
- nsi_id=nsi_id,
- vnfd_id=vnfd_ref,
- vdu_id=None,
- kdu_name=None,
- member_vnf_index=member_vnf_index,
- vdu_index=0,
- vdu_name=None,
- deploy_params=deploy_params_vdu,
- descriptor_config=descriptor_config,
- base_folder=base_folder,
- task_instantiation_info=tasks_dict_info,
- stage=stage,
- )
+ logging_text=logging_text
+ + "member_vnf_index={}, vdu_name={}, vdu_index={} ".format(
+ member_vnf_index, vdu_name, vdu_index
+ ),
+ db_nsr=db_nsr,
+ db_vnfr=db_vnfr,
+ nslcmop_id=nslcmop_id,
+ nsr_id=nsr_id,
+ nsi_id=nsi_id,
+ vnfd_id=vnfd_ref,
+ vdu_id=None,
+ kdu_name=None,
+ member_vnf_index=member_vnf_index,
+ vdu_index=0,
+ vdu_name=None,
+ deploy_params=deploy_params_vdu,
+ descriptor_config=descriptor_config,
+ base_folder=base_folder,
+ task_instantiation_info=tasks_dict_info,
+ stage=stage,
+ )
# VDU Level charm. Normal case with native charms.
descriptor_config = get_configuration(vnfd, vdu_name)
db_nsr_update["config-status"] = old_config_status
db_nsr_update[
"detailed-status"
- ] = "FAILED healing nslcmop={} {}: {}".format(
- nslcmop_id, step, exc
- )
+ ] = "FAILED healing nslcmop={} {}: {}".format(nslcmop_id, step, exc)
for task, task_name in tasks_dict_info.items():
if not task.done() or task.cancelled() or task.exception():
if task_name.startswith(self.task_name_deploy_vca):
:param stage: list with 3 items: [general stage, tasks, vim_specific]. This task will write over vim_specific
:return: None or exception
"""
+
def get_vim_account(vim_account_id):
nonlocal db_vims
if vim_account_id in db_vims:
if ns_params and ns_params.get("timeout_ns_heal"):
timeout_ns_heal = ns_params["timeout_ns_heal"]
else:
- timeout_ns_heal = self.timeout.get(
- "ns_heal", self.timeout_ns_heal
- )
+ timeout_ns_heal = self.timeout.get("ns_heal", self.timeout_ns_heal)
db_vims = {}
target = {
"action_id": nslcmop_id,
}
- self.logger.warning("db_nslcmop={} and timeout_ns_heal={}".format(db_nslcmop,timeout_ns_heal))
+ self.logger.warning(
+ "db_nslcmop={} and timeout_ns_heal={}".format(
+ db_nslcmop, timeout_ns_heal
+ )
+ )
target.update(db_nslcmop.get("operationParams", {}))
self.logger.debug("Send to RO > nsr_id={} target={}".format(nsr_id, target))
action_id = desc["action_id"]
        # wait for RO to complete; otherwise re-injecting the juju key at RO may find the VM in state Deleted
await self._wait_ng_ro(
- nsr_id, action_id, nslcmop_id, start_heal, timeout_ns_heal, stage,
- operation="healing"
+ nsr_id,
+ action_id,
+ nslcmop_id,
+ start_heal,
+ timeout_ns_heal,
+ stage,
+ operation="healing",
)
# Updating NSR
except Exception as e:
stage[2] = "ERROR healing at VIM"
- #self.set_vnfr_at_error(db_vnfrs, str(e))
+ # self.set_vnfr_at_error(db_vnfrs, str(e))
self.logger.error(
"Error healing at VIM {}".format(e),
exc_info=not isinstance(
status="INSTALLING SW",
element_under_configuration=element_under_configuration,
element_type=element_type,
- #other_update=db_nsr_update,
+ # other_update=db_nsr_update,
other_update=None,
)
# n2vc_redesign STEP 5.1
# wait for RO (ip-address) Insert pub_key into VM
            # IMPORTANT: We need to wait for RO to complete the healing operation.
- await self._wait_heal_ro(nsr_id,self.timeout_ns_heal)
+ await self._wait_heal_ro(nsr_id, self.timeout_ns_heal)
if vnfr_id:
if kdu_name:
rw_mgmt_ip = await self.wait_kdu_up(
# Day1 operations.
# get run-day1 operation parameter
- runDay1 = deploy_params.get("run-day1",False)
- self.logger.debug(" Healing vnf={}, vdu={}, runDay1 ={}".format(vnfr_id,vdu_id,runDay1))
+ runDay1 = deploy_params.get("run-day1", False)
+ self.logger.debug(
+ "Healing vnf={}, vdu={}, runDay1 ={}".format(vnfr_id, vdu_id, runDay1)
+ )
if runDay1:
# n2vc_redesign STEP 6 Execute initial config primitive
step = "execute initial config primitive"
# wait for dependent primitives execution (NS -> VNF -> VDU)
if initial_config_primitive_list:
- await self._wait_dependent_n2vc(nsr_id, vca_deployed_list, vca_index)
+ await self._wait_dependent_n2vc(
+ nsr_id, vca_deployed_list, vca_index
+ )
# stage, in function of element type: vdu, kdu, vnf or ns
my_vca = vca_deployed_list[vca_index]
if check_if_terminated_needed:
if config_descriptor.get("terminate-config-primitive"):
self.update_db_2(
- "nsrs", nsr_id, {db_update_entry + "needed_terminate": True}
+ "nsrs",
+ nsr_id,
+ {db_update_entry + "needed_terminate": True},
)
check_if_terminated_needed = False
start_time = time()
while time() <= start_time + timeout:
db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
- operational_status_ro = db_nsr["_admin"]["deployed"]["RO"]["operational-status"]
+ operational_status_ro = db_nsr["_admin"]["deployed"]["RO"][
+ "operational-status"
+ ]
self.logger.debug("Wait Heal RO > {}".format(operational_status_ro))
if operational_status_ro != "healing":
break
try:
# wait for any previous tasks in process
step = "Waiting for previous operations to terminate"
- await self.lcm_tasks.waitfor_related_HA('ns', 'nslcmops', nslcmop_id)
+ await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
self._write_ns_status(
nsr_id=nsr_id,
ns_state=None,
current_operation="VerticalScale",
- current_operation_id=nslcmop_id
+ current_operation_id=nslcmop_id,
)
step = "Getting nslcmop from database"
- self.logger.debug(step + " after having waited for previous tasks to be completed")
+ self.logger.debug(
+ step + " after having waited for previous tasks to be completed"
+ )
db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
operationParams = db_nslcmop.get("operationParams")
target = {}
self.logger.debug("RO return > {}".format(desc))
action_id = desc["action_id"]
await self._wait_ng_ro(
- nsr_id, action_id, nslcmop_id, start_deploy, self.timeout_verticalscale,
- operation="verticalscale"
+ nsr_id,
+ action_id,
+ nslcmop_id,
+ start_deploy,
+ self.timeout_verticalscale,
+ operation="verticalscale",
)
except (ROclient.ROClientException, DbException, LcmException) as e:
self.logger.error("Exit Exception {}".format(e))
exc = "Operation was cancelled"
except Exception as e:
exc = traceback.format_exc()
- self.logger.critical("Exit Exception {} {}".format(type(e).__name__, e), exc_info=True)
+ self.logger.critical(
+ "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True
+ )
finally:
self._write_ns_status(
nsr_id=nsr_id,
current_operation_id=None,
)
if exc:
- db_nslcmop_update[
- "detailed-status"
- ] = "FAILED {}: {}".format(step, exc)
+ db_nslcmop_update["detailed-status"] = "FAILED {}: {}".format(step, exc)
nslcmop_operation_state = "FAILED"
else:
nslcmop_operation_state = "COMPLETED"