) # timeout for some progress in a primitive execution
timeout_migrate = 1800 # default global timeout for migrating vnfs
    timeout_operate = 1800 # default global timeout for operating vnfs
+    timeout_verticalscale = 1800 # default global timeout for vertical scaling
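+    # Sentinel values returned when looking up sub-operations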
SUBOPERATION_STATUS_NOT_FOUND = -1
SUBOPERATION_STATUS_NEW = -2
SUBOPERATION_STATUS_SKIP = -3
# vcaStatus
db_dict = dict()
db_dict["vcaStatus"] = status_dict
- await self.n2vc.update_vca_status(db_dict["vcaStatus"], vca_id=vca_id)
# update configurationStatus for this VCA
try:
db_dict = dict()
db_dict["vcaStatus"] = {nsr_id: vca_status}
- if cluster_type in ("juju-bundle", "juju"):
- # TODO -> this should be done in a more uniform way, I think in N2VC, in order to update the K8s VCA
- # status in a similar way between Juju Bundles and Helm Charts on this side
- await self.k8sclusterjuju.update_vca_status(
- db_dict["vcaStatus"],
- kdu_instance,
- vca_id=vca_id,
- )
-
self.logger.debug(
f"Obtained VCA status for cluster type '{cluster_type}': {vca_status}"
)
get_iterable(vdur, "interfaces"),
lambda iface: iface.get("ns-vld-id") == a_vld["name"],
)
+
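+                        # Match this VLD against the instantiation parameters, by name or id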
+ vld_params = find_in_list(
+ get_iterable(ns_params, "vld"),
+ lambda v_vld: v_vld["name"] in (a_vld["name"], a_vld["id"]),
+ )
if target_vld:
+
if vnf_params.get("vimAccountId") not in a_vld.get(
"vim_info", {}
):
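+                                # No vim_info entry exists for this VIM account yet:
+                                # reuse a vim_network_name already resolved for another VIM, if any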
+                                target_vim_network_list = [
+                                    v for _, v in a_vld.get("vim_info", {}).items()
+                                ]
+ target_vim_network_name = next(
+ (
+ item.get("vim_network_name", "")
+ for item in target_vim_network_list
+ ),
+ "",
+ )
+
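+                                # Register this VIM account in the VLD vim_info with the resolved network name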
target["ns"]["vld"][a_index].get("vim_info").update(
{
"vim:{}".format(vnf_params["vimAccountId"]): {
- "vim_network_name": ""
+ "vim_network_name": target_vim_network_name,
}
}
)
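+                                # Apply explicit per-VIM "vim-network-name"/"vim-network-id" overrides from the VLD params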
+ if vld_params:
+ for param in ("vim-network-name", "vim-network-id"):
+ if vld_params.get(param) and isinstance(
+ vld_params[param], dict
+ ):
+ for vim, vim_net in vld_params[
+ param
+ ].items():
+ other_target_vim = "vim:" + vim
+ populate_dict(
+ target["ns"]["vld"][a_index].get(
+ "vim_info"
+ ),
+ (
+ other_target_vim,
+ param.replace("-", "_"),
+ ),
+ vim_net,
+ )
+
nslcmop_id = db_nslcmop["_id"]
target = {
"name": db_nsr["name"],
await asyncio.sleep(15, loop=self.loop)
else: # timeout_ns_deploy
raise NgRoException("Timeout waiting ns to deploy")
+
+ async def vertical_scale(self, nsr_id, nslcmop_id):
+ """
+        Vertically scale the VDUs in an NS
+
+ :param: nsr_id: NS Instance ID
+        :param: nslcmop_id: nslcmop ID of vertical scale
+
+ """
+ # Try to lock HA task here
+ task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
+ if not task_is_locked_by_me:
+ return
+ logging_text = "Task ns={} vertical scale ".format(nsr_id)
+ self.logger.debug(logging_text + "Enter")
+ # get all needed from database
+ db_nslcmop = None
+ db_nslcmop_update = {}
+ nslcmop_operation_state = None
+ db_nsr_update = {}
+ target = {}
+ exc = None
+        # in case of error, "step" indicates which part of the vertical scale failed, to set the NSR error status
+ start_deploy = time()
+
+ try:
+ # wait for any previous tasks in process
+ step = "Waiting for previous operations to terminate"
+            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
+
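+            # Mark the NS as busy with the VerticalScale operation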
+ self._write_ns_status(
+ nsr_id=nsr_id,
+ ns_state=None,
+ current_operation="VerticalScale",
+                current_operation_id=nslcmop_id,
+ )
+ step = "Getting nslcmop from database"
+            self.logger.debug(
+                step + " after having waited for previous tasks to be completed"
+            )
+ db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
+ operationParams = db_nslcmop.get("operationParams")
+ target = {}
+ target.update(operationParams)
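+            # Delegate the vertical scale to NG-RO and poll the action until it finishes or times out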
+ desc = await self.RO.vertical_scale(nsr_id, target)
+ self.logger.debug("RO return > {}".format(desc))
+ action_id = desc["action_id"]
+ await self._wait_ng_ro(
+ nsr_id, action_id, nslcmop_id, start_deploy, self.timeout_verticalscale
+ )
+ except (ROclient.ROClientException, DbException, LcmException) as e:
+ self.logger.error("Exit Exception {}".format(e))
+ exc = e
+ except asyncio.CancelledError:
+ self.logger.error("Cancelled Exception while '{}'".format(step))
+ exc = "Operation was cancelled"
+ except Exception as e:
+ exc = traceback.format_exc()
+            self.logger.critical(
+                "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True
+            )
+ finally:
+ self._write_ns_status(
+ nsr_id=nsr_id,
+ ns_state=None,
+ current_operation="IDLE",
+ current_operation_id=None,
+ )
+ if exc:
+ db_nslcmop_update[
+ "detailed-status"
+ ] = "FAILED {}: {}".format(step, exc)
+ nslcmop_operation_state = "FAILED"
+ else:
+ nslcmop_operation_state = "COMPLETED"
+ db_nslcmop_update["detailed-status"] = "Done"
+ db_nsr_update["detailed-status"] = "Done"
+
+ self._write_op_status(
+ op_id=nslcmop_id,
+ stage="",
+ error_message="",
+ operation_state=nslcmop_operation_state,
+ other_update=db_nslcmop_update,
+ )
+ if nslcmop_operation_state:
+ try:
+ msg = {
+ "nsr_id": nsr_id,
+ "nslcmop_id": nslcmop_id,
+ "operationState": nslcmop_operation_state,
+ }
+ await self.msg.aiowrite("ns", "verticalscaled", msg, loop=self.loop)
+ except Exception as e:
+ self.logger.error(
+ logging_text + "kafka_write notification Exception {}".format(e)
+ )
+ self.logger.debug(logging_text + "Exit")
+ self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_verticalscale")