Fix flake8 and black issues in code and enforce both checks in tox
Change-Id: Id817198c20c039029c8b053e8eac0a8482d72a48
Signed-off-by: preethika.p <preethika.p@tataelxsi.co.in>
diff --git a/osm_lcm/lcm.py b/osm_lcm/lcm.py
index 8932d89..273edc1 100644
--- a/osm_lcm/lcm.py
+++ b/osm_lcm/lcm.py
@@ -460,9 +460,7 @@
nslcmop_id = nslcmop["_id"]
nsr_id = nslcmop["nsInstanceId"]
task = asyncio.ensure_future(self.ns.heal(nsr_id, nslcmop_id))
- self.lcm_tasks.register(
- "ns", nsr_id, nslcmop_id, "ns_heal", task
- )
+ self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "ns_heal", task)
return
elif command == "migrate":
nslcmop = params
@@ -476,9 +474,15 @@
nslcmop_id = nslcmop["_id"]
nsr_id = nslcmop["nsInstanceId"]
task = asyncio.ensure_future(self.ns.vertical_scale(nsr_id, nslcmop_id))
- self.logger.debug("nsr_id,nslcmop_id,task {},{},{}".format(nsr_id, nslcmop_id, task))
- self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "ns_verticalscale", task)
- self.logger.debug("LCM task registered {},{},{} ".format(nsr_id, nslcmop_id, task))
+ self.logger.debug(
+ "nsr_id,nslcmop_id,task {},{},{}".format(nsr_id, nslcmop_id, task)
+ )
+ self.lcm_tasks.register(
+ "ns", nsr_id, nslcmop_id, "ns_verticalscale", task
+ )
+ self.logger.debug(
+ "LCM task registered {},{},{} ".format(nsr_id, nslcmop_id, task)
+ )
return
elif command == "show":
nsr_id = params
diff --git a/osm_lcm/lcm_utils.py b/osm_lcm/lcm_utils.py
index 7af63a8..749f347 100644
--- a/osm_lcm/lcm_utils.py
+++ b/osm_lcm/lcm_utils.py
@@ -319,9 +319,7 @@
# Get the NSD package path
if revision:
- nsd_package_path = (
- db_nsr["nsd-id"] + ":" + str(revision)
- )
+ nsd_package_path = db_nsr["nsd-id"] + ":" + str(revision)
db_nsd = self.db.get_one("nsds_revisions", {"_id": nsd_package_path})
else:
diff --git a/osm_lcm/ng_ro.py b/osm_lcm/ng_ro.py
index 03819c8..95aa5c9 100644
--- a/osm_lcm/ng_ro.py
+++ b/osm_lcm/ng_ro.py
@@ -347,7 +347,9 @@
target = self._parse_yaml(target)
payload_req = yaml.safe_dump(target)
- url = "{}/ns/v1/verticalscale/{nsr_id}".format(self.endpoint_url, nsr_id=nsr_id)
+ url = "{}/ns/v1/verticalscale/{nsr_id}".format(
+ self.endpoint_url, nsr_id=nsr_id
+ )
async with aiohttp.ClientSession(loop=self.loop) as session:
self.logger.debug("NG-RO POST %s %s", url, payload_req)
async with session.post(
diff --git a/osm_lcm/ns.py b/osm_lcm/ns.py
index 8a99e91..d267065 100644
--- a/osm_lcm/ns.py
+++ b/osm_lcm/ns.py
@@ -135,7 +135,7 @@
) # timeout for some progress in a primitive execution
timeout_migrate = 1800 # default global timeout for migrating vnfs
timeout_operate = 1800 # default global timeout for migrating vnfs
- timeout_verticalscale = 1800 # default global timeout for Vertical Sclaing
+ timeout_verticalscale = 1800 # default global timeout for Vertical Scaling
SUBOPERATION_STATUS_NOT_FOUND = -1
SUBOPERATION_STATUS_NEW = -2
SUBOPERATION_STATUS_SKIP = -3
@@ -416,9 +416,9 @@
def _parse_cloud_init(cloud_init_text, additional_params, vnfd_id, vdu_id):
try:
env = Environment(
- undefined=StrictUndefined,
- autoescape=select_autoescape(default_for_string=True, default=True),
- )
+ undefined=StrictUndefined,
+ autoescape=select_autoescape(default_for_string=True, default=True),
+ )
template = env.from_string(cloud_init_text)
return template.render(additional_params or {})
except UndefinedError as e:
@@ -1286,8 +1286,13 @@
self.logger.debug("RO return > {}".format(desc))
action_id = desc["action_id"]
await self._wait_ng_ro(
- nsr_id, action_id, nslcmop_id, start_deploy, timeout_ns_deploy, stage,
- operation="instantiation"
+ nsr_id,
+ action_id,
+ nslcmop_id,
+ start_deploy,
+ timeout_ns_deploy,
+ stage,
+ operation="instantiation",
)
# Updating NSR
@@ -1368,8 +1373,13 @@
# wait until done
delete_timeout = 20 * 60 # 20 minutes
await self._wait_ng_ro(
- nsr_id, action_id, nslcmop_id, start_deploy, delete_timeout, stage,
- operation="termination"
+ nsr_id,
+ action_id,
+ nslcmop_id,
+ start_deploy,
+ delete_timeout,
+ stage,
+ operation="termination",
)
db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
@@ -1643,7 +1653,9 @@
}
desc = await self.RO.deploy(nsr_id, target)
action_id = desc["action_id"]
- await self._wait_ng_ro(nsr_id, action_id, timeout=600, operation="instantiation")
+ await self._wait_ng_ro(
+ nsr_id, action_id, timeout=600, operation="instantiation"
+ )
break
else:
# wait until NS is deployed at RO
@@ -2092,7 +2104,7 @@
# for a KNF and not for its KDUs, the previous verification gives False, and the code
# jumps to this block, meaning that there is the need to verify if the VNF is actually a VNF
# or it is a KNF)
- elif db_vnfr.get('vdur'):
+ elif db_vnfr.get("vdur"):
rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
logging_text,
nsr_id,
@@ -5002,8 +5014,10 @@
await asyncio.sleep(retries_interval, loop=self.loop)
else:
if isinstance(e, asyncio.TimeoutError):
- e = N2VCException(message="Timed out waiting for action to complete")
- return "FAILED", getattr(e, 'message', repr(e))
+ e = N2VCException(
+ message="Timed out waiting for action to complete"
+ )
+ return "FAILED", getattr(e, "message", repr(e))
return "COMPLETED", output
@@ -5436,33 +5450,39 @@
"member-vnf-index": member_vnf_index,
"type": "delete",
"vdu_index": count_index,
- })
+ }
+ )
scaling_info["vdu-delete"][vdu["vdu-id-ref"]] = count_index
scaling_info["vdu"].append(
{
"name": vdu.get("name") or vdu.get("vdu-name"),
"vdu_id": vdu["vdu-id-ref"],
"interface": [],
- })
+ }
+ )
for interface in vdu["interfaces"]:
scaling_info["vdu"][index]["interface"].append(
{
"name": interface["name"],
"ip_address": interface["ip-address"],
"mac_address": interface.get("mac-address"),
- })
+ }
+ )
self.logger.info("NS update scaling info{}".format(scaling_info))
stage[2] = "Terminating VDUs"
if scaling_info.get("vdu-delete"):
# scale_process = "RO"
if self.ro_config.get("ng"):
await self._scale_ng_ro(
- logging_text, db_nsr, update_db_nslcmops, db_vnfr, scaling_info, stage
+ logging_text,
+ db_nsr,
+ update_db_nslcmops,
+ db_vnfr,
+ scaling_info,
+ stage,
)
- async def remove_vnf(
- self, nsr_id, nslcmop_id, vnf_instance_id
- ):
+ async def remove_vnf(self, nsr_id, nslcmop_id, vnf_instance_id):
"""This method is to Remove VNF instances from NS.
Args:
@@ -5481,7 +5501,9 @@
if check_vnfr_count > 1:
stage = ["", "", ""]
step = "Getting nslcmop from database"
- self.logger.debug(step + " after having waited for previous tasks to be completed")
+ self.logger.debug(
+ step + " after having waited for previous tasks to be completed"
+ )
# db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_instance_id})
@@ -5490,19 +5512,31 @@
"vnfrs", {"member-vnf-index-ref": member_vnf_index, "nsr-id-ref": nsr_id}) """
update_db_nslcmops = self.db.get_one("nslcmops", {"_id": nslcmop_id})
- await self.terminate_vdus(db_vnfr, member_vnf_index, db_nsr, update_db_nslcmops, stage, logging_text)
+ await self.terminate_vdus(
+ db_vnfr,
+ member_vnf_index,
+ db_nsr,
+ update_db_nslcmops,
+ stage,
+ logging_text,
+ )
constituent_vnfr = db_nsr.get("constituent-vnfr-ref")
constituent_vnfr.remove(db_vnfr.get("_id"))
- db_nsr_update["constituent-vnfr-ref"] = db_nsr.get("constituent-vnfr-ref")
+ db_nsr_update["constituent-vnfr-ref"] = db_nsr.get(
+ "constituent-vnfr-ref"
+ )
self.update_db_2("nsrs", nsr_id, db_nsr_update)
self.db.del_one("vnfrs", {"_id": db_vnfr.get("_id")})
self.update_db_2("nsrs", nsr_id, db_nsr_update)
return "COMPLETED", "Done"
else:
step = "Terminate VNF Failed with"
- raise LcmException("{} Cannot terminate the last VNF in this NS.".format(
- vnf_instance_id))
+ raise LcmException(
+ "{} Cannot terminate the last VNF in this NS.".format(
+ vnf_instance_id
+ )
+ )
except (LcmException, asyncio.CancelledError):
raise
except Exception as e:
@@ -5510,7 +5544,12 @@
return "FAILED", "Error removing VNF {}".format(e)
async def _ns_redeploy_vnf(
- self, nsr_id, nslcmop_id, db_vnfd, db_vnfr, db_nsr,
+ self,
+ nsr_id,
+ nslcmop_id,
+ db_vnfd,
+ db_vnfr,
+ db_nsr,
):
"""This method updates and redeploys VNF instances
@@ -5533,7 +5572,14 @@
# Terminate old VNF resources
update_db_nslcmops = self.db.get_one("nslcmops", {"_id": nslcmop_id})
- await self.terminate_vdus(db_vnfr, member_vnf_index, db_nsr, update_db_nslcmops, stage, logging_text)
+ await self.terminate_vdus(
+ db_vnfr,
+ member_vnf_index,
+ db_nsr,
+ update_db_nslcmops,
+ stage,
+ logging_text,
+ )
# old_vnfd_id = db_vnfr["vnfd-id"]
# new_db_vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
@@ -5554,10 +5600,16 @@
new_vdur = update_db_nslcmops["operationParams"]["newVdur"]
# new_vdur = self._create_vdur_descriptor_from_vnfd(db_nsd, db_vnfd, old_db_vnfd, vnfd_id, db_nsr, member_vnf_index)
# new_vnfr_update = {"vnfd-ref": new_vnfd_ref, "vnfd-id": new_vnfd_id, "connection-point": new_vnfr_cp, "vdur": new_vdur, "ip-address": ""}
- new_vnfr_update = {"revision": latest_vnfd_revision, "connection-point": new_vnfr_cp, "vdur": new_vdur, "ip-address": ""}
+ new_vnfr_update = {
+ "revision": latest_vnfd_revision,
+ "connection-point": new_vnfr_cp,
+ "vdur": new_vdur,
+ "ip-address": "",
+ }
self.update_db_2("vnfrs", db_vnfr["_id"], new_vnfr_update)
updated_db_vnfr = self.db.get_one(
- "vnfrs", {"member-vnf-index-ref": member_vnf_index, "nsr-id-ref": nsr_id}
+ "vnfrs",
+ {"member-vnf-index-ref": member_vnf_index, "nsr-id-ref": nsr_id},
)
# Instantiate new VNF resources
@@ -5569,9 +5621,7 @@
scaling_info["kdu-create"] = {}
vdud_instantiate_list = db_vnfd["vdu"]
for index, vdud in enumerate(vdud_instantiate_list):
- cloud_init_text = self._get_vdu_cloud_init_content(
- vdud, db_vnfd
- )
+ cloud_init_text = self._get_vdu_cloud_init_content(vdud, db_vnfd)
if cloud_init_text:
additional_params = (
self._get_vdu_additional_params(updated_db_vnfr, vdud["id"])
@@ -5602,9 +5652,15 @@
scaling_info["vdu-create"][vdud["id"]] = count_index
if self.ro_config.get("ng"):
self.logger.debug(
- "New Resources to be deployed: {}".format(scaling_info))
+ "New Resources to be deployed: {}".format(scaling_info)
+ )
await self._scale_ng_ro(
- logging_text, db_nsr, update_db_nslcmops, updated_db_vnfr, scaling_info, stage
+ logging_text,
+ db_nsr,
+ update_db_nslcmops,
+ updated_db_vnfr,
+ scaling_info,
+ stage,
)
return "COMPLETED", "Done"
except (LcmException, asyncio.CancelledError):
@@ -5846,15 +5902,8 @@
# based on new descriptor
step = "Redeploying VNF"
member_vnf_index = db_vnfr["member-vnf-index-ref"]
- (
- result,
- detailed_status
- ) = await self._ns_redeploy_vnf(
- nsr_id,
- nslcmop_id,
- latest_vnfd,
- db_vnfr,
- db_nsr
+ (result, detailed_status) = await self._ns_redeploy_vnf(
+ nsr_id, nslcmop_id, latest_vnfd, db_vnfr, db_nsr
)
if result == "FAILED":
nslcmop_operation_state = result
@@ -5941,7 +5990,9 @@
db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_instance_id})
member_vnf_index = db_vnfr["member-vnf-index-ref"]
step = "Removing VNF"
- (result, detailed_status) = await self.remove_vnf(nsr_id, nslcmop_id, vnf_instance_id)
+ (result, detailed_status) = await self.remove_vnf(
+ nsr_id, nslcmop_id, vnf_instance_id
+ )
if result == "FAILED":
nslcmop_operation_state = result
error_description_nslcmop = detailed_status
@@ -5957,12 +6008,18 @@
)
elif update_type == "OPERATE_VNF":
- vnf_id = db_nslcmop["operationParams"]["operateVnfData"]["vnfInstanceId"]
- operation_type = db_nslcmop["operationParams"]["operateVnfData"]["changeStateTo"]
- additional_param = db_nslcmop["operationParams"]["operateVnfData"]["additionalParam"]
+ vnf_id = db_nslcmop["operationParams"]["operateVnfData"][
+ "vnfInstanceId"
+ ]
+ operation_type = db_nslcmop["operationParams"]["operateVnfData"][
+ "changeStateTo"
+ ]
+ additional_param = db_nslcmop["operationParams"]["operateVnfData"][
+ "additionalParam"
+ ]
(result, detailed_status) = await self.rebuild_start_stop(
nsr_id, nslcmop_id, vnf_id, additional_param, operation_type
- )
+ )
if result == "FAILED":
nslcmop_operation_state = result
error_description_nslcmop = detailed_status
@@ -7265,7 +7322,9 @@
job["vnfr_id"] = vnfr_id
return job_list
- async def rebuild_start_stop(self, nsr_id, nslcmop_id, vnf_id, additional_param, operation_type):
+ async def rebuild_start_stop(
+ self, nsr_id, nslcmop_id, vnf_id, additional_param, operation_type
+ ):
logging_text = "Task ns={} {}={} ".format(nsr_id, operation_type, nslcmop_id)
self.logger.info(logging_text + "Enter")
stage = ["Preparing the environment", ""]
@@ -7283,7 +7342,7 @@
vdurs = [item for item in db_vnfr["vdur"] if item["vdu-id-ref"] == vdu_id]
vdur = find_in_list(
vdurs, lambda vdu: vdu["count-index"] == additional_param["count-index"]
- )
+ )
if vdur:
vdu_vim_name = vdur["name"]
vim_vm_id = vdur["vim_info"][vim_info_key]["vim_id"]
@@ -7294,7 +7353,7 @@
# wait for any previous tasks in process
stage[1] = "Waiting for previous operations to terminate"
self.logger.info(stage[1])
- await self.lcm_tasks.waitfor_related_HA('ns', 'nslcmops', nslcmop_id)
+ await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
stage[1] = "Reading from database."
self.logger.info(stage[1])
@@ -7302,7 +7361,7 @@
nsr_id=nsr_id,
ns_state=None,
current_operation=operation_type.upper(),
- current_operation_id=nslcmop_id
+ current_operation_id=nslcmop_id,
)
self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)
@@ -7318,7 +7377,7 @@
"vdu_index": additional_param["count-index"],
"vdu_id": vdur["id"],
"target_vim": target_vim,
- "vim_account_id": vim_account_id
+ "vim_account_id": vim_account_id,
}
}
stage[1] = "Sending rebuild request to RO... {}".format(desc)
@@ -7328,8 +7387,13 @@
self.logger.info("response from RO: {}".format(result_dict))
action_id = result_dict["action_id"]
await self._wait_ng_ro(
- nsr_id, action_id, nslcmop_id, start_deploy,
- self.timeout_operate, None, "start_stop_rebuild",
+ nsr_id,
+ action_id,
+ nslcmop_id,
+ start_deploy,
+ self.timeout_operate,
+ None,
+ "start_stop_rebuild",
)
return "COMPLETED", "Done"
except (ROclient.ROClientException, DbException, LcmException) as e:
@@ -7340,7 +7404,9 @@
exc = "Operation was cancelled"
except Exception as e:
exc = traceback.format_exc()
- self.logger.critical("Exit Exception {} {}".format(type(e).__name__, e), exc_info=True)
+ self.logger.critical(
+ "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True
+ )
return "FAILED", "Error in operate VNF {}".format(exc)
def get_vca_cloud_and_credentials(self, vim_account_id: str) -> (str, str):
@@ -7413,8 +7479,12 @@
self.logger.debug("RO return > {}".format(desc))
action_id = desc["action_id"]
await self._wait_ng_ro(
- nsr_id, action_id, nslcmop_id, start_deploy, self.timeout_migrate,
- operation="migrate"
+ nsr_id,
+ action_id,
+ nslcmop_id,
+ start_deploy,
+ self.timeout_migrate,
+ operation="migrate",
)
except (ROclient.ROClientException, DbException, LcmException) as e:
self.logger.error("Exit Exception {}".format(e))
@@ -7464,7 +7534,6 @@
self.logger.debug(logging_text + "Exit")
self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_migrate")
-
async def heal(self, nsr_id, nslcmop_id):
"""
Heal NS
@@ -7565,27 +7634,39 @@
member_vnf_index = db_vnfr.get("member-vnf-index-ref")
# Check each target VDU and deploy N2VC
- target_vdu_list = target_vnf.get("additionalParams", {}).get("vdu", [])
+ target_vdu_list = target_vnf.get("additionalParams", {}).get(
+ "vdu", []
+ )
if not target_vdu_list:
# Codigo nuevo para crear diccionario
target_vdu_list = []
for existing_vdu in db_vnfr.get("vdur"):
vdu_name = existing_vdu.get("vdu-name", None)
vdu_index = existing_vdu.get("count-index", 0)
- vdu_run_day1 = target_vnf.get("additionalParams", {}).get("run-day1", False)
- vdu_to_be_healed = {"vdu-id": vdu_name, "count-index": vdu_index, "run-day1": vdu_run_day1}
+ vdu_run_day1 = target_vnf.get("additionalParams", {}).get(
+ "run-day1", False
+ )
+ vdu_to_be_healed = {
+ "vdu-id": vdu_name,
+ "count-index": vdu_index,
+ "run-day1": vdu_run_day1,
+ }
target_vdu_list.append(vdu_to_be_healed)
for target_vdu in target_vdu_list:
deploy_params_vdu = target_vdu
# Set run-day1 vnf level value if not vdu level value exists
- if not deploy_params_vdu.get("run-day1") and target_vnf["additionalParams"].get("run-day1"):
- deploy_params_vdu["run-day1"] = target_vnf["additionalParams"].get("run-day1")
+ if not deploy_params_vdu.get("run-day1") and target_vnf[
+ "additionalParams"
+ ].get("run-day1"):
+ deploy_params_vdu["run-day1"] = target_vnf[
+ "additionalParams"
+ ].get("run-day1")
vdu_name = target_vdu.get("vdu-id", None)
# TODO: Get vdu_id from vdud.
vdu_id = vdu_name
# For multi instance VDU count-index is mandatory
# For single session VDU count-indes is 0
- vdu_index = target_vdu.get("count-index",0)
+ vdu_index = target_vdu.get("count-index", 0)
# n2vc_redesign STEP 3 to 6 Deploy N2VC
stage[1] = "Deploying Execution Environments."
@@ -7599,32 +7680,35 @@
vnf_ip_address = db_vnfr.get("ip-address")
target_instance = None
for instance in db_vnfr.get("vdur", None):
- if ( instance["vdu-name"] == vdu_name and instance["count-index"] == vdu_index ):
+ if (
+ instance["vdu-name"] == vdu_name
+ and instance["count-index"] == vdu_index
+ ):
target_instance = instance
break
if vnf_ip_address == target_instance.get("ip-address"):
self._heal_n2vc(
- logging_text=logging_text
- + "member_vnf_index={}, vdu_name={}, vdu_index={} ".format(
- member_vnf_index, vdu_name, vdu_index
- ),
- db_nsr=db_nsr,
- db_vnfr=db_vnfr,
- nslcmop_id=nslcmop_id,
- nsr_id=nsr_id,
- nsi_id=nsi_id,
- vnfd_id=vnfd_ref,
- vdu_id=None,
- kdu_name=None,
- member_vnf_index=member_vnf_index,
- vdu_index=0,
- vdu_name=None,
- deploy_params=deploy_params_vdu,
- descriptor_config=descriptor_config,
- base_folder=base_folder,
- task_instantiation_info=tasks_dict_info,
- stage=stage,
- )
+ logging_text=logging_text
+ + "member_vnf_index={}, vdu_name={}, vdu_index={} ".format(
+ member_vnf_index, vdu_name, vdu_index
+ ),
+ db_nsr=db_nsr,
+ db_vnfr=db_vnfr,
+ nslcmop_id=nslcmop_id,
+ nsr_id=nsr_id,
+ nsi_id=nsi_id,
+ vnfd_id=vnfd_ref,
+ vdu_id=None,
+ kdu_name=None,
+ member_vnf_index=member_vnf_index,
+ vdu_index=0,
+ vdu_name=None,
+ deploy_params=deploy_params_vdu,
+ descriptor_config=descriptor_config,
+ base_folder=base_folder,
+ task_instantiation_info=tasks_dict_info,
+ stage=stage,
+ )
# VDU Level charm. Normal case with native charms.
descriptor_config = get_configuration(vnfd, vdu_name)
@@ -7693,9 +7777,7 @@
db_nsr_update["config-status"] = old_config_status
db_nsr_update[
"detailed-status"
- ] = "FAILED healing nslcmop={} {}: {}".format(
- nslcmop_id, step, exc
- )
+ ] = "FAILED healing nslcmop={} {}: {}".format(nslcmop_id, step, exc)
for task, task_name in tasks_dict_info.items():
if not task.done() or task.cancelled() or task.exception():
if task_name.startswith(self.task_name_deploy_vca):
@@ -7758,6 +7840,7 @@
:param stage: list with 3 items: [general stage, tasks, vim_specific]. This task will write over vim_specific
:return: None or exception
"""
+
def get_vim_account(vim_account_id):
nonlocal db_vims
if vim_account_id in db_vims:
@@ -7772,9 +7855,7 @@
if ns_params and ns_params.get("timeout_ns_heal"):
timeout_ns_heal = ns_params["timeout_ns_heal"]
else:
- timeout_ns_heal = self.timeout.get(
- "ns_heal", self.timeout_ns_heal
- )
+ timeout_ns_heal = self.timeout.get("ns_heal", self.timeout_ns_heal)
db_vims = {}
@@ -7782,7 +7863,11 @@
target = {
"action_id": nslcmop_id,
}
- self.logger.warning("db_nslcmop={} and timeout_ns_heal={}".format(db_nslcmop,timeout_ns_heal))
+ self.logger.warning(
+ "db_nslcmop={} and timeout_ns_heal={}".format(
+ db_nslcmop, timeout_ns_heal
+ )
+ )
target.update(db_nslcmop.get("operationParams", {}))
self.logger.debug("Send to RO > nsr_id={} target={}".format(nsr_id, target))
@@ -7791,8 +7876,13 @@
action_id = desc["action_id"]
# waits for RO to complete because Reinjecting juju key at ro can find VM in state Deleted
await self._wait_ng_ro(
- nsr_id, action_id, nslcmop_id, start_heal, timeout_ns_heal, stage,
- operation="healing"
+ nsr_id,
+ action_id,
+ nslcmop_id,
+ start_heal,
+ timeout_ns_heal,
+ stage,
+ operation="healing",
)
# Updating NSR
@@ -7808,7 +7898,7 @@
except Exception as e:
stage[2] = "ERROR healing at VIM"
- #self.set_vnfr_at_error(db_vnfrs, str(e))
+ # self.set_vnfr_at_error(db_vnfrs, str(e))
self.logger.error(
"Error healing at VIM {}".format(e),
exc_info=not isinstance(
@@ -8179,7 +8269,7 @@
status="INSTALLING SW",
element_under_configuration=element_under_configuration,
element_type=element_type,
- #other_update=db_nsr_update,
+ # other_update=db_nsr_update,
other_update=None,
)
@@ -8252,7 +8342,7 @@
# n2vc_redesign STEP 5.1
# wait for RO (ip-address) Insert pub_key into VM
# IMPORTANT: We need do wait for RO to complete healing operation.
- await self._wait_heal_ro(nsr_id,self.timeout_ns_heal)
+ await self._wait_heal_ro(nsr_id, self.timeout_ns_heal)
if vnfr_id:
if kdu_name:
rw_mgmt_ip = await self.wait_kdu_up(
@@ -8278,15 +8368,19 @@
# Day1 operations.
# get run-day1 operation parameter
- runDay1 = deploy_params.get("run-day1",False)
- self.logger.debug(" Healing vnf={}, vdu={}, runDay1 ={}".format(vnfr_id,vdu_id,runDay1))
+ runDay1 = deploy_params.get("run-day1", False)
+ self.logger.debug(
+ "Healing vnf={}, vdu={}, runDay1 ={}".format(vnfr_id, vdu_id, runDay1)
+ )
if runDay1:
# n2vc_redesign STEP 6 Execute initial config primitive
step = "execute initial config primitive"
# wait for dependent primitives execution (NS -> VNF -> VDU)
if initial_config_primitive_list:
- await self._wait_dependent_n2vc(nsr_id, vca_deployed_list, vca_index)
+ await self._wait_dependent_n2vc(
+ nsr_id, vca_deployed_list, vca_index
+ )
# stage, in function of element type: vdu, kdu, vnf or ns
my_vca = vca_deployed_list[vca_index]
@@ -8334,7 +8428,9 @@
if check_if_terminated_needed:
if config_descriptor.get("terminate-config-primitive"):
self.update_db_2(
- "nsrs", nsr_id, {db_update_entry + "needed_terminate": True}
+ "nsrs",
+ nsr_id,
+ {db_update_entry + "needed_terminate": True},
)
check_if_terminated_needed = False
@@ -8397,7 +8493,9 @@
start_time = time()
while time() <= start_time + timeout:
db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
- operational_status_ro = db_nsr["_admin"]["deployed"]["RO"]["operational-status"]
+ operational_status_ro = db_nsr["_admin"]["deployed"]["RO"][
+ "operational-status"
+ ]
self.logger.debug("Wait Heal RO > {}".format(operational_status_ro))
if operational_status_ro != "healing":
break
@@ -8432,16 +8530,18 @@
try:
# wait for any previous tasks in process
step = "Waiting for previous operations to terminate"
- await self.lcm_tasks.waitfor_related_HA('ns', 'nslcmops', nslcmop_id)
+ await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
self._write_ns_status(
nsr_id=nsr_id,
ns_state=None,
current_operation="VerticalScale",
- current_operation_id=nslcmop_id
+ current_operation_id=nslcmop_id,
)
step = "Getting nslcmop from database"
- self.logger.debug(step + " after having waited for previous tasks to be completed")
+ self.logger.debug(
+ step + " after having waited for previous tasks to be completed"
+ )
db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
operationParams = db_nslcmop.get("operationParams")
target = {}
@@ -8450,8 +8550,12 @@
self.logger.debug("RO return > {}".format(desc))
action_id = desc["action_id"]
await self._wait_ng_ro(
- nsr_id, action_id, nslcmop_id, start_deploy, self.timeout_verticalscale,
- operation="verticalscale"
+ nsr_id,
+ action_id,
+ nslcmop_id,
+ start_deploy,
+ self.timeout_verticalscale,
+ operation="verticalscale",
)
except (ROclient.ROClientException, DbException, LcmException) as e:
self.logger.error("Exit Exception {}".format(e))
@@ -8461,7 +8565,9 @@
exc = "Operation was cancelled"
except Exception as e:
exc = traceback.format_exc()
- self.logger.critical("Exit Exception {} {}".format(type(e).__name__, e), exc_info=True)
+ self.logger.critical(
+ "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True
+ )
finally:
self._write_ns_status(
nsr_id=nsr_id,
@@ -8470,9 +8576,7 @@
current_operation_id=None,
)
if exc:
- db_nslcmop_update[
- "detailed-status"
- ] = "FAILED {}: {}".format(step, exc)
+ db_nslcmop_update["detailed-status"] = "FAILED {}: {}".format(step, exc)
nslcmop_operation_state = "FAILED"
else:
nslcmop_operation_state = "COMPLETED"
diff --git a/osm_lcm/tests/test_ns.py b/osm_lcm/tests/test_ns.py
index 560edc9..2004701 100644
--- a/osm_lcm/tests/test_ns.py
+++ b/osm_lcm/tests/test_ns.py
@@ -122,15 +122,11 @@
print("kwargs > {}".format(kwargs))
if args:
if "update" in args:
- ro_ns_desc = yaml.safe_load(
- descriptors.ro_update_action_text
- )
+ ro_ns_desc = yaml.safe_load(descriptors.ro_update_action_text)
while True:
yield ro_ns_desc
if kwargs.get("delete"):
- ro_ns_desc = yaml.safe_load(
- descriptors.ro_delete_action_text
- )
+ ro_ns_desc = yaml.safe_load(descriptors.ro_delete_action_text)
while True:
yield ro_ns_desc
@@ -176,19 +172,13 @@
Database.instance = None
self.db = Database({"database": {"driver": "memory"}}).instance.db
- self.db.create_list(
- "vnfds", yaml.safe_load(descriptors.db_vnfds_text)
- )
+ self.db.create_list("vnfds", yaml.safe_load(descriptors.db_vnfds_text))
self.db.create_list(
"vnfds_revisions",
yaml.safe_load(descriptors.db_vnfds_revisions_text),
)
- self.db.create_list(
- "nsds", yaml.safe_load(descriptors.db_nsds_text)
- )
- self.db.create_list(
- "nsrs", yaml.safe_load(descriptors.db_nsrs_text)
- )
+ self.db.create_list("nsds", yaml.safe_load(descriptors.db_nsds_text))
+ self.db.create_list("nsrs", yaml.safe_load(descriptors.db_nsrs_text))
self.db.create_list(
"vim_accounts",
yaml.safe_load(descriptors.db_vim_accounts_text),
@@ -200,12 +190,8 @@
self.db.create_list(
"nslcmops", yaml.safe_load(descriptors.db_nslcmops_text)
)
- self.db.create_list(
- "vnfrs", yaml.safe_load(descriptors.db_vnfrs_text)
- )
- self.db_vim_accounts = yaml.safe_load(
- descriptors.db_vim_accounts_text
- )
+ self.db.create_list("vnfrs", yaml.safe_load(descriptors.db_vnfrs_text))
+ self.db_vim_accounts = yaml.safe_load(descriptors.db_vim_accounts_text)
# Mock kafka
self.msg = asynctest.Mock(MsgKafka())
@@ -839,7 +825,10 @@
self.assertEqual(return_value, expected_value)
with self.assertRaises(Exception) as context:
self.db.get_one("vnfrs", {"_id": vnf_instance_id})
- self.assertTrue("database exception Not found entry with filter" in str(context.exception))
+ self.assertTrue(
+ "database exception Not found entry with filter"
+ in str(context.exception)
+ )
# test vertical scale executes sucessfully
# @patch("osm_lcm.ng_ro.status.response")
diff --git a/osm_lcm/vim_sdn.py b/osm_lcm/vim_sdn.py
index c81969a..1e5458c 100644
--- a/osm_lcm/vim_sdn.py
+++ b/osm_lcm/vim_sdn.py
@@ -1159,7 +1159,11 @@
k8s_deploy_methods = db_k8scluster.get("deployment_methods", {})
# for backwards compatibility and all-false case
if not any(k8s_deploy_methods.values()):
- k8s_deploy_methods = {"helm-chart": True, "juju-bundle": True, "helm-chart-v3": True}
+ k8s_deploy_methods = {
+ "helm-chart": True,
+ "juju-bundle": True,
+ "helm-chart-v3": True,
+ }
deploy_methods = tuple(filter(k8s_deploy_methods.get, k8s_deploy_methods))
for task_name in deploy_methods:
diff --git a/tox.ini b/tox.ini
index 183bf6c..f1aeb9b 100644
--- a/tox.ini
+++ b/tox.ini
@@ -33,8 +33,8 @@
deps = black
skip_install = true
commands =
- - black --check --diff osm_lcm/
- - black --check --diff setup.py
+ black --check --diff osm_lcm/
+ black --check --diff setup.py
#######################################################################################