if vnf_RO.get("ip_address"):
db_vnfr["ip-address"] = vnfr_update["ip-address"] = vnf_RO["ip_address"].split(";")[0]
elif not db_vnfr.get("ip-address"):
- raise LcmExceptionNoMgmtIP("ns member_vnf_index '{}' has no IP address".format(vnf_index))
+ if db_vnfr.get("vdur"): # if not VDUs, there is not ip_address
+ raise LcmExceptionNoMgmtIP("ns member_vnf_index '{}' has no IP address".format(vnf_index))
for vdu_index, vdur in enumerate(get_iterable(db_vnfr, "vdur")):
vdur_RO_count_index = 0
RO_descriptor_number = 0 # number of descriptors created at RO
vnf_index_2_RO_id = {} # map between vnfd/nsd id to the id used at RO
start_deploy = time()
- vdu_flag = False # If any of the VNFDs has VDUs
ns_params = db_nslcmop.get("operationParams")
if ns_params and ns_params.get("timeout_ns_deploy"):
timeout_ns_deploy = ns_params["timeout_ns_deploy"]
else:
timeout_ns_deploy = self.timeout.get("ns_deploy", self.timeout_ns_deploy)
+ # Check for and optionally request placement optimization. Database will be updated if placement activated
+ await self.do_placement(logging_text, db_nslcmop, db_vnfrs)
+
# deploy RO
# get vnfds, instantiate at RO
for c_vnf in nsd.get("constituent-vnfd", ()):
member_vnf_index = c_vnf["member-vnf-index"]
vnfd = db_vnfds_ref[c_vnf['vnfd-id-ref']]
- if vnfd.get("vdu"):
- vdu_flag = True
vnfd_ref = vnfd["id"]
step = db_nsr_update["_admin.deployed.RO.detailed-status"] = "Creating vnfd='{}' member_vnf_index='{}' at" \
" RO".format(vnfd_ref, member_vnf_index)
elif ns_status == "ACTIVE":
step = detailed_status = "Waiting for management IP address reported by the VIM. Updating VNFRs"
try:
- if vdu_flag:
- self.ns_update_vnfr(db_vnfrs, desc)
+ self.ns_update_vnfr(db_vnfrs, desc)
break
except LcmExceptionNoMgmtIP:
pass
vdur = next((x for x in get_iterable(db_vnfr, "vdur")
if x.get("vdu-id-ref") == vdu_id and x.get("count-index") == vdu_index), None)
+ if not vdur and len(db_vnfr.get("vdur", ())) == 1: # If only one, this should be the target vdu
+ vdur = db_vnfr["vdur"][0]
if not vdur:
- raise LcmException("Not found vnfr_id={}, vdu_index={}, vdu_index={}".format(
- vnfr_id, vdu_id, vdu_index
- ))
+ raise LcmException("Not found vnfr_id={}, vdu_id={}, vdu_index={}".format(vnfr_id, vdu_id,
+ vdu_index))
- if vdur.get("status") == "ACTIVE":
+ if vdur.get("pdu-type") or vdur.get("status") == "ACTIVE":
ip_address = vdur.get("ip-address")
if not ip_address:
continue
if not target_vdu_id:
continue
- # self.logger.debug(logging_text + "IP address={}".format(ip_address))
-
# inject public key into machine
if pub_key and user:
# self.logger.debug(logging_text + "Inserting RO key")
+ if vdur.get("pdu-type"):
+ self.logger.error(logging_text + "Cannot inject ssh-key to a PDU")
+ return ip_address
try:
ro_vm_id = "{}-{}".format(db_vnfr["member-vnf-index-ref"], target_vdu_id) # TODO add vdu_index
result_dict = await self.RO.create_action(
self.logger.warn('Error writing configuration status={}, ns={}, vca_index={}: {}'
.format(status, nsr_id, vca_index, e))
+ async def do_placement(self, logging_text, db_nslcmop, db_vnfrs):
+     """
+     Request placement optimization from PLA when the operation asks for it,
+     and persist the suggested vim-account assignment into each affected vnfr.
+
+     Sends a "get_placement" message on the "pla" topic, then polls the
+     nslcmop record until PLA writes its answer under '_admin.pla'
+     (update_nsrs_with_pla_result is the writer side of that handshake).
+
+     :param logging_text: prefix for log messages
+     :param db_nslcmop: nslcmop database record; ('operationParams',
+         'placement-engine') selects the engine ("PLA" enables this logic)
+     :param db_vnfrs: dict of vnfr records indexed by member-vnf-index
+     :raises LcmException: if PLA does not answer within the poll window
+     """
+     placement_engine = deep_get(db_nslcmop, ('operationParams', 'placement-engine'))
+     if placement_engine == "PLA":
+         # Use '_id' consistently: it is the key read everywhere else in this
+         # method; 'id' is not guaranteed to be present in the record
+         nslcmop_id = db_nslcmop["_id"]
+         self.logger.debug(logging_text + "Invoke placement optimization for nslcmopId={}".format(nslcmop_id))
+         await self.msg.aiowrite("pla", "get_placement", {'nslcmopId': nslcmop_id}, loop=self.loop)
+         db_poll_interval = 5
+         wait = db_poll_interval * 4   # total wait: 20 seconds
+         pla_result = None
+         while not pla_result and wait >= 0:
+             await asyncio.sleep(db_poll_interval)
+             wait -= db_poll_interval
+             # re-read the record: PLA answers out-of-band via the database
+             db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
+             pla_result = deep_get(db_nslcmop, ('_admin', 'pla'))
+
+         if not pla_result:
+             raise LcmException("Placement timeout for nslcmopId={}".format(nslcmop_id))
+
+         for pla_vnf in pla_result['vnf']:
+             vnfr = db_vnfrs.get(pla_vnf['member-vnf-index'])
+             if not pla_vnf.get('vimAccountId') or not vnfr:
+                 continue
+             self.db.set_one("vnfrs", {"_id": vnfr["_id"]}, {"vim-account-id": pla_vnf['vimAccountId']})
+     return
+
+ def update_nsrs_with_pla_result(self, params):
+     """
+     Store a placement result received from PLA into the corresponding
+     nslcmop record under '_admin.pla', where do_placement polls for it.
+
+     :param params: message content; expects ('placement', 'nslcmopId')
+     """
+     # Pre-bind so the except clause can always log it; otherwise a failure
+     # inside deep_get would raise NameError here and mask the real error
+     nslcmop_id = None
+     try:
+         nslcmop_id = deep_get(params, ('placement', 'nslcmopId'))
+         self.update_db_2("nslcmops", nslcmop_id, {"_admin.pla": params.get('placement')})
+     except Exception as e:
+         self.logger.warn('Update failed for nslcmop_id={}:{}'.format(nslcmop_id, e))
+
async def instantiate(self, nsr_id, nslcmop_id):
"""
exc_info=True)
finally:
if exc:
+ instantiated_ok = False
if db_nsr:
db_nsr_update["detailed-status"] = "ERROR {}: {}".format(step, exc)
db_nsr_update["operational-status"] = "failed"
except Exception:
# it is not a file
pass
+
+ step = "Prepare instantiate KDU {} in k8s cluster {}".format(
+ kdur["kdu-name"], kdur["k8s-cluster"]["id"])
+
try:
if not error_text:
cluster_uuid = _get_cluster_id(kdur["k8s-cluster"]["id"], k8sclustertype_full)
+
+ updated_cluster_list = []
+ if k8sclustertype == "chart" and cluster_uuid not in updated_cluster_list:
+ del_repo_list, added_repo_dict = await asyncio.ensure_future(
+ self.k8sclusterhelm.synchronize_repos(cluster_uuid=cluster_uuid))
+ if del_repo_list or added_repo_dict:
+ unset = {'_admin.helm_charts_added.' + item: None for item in del_repo_list}
+ updated = {'_admin.helm_charts_added.' +
+ item: name for item, name in added_repo_dict.items()}
+ self.logger.debug(logging_text + "repos synchronized, to_delete: {}, to_add: {}".
+ format(del_repo_list, added_repo_dict))
+ self.db.set_one("k8sclusters", {"_id": kdur["k8s-cluster"]["id"]},
+ updated, unset=unset)
+ updated_cluster_list.append(cluster_uuid)
+
except LcmException as e:
error_text = str(e)
deployed_ok = False
db_dict = {"collection": "nsrs", "filter": {"_id": nsr_id}, "path": "_admin.deployed.K8s."
"{}".format(index)}
+
if k8sclustertype == "chart":
task = asyncio.ensure_future(
self.k8sclusterhelm.install(cluster_uuid=cluster_uuid, kdu_model=kdumodel, atomic=True,
return_when=asyncio.FIRST_COMPLETED)
if not done_list: # timeout
for task in pending_list:
- db_nsr_update[pending_tasks(task) + "detailed-status"] = "Timeout"
+ db_nsr_update[pending_tasks[task] + "detailed-status"] = "Timeout"
deployed_ok = False
break
for task in done_list: