from copy import copy, deepcopy
from osm_nbi.validation import validate_input, ValidationError, ns_instantiate, ns_terminate, ns_action, ns_scale,\
nsi_instantiate
-from osm_nbi.base_topic import BaseTopic, EngineException, get_iterable, deep_get
+from osm_nbi.base_topic import BaseTopic, EngineException, get_iterable, deep_get, increment_ip_mac
# from descriptor_topics import DescriptorTopic
from yaml import safe_dump
from osm_common.dbbase import DbException
class NsrTopic(BaseTopic):
topic = "nsrs"
topic_msg = "ns"
+ quota_name = "ns_instances"
schema_new = ns_instantiate
def __init__(self, db, fs, msg, auth):
nsd = self.db.get_one("nsds", _filter)
del _filter["_id"]
+ # check NSD is not disabled
+ step = "checking nsdOperationalState"
+ if nsd["_admin"]["operationalState"] == "DISABLED":
+ raise EngineException("nsd with id '{}' is DISABLED, and thus cannot be used to create "
+ "a network service".format(ns_request["nsdId"]), http_code=HTTPStatus.CONFLICT)
+
nsr_id = str(uuid4())
now = time()
"deploymentStatus": None,
"configurationStatus": None,
"vcaStatus": None,
- "nsd": {k: v for k, v in nsd.items() if k in ("vld", "_id", "id", "constituent-vnfd", "name")},
+ "nsd": {k: v for k, v in nsd.items() if k in ("vld", "_id", "id", "constituent-vnfd", "name",
+ "ns-configuration")},
"datacenter": ns_request["vimAccountId"],
"resource-orchestrator": "osmopenmano",
"description": ns_request.get("nsDescription", ""),
"ssh-authorized-key": ns_request.get("ssh_keys"), # TODO remove
}
ns_request["nsr_id"] = nsr_id
+ if ns_request and ns_request.get("config-units"):
+ nsr_descriptor["config-units"] = ns_request["config-units"]
+
# Create vld
if nsd.get("vld"):
nsr_descriptor["vld"] = nsd["vld"]
additional_params, vnf_params = self._format_additional_params(ns_request,
member_vnf["member-vnf-index"],
descriptor=vnfd)
- vnf_k8s_namespace = ns_k8s_namespace
- if vnf_params and vnf_params.get("k8s-namespace"):
- vnf_k8s_namespace = vnf_params["k8s-namespace"]
vnfr_descriptor = {
"id": vnfr_id,
"_id": vnfr_id,
"connection-point": [],
"ip-address": None, # mgmt-interface filled by LCM
}
+ vnf_k8s_namespace = ns_k8s_namespace
+ if vnf_params:
+ if vnf_params.get("k8s-namespace"):
+ vnf_k8s_namespace = vnf_params["k8s-namespace"]
+ if vnf_params.get("config-units"):
+ vnfr_descriptor["config-units"] = vnf_params["config-units"]
# Create vld
if vnfd.get("internal-vld"):
"ip-address": None, # mgmt-interface filled by LCM
"k8s-cluster": {},
}
+ if kdu_params and kdu_params.get("config-units"):
+ kdur["config-units"] = kdu_params["config-units"]
for k8s_type in ("helm-chart", "juju-bundle"):
if kdu.get(k8s_type):
kdur[k8s_type] = kdu_model or kdu[k8s_type]
vnfr_descriptor["kdur"].append(kdur)
for vdu in vnfd.get("vdu", ()):
- additional_params, _ = self._format_additional_params(ns_request, member_vnf["member-vnf-index"],
- vdu_id=vdu["id"], descriptor=vnfd)
+ additional_params, vdu_params = self._format_additional_params(
+ ns_request, member_vnf["member-vnf-index"], vdu_id=vdu["id"], descriptor=vnfd)
vdur = {
"vdu-id-ref": vdu["id"],
# TODO "name": "" Name of the VDU in the VIM
"interfaces": [],
"additionalParams": additional_params
}
+ if vdu_params and vdu_params.get("config-units"):
+ vdur["config-units"] = vdu_params["config-units"]
+ if deep_get(vdu, ("supplemental-boot-data", "boot-data-drive")):
+ vdur["boot-data-drive"] = vdu["supplemental-boot-data"]["boot-data-drive"]
if vdu.get("pdu-type"):
vdur["pdu-type"] = vdu["pdu-type"]
+ vdur["name"] = vdu["pdu-type"]
# TODO volumes: name, volume-id
for icp in vdu.get("internal-connection-point", ()):
vdu_icp = {
"id": icp["id"],
"connection-point-id": icp["id"],
"name": icp.get("name"),
- # "ip-address", "mac-address" # filled by LCM
- # vim-id # TODO it would be nice having a vim port id
}
vdur["internal-connection-point"].append(vdu_icp)
for iface in vdu.get("interface", ()):
vdu_iface = {
- "name": iface.get("name"),
- # "ip-address", "mac-address" # filled by LCM
- # vim-id # TODO it would be nice having a vim port id
- }
+ x: iface[x] for x in ("name", "ip-address", "mac-address", "internal-connection-point-ref",
+ "external-connection-point-ref") if iface.get(x) is not None}
if vnfd_mgmt_cp and iface.get("external-connection-point-ref") == vnfd_mgmt_cp:
vdu_iface["mgmt-vnf"] = True
if iface.get("mgmt-interface"):
for vnfd_ivld_icp in get_iterable(vnfd_ivld.get("internal-connection-point")):
if vnfd_ivld_icp.get("id-ref") == iface["internal-connection-point-ref"]:
vdu_iface["vnf-vld-id"] = vnfd_ivld["id"]
+ if vnfd_ivld_icp.get("ip-address"):
+ vdu_iface["ip-address"] = vnfd_ivld_icp["ip-address"]
break
else:
continue
for index in range(0, count):
if index:
vdur = deepcopy(vdur)
+ for iface in vdur["interfaces"]:
+ if iface.get("ip-address"):
+ iface["ip-address"] = increment_ip_mac(iface["ip-address"])
+ if iface.get("mac-address"):
+ iface["mac-address"] = increment_ip_mac(iface["mac-address"])
+
vdur["_id"] = str(uuid4())
vdur["count-index"] = index
vnfr_descriptor["vdur"].append(vdur)
return nsr_id, None
except (ValidationError, EngineException, DbException, MsgException, FsException) as e:
- raise type(e)("{} while '{}".format(e, step), http_code=e.http_code)
+ raise type(e)("{} while '{}'".format(e, step), http_code=e.http_code)
def edit(self, session, _id, indata=None, kwargs=None, content=None):
raise EngineException("Method edit called directly", HTTPStatus.INTERNAL_SERVER_ERROR)
for in_ivld in get_iterable(in_vnfd.get("internal-vld")):
for ivld in get_iterable(vnfd.get("internal-vld")):
- if in_ivld["name"] == ivld["name"] or in_ivld["name"] == ivld["id"]:
+ if in_ivld["name"] in (ivld["id"], ivld.get("name")):
for in_icp in get_iterable(in_ivld.get("internal-connection-point")):
for icp in ivld["internal-connection-point"]:
if in_icp["id-ref"] == icp["id-ref"]:
if vnf_inst_params.get("vimAccountId"):
vim_account = vnf_inst_params.get("vimAccountId")
+ # get vnf.vdu.interface instantiation params to update vnfr.vdur.interfaces ip, mac
+ for vdu_inst_param in get_iterable(vnf_inst_params.get("vdu")):
+ for vdur_index, vdur in enumerate(vnfr["vdur"]):
+ if vdu_inst_param["id"] != vdur["vdu-id-ref"]:
+ continue
+ for iface_inst_param in get_iterable(vdu_inst_param.get("interface")):
+ iface_index, _ = next(i for i in enumerate(vdur["interfaces"])
+ if i[1]["name"] == iface_inst_param["name"])
+ vnfr_update_text = "vdur.{}.interfaces.{}".format(vdur_index, iface_index)
+ if iface_inst_param.get("ip-address"):
+ vnfr_update[vnfr_update_text + ".ip-address"] = increment_ip_mac(
+ iface_inst_param.get("ip-address"), vdur.get("count-index", 0))
+ if iface_inst_param.get("mac-address"):
+ vnfr_update[vnfr_update_text + ".mac-address"] = increment_ip_mac(
+ iface_inst_param.get("mac-address"), vdur.get("count-index", 0))
+ # get vnf.internal-vld.internal-connection-point instantiation params to update vnfr.vdur.interfaces
+ # TODO update vld with the ip-profile
+ for ivld_inst_param in get_iterable(vnf_inst_params.get("internal-vld")):
+ for icp_inst_param in get_iterable(ivld_inst_param.get("internal-connection-point")):
+ # look for iface
+ for vdur_index, vdur in enumerate(vnfr["vdur"]):
+ for iface_index, iface in enumerate(vdur["interfaces"]):
+ if iface.get("internal-connection-point-ref") == icp_inst_param["id-ref"]:
+ vnfr_update_text = "vdur.{}.interfaces.{}".format(vdur_index, iface_index)
+ if icp_inst_param.get("ip-address"):
+ vnfr_update[vnfr_update_text + ".ip-address"] = increment_ip_mac(
+ icp_inst_param.get("ip-address"), vdur.get("count-index", 0))
+ if icp_inst_param.get("mac-address"):
+ vnfr_update[vnfr_update_text + ".mac-address"] = increment_ip_mac(
+ icp_inst_param.get("mac-address"), vdur.get("count-index", 0))
+ break
+ # get ip address from instantiation parameters.vld.vnfd-connection-point-ref
+ for vld_inst_param in get_iterable(indata.get("vld")):
+ for vnfcp_inst_param in get_iterable(vld_inst_param.get("vnfd-connection-point-ref")):
+ if vnfcp_inst_param["member-vnf-index-ref"] != member_vnf_index:
+ continue
+ # look for iface
+ for vdur_index, vdur in enumerate(vnfr["vdur"]):
+ for iface_index, iface in enumerate(vdur["interfaces"]):
+ if iface.get("external-connection-point-ref") == \
+ vnfcp_inst_param["vnfd-connection-point-ref"]:
+ vnfr_update_text = "vdur.{}.interfaces.{}".format(vdur_index, iface_index)
+ if vnfcp_inst_param.get("ip-address"):
+ vnfr_update[vnfr_update_text + ".ip-address"] = increment_ip_mac(
+ vnfcp_inst_param.get("ip-address"), vdur.get("count-index", 0))
+ if vnfcp_inst_param.get("mac-address"):
+ vnfr_update[vnfr_update_text + ".mac-address"] = increment_ip_mac(
+ vnfcp_inst_param.get("mac-address"), vdur.get("count-index", 0))
+ break
+
vnfr_update["vim-account-id"] = vim_account
vnfr_update_rollback["vim-account-id"] = vnfr.get("vim-account-id")
db_filter["_admin.nsrs-detailed-list.ANYINDEX.nsrId"] = nsr_id
nsis = self.db.get_one("nsis", db_filter, fail_on_empty=False, fail_on_more=False)
if nsis:
- raise EngineException("The NS instance {} cannot be terminate because is used by the slice {}".format(
+ raise EngineException("The NS instance {} cannot be terminated because is used by the slice {}".format(
nsr_id, nsis["_id"]), http_code=HTTPStatus.CONFLICT)
try:
class NsiTopic(BaseTopic):
topic = "nsis"
topic_msg = "nsi"
+ quota_name = "slice_instances"
def __init__(self, db, fs, msg, auth):
BaseTopic.__init__(self, db, fs, msg, auth)
_filter = self._get_project_filter(session)
_filter["_id"] = slice_request["nstId"]
nstd = self.db.get_one("nsts", _filter)
+ # check NST is not disabled
+ step = "checking NST operationalState"
+ if nstd["_admin"]["operationalState"] == "DISABLED":
+ raise EngineException("nst with id '{}' is DISABLED, and thus cannot be used to create a netslice "
+ "instance".format(slice_request["nstId"]), http_code=HTTPStatus.CONFLICT)
del _filter["_id"]
+ # check NST is not disabled
+ step = "checking operationalState"
+ if nstd["_admin"]["operationalState"] == "DISABLED":
+ raise EngineException("nst with id '{}' is DISABLED, and thus cannot be used to create "
+ "a network slice".format(slice_request["nstId"]), http_code=HTTPStatus.CONFLICT)
+
nstd.pop("_admin", None)
nstd_id = nstd.pop("_id", None)
nsi_id = str(uuid4())
if nstId == netslice_subnet["id"]:
nsd_id = netslice_subnet["nsd-ref"]
if nsd_id not in nsds:
- nsds[nsd_id] = self.db.get_one("nsds", {"id": nsd_id})
+ _filter = self._get_project_filter(session)
+ _filter["id"] = nsd_id
+ nsds[nsd_id] = self.db.get_one("nsds", _filter)
return nsds[nsd_id]
else:
raise EngineException("Invalid parameter nstId='{}' is not one of the "
_filter = self._get_project_filter(session)
_filter["_id"] = netsliceInstanceId
nsir = self.db.get_one("nsis", _filter)
+ logging_prefix = "nsi={} {} ".format(netsliceInstanceId, operation)
del _filter["_id"]
# initial checking
nslcmops = []
# nslcmops_item = None
for index, nsr_item in enumerate(nsrs_list):
- nsi = None
+ nsr_id = nsr_item["nsrId"]
if nsr_item.get("shared"):
_filter["_admin.nsrs-detailed-list.ANYINDEX.shared"] = True
- _filter["_admin.nsrs-detailed-list.ANYINDEX.nsrId"] = nsr_item["nsrId"]
+ _filter["_admin.nsrs-detailed-list.ANYINDEX.nsrId"] = nsr_id
_filter["_admin.nsrs-detailed-list.ANYINDEX.nslcmop_instantiate.ne"] = None
_filter["_id.ne"] = netsliceInstanceId
nsi = self.db.get_one("nsis", _filter, fail_on_empty=False, fail_on_more=False)
if operation == "terminate":
_update = {"_admin.nsrs-detailed-list.{}.nslcmop_instantiate".format(index): None}
self.db.set_one("nsis", {"_id": nsir["_id"]}, _update)
-
- # looks the first nsi fulfilling the conditions but not being the current NSIR
- if nsi:
- nsi_admin_shared = nsi["_admin"]["nsrs-detailed-list"]
- for nsi_nsr_item in nsi_admin_shared:
- if nsi_nsr_item["nsd-id"] == nsr_item["nsd-id"] and nsi_nsr_item["shared"]:
- self.add_shared_nsr_2vld(nsir, nsr_item)
- nslcmops.append(nsi_nsr_item["nslcmop_instantiate"])
- _update = {"_admin.nsrs-detailed-list.{}".format(index): nsi_nsr_item}
- self.db.set_one("nsis", {"_id": nsir["_id"]}, _update)
- break
- # continue to not create nslcmop since nsrs is shared and nsrs was created
- continue
- else:
- self.add_shared_nsr_2vld(nsir, nsr_item)
+ if nsi: # other nsi is using this nsr and it needs this nsr instantiated
+ continue # do not create nsilcmop
+ else: # instantiate
+ # looks the first nsi fulfilling the conditions but not being the current NSIR
+ if nsi:
+ nsi_nsr_item = next(n for n in nsi["_admin"]["nsrs-detailed-list"] if
+ n["nsrId"] == nsr_id and n["shared"] and
+ n["nslcmop_instantiate"])
+ self.add_shared_nsr_2vld(nsir, nsr_item)
+ nslcmops.append(nsi_nsr_item["nslcmop_instantiate"])
+ _update = {"_admin.nsrs-detailed-list.{}".format(index): nsi_nsr_item}
+ self.db.set_one("nsis", {"_id": nsir["_id"]}, _update)
+ # continue to not create nslcmop since nsrs is shared and nsrs was created
+ continue
+ else:
+ self.add_shared_nsr_2vld(nsir, nsr_item)
+ # create operation
try:
- service = self.db.get_one("nsrs", {"_id": nsr_item["nsrId"]})
indata_ns = {
"lcmOperationType": operation,
- "nsInstanceId": service["_id"],
+ "nsInstanceId": nsr_id,
# Including netslice_id in the ns instantiate Operation
"netsliceInstanceId": netsliceInstanceId,
}
if operation == "instantiate":
+ service = self.db.get_one("nsrs", {"_id": nsr_id})
indata_ns.update(service["instantiate_params"])
# Creating NS_LCM_OP with the flag slice_object=True to not trigger the service instantiation
# message via kafka bus
- nslcmop, _ = self.nsi_NsLcmOpTopic.new(rollback, session, indata_ns, kwargs, headers,
+ nslcmop, _ = self.nsi_NsLcmOpTopic.new(rollback, session, indata_ns, None, headers,
slice_object=True)
nslcmops.append(nslcmop)
- if operation == "terminate":
- nslcmop = None
- _update = {"_admin.nsrs-detailed-list.{}.nslcmop_instantiate".format(index): nslcmop}
- self.db.set_one("nsis", {"_id": nsir["_id"]}, _update)
+ if operation == "instantiate":
+ _update = {"_admin.nsrs-detailed-list.{}.nslcmop_instantiate".format(index): nslcmop}
+ self.db.set_one("nsis", {"_id": nsir["_id"]}, _update)
except (DbException, EngineException) as e:
if e.http_code == HTTPStatus.NOT_FOUND:
- self.logger.info("HTTPStatus.NOT_FOUND")
+ self.logger.info(logging_prefix + "skipping NS={} because not found".format(nsr_id))
pass
else:
raise