from http import HTTPStatus
from time import time
from copy import copy, deepcopy
-from osm_nbi.validation import validate_input, ValidationError, ns_instantiate, ns_action, ns_scale, nsi_instantiate
-from osm_nbi.base_topic import BaseTopic, EngineException, get_iterable
+from osm_nbi.validation import validate_input, ValidationError, ns_instantiate, ns_terminate, ns_action, ns_scale,\
+ nsi_instantiate
+from osm_nbi.base_topic import BaseTopic, EngineException, get_iterable, deep_get
# from descriptor_topics import DescriptorTopic
from yaml import safe_dump
from osm_common.dbbase import DbException
class NsrTopic(BaseTopic):
topic = "nsrs"
topic_msg = "ns"
+ quota_name = "ns_instances"
schema_new = ns_instantiate
def __init__(self, db, fs, msg, auth):
"Launch 'terminate' operation first; or force deletion".format(_id),
http_code=HTTPStatus.CONFLICT)
- def delete_extra(self, session, _id, db_content):
+ def delete_extra(self, session, _id, db_content, not_send_msg=None):
"""
Deletes associated nslcmops and vnfrs from database. Deletes associated filesystem.
Set usageState of pdu, vnfd, nsd
:param session: contains "username", "admin", "force", "public", "project_id", "set_project"
:param _id: server internal id
:param db_content: The database content of the descriptor
+ :param not_send_msg: To not send message (False) or store content (list) instead
:return: None if ok or raises EngineException with the problem
"""
self.fs.file_delete(_id, ignore_non_exist=True)
return formated_request
@staticmethod
- def _format_addional_params(ns_request, member_vnf_index=None, descriptor=None):
+ def _format_additional_params(ns_request, member_vnf_index=None, vdu_id=None, kdu_name=None, descriptor=None):
"""
Get and format user additional params for NS or VNF
:param ns_request: User instantiation additional parameters
:param member_vnf_index: None for extract NS params, or member_vnf_index to extract VNF params
:param descriptor: If not None it check that needed parameters of descriptor are supplied
- :return: a formated copy of additional params or None if not supplied
+ :return: tuple with a formatted copy of additional params or None if not supplied, plus other parameters
"""
additional_params = None
+ other_params = None
if not member_vnf_index:
additional_params = copy(ns_request.get("additionalParamsForNs"))
where_ = "additionalParamsForNs"
elif ns_request.get("additionalParamsForVnf"):
- for additionalParamsForVnf in get_iterable(ns_request.get("additionalParamsForVnf")):
- if additionalParamsForVnf["member-vnf-index"] == member_vnf_index:
- additional_params = copy(additionalParamsForVnf.get("additionalParams"))
- where_ = "additionalParamsForVnf[member-vnf-index={}]".format(
- additionalParamsForVnf["member-vnf-index"])
- break
+ where_ = "additionalParamsForVnf[member-vnf-index={}]".format(member_vnf_index)
+ item = next((x for x in ns_request["additionalParamsForVnf"] if x["member-vnf-index"] == member_vnf_index),
+ None)
+ if item:
+ if not vdu_id and not kdu_name:
+ other_params = item
+ additional_params = copy(item.get("additionalParams")) or {}
+ if vdu_id and item.get("additionalParamsForVdu"):
+ item_vdu = next((x for x in item["additionalParamsForVdu"] if x["vdu_id"] == vdu_id), None)
+ other_params = item_vdu
+ if item_vdu and item_vdu.get("additionalParams"):
+ where_ += ".additionalParamsForVdu[vdu_id={}]".format(vdu_id)
+ additional_params = item_vdu["additionalParams"]
+ if kdu_name:
+ additional_params = {}
+ if item.get("additionalParamsForKdu"):
+ item_kdu = next((x for x in item["additionalParamsForKdu"] if x["kdu_name"] == kdu_name), None)
+ other_params = item_kdu
+ if item_kdu and item_kdu.get("additionalParams"):
+ where_ += ".additionalParamsForKdu[kdu_name={}]".format(kdu_name)
+ additional_params = item_kdu["additionalParams"]
+
if additional_params:
for k, v in additional_params.items():
- # BEGIN Check that additional parameter names are valid Jinja2 identifiers
- if not match('^[a-zA-Z_][a-zA-Z0-9_]*$', k):
+ # BEGIN Check that additional parameter names are valid Jinja2 identifiers if target is not Kdu
+ if not kdu_name and not match('^[a-zA-Z_][a-zA-Z0-9_]*$', k):
raise EngineException("Invalid param name at {}:{}. Must contain only alphanumeric characters "
"and underscores, and cannot start with a digit"
.format(where_, k))
# check that enough parameters are supplied for the initial-config-primitive
# TODO: check for cloud-init
if member_vnf_index:
- if descriptor.get("vnf-configuration"):
- for initial_primitive in get_iterable(
- descriptor["vnf-configuration"].get("initial-config-primitive")):
- for param in get_iterable(initial_primitive.get("parameter")):
- if param["value"].startswith("<") and param["value"].endswith(">"):
- if param["value"] in ("<rw_mgmt_ip>", "<VDU_SCALE_INFO>"):
- continue
- if not additional_params or param["value"][1:-1] not in additional_params:
- raise EngineException("Parameter '{}' needed for vnfd[id={}]:vnf-configuration:"
- "initial-config-primitive[name={}] not supplied".
- format(param["value"], descriptor["id"],
- initial_primitive["name"]))
+ if kdu_name:
+ initial_primitives = None
+ elif vdu_id:
+ vdud = next(x for x in descriptor["vdu"] if x["id"] == vdu_id)
+ initial_primitives = deep_get(vdud, ("vdu-configuration", "initial-config-primitive"))
+ else:
+ initial_primitives = deep_get(descriptor, ("vnf-configuration", "initial-config-primitive"))
+ else:
+ initial_primitives = deep_get(descriptor, ("ns-configuration", "initial-config-primitive"))
- return additional_params
+ for initial_primitive in get_iterable(initial_primitives):
+ for param in get_iterable(initial_primitive.get("parameter")):
+ if param["value"].startswith("<") and param["value"].endswith(">"):
+ if param["value"] in ("<rw_mgmt_ip>", "<VDU_SCALE_INFO>", "<ns_config_info>"):
+ continue
+ if not additional_params or param["value"][1:-1] not in additional_params:
+ raise EngineException("Parameter '{}' needed for vnfd[id={}]:vnf-configuration:"
+ "initial-config-primitive[name={}] not supplied".
+ format(param["value"], descriptor["id"],
+ initial_primitive["name"]))
+
+ return additional_params or None, other_params or None
def new(self, rollback, session, indata=None, kwargs=None, headers=None):
"""
self._update_input_with_kwargs(ns_request, kwargs)
self._validate_input_new(ns_request, session["force"])
- # look for nsr
+ # look for nsd
step = "getting nsd id='{}' from database".format(ns_request.get("nsdId"))
_filter = self._get_project_filter(session)
_filter["_id"] = ns_request["nsdId"]
nsd = self.db.get_one("nsds", _filter)
del _filter["_id"]
+ # check NSD is not disabled
+ step = "checking nsdOperationalState"
+ if nsd["_admin"]["operationalState"] == "DISABLED":
+ raise EngineException("nsd with id '{}' is DISABLED, and thus cannot be used to create "
+ "a network service".format(ns_request["nsdId"]), http_code=HTTPStatus.CONFLICT)
+
nsr_id = str(uuid4())
now = time()
step = "filling nsr from input data"
+ additional_params, _ = self._format_additional_params(ns_request, descriptor=nsd)
+
+        # Obtain k8s-namespace from ns_request or additionalParamsForNs. By default, use the project_id
+ ns_k8s_namespace = session["project_id"][0] if session["project_id"] else None
+ if ns_request and ns_request.get("k8s-namespace"):
+ ns_k8s_namespace = ns_request["k8s-namespace"]
+ if additional_params and additional_params.get("k8s-namespace"):
+ ns_k8s_namespace = additional_params["k8s-namespace"]
+
nsr_descriptor = {
"name": ns_request["nsName"],
"name-ref": ns_request["nsName"],
"short-name": ns_request["nsName"],
"admin-status": "ENABLED",
- "nsd": nsd,
+ "nsState": "NOT_INSTANTIATED",
+ "currentOperation": "IDLE",
+ "currentOperationID": None,
+ "errorDescription": None,
+ "errorDetail": None,
+ "deploymentStatus": None,
+ "configurationStatus": None,
+ "vcaStatus": None,
+ "nsd": {k: v for k, v in nsd.items() if k in ("vld", "_id", "id", "constituent-vnfd", "name",
+ "ns-configuration")},
"datacenter": ns_request["vimAccountId"],
"resource-orchestrator": "osmopenmano",
"description": ns_request.get("nsDescription", ""),
"nsd-id": nsd["_id"],
"vnfd-id": [],
"instantiate_params": self._format_ns_request(ns_request),
- "additionalParamsForNs": self._format_addional_params(ns_request),
+ "additionalParamsForNs": additional_params,
"ns-instance-config-ref": nsr_id,
"id": nsr_id,
"_id": nsr_id,
"ssh-authorized-key": ns_request.get("ssh_keys"), # TODO remove
}
ns_request["nsr_id"] = nsr_id
+ if ns_request and ns_request.get("config-units"):
+ nsr_descriptor["config-units"] = ns_request["config-units"]
+
# Create vld
if nsd.get("vld"):
- nsr_descriptor["vld"] = []
- for nsd_vld in nsd.get("vld"):
- nsr_descriptor["vld"].append(
- {key: nsd_vld[key] for key in ("id", "vim-network-name", "vim-network-id") if key in nsd_vld})
+ nsr_descriptor["vld"] = nsd["vld"]
# Create VNFR
needed_vnfds = {}
step = "filling vnfr vnfd-id='{}' constituent-vnfd='{}'".format(
member_vnf["vnfd-id-ref"], member_vnf["member-vnf-index"])
vnfr_id = str(uuid4())
+ additional_params, vnf_params = self._format_additional_params(ns_request,
+ member_vnf["member-vnf-index"],
+ descriptor=vnfd)
vnfr_descriptor = {
"id": vnfr_id,
"_id": vnfr_id,
"nsr-id-ref": nsr_id,
"member-vnf-index-ref": member_vnf["member-vnf-index"],
- "additionalParamsForVnf": self._format_addional_params(ns_request, member_vnf["member-vnf-index"],
- vnfd),
+ "additionalParamsForVnf": additional_params,
"created-time": now,
# "vnfd": vnfd, # at OSM model.but removed to avoid data duplication TODO: revise
"vnfd-ref": vnfd_id,
"connection-point": [],
"ip-address": None, # mgmt-interface filled by LCM
}
+ vnf_k8s_namespace = ns_k8s_namespace
+ if vnf_params:
+ if vnf_params.get("k8s-namespace"):
+ vnf_k8s_namespace = vnf_params["k8s-namespace"]
+ if vnf_params.get("config-units"):
+ vnfr_descriptor["config-units"] = vnf_params["config-units"]
# Create vld
if vnfd.get("internal-vld"):
# vim-id # TODO it would be nice having a vim port id
}
vnfr_descriptor["connection-point"].append(vnf_cp)
+
+ # Create k8s-cluster information
+ if vnfd.get("k8s-cluster"):
+ vnfr_descriptor["k8s-cluster"] = vnfd["k8s-cluster"]
+ for net in get_iterable(vnfr_descriptor["k8s-cluster"].get("nets")):
+ if net.get("external-connection-point-ref"):
+ for nsd_vld in get_iterable(nsd.get("vld")):
+ for nsd_vld_cp in get_iterable(nsd_vld.get("vnfd-connection-point-ref")):
+ if nsd_vld_cp.get("vnfd-connection-point-ref") == \
+ net["external-connection-point-ref"] and \
+ nsd_vld_cp.get("member-vnf-index-ref") == member_vnf["member-vnf-index"]:
+ net["ns-vld-id"] = nsd_vld["id"]
+ break
+ else:
+ continue
+ break
+ elif net.get("internal-connection-point-ref"):
+ for vnfd_ivld in get_iterable(vnfd.get("internal-vld")):
+ for vnfd_ivld_icp in get_iterable(vnfd_ivld.get("internal-connection-point")):
+ if vnfd_ivld_icp.get("id-ref") == net["internal-connection-point-ref"]:
+ net["vnf-vld-id"] = vnfd_ivld["id"]
+ break
+ else:
+ continue
+ break
+ # update kdus
+ for kdu in get_iterable(vnfd.get("kdu")):
+ additional_params, kdu_params = self._format_additional_params(ns_request,
+ member_vnf["member-vnf-index"],
+ kdu_name=kdu["name"],
+ descriptor=vnfd)
+ kdu_k8s_namespace = vnf_k8s_namespace
+ kdu_model = kdu_params.get("kdu_model") if kdu_params else None
+ if kdu_params and kdu_params.get("k8s-namespace"):
+ kdu_k8s_namespace = kdu_params["k8s-namespace"]
+
+ kdur = {
+ "additionalParams": additional_params,
+ "k8s-namespace": kdu_k8s_namespace,
+ "kdu-name": kdu["name"],
+ # TODO "name": "" Name of the VDU in the VIM
+ "ip-address": None, # mgmt-interface filled by LCM
+ "k8s-cluster": {},
+ }
+ if kdu_params and kdu_params.get("config-units"):
+ kdur["config-units"] = kdu_params["config-units"]
+ for k8s_type in ("helm-chart", "juju-bundle"):
+ if kdu.get(k8s_type):
+ kdur[k8s_type] = kdu_model or kdu[k8s_type]
+ if not vnfr_descriptor.get("kdur"):
+ vnfr_descriptor["kdur"] = []
+ vnfr_descriptor["kdur"].append(kdur)
+
for vdu in vnfd.get("vdu", ()):
+ additional_params, vdu_params = self._format_additional_params(
+ ns_request, member_vnf["member-vnf-index"], vdu_id=vdu["id"], descriptor=vnfd)
vdur = {
"vdu-id-ref": vdu["id"],
# TODO "name": "" Name of the VDU in the VIM
# "vim-id", "flavor-id", "image-id", "management-ip" # filled by LCM
"internal-connection-point": [],
"interfaces": [],
+ "additionalParams": additional_params
}
+ if vdu_params and vdu_params.get("config-units"):
+ vdur["config-units"] = vdu_params["config-units"]
+ if deep_get(vdu, ("supplemental-boot-data", "boot-data-drive")):
+ vdur["boot-data-drive"] = vdu["supplemental-boot-data"]["boot-data-drive"]
if vdu.get("pdu-type"):
vdur["pdu-type"] = vdu["pdu-type"]
+ vdur["name"] = vdu["pdu-type"]
# TODO volumes: name, volume-id
for icp in vdu.get("internal-connection-point", ()):
vdu_icp = {
return nsr_id, None
except (ValidationError, EngineException, DbException, MsgException, FsException) as e:
- raise type(e)("{} while '{}".format(e, step), http_code=e.http_code)
+ raise type(e)("{} while '{}'".format(e, step), http_code=e.http_code)
def edit(self, session, _id, indata=None, kwargs=None, content=None):
raise EngineException("Method edit called directly", HTTPStatus.INTERNAL_SERVER_ERROR)
def __init__(self, db, fs, msg, auth):
BaseTopic.__init__(self, db, fs, msg, auth)
- def delete(self, session, _id, dry_run=False):
+ def delete(self, session, _id, dry_run=False, not_send_msg=None):
raise EngineException("Method delete called directly", HTTPStatus.INTERNAL_SERVER_ERROR)
def edit(self, session, _id, indata=None, kwargs=None, content=None):
"instantiate": ns_instantiate,
"action": ns_action,
"scale": ns_scale,
- "terminate": None,
+ "terminate": ns_terminate,
}
def __init__(self, db, fs, msg, auth):
else:
raise EngineException("Invalid parameter vdu_id='{}' not present at vnfd:vdu:id".format(vdu_id))
+ def check_valid_kdu(vnfd, kdu_name):
+ for kdud in get_iterable(vnfd.get("kdu")):
+ if kdud["name"] == kdu_name:
+ return kdud
+ else:
+ raise EngineException("Invalid parameter kdu_name='{}' not present at vnfd:kdu:name".format(kdu_name))
+
def _check_vnf_instantiation_params(in_vnfd, vnfd):
for in_vdu in get_iterable(in_vnfd.get("vdu")):
for in_ivld in get_iterable(in_vnfd.get("internal-vld")):
for ivld in get_iterable(vnfd.get("internal-vld")):
- if in_ivld["name"] == ivld["name"] or in_ivld["name"] == ivld["id"]:
+ if in_ivld["name"] in (ivld["id"], ivld.get("name")):
for in_icp in get_iterable(in_ivld.get("internal-connection-point")):
for icp in ivld["internal-connection-point"]:
if in_icp["id-ref"] == icp["id-ref"]:
"='{}']:internal-connection-point[id-ref:'{}'] is not present at "
"vnfd:internal-vld:name/id:internal-connection-point"
.format(in_vnf["member-vnf-index"], in_ivld["name"],
- in_icp["id-ref"], vnfd["id"]))
+ in_icp["id-ref"]))
break
else:
raise EngineException("Invalid parameter vnf[member-vnf-index='{}']:internal-vld:name='{}'"
if indata.get("vdu_id"):
vdud = check_valid_vdu(vnfd, indata["vdu_id"])
descriptor_configuration = vdud.get("vdu-configuration", {}).get("config-primitive")
+ elif indata.get("kdu_name"):
+ kdud = check_valid_kdu(vnfd, indata["kdu_name"])
+ descriptor_configuration = kdud.get("kdu-configuration", {}).get("config-primitive")
else:
descriptor_configuration = vnfd.get("vnf-configuration", {}).get("config-primitive")
else: # use a NSD
descriptor_configuration = nsd.get("ns-configuration", {}).get("config-primitive")
- # check primitive
+
+ # For k8s allows default primitives without validating the parameters
+ if indata.get("kdu_name") and indata["primitive"] in ("upgrade", "rollback", "status", "inspect", "readme"):
+            # TODO it should be checked that rollback can only contain revision_number
+ if not indata.get("member_vnf_index"):
+ raise EngineException("Missing action parameter 'member_vnf_index' for default KDU primitive '{}'"
+ .format(indata["primitive"]))
+ return
+ # if not, check primitive
for config_primitive in get_iterable(descriptor_configuration):
if indata["primitive"] == config_primitive["name"]:
# check needed primitive_params are provided
Look for a free PDU in the catalog matching vdur type and interfaces. Fills vnfr.vdur with the interface
(ip_address, ...) information.
Modifies PDU _admin.usageState to 'IN_USE'
-
:param session: contains "username", "admin", "force", "public", "project_id", "set_project"
:param rollback: list with the database modifications to rollback if needed
:param vnfr: vnfr to be updated. It is modified with pdu interface info if pdu is found
vnfr_update[iface_text + ".{}".format(k)] = v
vnfr_update_rollback[iface_text + ".{}".format(k)] = vdur_interface.get(v)
if pdu_interface.get("ip-address"):
- if vdur_interface.get("mgmt-interface"):
+ if vdur_interface.get("mgmt-interface") or vdur_interface.get("mgmt-vnf"):
vnfr_update_rollback[vdu_text + ".ip-address"] = vdur.get("ip-address")
vnfr_update[vdu_text + ".ip-address"] = pdu_interface["ip-address"]
if vdur_interface.get("mgmt-vnf"):
vnfr_update_rollback["ip-address"] = vnfr.get("ip-address")
vnfr_update["ip-address"] = pdu_interface["ip-address"]
+ vnfr_update[vdu_text + ".ip-address"] = pdu_interface["ip-address"]
if pdu_interface.get("vim-network-name") or pdu_interface.get("vim-network-id"):
ifaces_forcing_vim_network.append({
"name": vdur_interface.get("vnf-vld-id") or vdur_interface.get("ns-vld-id"),
"vnf-vld-id": vdur_interface.get("vnf-vld-id"),
"ns-vld-id": vdur_interface.get("ns-vld-id")})
if pdu_interface.get("vim-network-id"):
- ifaces_forcing_vim_network.append({
- "vim-network-id": pdu_interface.get("vim-network-id")})
+ ifaces_forcing_vim_network[-1]["vim-network-id"] = pdu_interface["vim-network-id"]
if pdu_interface.get("vim-network-name"):
- ifaces_forcing_vim_network.append({
- "vim-network-name": pdu_interface.get("vim-network-name")})
+ ifaces_forcing_vim_network[-1]["vim-network-name"] = pdu_interface["vim-network-name"]
break
return ifaces_forcing_vim_network
+ def _look_for_k8scluster(self, session, rollback, vnfr, vim_account, vnfr_update, vnfr_update_rollback):
+ """
+        Look for an available k8scluster for all the kdus in the vnfd matching version and cni requirements.
+ Fills vnfr.kdur with the selected k8scluster
+
+ :param session: contains "username", "admin", "force", "public", "project_id", "set_project"
+ :param rollback: list with the database modifications to rollback if needed
+        :param vnfr: vnfr to be updated. It is modified with the selected k8scluster info if one is found
+ :param vim_account: vim_account where this vnfr should be deployed
+ :param vnfr_update: dictionary filled by this method with changes to be done at database vnfr
+ :param vnfr_update_rollback: dictionary filled by this method with original content of vnfr in case a rollback
+ of the changed vnfr is needed
+
+ :return: List of KDU interfaces that are connected to an existing VIM network. Each item contains:
+ "vim-network-name": used at VIM
+ "name": interface name
+ "vnf-vld-id": internal VNFD vld where this interface is connected, or
+ "ns-vld-id": NSD vld where this interface is connected.
+ NOTE: One, and only one between 'vnf-vld-id' and 'ns-vld-id' contains a value. The other will be None
+ """
+
+ ifaces_forcing_vim_network = []
+ if not vnfr.get("kdur"):
+ return ifaces_forcing_vim_network
+
+ kdu_filter = self._get_project_filter(session)
+ kdu_filter["vim_account"] = vim_account
+ # TODO kdu_filter["_admin.operationalState"] = "ENABLED"
+ available_k8sclusters = self.db.get_list("k8sclusters", kdu_filter)
+
+ k8s_requirements = {} # just for logging
+ for k8scluster in available_k8sclusters:
+ if not vnfr.get("k8s-cluster"):
+ break
+ # restrict by cni
+ if vnfr["k8s-cluster"].get("cni"):
+ k8s_requirements["cni"] = vnfr["k8s-cluster"]["cni"]
+ if not set(vnfr["k8s-cluster"]["cni"]).intersection(k8scluster.get("cni", ())):
+ continue
+ # restrict by version
+ if vnfr["k8s-cluster"].get("version"):
+ k8s_requirements["version"] = vnfr["k8s-cluster"]["version"]
+ if k8scluster.get("k8s_version") not in vnfr["k8s-cluster"]["version"]:
+ continue
+ # restrict by number of networks
+ if vnfr["k8s-cluster"].get("nets"):
+ k8s_requirements["networks"] = len(vnfr["k8s-cluster"]["nets"])
+ if not k8scluster.get("nets") or len(k8scluster["nets"]) < len(vnfr["k8s-cluster"]["nets"]):
+ continue
+ break
+ else:
+ raise EngineException("No k8scluster with requirements='{}' at vim_account={} found for member_vnf_index={}"
+ .format(k8s_requirements, vim_account, vnfr["member-vnf-index-ref"]))
+
+ for kdur_index, kdur in enumerate(get_iterable(vnfr.get("kdur"))):
+ # step 3. Fill vnfr info by filling kdur
+ kdu_text = "kdur.{}.".format(kdur_index)
+ vnfr_update_rollback[kdu_text + "k8s-cluster.id"] = None
+ vnfr_update[kdu_text + "k8s-cluster.id"] = k8scluster["_id"]
+
+ # step 4. Check VIM networks that forces the selected k8s_cluster
+ if vnfr.get("k8s-cluster") and vnfr["k8s-cluster"].get("nets"):
+ k8scluster_net_list = list(k8scluster.get("nets").keys())
+ for net_index, kdur_net in enumerate(vnfr["k8s-cluster"]["nets"]):
+ # get a network from k8s_cluster nets. If name matches use this, if not use other
+ if kdur_net["id"] in k8scluster_net_list: # name matches
+ vim_net = k8scluster["nets"][kdur_net["id"]]
+ k8scluster_net_list.remove(kdur_net["id"])
+ else:
+ vim_net = k8scluster["nets"][k8scluster_net_list[0]]
+ k8scluster_net_list.pop(0)
+ vnfr_update_rollback["k8s-cluster.nets.{}.vim_net".format(net_index)] = None
+ vnfr_update["k8s-cluster.nets.{}.vim_net".format(net_index)] = vim_net
+ if vim_net and (kdur_net.get("vnf-vld-id") or kdur_net.get("ns-vld-id")):
+ ifaces_forcing_vim_network.append({
+ "name": kdur_net.get("vnf-vld-id") or kdur_net.get("ns-vld-id"),
+ "vnf-vld-id": kdur_net.get("vnf-vld-id"),
+ "ns-vld-id": kdur_net.get("ns-vld-id"),
+ "vim-network-name": vim_net, # TODO can it be vim-network-id ???
+ })
+ # TODO check that this forcing is not incompatible with other forcing
+ return ifaces_forcing_vim_network
+
def _update_vnfrs(self, session, rollback, nsr, indata):
- vnfrs = None
# get vnfr
nsr_id = nsr["_id"]
vnfrs = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
ifaces_forcing_vim_network = self._look_for_pdu(session, rollback, vnfr, vim_account, vnfr_update,
vnfr_update_rollback)
- # updata database vnfr
+ # get kdus
+ ifaces_forcing_vim_network += self._look_for_k8scluster(session, rollback, vnfr, vim_account, vnfr_update,
+ vnfr_update_rollback)
+ # update database vnfr
self.db.set_one("vnfrs", {"_id": vnfr["_id"]}, vnfr_update)
rollback.append({"topic": "vnfrs", "_id": vnfr["_id"], "operation": "set", "content": vnfr_update_rollback})
"id": _id,
"_id": _id,
"operationState": "PROCESSING", # COMPLETED,PARTIALLY_COMPLETED,FAILED_TEMP,FAILED,ROLLING_BACK,ROLLED_BACK
+ "queuePosition": None,
+ "stage": None,
+ "errorMessage": None,
+ "detailedStatus": None,
"statusEnteredTime": now,
"nsInstanceId": nsr_id,
"lcmOperationType": operation,
}
return nslcmop
+ def _get_enabled_vims(self, session):
+ """
+        Retrieve and return VIM accounts that are accessible by the current user and have state ENABLED
+ :param session: current session with user information
+ """
+ db_filter = self._get_project_filter(session)
+ db_filter["_admin.operationalState"] = "ENABLED"
+ vims = self.db.get_list("vim_accounts", db_filter)
+ vimAccounts = []
+ for vim in vims:
+ vimAccounts.append(vim['_id'])
+ return vimAccounts
+
def new(self, rollback, session, indata=None, kwargs=None, headers=None, slice_object=False):
"""
Performs a new operation over a ns
try:
# Override descriptor with query string kwargs
- self._update_input_with_kwargs(indata, kwargs)
+ self._update_input_with_kwargs(indata, kwargs, yaml_format=True)
operation = indata["lcmOperationType"]
nsInstanceId = indata["nsInstanceId"]
nslcmop_desc = self._create_nslcmop(nsInstanceId, operation, indata)
_id = nslcmop_desc["_id"]
self.format_on_new(nslcmop_desc, session["project_id"], make_public=session["public"])
+ if indata.get("placement-engine"):
+ # Save valid vim accounts in lcm operation descriptor
+ nslcmop_desc['operationParams']['validVimAccounts'] = self._get_enabled_vims(session)
self.db.create("nslcmops", nslcmop_desc)
rollback.append({"topic": "nslcmops", "_id": _id})
if not slice_object:
# except DbException as e:
# raise EngineException("Cannot get ns_instance '{}': {}".format(e), HTTPStatus.NOT_FOUND)
- def delete(self, session, _id, dry_run=False):
+ def delete(self, session, _id, dry_run=False, not_send_msg=None):
raise EngineException("Method delete called directly", HTTPStatus.INTERNAL_SERVER_ERROR)
def edit(self, session, _id, indata=None, kwargs=None, content=None):
class NsiTopic(BaseTopic):
topic = "nsis"
topic_msg = "nsi"
+ quota_name = "slice_instances"
def __init__(self, db, fs, msg, auth):
BaseTopic.__init__(self, db, fs, msg, auth)
"Launch 'terminate' operation first; or force deletion".format(_id),
http_code=HTTPStatus.CONFLICT)
- def delete_extra(self, session, _id, db_content):
+ def delete_extra(self, session, _id, db_content, not_send_msg=None):
"""
Deletes associated nsilcmops from database. Deletes associated filesystem.
Set usageState of nst
:param session: contains "username", "admin", "force", "public", "project_id", "set_project"
:param _id: server internal id
:param db_content: The database content of the descriptor
+ :param not_send_msg: To not send message (False) or store content (list) instead
:return: None if ok or raises EngineException with the problem
"""
if nsi: # last one using nsr
continue
try:
- self.nsrTopic.delete(session, nsr_id, dry_run=False)
+ self.nsrTopic.delete(session, nsr_id, dry_run=False, not_send_msg=not_send_msg)
except (DbException, EngineException) as e:
if e.http_code == HTTPStatus.NOT_FOUND:
pass
nstd = self.db.get_one("nsts", _filter)
del _filter["_id"]
+ # check NSD is not disabled
+ step = "checking operationalState"
+ if nstd["_admin"]["operationalState"] == "DISABLED":
+ raise EngineException("nst with id '{}' is DISABLED, and thus cannot be used to create "
+ "a network slice".format(slice_request["nstId"]), http_code=HTTPStatus.CONFLICT)
+
nstd.pop("_admin", None)
nstd_id = nstd.pop("_id", None)
nsi_id = str(uuid4())
nsi_vlds.append(nsi_vld)
nsi_descriptor["_admin"]["netslice-vld"] = nsi_vlds
- # Creating netslice-subnet_record.
+ # Creating netslice-subnet_record.
needed_nsds = {}
services = []
if nstId == netslice_subnet["id"]:
nsd_id = netslice_subnet["nsd-ref"]
if nsd_id not in nsds:
- nsds[nsd_id] = self.db.get_one("nsds", {"id": nsd_id})
+ _filter = self._get_project_filter(session)
+ _filter["id"] = nsd_id
+ nsds[nsd_id] = self.db.get_one("nsds", _filter)
return nsds[nsd_id]
else:
raise EngineException("Invalid parameter nstId='{}' is not one of the "
try:
service = self.db.get_one("nsrs", {"_id": nsr_item["nsrId"]})
- indata_ns = {}
- indata_ns = service["instantiate_params"]
- indata_ns["lcmOperationType"] = operation
- indata_ns["nsInstanceId"] = service["_id"]
- # Including netslice_id in the ns instantiate Operation
- indata_ns["netsliceInstanceId"] = netsliceInstanceId
+ indata_ns = {
+ "lcmOperationType": operation,
+ "nsInstanceId": service["_id"],
+ # Including netslice_id in the ns instantiate Operation
+ "netsliceInstanceId": netsliceInstanceId,
+ }
+ if operation == "instantiate":
+ indata_ns.update(service["instantiate_params"])
+
# Creating NS_LCM_OP with the flag slice_object=True to not trigger the service instantiation
# message via kafka bus
nslcmop, _ = self.nsi_NsLcmOpTopic.new(rollback, session, indata_ns, kwargs, headers,
except ValidationError as e:
raise EngineException(e, HTTPStatus.UNPROCESSABLE_ENTITY)
- def delete(self, session, _id, dry_run=False):
+ def delete(self, session, _id, dry_run=False, not_send_msg=None):
raise EngineException("Method delete called directly", HTTPStatus.INTERNAL_SERVER_ERROR)
def edit(self, session, _id, indata=None, kwargs=None, content=None):