import logging.handlers
import functools
import traceback
+from jinja2 import Environment, Template, meta, TemplateError, TemplateNotFound, TemplateSyntaxError
-import ROclient
-from lcm_utils import LcmException, LcmExceptionNoMgmtIP, LcmBase
+from osm_lcm import ROclient
+from osm_lcm.lcm_utils import LcmException, LcmExceptionNoMgmtIP, LcmBase
from osm_common.dbbase import DbException
from osm_common.fsbase import FsException
-from n2vc.vnf import N2VC
+from n2vc.vnf import N2VC, N2VCPrimitiveExecutionFailed, NetworkServiceDoesNotExist, PrimitiveDoesNotExist
from copy import copy, deepcopy
from http import HTTPStatus
def populate_dict(target_dict, key_list, value):
"""
- Upate target_dict creating nested dictionaries with the key_list. Last key_list item is asigned the value.
+ Update target_dict, creating nested dictionaries with the key_list. The last key_list item is assigned the value.
Example target_dict={K: J}; key_list=[a,b,c]; target_dict will be {K: J, a: {b: {c: value}}}
:param target_dict: dictionary to be changed
:param key_list: list of keys to insert at target_dict
target_dict[key_list[-1]] = value
+def deep_get(target_dict, key_list):
+ """
+ Get a value from target_dict following the nested keys. If a key does not exist, it returns None
+ Example target_dict={a: {b: 5}}; key_list=[a,b] returns 5; both key_list=[a,b,c] and key_list=[f,h] return None
+ :param target_dict: dictionary to be read
+ :param key_list: list of keys to read from target_dict
+ :return: The wanted value if it exists, None otherwise
+ """
+ for key in key_list:
+ if not isinstance(target_dict, dict) or key not in target_dict:
+ return None
+ target_dict = target_dict[key]
+ return target_dict
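+
+# Illustrative round-trip of the two helpers above (values follow the docstring examples):
+#     d = {}
+#     populate_dict(d, ["a", "b", "c"], 5)    # d becomes {"a": {"b": {"c": 5}}}
+#     deep_get(d, ["a", "b", "c"])            # -> 5
+#     deep_get(d, ["a", "x"])                 # -> None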
+
+
class NsLcm(LcmBase):
timeout_vca_on_error = 5 * 60  # time from when a charm first reaches blocked or error status until it is marked as failed
total_deploy_timeout = 2 * 3600 # global timeout for deployment
+ timeout_charm_delete = 10 * 60
+ timeout_primitive = 10 * 60 # timeout for primitive execution
+
+ SUBOPERATION_STATUS_NOT_FOUND = -1
+ SUBOPERATION_STATUS_NEW = -2
+ SUBOPERATION_STATUS_SKIP = -3
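+ # These sentinel values are returned by the sub-operation helpers defined below: _find_suboperation()
+ # returns SUBOPERATION_STATUS_NOT_FOUND when no matching entry exists, and _reintent_or_skip_suboperation()
+ # returns SUBOPERATION_STATUS_SKIP for a sub-operation that is already COMPLETED.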
def __init__(self, db, msg, fs, lcm_tasks, ro_config, vca_config, loop):
"""
# it unset and pass it via DeployCharms
# artifacts=vca_config[''],
artifacts=None,
+ juju_public_key=vca_config.get('pubkey'),
+ ca_cert=vca_config.get('cacert'),
+ api_proxy=vca_config.get('apiproxy')
)
+ self.RO = ROclient.ROClient(self.loop, **self.ro_config)
- def vnfd2RO(self, vnfd, new_id=None):
+ def vnfd2RO(self, vnfd, new_id=None, additionalParams=None, nsrId=None):
"""
Creates a new vnfd descriptor for RO based on the input OSM IM vnfd
:param vnfd: input vnfd
:param new_id: overrides vnf id if provided
+ :param additionalParams: Instantiation params for VNFs provided
+ :param nsrId: Id of the NSR
:return: copy of vnfd
"""
- ci_file = None
try:
vnfd_RO = deepcopy(vnfd)
+ # remove configuration, monitoring, scaling and internal keys not used by RO
vnfd_RO.pop("_id", None)
vnfd_RO.pop("_admin", None)
+ vnfd_RO.pop("vnf-configuration", None)
+ vnfd_RO.pop("monitoring-param", None)
+ vnfd_RO.pop("scaling-group-descriptor", None)
if new_id:
vnfd_RO["id"] = new_id
- for vdu in vnfd_RO.get("vdu", ()):
- if "cloud-init-file" in vdu:
+
+ # parse cloud-init or cloud-init-file with the provided variables using Jinja2
+ for vdu in get_iterable(vnfd_RO, "vdu"):
+ cloud_init_file = None
+ if vdu.get("cloud-init-file"):
base_folder = vnfd["_admin"]["storage"]
- clout_init_file = "{}/{}/cloud_init/{}".format(
- base_folder["folder"],
- base_folder["pkg-dir"],
- vdu["cloud-init-file"]
- )
- ci_file = self.fs.file_open(clout_init_file, "r")
- # TODO: detect if binary or text. Propose to read as binary and try to decode to utf8. If fails
- # convert to base 64 or similar
- clout_init_content = ci_file.read()
- ci_file.close()
- ci_file = None
+ cloud_init_file = "{}/{}/cloud_init/{}".format(base_folder["folder"], base_folder["pkg-dir"],
+ vdu["cloud-init-file"])
+ with self.fs.file_open(cloud_init_file, "r") as ci_file:
+ cloud_init_content = ci_file.read()
vdu.pop("cloud-init-file", None)
- vdu["cloud-init"] = clout_init_content
- # remnove unused by RO configuration, monitoring, scaling
- vnfd_RO.pop("vnf-configuration", None)
- vnfd_RO.pop("monitoring-param", None)
- vnfd_RO.pop("scaling-group-descriptor", None)
+ elif vdu.get("cloud-init"):
+ cloud_init_content = vdu["cloud-init"]
+ else:
+ continue
+
+ env = Environment()
+ ast = env.parse(cloud_init_content)
+ mandatory_vars = meta.find_undeclared_variables(ast)
+ if mandatory_vars:
+ for var in mandatory_vars:
+ if not additionalParams or var not in additionalParams.keys():
+ raise LcmException("Variable '{}' defined at vnfd[id={}]:vdu[id={}]:cloud-init/cloud-init-"
+ "file, must be provided in the instantiation parameters inside the "
+ "'additionalParamsForVnf' block".format(var, vnfd["id"], vdu["id"]))
+ template = Template(cloud_init_content)
+ cloud_init_content = template.render(additionalParams or {})
+ vdu["cloud-init"] = cloud_init_content
+
return vnfd_RO
except FsException as e:
- raise LcmException("Error reading file at vnfd {}: {} ".format(vnfd["_id"], e))
- finally:
- if ci_file:
- ci_file.close()
+ raise LcmException("Error reading vnfd[id={}]:vdu[id={}]:cloud-init-file={}: {}".
+ format(vnfd["id"], vdu["id"], cloud_init_file, e))
+ except (TemplateError, TemplateNotFound, TemplateSyntaxError) as e:
+ raise LcmException("Error parsing Jinja2 to cloud-init content at vnfd[id={}]:vdu[id={}]: {}".
+ format(vnfd["id"], vdu["id"], e))
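+
+ # A minimal standalone sketch of the Jinja2 technique used above (the template text and variable name are
+ # illustrative, not taken from any descriptor):
+ #     from jinja2 import Environment, Template, meta
+ #     content = "hostname: {{ vm_name }}"
+ #     undeclared = meta.find_undeclared_variables(Environment().parse(content))   # -> {"vm_name"}
+ #     rendered = Template(content).render({"vm_name": "vnf1-mgmtVM"})             # -> "hostname: vnf1-mgmtVM"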
def n2vc_callback(self, model_name, application_name, status, message, n2vc_info, task=None):
"""
if model_name == vca_deployed["model"] and application_name == vca_deployed["application"]:
break
else:
- self.logger.error(logging_text + " Not present at nsr._admin.deployed.VCA")
+ self.logger.error(logging_text + " Not present at nsr._admin.deployed.VCA. Received model_name={}".
+ format(model_name))
return
if task:
if task.cancelled():
:return: The RO ns descriptor
"""
vim_2_RO = {}
+ wim_2_RO = {}
# TODO feature 1417: Check that no instantiation is set over PDU
# check if PDU forces a concrete vim-network-id and add it
# check if PDU contains a SDN-assist info (dpid, switch, port) and pass it to RO
vim_2_RO[vim_account] = RO_vim_id
return RO_vim_id
+ def wim_account_2_RO(wim_account):
+ if isinstance(wim_account, str):
+ if wim_account in wim_2_RO:
+ return wim_2_RO[wim_account]
+
+ db_wim = self.db.get_one("wim_accounts", {"_id": wim_account})
+ if db_wim["_admin"]["operationalState"] != "ENABLED":
+ raise LcmException("WIM={} is not available. operationalState={}".format(
+ wim_account, db_wim["_admin"]["operationalState"]))
+ RO_wim_id = db_wim["_admin"]["deployed"]["RO-account"]
+ wim_2_RO[wim_account] = RO_wim_id
+ return RO_wim_id
+ else:
+ return wim_account
+
def ip_profile_2_RO(ip_profile):
RO_ip_profile = deepcopy((ip_profile))
if "dns-server" in RO_ip_profile:
# "name": ns_params["nsName"],
# "description": ns_params.get("nsDescription"),
"datacenter": vim_account_2_RO(ns_params["vimAccountId"]),
+ "wim_account": wim_account_2_RO(ns_params.get("wimAccountId")),
# "scenario": ns_params["nsdId"],
}
- if n2vc_key_list:
- for vnfd_ref, vnfd in vnfd_dict.items():
- vdu_needed_access = []
- mgmt_cp = None
- if vnfd.get("vnf-configuration"):
- if vnfd.get("mgmt-interface"):
- if vnfd["mgmt-interface"].get("vdu-id"):
- vdu_needed_access.append(vnfd["mgmt-interface"]["vdu-id"])
- elif vnfd["mgmt-interface"].get("cp"):
- mgmt_cp = vnfd["mgmt-interface"]["cp"]
-
- for vdu in vnfd.get("vdu", ()):
- if vdu.get("vdu-configuration"):
+ n2vc_key_list = n2vc_key_list or []
+ for vnfd_ref, vnfd in vnfd_dict.items():
+ vdu_needed_access = []
+ mgmt_cp = None
+ if vnfd.get("vnf-configuration"):
+ ssh_required = deep_get(vnfd, ("vnf-configuration", "config-access", "ssh-access", "required"))
+ if ssh_required and vnfd.get("mgmt-interface"):
+ if vnfd["mgmt-interface"].get("vdu-id"):
+ vdu_needed_access.append(vnfd["mgmt-interface"]["vdu-id"])
+ elif vnfd["mgmt-interface"].get("cp"):
+ mgmt_cp = vnfd["mgmt-interface"]["cp"]
+
+ for vdu in vnfd.get("vdu", ()):
+ if vdu.get("vdu-configuration"):
+ ssh_required = deep_get(vdu, ("vdu-configuration", "config-access", "ssh-access", "required"))
+ if ssh_required:
vdu_needed_access.append(vdu["id"])
- elif mgmt_cp:
- for vdu_interface in vdu.get("interface"):
- if vdu_interface.get("external-connection-point-ref") and \
- vdu_interface["external-connection-point-ref"] == mgmt_cp:
- vdu_needed_access.append(vdu["id"])
- mgmt_cp = None
- break
+ elif mgmt_cp:
+ for vdu_interface in vdu.get("interface"):
+ if vdu_interface.get("external-connection-point-ref") and \
+ vdu_interface["external-connection-point-ref"] == mgmt_cp:
+ vdu_needed_access.append(vdu["id"])
+ mgmt_cp = None
+ break
- if vdu_needed_access:
- for vnf_member in nsd.get("constituent-vnfd"):
- if vnf_member["vnfd-id-ref"] != vnfd_ref:
- continue
- for vdu in vdu_needed_access:
- populate_dict(RO_ns_params,
- ("vnfs", vnf_member["member-vnf-index"], "vdus", vdu, "mgmt_keys"),
- n2vc_key_list)
+ if vdu_needed_access:
+ for vnf_member in nsd.get("constituent-vnfd"):
+ if vnf_member["vnfd-id-ref"] != vnfd_ref:
+ continue
+ for vdu in vdu_needed_access:
+ populate_dict(RO_ns_params,
+ ("vnfs", vnf_member["member-vnf-index"], "vdus", vdu, "mgmt_keys"),
+ n2vc_key_list)
if ns_params.get("vduImage"):
RO_ns_params["vduImage"] = ns_params["vduImage"]
populate_dict(RO_ns_params, ("vnfs", vnf_params["member-vnf-index"], "networks",
internal_vld_params["name"], "vim-network-name"),
internal_vld_params["vim-network-name"])
+ if internal_vld_params.get("vim-network-id"):
+ populate_dict(RO_ns_params, ("vnfs", vnf_params["member-vnf-index"], "networks",
+ internal_vld_params["name"], "vim-network-id"),
+ internal_vld_params["vim-network-id"])
if internal_vld_params.get("ip-profile"):
populate_dict(RO_ns_params, ("vnfs", vnf_params["member-vnf-index"], "networks",
internal_vld_params["name"], "ip-profile"),
ip_profile_2_RO(internal_vld_params["ip-profile"]))
+ if internal_vld_params.get("provider-network"):
+
+ populate_dict(RO_ns_params, ("vnfs", vnf_params["member-vnf-index"], "networks",
+ internal_vld_params["name"], "provider-network"),
+ internal_vld_params["provider-network"].copy())
for icp_params in get_iterable(internal_vld_params, "internal-connection-point"):
# look for interface
if "ip-profile" in vld_params:
populate_dict(RO_ns_params, ("networks", vld_params["name"], "ip-profile"),
ip_profile_2_RO(vld_params["ip-profile"]))
+
+ if vld_params.get("provider-network"):
+
+ populate_dict(RO_ns_params, ("networks", vld_params["name"], "provider-network"),
+ vld_params["provider-network"].copy())
+
+ if "wimAccountId" in vld_params and vld_params["wimAccountId"] is not None:
+ populate_dict(RO_ns_params, ("networks", vld_params["name"], "wim_account"),
+ wim_account_2_RO(vld_params["wimAccountId"])),
if vld_params.get("vim-network-name"):
RO_vld_sites = []
if isinstance(vld_params["vim-network-name"], dict):
RO_vld_sites.append({"netmap-use": vld_params["vim-network-name"]})
if RO_vld_sites:
populate_dict(RO_ns_params, ("networks", vld_params["name"], "sites"), RO_vld_sites)
+
+ if vld_params.get("vim-network-id"):
+ RO_vld_sites = []
+ if isinstance(vld_params["vim-network-id"], dict):
+ for vim_account, vim_net in vld_params["vim-network-id"].items():
+ RO_vld_sites.append({
+ "netmap-use": vim_net,
+ "datacenter": vim_account_2_RO(vim_account)
+ })
+ else: # isinstance str
+ RO_vld_sites.append({"netmap-use": vld_params["vim-network-id"]})
+ if RO_vld_sites:
+ populate_dict(RO_ns_params, ("networks", vld_params["name"], "sites"), RO_vld_sites)
+ if vld_params.get("ns-net"):
+ if isinstance(vld_params["ns-net"], dict):
+ for vld_id, instance_scenario_id in vld_params["ns-net"].items():
+ RO_vld_ns_net = {"instance_scenario_id": instance_scenario_id, "osm_id": vld_id}
+ if RO_vld_ns_net:
+ populate_dict(RO_ns_params, ("networks", vld_params["name"], "use-network"), RO_vld_ns_net)
if "vnfd-connection-point-ref" in vld_params:
for cp_params in vld_params["vnfd-connection-point-ref"]:
# look for interface
continue
vnfr_update = {}
if vnf_RO.get("ip_address"):
- db_vnfr["ip-address"] = vnfr_update["ip-address"] = vnf_RO["ip_address"]
+ db_vnfr["ip-address"] = vnfr_update["ip-address"] = vnf_RO["ip_address"].split(";")[0]
elif not db_vnfr.get("ip-address"):
raise LcmExceptionNoMgmtIP("ns member_vnf_index '{}' has no IP address".format(vnf_index))
vdur_RO_count_index += 1
continue
vdur["vim-id"] = vdur_RO.get("vim_vm_id")
- vdur["ip-address"] = vdur_RO.get("ip_address")
+ if vdur_RO.get("ip_address"):
+ vdur["ip-address"] = vdur_RO["ip_address"].split(";")[0]
+ else:
+ vdur["ip-address"] = None
vdur["vdu-id-ref"] = vdur_RO.get("vdu_osm_id")
vdur["name"] = vdur_RO.get("vim_name")
vdur["status"] = vdur_RO.get("status")
break
else:
raise LcmException("ns_update_vnfr: Not found member_vnf_index={} vdur={} interface={} "
- "at RO info".format(vnf_index, vdur["vdu-id-ref"], ifacer["name"]))
+ "from VIM info".format(vnf_index, vdur["vdu-id-ref"],
+ ifacer["name"]))
vnfr_update["vdur.{}".format(vdu_index)] = vdur
break
else:
- raise LcmException("ns_update_vnfr: Not found member_vnf_index={} vdur={} count_index={} at "
- "RO info".format(vnf_index, vdur["vdu-id-ref"], vdur["count-index"]))
+ raise LcmException("ns_update_vnfr: Not found member_vnf_index={} vdur={} count_index={} from "
+ "VIM info".format(vnf_index, vdur["vdu-id-ref"], vdur["count-index"]))
for vld_index, vld in enumerate(get_iterable(db_vnfr, "vld")):
for net_RO in get_iterable(nsr_desc_RO, "nets"):
vnfr_update["vld.{}".format(vld_index)] = vld
break
else:
- raise LcmException("ns_update_vnfr: Not found member_vnf_index={} vld={} at RO info".format(
+ raise LcmException("ns_update_vnfr: Not found member_vnf_index={} vld={} from VIM info".format(
vnf_index, vld["id"]))
self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
break
else:
- raise LcmException("ns_update_vnfr: Not found member_vnf_index={} at RO info".format(vnf_index))
+ raise LcmException("ns_update_vnfr: Not found member_vnf_index={} from VIM info".format(vnf_index))
+
+ @staticmethod
+ def _get_ns_config_info(vca_deployed_list):
+ """
+ Generates a mapping between vnf/vdu elements and the N2VC id
+ :param vca_deployed_list: list of deployed VCA entries, from database _admin.deployed.VCA
+ :return: a dictionary with {osm-config-mapping: {}} where its element contains:
+ "<member-vnf-index>": <N2VC-id> for a vnf configuration, or
+ "<member-vnf-index>.<vdu.id>.<vdu replica(0, 1,..)>": <N2VC-id> for a vdu configuration
+ """
+ mapping = {}
+ ns_config_info = {"osm-config-mapping": mapping}
+ for vca in vca_deployed_list:
+ if not vca["member-vnf-index"]:
+ continue
+ if not vca["vdu_id"]:
+ mapping[vca["member-vnf-index"]] = vca["application"]
+ else:
+ mapping["{}.{}.{}".format(vca["member-vnf-index"], vca["vdu_id"], vca["vdu_count_index"])] =\
+ vca["application"]
+ return ns_config_info
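+
+ # Example of the mapping produced (hypothetical vca_deployed_list entries):
+ #     [{"member-vnf-index": "1", "vdu_id": None, "application": "app-aa"},
+ #      {"member-vnf-index": "1", "vdu_id": "dataVM", "vdu_count_index": 0, "application": "app-ab"}]
+ # yields {"osm-config-mapping": {"1": "app-aa", "1.dataVM.0": "app-ab"}}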
+
+ @staticmethod
+ def _get_initial_config_primitive_list(desc_primitive_list, vca_deployed):
+ """
+ Generates a list of initial-config-primitive based on the list provided by the descriptor. It includes internal
+ primitives such as verify-ssh-credentials, or config, when needed
+ :param desc_primitive_list: information of the descriptor
+ :param vca_deployed: information of the deployed element, needed to know whether it is related to an NS, VNF or
+ VDU and whether this element contains an ssh public key
+ :return: The modified list. Can be an empty list, but always a list
+ """
+ if desc_primitive_list:
+ primitive_list = desc_primitive_list.copy()
+ else:
+ primitive_list = []
+ # look for primitive config, and get the position. None if not present
+ config_position = None
+ for index, primitive in enumerate(primitive_list):
+ if primitive["name"] == "config":
+ config_position = index
+ break
+
+ # for NS, always add a config primitive if not present (bug 874)
+ if not vca_deployed["member-vnf-index"] and config_position is None:
+ primitive_list.insert(0, {"name": "config", "parameter": []})
+ config_position = 0
+ # for VNF/VDU add verify-ssh-credentials after config
+ if vca_deployed["member-vnf-index"] and config_position is not None and vca_deployed.get("ssh-public-key"):
+ primitive_list.insert(config_position + 1, {"name": "verify-ssh-credentials", "parameter": []})
+ return primitive_list
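+
+ # Behaviour sketch with hypothetical inputs: for an NS charm (empty member-vnf-index) and an empty descriptor
+ # list the result is [{"name": "config", "parameter": []}]; for a VNF charm whose vca_deployed carries an
+ # "ssh-public-key" and whose descriptor list starts with a "config" primitive, the result gets
+ # {"name": "verify-ssh-credentials", "parameter": []} inserted right after that "config" primitive.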
async def instantiate(self, nsr_id, nslcmop_id):
+
+ # Try to lock HA task here
+ task_is_locked_by_me = self.lcm_tasks.lock_HA('ns', 'nslcmops', nslcmop_id)
+ if not task_is_locked_by_me:
+ return
+
logging_text = "Task ns={} instantiate={} ".format(nsr_id, nslcmop_id)
self.logger.debug(logging_text + "Enter")
# get all needed from database
nslcmop_operation_state = None
db_vnfrs = {}
RO_descriptor_number = 0 # number of descriptors created at RO
- descriptor_id_2_RO = {} # map between vnfd/nsd id to the id used at RO
+ vnf_index_2_RO_id = {} # map between vnfd/nsd id to the id used at RO
n2vc_info = {}
+ n2vc_key_list = [] # list of public keys to be injected as authorized to VMs
exc = None
try:
+ # wait for any previous tasks in process
+ step = "Waiting for previous operations to terminate"
+ await self.lcm_tasks.waitfor_related_HA('ns', 'nslcmops', nslcmop_id)
+
step = "Getting nslcmop={} from db".format(nslcmop_id)
db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
step = "Getting nsr={} from db".format(nsr_id)
nsd = db_nsr["nsd"]
nsr_name = db_nsr["name"] # TODO short-name??
- # look if previous tasks in process
- task_name, task_dependency = self.lcm_tasks.lookfor_related("ns", nsr_id, nslcmop_id)
- if task_dependency:
- step = db_nslcmop_update["detailed-status"] = \
- "Waiting for related tasks to be completed: {}".format(task_name)
- self.logger.debug(logging_text + step)
- self.update_db_2("nslcmops", nslcmop_id, db_nslcmop_update)
- _, pending = await asyncio.wait(task_dependency, timeout=3600)
- if pending:
- raise LcmException("Timeout waiting related tasks to be completed")
-
step = "Getting vnfrs from db"
db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
db_vnfds_ref = {}
db_vnfds = {}
+ db_vnfds_index = {}
for vnfr in db_vnfrs_list:
db_vnfrs[vnfr["member-vnf-index-ref"]] = vnfr
vnfd_id = vnfr["vnfd-id"]
vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
db_vnfds_ref[vnfd_ref] = vnfd
db_vnfds[vnfd_id] = vnfd
+ db_vnfds_index[vnfr["member-vnf-index-ref"]] = db_vnfds[vnfd_id]
# Get or generates the _admin.deployed,VCA list
vca_deployed_list = None
+ vca_model_name = None
if db_nsr["_admin"].get("deployed"):
vca_deployed_list = db_nsr["_admin"]["deployed"].get("VCA")
+ vca_model_name = db_nsr["_admin"]["deployed"].get("VCA-model-name")
if vca_deployed_list is None:
vca_deployed_list = []
db_nsr_update["_admin.deployed.VCA"] = vca_deployed_list
+ populate_dict(db_nsr, ("_admin", "deployed", "VCA"), vca_deployed_list)
elif isinstance(vca_deployed_list, dict):
# maintain backward compatibility. Change a dict to list at database
vca_deployed_list = list(vca_deployed_list.values())
db_nsr_update["_admin.deployed.VCA"] = vca_deployed_list
+ populate_dict(db_nsr, ("_admin", "deployed", "VCA"), vca_deployed_list)
db_nsr_update["detailed-status"] = "creating"
db_nsr_update["operational-status"] = "init"
+ if not isinstance(deep_get(db_nsr, ("_admin", "deployed", "RO", "vnfd")), list):
+ populate_dict(db_nsr, ("_admin", "deployed", "RO", "vnfd"), [])
+ db_nsr_update["_admin.deployed.RO.vnfd"] = []
+
+ # set state to INSTANTIATED. Once instantiated, NBI will not delete it directly
+ db_nsr_update["_admin.nsState"] = "INSTANTIATED"
+ self.update_db_2("nsrs", nsr_id, db_nsr_update)
- RO = ROclient.ROClient(self.loop, **self.ro_config)
+ # Deploy charms
+ # The parameters we'll need to deploy a charm
+ number_to_configure = 0
+ def deploy_charm(vnf_index, vdu_id, vdu_name, vdu_count_index, charm_params, n2vc_info, native_charm=False):
+ """An inner function to deploy the charm from either ns, vnf or vdu
+ For ns both vnf_index and vdu_id are None.
+ For vnf only vdu_id is None
+ For vdu both vnf_index and vdu_id contain a value
+ """
+ # if not charm_params.get("rw_mgmt_ip") and vnf_index: # if NS skip mgmt_ip checking
+ # raise LcmException("ns/vnfd/vdu has not management ip address to configure it")
+
+ machine_spec = {}
+ if native_charm:
+ machine_spec["username"] = charm_params.get("username")
+ machine_spec["hostname"] = charm_params.get("rw_mgmt_ip")
+
+ # Note: The charm needs to exist on disk at the location
+ # specified by charm_path.
+ descriptor = vnfd if vnf_index else nsd
+ base_folder = descriptor["_admin"]["storage"]
+ storage_params = self.fs.get_params()
+ charm_path = "{}{}/{}/charms/{}".format(
+ storage_params["path"],
+ base_folder["folder"],
+ base_folder["pkg-dir"],
+ proxy_charm
+ )
+
+ # ns_name will be ignored in the current version of N2VC
+ # but will be implemented for the next point release.
+ model_name = nsr_id
+ vdu_id_text = (str(vdu_id) if vdu_id else "") + "-"
+ vnf_index_text = (str(vnf_index) if vnf_index else "") + "-"
+ application_name = self.n2vc.FormatApplicationName(nsr_name, vnf_index_text, vdu_id_text)
+
+ vca_index = len(vca_deployed_list)
+ # truncate the name and add a two-char index at the end to ensure that it is unique. It is assumed there are
+ # no more than 26*26 charms in the same NS
+ application_name = application_name[0:48]
+ application_name += chr(97 + vca_index // 26) + chr(97 + vca_index % 26)
+ vca_deployed_ = {
+ "member-vnf-index": vnf_index,
+ "vdu_id": vdu_id,
+ "model": model_name,
+ "application": application_name,
+ "operational-status": "init",
+ "detailed-status": "",
+ "step": "initial-deploy",
+ "vnfd_id": vnfd_id,
+ "vdu_name": vdu_name,
+ "vdu_count_index": vdu_count_index,
+ }
+ vca_deployed_list.append(vca_deployed_)
+ db_nsr_update["_admin.deployed.VCA.{}".format(vca_index)] = vca_deployed_
+ self.update_db_2("nsrs", nsr_id, db_nsr_update)
+
+ self.logger.debug("Task create_ns={} Passing artifacts path '{}' for {}".format(nsr_id, charm_path,
+ proxy_charm))
+ if not n2vc_info:
+ n2vc_info["nsr_id"] = nsr_id
+ n2vc_info["nslcmop_id"] = nslcmop_id
+ n2vc_info["n2vc_event"] = asyncio.Event(loop=self.loop)
+ n2vc_info["lcmOperationType"] = "instantiate"
+ n2vc_info["deployed"] = vca_deployed_list
+ n2vc_info["db_update"] = db_nsr_update
+ task = asyncio.ensure_future(
+ self.n2vc.DeployCharms(
+ model_name, # The network service name
+ application_name, # The application name
+ descriptor, # The vnf/nsd descriptor
+ charm_path, # Path to charm
+ charm_params, # Runtime params, like mgmt ip
+ machine_spec, # for native charms only
+ self.n2vc_callback, # Callback for status changes
+ n2vc_info, # Callback parameter
+ None, # Callback parameter (task)
+ )
+ )
+ task.add_done_callback(functools.partial(self.n2vc_callback, model_name, application_name, None, None,
+ n2vc_info))
+ self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "create_charm:" + application_name, task)
+
+ step = "Looking for needed vnfd to configure with proxy charm"
+ self.logger.debug(logging_text + step)
+
+ for c_vnf in get_iterable(nsd, "constituent-vnfd"):
+ vnfd_id = c_vnf["vnfd-id-ref"]
+ vnf_index = str(c_vnf["member-vnf-index"])
+ vnfd = db_vnfds_ref[vnfd_id]
+
+ # Get additional parameters
+ vnfr_params = {}
+ if db_vnfrs[vnf_index].get("additionalParamsForVnf"):
+ vnfr_params = db_vnfrs[vnf_index]["additionalParamsForVnf"].copy()
+ for k, v in vnfr_params.items():
+ if isinstance(v, str) and v.startswith("!!yaml "):
+ vnfr_params[k] = yaml.safe_load(v[7:])
+
+ step = "deploying proxy charms for configuration"
+ # Check if this VNF has a charm configuration
+ vnf_config = vnfd.get("vnf-configuration")
+ if vnf_config and vnf_config.get("juju"):
+ proxy_charm = vnf_config["juju"]["charm"]
+ if vnf_config["juju"].get("proxy") is False:
+ # native_charm, will be deployed after VM. Skip
+ proxy_charm = None
+
+ if proxy_charm:
+ if not vca_model_name:
+ step = "creating VCA model name '{}'".format(nsr_id)
+ self.logger.debug(logging_text + step)
+ await self.n2vc.CreateNetworkService(nsr_id)
+ vca_model_name = nsr_id
+ db_nsr_update["_admin.deployed.VCA-model-name"] = nsr_id
+ self.update_db_2("nsrs", nsr_id, db_nsr_update)
+ step = "deploying proxy charm to configure vnf {}".format(vnf_index)
+ vnfr_params["rw_mgmt_ip"] = db_vnfrs[vnf_index]["ip-address"]
+ charm_params = {
+ "user_values": vnfr_params,
+ "rw_mgmt_ip": db_vnfrs[vnf_index]["ip-address"],
+ "initial-config-primitive": {} # vnf_config.get('initial-config-primitive') or {}
+ }
+
+ # Login to the VCA. If there are multiple calls to login(),
+ # subsequent calls will be a nop and return immediately.
+ await self.n2vc.login()
+
+ deploy_charm(vnf_index, None, None, None, charm_params, n2vc_info)
+ number_to_configure += 1
+
+ # Deploy charms for each VDU that supports one.
+ for vdu_index, vdu in enumerate(get_iterable(vnfd, 'vdu')):
+ vdu_config = vdu.get('vdu-configuration')
+ proxy_charm = None
+
+ if vdu_config and vdu_config.get("juju"):
+ proxy_charm = vdu_config["juju"]["charm"]
+ if vdu_config["juju"].get("proxy") is False:
+ # native_charm, will be deployed after VM. Skip
+ proxy_charm = None
+ if proxy_charm:
+ if not vca_model_name:
+ step = "creating VCA model name"
+ await self.n2vc.CreateNetworkService(nsr_id)
+ vca_model_name = nsr_id
+ db_nsr_update["_admin.deployed.VCA-model-name"] = nsr_id
+ self.update_db_2("nsrs", nsr_id, db_nsr_update)
+ step = "deploying proxy charm to configure member_vnf_index={} vdu={}".format(vnf_index,
+ vdu["id"])
+ await self.n2vc.login()
+ vdur = db_vnfrs[vnf_index]["vdur"][vdu_index]
+ # TODO for the moment only first vdu_id contains a charm deployed
+ if vdur["vdu-id-ref"] != vdu["id"]:
+ raise LcmException("Mismatch vdur {}, vdu {} at index {} for member_vnf_index={}"
+ .format(vdur["vdu-id-ref"], vdu["id"], vdu_index, vnf_index))
+ vnfr_params["rw_mgmt_ip"] = vdur["ip-address"]
+ charm_params = {
+ "user_values": vnfr_params,
+ "rw_mgmt_ip": vdur["ip-address"],
+ "initial-config-primitive": {} # vdu_config.get('initial-config-primitive') or {}
+ }
+ deploy_charm(vnf_index, vdu["id"], vdur.get("name"), vdur["count-index"],
+ charm_params, n2vc_info)
+ number_to_configure += 1
+
+ # Check if this NS has a charm configuration
+
+ ns_config = nsd.get("ns-configuration")
+ if ns_config and ns_config.get("juju"):
+ proxy_charm = ns_config["juju"]["charm"]
+ if ns_config["juju"].get("proxy") is False:
+ # native_charm, will be deployed after VM. Skip
+ proxy_charm = None
+ if proxy_charm:
+ step = "deploying proxy charm to configure ns"
+ # TODO is NS mgmt IP address needed?
+
+ # Get additional parameters
+ additional_params = {}
+ if db_nsr.get("additionalParamsForNs"):
+ additional_params = db_nsr["additionalParamsForNs"].copy()
+ for k, v in additional_params.items():
+ if isinstance(v, str) and v.startswith("!!yaml "):
+ additional_params[k] = yaml.safe_load(v[7:])
+
+ # additional_params["rw_mgmt_ip"] = db_nsr["ip-address"]
+ charm_params = {
+ "user_values": additional_params,
+ # "rw_mgmt_ip": db_nsr["ip-address"],
+ "initial-config-primitive": {} # ns_config.get('initial-config-primitive') or {}
+ }
+
+ # Login to the VCA. If there are multiple calls to login(),
+ # subsequent calls will be a nop and return immediately.
+ await self.n2vc.login()
+ deploy_charm(None, None, None, None, charm_params, n2vc_info)
+ number_to_configure += 1
+
+ db_nsr_update["operational-status"] = "running"
+
+ # Wait until all charms have reached blocked or active status
+ step = "waiting proxy charms to be ready"
+ if number_to_configure:
+ # wait until all charms are configured.
+ # steps are:
+ # initial-deploy
+ # get-ssh-public-key
+ # generate-ssh-key
+ # retry-get-ssh-public-key
+ # ssh-public-key-obtained
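+ # Expected transitions, as implemented in the loop below: initial-deploy -> get-ssh-public-key ->
+ # ssh-public-key-obtained on success; a failed get-ssh-public-key goes through generate-ssh-key ->
+ # retry-get-ssh-public-key before obtaining the key, and a second failure raises LcmException.
+ # If the charm lacks the primitive (PrimitiveDoesNotExist), the step jumps directly to
+ # ssh-public-key-obtained with an empty key.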
+ while time() <= start_deploy + self.total_deploy_timeout:
+ if db_nsr_update:
+ self.update_db_2("nsrs", nsr_id, db_nsr_update)
+ if db_nslcmop_update:
+ self.update_db_2("nslcmops", nslcmop_id, db_nslcmop_update)
+
+ all_active = True
+ for vca_index, vca_deployed in enumerate(vca_deployed_list):
+ database_entry = "_admin.deployed.VCA.{}.".format(vca_index)
+ if vca_deployed["step"] == "initial-deploy":
+ if vca_deployed["operational-status"] in ("active", "blocked"):
+ step = "execute charm primitive get-ssh-public-key for member_vnf_index={} vdu_id={}" \
+ .format(vca_deployed["member-vnf-index"],
+ vca_deployed["vdu_id"])
+ self.logger.debug(logging_text + step)
+ try:
+ primitive_id = await self.n2vc.ExecutePrimitive(
+ vca_deployed["model"],
+ vca_deployed["application"],
+ "get-ssh-public-key",
+ None,
+ )
+ vca_deployed["step"] = db_nsr_update[database_entry + "step"] = "get-ssh-public-key"
+ vca_deployed["primitive_id"] = db_nsr_update[database_entry + "primitive_id"] =\
+ primitive_id
+ db_nsr_update[database_entry + "operational-status"] =\
+ vca_deployed["operational-status"]
+ except PrimitiveDoesNotExist:
+ ssh_public_key = None
+ vca_deployed["step"] = db_nsr_update[database_entry + "step"] =\
+ "ssh-public-key-obtained"
+ vca_deployed["ssh-public-key"] = db_nsr_update[database_entry + "ssh-public-key"] =\
+ ssh_public_key
+ step = "charm ssh-public-key for member_vnf_index={} vdu_id={} not needed".format(
+ vca_deployed["member-vnf-index"], vca_deployed["vdu_id"])
+ self.logger.debug(logging_text + step)
+
+ elif vca_deployed["step"] in ("get-ssh-public-key", "retry-get-ssh-public-key"):
+ primitive_id = vca_deployed["primitive_id"]
+ primitive_status = await self.n2vc.GetPrimitiveStatus(vca_deployed["model"],
+ primitive_id)
+ if primitive_status in ("completed", "failed"):
+ primitive_result = await self.n2vc.GetPrimitiveOutput(vca_deployed["model"],
+ primitive_id)
+ vca_deployed["primitive_id"] = db_nsr_update[database_entry + "primitive_id"] = None
+ if primitive_status == "completed" and isinstance(primitive_result, dict) and \
+ primitive_result.get("pubkey"):
+ ssh_public_key = primitive_result.get("pubkey")
+ vca_deployed["step"] = db_nsr_update[database_entry + "step"] =\
+ "ssh-public-key-obtained"
+ vca_deployed["ssh-public-key"] = db_nsr_update[database_entry + "ssh-public-key"] =\
+ ssh_public_key
+ n2vc_key_list.append(ssh_public_key)
+ step = "charm ssh-public-key for member_vnf_index={} vdu_id={} is '{}'".format(
+ vca_deployed["member-vnf-index"], vca_deployed["vdu_id"], ssh_public_key)
+ self.logger.debug(logging_text + step)
+ else: # primitive_status == "failed":
+ if vca_deployed["step"] == "get-ssh-public-key":
+ step = "execute charm primitive generate-ssh-public-key for member_vnf_index="\
+ "{} vdu_id={}".format(vca_deployed["member-vnf-index"],
+ vca_deployed["vdu_id"])
+ self.logger.debug(logging_text + step)
+ vca_deployed["step"] = db_nsr_update[database_entry + "step"] =\
+ "generate-ssh-key"
+ primitive_id = await self.n2vc.ExecutePrimitive(
+ vca_deployed["model"],
+ vca_deployed["application"],
+ "generate-ssh-key",
+ None,
+ )
+ vca_deployed["primitive_id"] = db_nsr_update[database_entry + "primitive_id"] =\
+ primitive_id
+ else: # failed for second time
+ raise LcmException(
+ "error executing primitive get-ssh-public-key: {}".format(primitive_result))
+
+ elif vca_deployed["step"] == "generate-ssh-key":
+ primitive_id = vca_deployed["primitive_id"]
+ primitive_status = await self.n2vc.GetPrimitiveStatus(vca_deployed["model"],
+ primitive_id)
+ if primitive_status in ("completed", "failed"):
+ primitive_result = await self.n2vc.GetPrimitiveOutput(vca_deployed["model"],
+ primitive_id)
+ vca_deployed["primitive_id"] = db_nsr_update[
+ database_entry + "primitive_id"] = None
+ if primitive_status == "completed":
+ step = "execute primitive get-ssh-public-key again for member_vnf_index={} "\
+ "vdu_id={}".format(vca_deployed["member-vnf-index"],
+ vca_deployed["vdu_id"])
+ self.logger.debug(logging_text + step)
+ vca_deployed["step"] = db_nsr_update[database_entry + "step"] = \
+ "retry-get-ssh-public-key"
+ primitive_id = await self.n2vc.ExecutePrimitive(
+ vca_deployed["model"],
+ vca_deployed["application"],
+ "get-ssh-public-key",
+ None,
+ )
+ vca_deployed["primitive_id"] = db_nsr_update[database_entry + "primitive_id"] =\
+ primitive_id
+
+ else: # primitive_status == "failed":
+ raise LcmException("error executing primitive generate-ssh-key: {}"
+ .format(primitive_result))
+
+ if vca_deployed["step"] != "ssh-public-key-obtained":
+ all_active = False
+
+ if all_active:
+ break
+ await asyncio.sleep(5)
+ else: # total_deploy_timeout
+ raise LcmException("Timeout waiting charm to be initialized for member_vnf_index={} vdu_id={}"
+ .format(vca_deployed["member-vnf-index"], vca_deployed["vdu_id"]))
+
+ # deploy RO
# get vnfds, instantiate at RO
- for vnfd_id, vnfd in db_vnfds.items():
+ for c_vnf in nsd.get("constituent-vnfd", ()):
+ member_vnf_index = c_vnf["member-vnf-index"]
+ vnfd = db_vnfds_ref[c_vnf['vnfd-id-ref']]
vnfd_ref = vnfd["id"]
- step = db_nsr_update["detailed-status"] = "Creating vnfd={} at RO".format(vnfd_ref)
+ step = db_nsr_update["detailed-status"] = "Creating vnfd='{}' member_vnf_index='{}' at RO".format(
+ vnfd_ref, member_vnf_index)
# self.logger.debug(logging_text + step)
- vnfd_id_RO = "{}.{}.{}".format(nsr_id, RO_descriptor_number, vnfd_ref[:23])
- descriptor_id_2_RO[vnfd_ref] = vnfd_id_RO
+ vnfd_id_RO = "{}.{}.{}".format(nsr_id, RO_descriptor_number, member_vnf_index[:23])
+ vnf_index_2_RO_id[member_vnf_index] = vnfd_id_RO
RO_descriptor_number += 1
+ # look up the position at deployed.RO.vnfd; if not present it will be appended at the end
+ for index, vnf_deployed in enumerate(db_nsr["_admin"]["deployed"]["RO"]["vnfd"]):
+ if vnf_deployed["member-vnf-index"] == member_vnf_index:
+ break
+ else:
+ index = len(db_nsr["_admin"]["deployed"]["RO"]["vnfd"])
+ db_nsr["_admin"]["deployed"]["RO"]["vnfd"].append(None)
+
# look if present
- vnfd_list = await RO.get_list("vnfd", filter_by={"osm_id": vnfd_id_RO})
+ RO_update = {"member-vnf-index": member_vnf_index}
+ vnfd_list = await self.RO.get_list("vnfd", filter_by={"osm_id": vnfd_id_RO})
if vnfd_list:
- db_nsr_update["_admin.deployed.RO.vnfd_id.{}".format(vnfd_id)] = vnfd_list[0]["uuid"]
- self.logger.debug(logging_text + "vnfd={} exists at RO. Using RO_id={}".format(
- vnfd_ref, vnfd_list[0]["uuid"]))
+ RO_update["id"] = vnfd_list[0]["uuid"]
+ self.logger.debug(logging_text + "vnfd='{}' member_vnf_index='{}' exists at RO. Using RO_id={}".
+ format(vnfd_ref, member_vnf_index, vnfd_list[0]["uuid"]))
else:
- vnfd_RO = self.vnfd2RO(vnfd, vnfd_id_RO)
- desc = await RO.create("vnfd", descriptor=vnfd_RO)
- db_nsr_update["_admin.deployed.RO.vnfd_id.{}".format(vnfd_id)] = desc["uuid"]
- db_nsr_update["_admin.nsState"] = "INSTANTIATED"
- self.logger.debug(logging_text + "vnfd={} created at RO. RO_id={}".format(
- vnfd_ref, desc["uuid"]))
+ vnfd_RO = self.vnfd2RO(vnfd, vnfd_id_RO, db_vnfrs[c_vnf["member-vnf-index"]].
+ get("additionalParamsForVnf"), nsr_id)
+ desc = await self.RO.create("vnfd", descriptor=vnfd_RO)
+ RO_update["id"] = desc["uuid"]
+ self.logger.debug(logging_text + "vnfd='{}' member_vnf_index='{}' created at RO. RO_id={}".format(
+ vnfd_ref, member_vnf_index, desc["uuid"]))
+ db_nsr_update["_admin.deployed.RO.vnfd.{}".format(index)] = RO_update
+ db_nsr["_admin"]["deployed"]["RO"]["vnfd"][index] = RO_update
self.update_db_2("nsrs", nsr_id, db_nsr_update)
# create nsd at RO
# self.logger.debug(logging_text + step)
RO_osm_nsd_id = "{}.{}.{}".format(nsr_id, RO_descriptor_number, nsd_ref[:23])
- descriptor_id_2_RO[nsd_ref] = RO_osm_nsd_id
RO_descriptor_number += 1
- nsd_list = await RO.get_list("nsd", filter_by={"osm_id": RO_osm_nsd_id})
+ nsd_list = await self.RO.get_list("nsd", filter_by={"osm_id": RO_osm_nsd_id})
if nsd_list:
db_nsr_update["_admin.deployed.RO.nsd_id"] = RO_nsd_uuid = nsd_list[0]["uuid"]
self.logger.debug(logging_text + "nsd={} exists at RO. Using RO_id={}".format(
nsd_RO.pop("_id", None)
nsd_RO.pop("_admin", None)
for c_vnf in nsd_RO.get("constituent-vnfd", ()):
- vnfd_id = c_vnf["vnfd-id-ref"]
- c_vnf["vnfd-id-ref"] = descriptor_id_2_RO[vnfd_id]
- desc = await RO.create("nsd", descriptor=nsd_RO)
+ member_vnf_index = c_vnf["member-vnf-index"]
+ c_vnf["vnfd-id-ref"] = vnf_index_2_RO_id[member_vnf_index]
+ for c_vld in nsd_RO.get("vld", ()):
+ for cp in c_vld.get("vnfd-connection-point-ref", ()):
+ member_vnf_index = cp["member-vnf-index-ref"]
+ cp["vnfd-id-ref"] = vnf_index_2_RO_id[member_vnf_index]
+
+ desc = await self.RO.create("nsd", descriptor=nsd_RO)
db_nsr_update["_admin.nsState"] = "INSTANTIATED"
db_nsr_update["_admin.deployed.RO.nsd_id"] = RO_nsd_uuid = desc["uuid"]
self.logger.debug(logging_text + "nsd={} created at RO. RO_id={}".format(nsd_ref, RO_nsd_uuid))
# Create ns at RO
# if present use it unless in error status
- RO_nsr_id = db_nsr["_admin"].get("deployed", {}).get("RO", {}).get("nsr_id")
+ RO_nsr_id = deep_get(db_nsr, ("_admin", "deployed", "RO", "nsr_id"))
if RO_nsr_id:
try:
step = db_nsr_update["detailed-status"] = "Looking for existing ns at RO"
# self.logger.debug(logging_text + step + " RO_ns_id={}".format(RO_nsr_id))
- desc = await RO.show("ns", RO_nsr_id)
+ desc = await self.RO.show("ns", RO_nsr_id)
except ROclient.ROClientException as e:
if e.http_code != HTTPStatus.NOT_FOUND:
raise
RO_nsr_id = db_nsr_update["_admin.deployed.RO.nsr_id"] = None
if RO_nsr_id:
- ns_status, ns_status_info = RO.check_ns_status(desc)
+ ns_status, ns_status_info = self.RO.check_ns_status(desc)
db_nsr_update["_admin.deployed.RO.nsr_status"] = ns_status
if ns_status == "ERROR":
step = db_nsr_update["detailed-status"] = "Deleting ns at RO. RO_ns_id={}".format(RO_nsr_id)
self.logger.debug(logging_text + step)
- await RO.delete("ns", RO_nsr_id)
+ await self.RO.delete("ns", RO_nsr_id)
RO_nsr_id = db_nsr_update["_admin.deployed.RO.nsr_id"] = None
if not RO_nsr_id:
step = db_nsr_update["detailed-status"] = "Checking dependencies"
# feature 1429. Add n2vc public key to needed VMs
n2vc_key = await self.n2vc.GetPublicKey()
- RO_ns_params = self.ns_params_2_RO(ns_params, nsd, db_vnfds_ref, [n2vc_key])
+ n2vc_key_list.append(n2vc_key)
+ RO_ns_params = self.ns_params_2_RO(ns_params, nsd, db_vnfds_ref, n2vc_key_list)
- step = db_nsr_update["detailed-status"] = "Creating ns at RO"
- desc = await RO.create("ns", descriptor=RO_ns_params,
- name=db_nsr["name"],
- scenario=RO_nsd_uuid)
+ step = db_nsr_update["detailed-status"] = "Deploying ns at VIM"
+ desc = await self.RO.create("ns", descriptor=RO_ns_params,
+ name=db_nsr["name"],
+ scenario=RO_nsd_uuid)
RO_nsr_id = db_nsr_update["_admin.deployed.RO.nsr_id"] = desc["uuid"]
db_nsr_update["_admin.nsState"] = "INSTANTIATED"
db_nsr_update["_admin.deployed.RO.nsr_status"] = "BUILD"
self.update_db_2("nsrs", nsr_id, db_nsr_update)
# wait until NS is ready
- step = ns_status_detailed = detailed_status = "Waiting ns ready at RO. RO_id={}".format(RO_nsr_id)
+ step = ns_status_detailed = detailed_status = "Waiting for VIM to deploy ns. RO_id={}".format(RO_nsr_id)
detailed_status_old = None
self.logger.debug(logging_text + step)
while time() <= start_deploy + self.total_deploy_timeout:
- desc = await RO.show("ns", RO_nsr_id)
- ns_status, ns_status_info = RO.check_ns_status(desc)
- db_nsr_update["admin.deployed.RO.nsr_status"] = ns_status
+ desc = await self.RO.show("ns", RO_nsr_id)
+ ns_status, ns_status_info = self.RO.check_ns_status(desc)
+ db_nsr_update["_admin.deployed.RO.nsr_status"] = ns_status
if ns_status == "ERROR":
raise ROclient.ROClientException(ns_status_info)
elif ns_status == "BUILD":
step = "Updating NSR"
self.ns_update_nsr(db_nsr_update, db_nsr, desc)
+ db_nsr_update["operational-status"] = "running"
db_nsr["detailed-status"] = "Configuring vnfr"
self.update_db_2("nsrs", nsr_id, db_nsr_update)
- # The parameters we'll need to deploy a charm
- number_to_configure = 0
-
- def deploy_charm(vnf_index, vdu_id, vdu_name, vdu_count_index, mgmt_ip_address, n2vc_info,
- config_primitive=None):
- """An inner function to deploy the charm from either vnf or vdu
- vnf_index is mandatory. vdu_id can be None for a vnf configuration or the id for vdu configuration
- """
- if not mgmt_ip_address:
- raise LcmException("vnfd/vdu has not management ip address to configure it")
- # Login to the VCA.
- # if number_to_configure == 0:
- # self.logger.debug("Logging into N2VC...")
- # task = asyncio.ensure_future(self.n2vc.login())
- # yield from asyncio.wait_for(task, 30.0)
- # self.logger.debug("Logged into N2VC!")
-
- # # await self.n2vc.login()
-
- # Note: The charm needs to exist on disk at the location
- # specified by charm_path.
- base_folder = vnfd["_admin"]["storage"]
- storage_params = self.fs.get_params()
- charm_path = "{}{}/{}/charms/{}".format(
- storage_params["path"],
- base_folder["folder"],
- base_folder["pkg-dir"],
- proxy_charm
- )
-
- # Setup the runtime parameters for this VNF
- params = {'rw_mgmt_ip': mgmt_ip_address}
- if config_primitive:
- params["initial-config-primitive"] = config_primitive
-
- # ns_name will be ignored in the current version of N2VC
- # but will be implemented for the next point release.
- model_name = 'default' # TODO bug 581 : change to nsr_id
- if vdu_id:
- vdu_id_text = vdu_id
+ # Configure proxy charms once VMs are up
+ for vca_index, vca_deployed in enumerate(vca_deployed_list):
+ vnf_index = vca_deployed.get("member-vnf-index")
+ vdu_id = vca_deployed.get("vdu_id")
+ vdu_name = None
+ vdu_count_index = None
+
+ step = "executing proxy charm initial primitives for member_vnf_index={} vdu_id={}".format(vnf_index,
+ vdu_id)
+ add_params = {}
+ initial_config_primitive_list = None
+ if vnf_index:
+ if db_vnfrs[vnf_index].get("additionalParamsForVnf"):
+ add_params = db_vnfrs[vnf_index]["additionalParamsForVnf"].copy()
+ vnfd = db_vnfds_index[vnf_index]
+
+ if vdu_id:
+ for vdu_index, vdu in enumerate(get_iterable(vnfd, 'vdu')):
+ if vdu["id"] == vdu_id:
+ initial_config_primitive_list = vdu['vdu-configuration'].get('initial-config-primitive')
+ break
+ else:
+ raise LcmException("Not found vdu_id={} at vnfd:vdu".format(vdu_id))
+ vdur = db_vnfrs[vnf_index]["vdur"][vdu_index]
+ # TODO for the moment only first vdu_id contains a charm deployed
+ if vdur["vdu-id-ref"] != vdu["id"]:
+ raise LcmException("Mismatch vdur {}, vdu {} at index {} for vnf {}"
+ .format(vdur["vdu-id-ref"], vdu["id"], vdu_index, vnf_index))
+ add_params["rw_mgmt_ip"] = vdur["ip-address"]
+ else:
+ add_params["rw_mgmt_ip"] = db_vnfrs[vnf_index]["ip-address"]
+ initial_config_primitive_list = vnfd["vnf-configuration"].get('initial-config-primitive')
else:
- vdu_id_text = "vnfd" # TODO bug 581 remove and add just an empty string ""
- application_name = self.n2vc.FormatApplicationName(nsr_name, vnf_index, vdu_id_text)
- # TODO bug 581 Add "-" as a final argument
-
- vca_index = len(vca_deployed_list)
- # trunk name and add two char index at the end to ensure that it is unique. It is assumed no more than
- # 26*26 charm in the same NS
- # TODO bug 581 uncoment
- # application_name = application_name[0:48]
- # application_name += chr(97 + vca_index / 26) + chr(97 + vca_index % 26)
- vca_deployed_ = {
- "member-vnf-index": vnf_index,
- "vdu_id": vdu_id,
- "model": model_name,
- "application": application_name,
- "operational-status": "init",
- "detailed-status": "",
- "vnfd_id": vnfd_id,
- "vdu_name": vdu_name,
- "vdu_count_index": vdu_count_index,
- }
- vca_deployed_list.append(vca_deployed_)
- db_nsr_update["_admin.deployed.VCA.{}".format(vca_index)] = vca_deployed_
- self.update_db_2("nsrs", nsr_id, db_nsr_update)
-
- self.logger.debug("Task create_ns={} Passing artifacts path '{}' for {}".format(nsr_id, charm_path,
- proxy_charm))
- if not n2vc_info:
- n2vc_info["nsr_id"] = nsr_id
- n2vc_info["nslcmop_id"] = nslcmop_id
- n2vc_info["n2vc_event"] = asyncio.Event(loop=self.loop)
- n2vc_info["lcmOperationType"] = "instantiate"
- n2vc_info["deployed"] = vca_deployed_list
- n2vc_info["db_update"] = db_nsr_update
- task = asyncio.ensure_future(
- self.n2vc.DeployCharms(
- model_name, # The network service name
- application_name, # The application name
- vnfd, # The vnf descriptor
- charm_path, # Path to charm
- params, # Runtime params, like mgmt ip
- {}, # for native charms only
- self.n2vc_callback, # Callback for status changes
- n2vc_info, # Callback parameter
- None, # Callback parameter (task)
- )
- )
- task.add_done_callback(functools.partial(self.n2vc_callback, model_name, application_name, None, None,
- n2vc_info))
- self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "create_charm:" + application_name, task)
-
- step = "Looking for needed vnfd to configure"
+ if db_nsr.get("additionalParamsForNs"):
+ add_params = db_nsr["additionalParamsForNs"].copy()
+ for k, v in add_params.items():
+ if isinstance(v, str) and v.startswith("!!yaml "):
+ add_params[k] = yaml.safe_load(v[7:])
+ add_params["rw_mgmt_ip"] = None
+ add_params["ns_config_info"] = self._get_ns_config_info(vca_deployed_list)
+ initial_config_primitive_list = nsd["ns-configuration"].get('initial-config-primitive')
+
+ # add primitive verify-ssh-credentials to the list after config, but only when it is a vnf or vdu charm
+ # add config if not present for NS charm
+ initial_config_primitive_list = self._get_initial_config_primitive_list(initial_config_primitive_list,
+ vca_deployed)
+
+ for initial_config_primitive in initial_config_primitive_list:
+ primitive_params_ = self._map_primitive_params(initial_config_primitive, {}, add_params)
+ self.logger.debug(logging_text + step + " primitive '{}' params '{}'"
+ .format(initial_config_primitive["name"], primitive_params_))
+ primitive_result, primitive_detail = await self._ns_execute_primitive(
+ db_nsr["_admin"]["deployed"], vnf_index, vdu_id, vdu_name, vdu_count_index,
+ initial_config_primitive["name"],
+ primitive_params_,
+ retries=10 if initial_config_primitive["name"] == "verify-ssh-credentials" else 0,
+ retries_interval=30)
+ if primitive_result != "COMPLETED":
+ raise LcmException("charm error executing primitive {} for member_vnf_index={} vdu_id={}: '{}'"
+ .format(initial_config_primitive["name"], vca_deployed["member-vnf-index"],
+ vca_deployed["vdu_id"], primitive_detail))
+
+ # Deploy native charms
+ step = "Looking for needed vnfd to configure with native charm"
self.logger.debug(logging_text + step)
for c_vnf in get_iterable(nsd, "constituent-vnfd"):
vnf_index = str(c_vnf["member-vnf-index"])
vnfd = db_vnfds_ref[vnfd_id]
+ # Get additional parameters
+ vnfr_params = {}
+ if db_vnfrs[vnf_index].get("additionalParamsForVnf"):
+ vnfr_params = db_vnfrs[vnf_index]["additionalParamsForVnf"].copy()
+ for k, v in vnfr_params.items():
+ if isinstance(v, str) and v.startswith("!!yaml "):
+ vnfr_params[k] = yaml.safe_load(v[7:])
+
# Check if this VNF has a charm configuration
vnf_config = vnfd.get("vnf-configuration")
-
if vnf_config and vnf_config.get("juju"):
+ native_charm = vnf_config["juju"].get("proxy") is False
proxy_charm = vnf_config["juju"]["charm"]
- config_primitive = None
+ if native_charm and proxy_charm:
+ if not vca_model_name:
+ step = "creating VCA model name '{}'".format(nsr_id)
+ self.logger.debug(logging_text + step)
+ await self.n2vc.CreateNetworkService(nsr_id)
+ vca_model_name = nsr_id
+ db_nsr_update["_admin.deployed.VCA-model-name"] = nsr_id
+ self.update_db_2("nsrs", nsr_id, db_nsr_update)
+ step = "deploying native charm for vnf_member_index={}".format(vnf_index)
+ self.logger.debug(logging_text + step)
- if proxy_charm:
- if 'initial-config-primitive' in vnf_config:
- config_primitive = vnf_config['initial-config-primitive']
+ vnfr_params["rw_mgmt_ip"] = db_vnfrs[vnf_index]["ip-address"]
+ charm_params = {
+ "user_values": vnfr_params,
+ "rw_mgmt_ip": db_vnfrs[vnf_index]["ip-address"],
+ "initial-config-primitive": vnf_config.get('initial-config-primitive') or {},
+ }
+
+ # get username
+ # TODO remove this when changes on IM regarding config-access:ssh-access:default-user are
+ # merged. Meanwhile get the username from initial-config-primitive
+ if vnf_config.get("initial-config-primitive"):
+ for param in vnf_config["initial-config-primitive"][0].get("parameter", ()):
+ if param["name"] == "ssh-username":
+ charm_params["username"] = param["value"]
+ if vnf_config.get("config-access") and vnf_config["config-access"].get("ssh-access"):
+ if vnf_config["config-access"]["ssh-access"].get("required"):
+ charm_params["username"] = vnf_config["config-access"]["ssh-access"].get("default-user")
# Login to the VCA. If there are multiple calls to login(),
# subsequent calls will be a nop and return immediately.
- step = "connecting to N2VC to configure vnf {}".format(vnf_index)
await self.n2vc.login()
- deploy_charm(vnf_index, None, None, None, db_vnfrs[vnf_index]["ip-address"], n2vc_info,
- config_primitive)
+
+ deploy_charm(vnf_index, None, None, None, charm_params, n2vc_info, native_charm)
number_to_configure += 1
# Deploy charms for each VDU that supports one.
for vdu_index, vdu in enumerate(get_iterable(vnfd, 'vdu')):
vdu_config = vdu.get('vdu-configuration')
- proxy_charm = None
- config_primitive = None
+ native_charm = False
if vdu_config and vdu_config.get("juju"):
+ native_charm = vdu_config["juju"].get("proxy") is False
proxy_charm = vdu_config["juju"]["charm"]
+ if native_charm and proxy_charm:
+ if not vca_model_name:
+ step = "creating VCA model name"
+ await self.n2vc.CreateNetworkService(nsr_id)
+ vca_model_name = nsr_id
+ db_nsr_update["_admin.deployed.VCA-model-name"] = nsr_id
+ self.update_db_2("nsrs", nsr_id, db_nsr_update)
+ step = "deploying native charm for vnf_member_index={} vdu_id={}".format(vnf_index,
+ vdu["id"])
- if 'initial-config-primitive' in vdu_config:
- config_primitive = vdu_config['initial-config-primitive']
+ self.logger.debug(logging_text + step)
- if proxy_charm:
- step = "connecting to N2VC to configure vdu {} from vnf {}".format(vdu["id"], vnf_index)
- await self.n2vc.login()
vdur = db_vnfrs[vnf_index]["vdur"][vdu_index]
+
# TODO for the moment only first vdu_id contains a charm deployed
if vdur["vdu-id-ref"] != vdu["id"]:
raise LcmException("Mismatch vdur {}, vdu {} at index {} for vnf {}"
.format(vdur["vdu-id-ref"], vdu["id"], vdu_index, vnf_index))
+ vnfr_params["rw_mgmt_ip"] = vdur["ip-address"]
+ charm_params = {
+ "user_values": vnfr_params,
+ "rw_mgmt_ip": vdur["ip-address"],
+ "initial-config-primitive": vdu_config.get('initial-config-primitive') or {}
+ }
+
+ # get username
+ # TODO remove this when changes on IM regarding config-access:ssh-access:default-user are
+ # merged. Meanwhile get the username from initial-config-primitive
+ if vdu_config.get("initial-config-primitive"):
+ for param in vdu_config["initial-config-primitive"][0].get("parameter", ()):
+ if param["name"] == "ssh-username":
+ charm_params["username"] = param["value"]
+ if vdu_config.get("config-access") and vdu_config["config-access"].get("ssh-access"):
+ if vdu_config["config-access"]["ssh-access"].get("required"):
+ charm_params["username"] = vdu_config["config-access"]["ssh-access"].get(
+ "default-user")
+
+ await self.n2vc.login()
+
deploy_charm(vnf_index, vdu["id"], vdur.get("name"), vdur["count-index"],
- vdur["ip-address"], n2vc_info, config_primitive)
+ charm_params, n2vc_info, native_charm)
number_to_configure += 1
- db_nsr_update["operational-status"] = "running"
+ # Check if this NS has a charm configuration
+
+ ns_config = nsd.get("ns-configuration")
+ if ns_config and ns_config.get("juju"):
+ native_charm = ns_config["juju"].get("proxy") is False
+ proxy_charm = ns_config["juju"]["charm"]
+ if native_charm and proxy_charm:
+ step = "deploying native charm to configure ns"
+ # TODO is NS mgmt IP address needed?
+
+ # Get additional parameters
+ additional_params = {}
+ if db_nsr.get("additionalParamsForNs"):
+ additional_params = db_nsr["additionalParamsForNs"].copy()
+ for k, v in additional_params.items():
+ if isinstance(v, str) and v.startswith("!!yaml "):
+ additional_params[k] = yaml.safe_load(v[7:])
+
+ # additional_params["rw_mgmt_ip"] = db_nsr["ip-address"]
+ charm_params = {
+ "user_values": additional_params,
+ "rw_mgmt_ip": db_nsr.get("ip-address"),
+ "initial-config-primitive": ns_config.get('initial-config-primitive') or {}
+ }
+
+ # get username
+ # TODO remove this when changes on IM regarding config-access:ssh-access:default-user are
+ # merged. Meanwhile get the username from initial-config-primitive
+ if ns_config.get("initial-config-primitive"):
+ for param in ns_config["initial-config-primitive"][0].get("parameter", ()):
+ if param["name"] == "ssh-username":
+ charm_params["username"] = param["value"]
+ if ns_config.get("config-access") and ns_config["config-access"].get("ssh-access"):
+ if ns_config["config-access"]["ssh-access"].get("required"):
+ charm_params["username"] = ns_config["config-access"]["ssh-access"].get("default-user")
+
+ # Login to the VCA. If there are multiple calls to login(),
+ # subsequent calls will be a nop and return immediately.
+ await self.n2vc.login()
+ deploy_charm(None, None, None, None, charm_params, n2vc_info, native_charm)
+ number_to_configure += 1
+
+ # wait until all charms are ok
configuration_failed = False
if number_to_configure:
+ step = "Waiting all charms are active"
old_status = "configuring: init: {}".format(number_to_configure)
db_nsr_update["config-status"] = old_status
db_nsr_update["detailed-status"] = old_status
self.update_db_2("nsrs", nsr_id, db_nsr_update)
if db_nslcmop_update:
self.update_db_2("nslcmops", nslcmop_id, db_nslcmop_update)
- # TODO add a fake tast that set n2vc_event after some time
+ # TODO add a fake task that sets n2vc_event after some time
await n2vc_info["n2vc_event"].wait()
n2vc_info["n2vc_event"].clear()
all_active = True
db_nslcmop_update["detailed-status"] = "FAILED {}: {}".format(step, exc)
db_nslcmop_update["operationState"] = nslcmop_operation_state = "FAILED"
db_nslcmop_update["statusEnteredTime"] = time()
- if db_nsr:
- db_nsr_update["_admin.nslcmop"] = None
- self.update_db_2("nsrs", nsr_id, db_nsr_update)
- if db_nslcmop_update:
- self.update_db_2("nslcmops", nslcmop_id, db_nslcmop_update)
+ try:
+ if db_nsr:
+ db_nsr_update["_admin.nslcmop"] = None
+ self.update_db_2("nsrs", nsr_id, db_nsr_update)
+ if db_nslcmop_update:
+ self.update_db_2("nslcmops", nslcmop_id, db_nslcmop_update)
+ except DbException as e:
+ self.logger.error(logging_text + "Cannot update database: {}".format(e))
if nslcmop_operation_state:
try:
await self.msg.aiowrite("ns", "instantiated", {"nsr_id": nsr_id, "nslcmop_id": nslcmop_id,
- "operationState": nslcmop_operation_state})
+ "operationState": nslcmop_operation_state},
+ loop=self.loop)
except Exception as e:
self.logger.error(logging_text + "kafka_write notification Exception {}".format(e))
self.logger.debug(logging_text + "Exit")
self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_instantiate")
+ async def _destroy_charm(self, model, application):
+ """
+ Order N2VC to destroy a charm
+ :param model:
+ :param application:
+ :return: True if the charm does not exist. False if it exists
+ """
+ if not await self.n2vc.HasApplication(model, application):
+ return True # Already removed
+ await self.n2vc.RemoveCharms(model, application)
+ return False
+
+ async def _wait_charm_destroyed(self, model, application, timeout):
+ """
+ Wait until charm does not exist
+ :param model:
+ :param application:
+ :param timeout:
+ :return: True if it does not exist, False on timeout
+ """
+ while True:
+ if not await self.n2vc.HasApplication(model, application):
+ return True
+ if timeout < 0:
+ return False
+ await asyncio.sleep(10)
+ timeout -= 10
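+
+ # Typical combined usage of the two helpers above (sketch; model/application values are illustrative):
+ #     already_gone = await self._destroy_charm(model_name, application_name)
+ #     if not already_gone:
+ #         destroyed = await self._wait_charm_destroyed(model_name, application_name, self.timeout_charm_delete)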
+
+ # Check if this VNFD has a configured terminate action
+ def _has_terminate_config_primitive(self, vnfd):
+ vnf_config = vnfd.get("vnf-configuration")
+ if vnf_config and vnf_config.get("terminate-config-primitive"):
+ return True
+ else:
+ return False
+
+ @staticmethod
+ def _get_terminate_config_primitive_seq_list(vnfd):
+ """ Get a numerically sorted list of the sequences for this VNFD's terminate action """
+ # No need to check for existing primitive twice, already done before
+ vnf_config = vnfd.get("vnf-configuration")
+ seq_list = vnf_config.get("terminate-config-primitive")
+ # Get all 'seq' tags in seq_list, order sequences numerically, ascending.
+ seq_list_sorted = sorted(seq_list, key=lambda x: int(x['seq']))
+ return seq_list_sorted
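+
+ # Example with hypothetical descriptor content: a terminate-config-primitive list
+ #     [{"seq": "3", "name": "cleanup"}, {"seq": "1", "name": "stop"}]
+ # is returned with the "stop" entry first and "cleanup" second, because sorting uses int(x['seq']).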
+
+ @staticmethod
+ def _create_nslcmop(nsr_id, operation, params):
+ """
+ Creates an nslcmop content to be stored at the database.
+ :param nsr_id: internal id of the instance
+ :param operation: instantiate, terminate, scale, action, ...
+ :param params: user parameters for the operation
+ :return: dictionary following SOL005 format
+ """
+ # Raise exception if invalid arguments
+ if not (nsr_id and operation and params):
+ raise LcmException(
+ "Parameters 'nsr_id', 'operation' and 'params' needed to create primitive not provided")
+ now = time()
+ _id = str(uuid4())
+ nslcmop = {
+ "id": _id,
+ "_id": _id,
+ # COMPLETED,PARTIALLY_COMPLETED,FAILED_TEMP,FAILED,ROLLING_BACK,ROLLED_BACK
+ "operationState": "PROCESSING",
+ "statusEnteredTime": now,
+ "nsInstanceId": nsr_id,
+ "lcmOperationType": operation,
+ "startTime": now,
+ "isAutomaticInvocation": False,
+ "operationParams": params,
+ "isCancelPending": False,
+ "links": {
+ "self": "/osm/nslcm/v1/ns_lcm_op_occs/" + _id,
+ "nsInstance": "/osm/nslcm/v1/ns_instances/" + nsr_id,
+ }
+ }
+ return nslcmop
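+ # Example (illustrative sketch; the primitive name and params are hypothetical):
+ #   nslcmop_desc = NsLcm._create_nslcmop(nsr_id, "action", {"member_vnf_index": "1",
+ #                                                           "primitive": "touch",
+ #                                                           "primitive_params": {}})
+ #   # nslcmop_desc["operationState"] is "PROCESSING"; it can then be stored with self.db.create("nslcmops", nslcmop_desc)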
+
+ def _get_terminate_primitive_params(self, seq, vnf_index):
+ primitive = seq.get('name')
+ primitive_params = {}
+ params = {
+ "member_vnf_index": vnf_index,
+ "primitive": primitive,
+ "primitive_params": primitive_params,
+ }
+ desc_params = {}
+ return self._map_primitive_params(seq, params, desc_params)
+
+ # sub-operations
+
+ def _reintent_or_skip_suboperation(self, db_nslcmop, op_index):
+ op = db_nslcmop.get('_admin', {}).get('operations', [])[op_index]
+ if (op.get('operationState') == 'COMPLETED'):
+ # b. Skip sub-operation
+ # _ns_execute_primitive() or RO.create_action() will NOT be executed
+ return self.SUBOPERATION_STATUS_SKIP
+ else:
+ # c. Reintent executing sub-operation
+ # The sub-operation exists, and operationState != 'COMPLETED'
+ # Update operationState = 'PROCESSING' to indicate a reintent.
+ operationState = 'PROCESSING'
+ detailed_status = 'In progress'
+ self._update_suboperation_status(
+ db_nslcmop, op_index, operationState, detailed_status)
+ # Return the sub-operation index
+ # _ns_execute_primitive() or RO.create_action() will be called from scale()
+ # with arguments extracted from the sub-operation
+ return op_index
+
+ # Find a sub-operation where all keys in a matching dictionary must match
+ # Returns the index of the matching sub-operation, or SUBOPERATION_STATUS_NOT_FOUND if no match
+ def _find_suboperation(self, db_nslcmop, match):
+ if (db_nslcmop and match):
+ op_list = db_nslcmop.get('_admin', {}).get('operations', [])
+ for i, op in enumerate(op_list):
+ if all(op.get(k) == match[k] for k in match):
+ return i
+ return self.SUBOPERATION_STATUS_NOT_FOUND
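+ # Example (illustrative sketch; the match keys mirror those stored by _add_suboperation):
+ #   match = {"member_vnf_index": "1", "primitive": "touch", "lcmOperationType": "PRE-SCALE"}
+ #   op_index = self._find_suboperation(db_nslcmop, match)
+ #   # op_index >= 0 if a matching sub-operation exists, SUBOPERATION_STATUS_NOT_FOUND otherwise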
+
+ # Update status for a sub-operation given its index
+ def _update_suboperation_status(self, db_nslcmop, op_index, operationState, detailed_status):
+ # Update DB for HA tasks
+ q_filter = {'_id': db_nslcmop['_id']}
+ update_dict = {'_admin.operations.{}.operationState'.format(op_index): operationState,
+ '_admin.operations.{}.detailed-status'.format(op_index): detailed_status}
+ self.db.set_one("nslcmops",
+ q_filter=q_filter,
+ update_dict=update_dict,
+ fail_on_empty=False)
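+ # Example of the dotted-key update generated for op_index=0 (illustrative sketch):
+ #   {'_admin.operations.0.operationState': 'COMPLETED',
+ #    '_admin.operations.0.detailed-status': 'Done'}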
+
+ # Add sub-operation, return the index of the added sub-operation
+ # Optionally, set operationState, detailed-status, and operationType
+ # Status and type are currently set for 'scale' sub-operations:
+ # 'operationState' : 'PROCESSING' | 'COMPLETED' | 'FAILED'
+ # 'detailed-status' : status message
+ # 'operationType': may be any type, in the case of scaling: 'PRE-SCALE' | 'POST-SCALE'
+ # Status and operation type are currently only used for 'scale', but NOT for 'terminate' sub-operations.
+ def _add_suboperation(self, db_nslcmop, vnf_index, vdu_id, vdu_count_index,
+ vdu_name, primitive, mapped_primitive_params,
+ operationState=None, detailed_status=None, operationType=None,
+ RO_nsr_id=None, RO_scaling_info=None):
+ if not (db_nslcmop):
+ return self.SUBOPERATION_STATUS_NOT_FOUND
+ # Get the "_admin.operations" list, if it exists
+ db_nslcmop_admin = db_nslcmop.get('_admin', {})
+ op_list = db_nslcmop_admin.get('operations')
+ # Create or append to the "_admin.operations" list
+ new_op = {'member_vnf_index': vnf_index,
+ 'vdu_id': vdu_id,
+ 'vdu_count_index': vdu_count_index,
+ 'primitive': primitive,
+ 'primitive_params': mapped_primitive_params}
+ if operationState:
+ new_op['operationState'] = operationState
+ if detailed_status:
+ new_op['detailed-status'] = detailed_status
+ if operationType:
+ new_op['lcmOperationType'] = operationType
+ if RO_nsr_id:
+ new_op['RO_nsr_id'] = RO_nsr_id
+ if RO_scaling_info:
+ new_op['RO_scaling_info'] = RO_scaling_info
+ if not op_list:
+ # No existing operations, create key 'operations' with current operation as first list element
+ db_nslcmop_admin.update({'operations': [new_op]})
+ op_list = db_nslcmop_admin.get('operations')
+ else:
+ # Existing operations, append operation to list
+ op_list.append(new_op)
+
+ db_nslcmop_update = {'_admin.operations': op_list}
+ self.update_db_2("nslcmops", db_nslcmop['_id'], db_nslcmop_update)
+ op_index = len(op_list) - 1
+ return op_index
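+ # Example (illustrative sketch; a 'terminate' sub-operation only records the primitive and its params):
+ #   op_index = self._add_suboperation(db_nslcmop, vnf_index="1", vdu_id=None, vdu_count_index=None,
+ #                                     vdu_name=None, primitive="stop", mapped_primitive_params={})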
+
+ # Helper methods for scale() sub-operations
+
+ # pre-scale/post-scale:
+ # Check for 3 different cases:
+ # a. New: First time execution, return SUBOPERATION_STATUS_NEW
+ # b. Skip: Existing sub-operation exists, operationState == 'COMPLETED', return SUBOPERATION_STATUS_SKIP
+ # c. Reintent: Existing sub-operation exists, operationState != 'COMPLETED', return op_index to re-execute
+ def _check_or_add_scale_suboperation(self, db_nslcmop, vnf_index,
+ vnf_config_primitive, primitive_params, operationType,
+ RO_nsr_id=None, RO_scaling_info=None):
+ # Find this sub-operation
+ if (RO_nsr_id and RO_scaling_info):
+ operationType = 'SCALE-RO'
+ match = {
+ 'member_vnf_index': vnf_index,
+ 'RO_nsr_id': RO_nsr_id,
+ 'RO_scaling_info': RO_scaling_info,
+ }
+ else:
+ match = {
+ 'member_vnf_index': vnf_index,
+ 'primitive': vnf_config_primitive,
+ 'primitive_params': primitive_params,
+ 'lcmOperationType': operationType
+ }
+ op_index = self._find_suboperation(db_nslcmop, match)
+ if (op_index == self.SUBOPERATION_STATUS_NOT_FOUND):
+ # a. New sub-operation
+ # The sub-operation does not exist, add it.
+ # _ns_execute_primitive() will be called from scale() as usual, with non-modified arguments
+ # The following parameters are set to None for all kinds of scaling:
+ vdu_id = None
+ vdu_count_index = None
+ vdu_name = None
+ if (RO_nsr_id and RO_scaling_info):
+ vnf_config_primitive = None
+ primitive_params = None
+ else:
+ RO_nsr_id = None
+ RO_scaling_info = None
+ # Initial status for sub-operation
+ operationState = 'PROCESSING'
+ detailed_status = 'In progress'
+ # Add sub-operation for pre/post-scaling (zero or more operations)
+ self._add_suboperation(db_nslcmop,
+ vnf_index,
+ vdu_id,
+ vdu_count_index,
+ vdu_name,
+ vnf_config_primitive,
+ primitive_params,
+ operationState,
+ detailed_status,
+ operationType,
+ RO_nsr_id,
+ RO_scaling_info)
+ return self.SUBOPERATION_STATUS_NEW
+ else:
+ # Return either SUBOPERATION_STATUS_SKIP (operationState == 'COMPLETED'),
+ # or op_index (operationState != 'COMPLETED')
+ return self._reintent_or_skip_suboperation(db_nslcmop, op_index)
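+ # Example (illustrative sketch of the three possible outcomes for a pre-scale primitive):
+ #   op_index = self._check_or_add_scale_suboperation(db_nslcmop, vnf_index, "touch",
+ #                                                    primitive_params, 'PRE-SCALE')
+ #   # SUBOPERATION_STATUS_NEW  -> first execution, run the primitive with the given args
+ #   # SUBOPERATION_STATUS_SKIP -> already COMPLETED in a previous run, skip it
+ #   # op_index >= 0            -> existing sub-operation, re-run it with its stored params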
+
+ # Helper methods for terminate()
+
+ async def _terminate_action(self, db_nslcmop, nslcmop_id, nsr_id):
+ """ Create a primitive with params from VNFD
+ Called from terminate() before deleting instance
+ Calls action() to execute the primitive """
+ logging_text = "Task ns={} _terminate_action={} ".format(nsr_id, nslcmop_id)
+ db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
+ db_vnfds = {}
+ # Loop over VNFRs
+ for vnfr in db_vnfrs_list:
+ vnfd_id = vnfr["vnfd-id"]
+ vnf_index = vnfr["member-vnf-index-ref"]
+ if vnfd_id not in db_vnfds:
+ step = "Getting vnfd={} id='{}' from db".format(vnfd_id, vnfd_id)
+ vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
+ db_vnfds[vnfd_id] = vnfd
+ vnfd = db_vnfds[vnfd_id]
+ if not self._has_terminate_config_primitive(vnfd):
+ continue
+ # Get the primitive's sorted sequence list
+ seq_list = self._get_terminate_config_primitive_seq_list(vnfd)
+ for seq in seq_list:
+ # For each sequence in list, get primitive and call _ns_execute_primitive()
+ step = "Calling terminate action for vnf_member_index={} primitive={}".format(
+ vnf_index, seq.get("name"))
+ self.logger.debug(logging_text + step)
+ # Create the primitive for each sequence, i.e. "primitive": "touch"
+ primitive = seq.get('name')
+ mapped_primitive_params = self._get_terminate_primitive_params(seq, vnf_index)
+ # The following 3 parameters are taken from operationParams and are typically None for 'terminate':
+ # vdu_id, vdu_count_index, vdu_name
+ vdu_id = db_nslcmop["operationParams"].get("vdu_id")
+ vdu_count_index = db_nslcmop["operationParams"].get("vdu_count_index")
+ vdu_name = db_nslcmop["operationParams"].get("vdu_name")
+ # Add sub-operation
+ self._add_suboperation(db_nslcmop,
+ vnf_index,
+ vdu_id,
+ vdu_count_index,
+ vdu_name,
+ primitive,
+ mapped_primitive_params)
+ # Sub-operations: Call _ns_execute_primitive() instead of action()
+ db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
+ nsr_deployed = db_nsr["_admin"]["deployed"]
+ result, result_detail = await self._ns_execute_primitive(
+ nsr_deployed, vnf_index, vdu_id, vdu_name, vdu_count_index, primitive,
+ mapped_primitive_params)
+
+ # nslcmop_operation_state, nslcmop_operation_state_detail = await self.action(
+ # nsr_id, nslcmop_terminate_action_id)
+ # Raise exception if _ns_execute_primitive() returns other than ['COMPLETED', 'PARTIALLY_COMPLETED']
+ result_ok = ['COMPLETED', 'PARTIALLY_COMPLETED']
+ if result not in result_ok:
+ raise LcmException(
+ "terminate_primitive_action for vnf_member_index={}",
+ " primitive={} fails with error {}".format(
+ vnf_index, seq.get("name"), result_detail))
+
async def terminate(self, nsr_id, nslcmop_id):
+
+ # Try to lock HA task here
+ task_is_locked_by_me = self.lcm_tasks.lock_HA('ns', 'nslcmops', nslcmop_id)
+ if not task_is_locked_by_me:
+ return
+
logging_text = "Task ns={} terminate={} ".format(nsr_id, nslcmop_id)
self.logger.debug(logging_text + "Enter")
db_nsr = None
db_nslcmop = None
exc = None
failed_detail = [] # annotates all failed error messages
- vca_task_list = []
- vca_task_dict = {}
- vca_application_name2index = {}
+ vca_time_destroy = None # time when the charm destroy order was issued
db_nsr_update = {"_admin.nslcmop": nslcmop_id}
db_nslcmop_update = {}
nslcmop_operation_state = None
+ autoremove = False # autoremove after terminated
try:
+ # wait for any previous tasks in process
+ step = "Waiting for previous operations to terminate"
+ await self.lcm_tasks.waitfor_related_HA("ns", 'nslcmops', nslcmop_id)
+
step = "Getting nslcmop={} from db".format(nslcmop_id)
db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
step = "Getting nsr={} from db".format(nsr_id)
nsr_deployed = deepcopy(db_nsr["_admin"].get("deployed"))
if db_nsr["_admin"]["nsState"] == "NOT_INSTANTIATED":
return
- # TODO ALF remove
- # db_vim = self.db.get_one("vim_accounts", {"_id": db_nsr["datacenter"]})
# #TODO check if VIM is creating and wait
# RO_vim_id = db_vim["_admin"]["deployed"]["RO"]
+ # Call internal terminate action
+ await self._terminate_action(db_nslcmop, nslcmop_id, nsr_id)
db_nsr_update["operational-status"] = "terminating"
db_nsr_update["config-status"] = "terminating"
- if nsr_deployed and nsr_deployed.get("VCA"):
+ if nsr_deployed and nsr_deployed.get("VCA-model-name"):
+ vca_model_name = nsr_deployed["VCA-model-name"]
+ step = "deleting VCA model name '{}' and all charms".format(vca_model_name)
+ self.logger.debug(logging_text + step)
+ try:
+ await self.n2vc.DestroyNetworkService(vca_model_name)
+ except NetworkServiceDoesNotExist:
+ pass
+ db_nsr_update["_admin.deployed.VCA-model-name"] = None
+ if nsr_deployed.get("VCA"):
+ for vca_index in range(0, len(nsr_deployed["VCA"])):
+ db_nsr_update["_admin.deployed.VCA.{}".format(vca_index)] = None
+ self.update_db_2("nsrs", nsr_id, db_nsr_update)
+ # for backward compatibility: if charms have been created with the "default" model name, delete them one by one
+ elif nsr_deployed and nsr_deployed.get("VCA"):
try:
step = "Scheduling configuration charms removing"
db_nsr_update["detailed-status"] = "Deleting charms"
self.update_db_2("nsrs", nsr_id, db_nsr_update)
for vca_index, vca_deployed in enumerate(nsr_deployed["VCA"]):
- if vca_deployed: # TODO it would be desirable having a and deploy_info.get("deployed"):
- task = asyncio.ensure_future(
- self.n2vc.RemoveCharms(
- vca_deployed['model'],
- vca_deployed["application"],
- # self.n2vc_callback,
- # db_nsr,
- # db_nslcmop,
- )
- )
- vca_application_name2index[vca_deployed["application"]] = vca_index
- vca_task_list.append(task)
- vca_task_dict[vca_deployed["application"]] = task
- # task.add_done_callback(functools.partial(self.n2vc_callback, vca_deployed['model'],
- # vca_deployed['application'], None, db_nsr,
- # db_nslcmop, vnf_index))
- self.lcm_tasks.register("ns", nsr_id, nslcmop_id,
- "delete_charm:" + vca_deployed["application"], task)
+ if vca_deployed:
+ if await self._destroy_charm(vca_deployed['model'], vca_deployed["application"]):
+ vca_deployed.clear()
+ db_nsr["_admin.deployed.VCA.{}".format(vca_index)] = None
+ else:
+ vca_time_destroy = time()
except Exception as e:
self.logger.debug(logging_text + "Failed while deleting charms: {}".format(e))
# remove from RO
RO_fail = False
- RO = ROclient.ROClient(self.loop, **self.ro_config)
# Delete ns
RO_nsr_id = RO_delete_action = None
RO_delete_action = nsr_deployed["RO"].get("nsr_delete_action_id")
try:
if RO_nsr_id:
- step = db_nsr_update["detailed-status"] = db_nslcmop_update["detailed-status"] = "Deleting ns at RO"
+ step = db_nsr_update["detailed-status"] = db_nslcmop_update["detailed-status"] = \
+ "Deleting ns from VIM"
+ self.update_db_2("nslcmops", nslcmop_id, db_nslcmop_update)
+ self.update_db_2("nsrs", nsr_id, db_nsr_update)
self.logger.debug(logging_text + step)
- desc = await RO.delete("ns", RO_nsr_id)
+ desc = await self.RO.delete("ns", RO_nsr_id)
RO_delete_action = desc["action_id"]
db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = RO_delete_action
db_nsr_update["_admin.deployed.RO.nsr_id"] = None
db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
if RO_delete_action:
# wait until NS is deleted from VIM
- step = detailed_status = "Waiting ns deleted from VIM. RO_id={}".format(RO_nsr_id)
+ step = detailed_status = "Waiting ns deleted from VIM. RO_id={} RO_delete_action={}".\
+ format(RO_nsr_id, RO_delete_action)
detailed_status_old = None
self.logger.debug(logging_text + step)
delete_timeout = 20 * 60 # 20 minutes
while delete_timeout > 0:
- desc = await RO.show("ns", item_id_name=RO_nsr_id, extra_item="action",
- extra_item_id=RO_delete_action)
- ns_status, ns_status_info = RO.check_action_status(desc)
+ desc = await self.RO.show("ns", item_id_name=RO_nsr_id, extra_item="action",
+ extra_item_id=RO_delete_action)
+ ns_status, ns_status_info = self.RO.check_action_status(desc)
if ns_status == "ERROR":
raise ROclient.ROClientException(ns_status_info)
elif ns_status == "BUILD":
detailed_status = step + "; {}".format(ns_status_info)
elif ns_status == "ACTIVE":
+ db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
+ db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
break
else:
assert False, "ROclient.check_action_status returns unknown {}".format(ns_status)
- await asyncio.sleep(5, loop=self.loop)
- delete_timeout -= 5
if detailed_status != detailed_status_old:
- detailed_status_old = db_nslcmop_update["detailed-status"] = detailed_status
+ detailed_status_old = db_nslcmop_update["detailed-status"] = \
+ db_nsr_update["detailed-status"] = detailed_status
self.update_db_2("nslcmops", nslcmop_id, db_nslcmop_update)
+ self.update_db_2("nsrs", nsr_id, db_nsr_update)
+ await asyncio.sleep(5, loop=self.loop)
+ delete_timeout -= 5
else: # delete_timeout <= 0:
raise ROclient.ROClientException("Timeout waiting ns deleted from VIM")
if e.http_code == 404: # not found
db_nsr_update["_admin.deployed.RO.nsr_id"] = None
db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
+ db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
self.logger.debug(logging_text + "RO_ns_id={} already deleted".format(RO_nsr_id))
elif e.http_code == 409: # conflict
failed_detail.append("RO_ns_id={} delete conflict: {}".format(RO_nsr_id, e))
RO_nsd_id = nsr_deployed["RO"]["nsd_id"]
try:
step = db_nsr_update["detailed-status"] = db_nslcmop_update["detailed-status"] =\
- "Deleting nsd at RO"
- await RO.delete("nsd", RO_nsd_id)
+ "Deleting nsd from RO"
+ await self.RO.delete("nsd", RO_nsd_id)
self.logger.debug(logging_text + "RO_nsd_id={} deleted".format(RO_nsd_id))
db_nsr_update["_admin.deployed.RO.nsd_id"] = None
except ROclient.ROClientException as e:
self.logger.error(logging_text + failed_detail[-1])
RO_fail = True
- if not RO_fail and nsr_deployed and nsr_deployed.get("RO") and nsr_deployed["RO"].get("vnfd_id"):
- for vnf_id, RO_vnfd_id in nsr_deployed["RO"]["vnfd_id"].items():
- if not RO_vnfd_id:
+ if not RO_fail and nsr_deployed and nsr_deployed.get("RO") and nsr_deployed["RO"].get("vnfd"):
+ for index, vnf_deployed in enumerate(nsr_deployed["RO"]["vnfd"]):
+ if not vnf_deployed or not vnf_deployed["id"]:
continue
try:
+ RO_vnfd_id = vnf_deployed["id"]
step = db_nsr_update["detailed-status"] = db_nslcmop_update["detailed-status"] =\
- "Deleting vnfd={} at RO".format(vnf_id)
- await RO.delete("vnfd", RO_vnfd_id)
+ "Deleting member_vnf_index={} RO_vnfd_id={} from RO".format(
+ vnf_deployed["member-vnf-index"], RO_vnfd_id)
+ await self.RO.delete("vnfd", RO_vnfd_id)
self.logger.debug(logging_text + "RO_vnfd_id={} deleted".format(RO_vnfd_id))
- db_nsr_update["_admin.deployed.RO.vnfd_id.{}".format(vnf_id)] = None
+ db_nsr_update["_admin.deployed.RO.vnfd.{}.id".format(index)] = None
except ROclient.ROClientException as e:
if e.http_code == 404: # not found
- db_nsr_update["_admin.deployed.RO.vnfd_id.{}".format(vnf_id)] = None
+ db_nsr_update["_admin.deployed.RO.vnfd.{}.id".format(index)] = None
self.logger.debug(logging_text + "RO_vnfd_id={} already deleted ".format(RO_vnfd_id))
elif e.http_code == 409: # conflict
failed_detail.append("RO_vnfd_id={} delete conflict: {}".format(RO_vnfd_id, e))
failed_detail.append("RO_vnfd_id={} delete error: {}".format(RO_vnfd_id, e))
self.logger.error(logging_text + failed_detail[-1])
- if vca_task_list:
- db_nsr_update["detailed-status"] = db_nslcmop_update["detailed-status"] =\
+ # wait until charm deleted
+ if vca_time_destroy:
+ db_nsr_update["detailed-status"] = db_nslcmop_update["detailed-status"] = step = \
"Waiting for deletion of configuration charms"
self.update_db_2("nslcmops", nslcmop_id, db_nslcmop_update)
self.update_db_2("nsrs", nsr_id, db_nsr_update)
- await asyncio.wait(vca_task_list, timeout=300)
- for application_name, task in vca_task_dict.items():
- if task.cancelled():
- failed_detail.append("VCA[application_name={}] Deletion has been cancelled"
- .format(application_name))
- elif task.done():
- exc = task.exception()
- if exc:
- failed_detail.append("VCA[application_name={}] Deletion exception: {}"
- .format(application_name, exc))
+ for vca_index, vca_deployed in enumerate(nsr_deployed["VCA"]):
+ if not vca_deployed:
+ continue
+ step = "Waiting for deletion of charm application_name={}".format(vca_deployed["application"])
+ timeout = self.timeout_charm_delete - int(time() - vca_time_destroy)
+ if not await self._wait_charm_destroyed(vca_deployed['model'], vca_deployed["application"],
+ timeout):
+ failed_detail.append("VCA[application_name={}] Deletion timeout".format(
+ vca_deployed["application"]))
else:
- vca_index = vca_application_name2index[application_name]
- db_nsr_update["_admin.deployed.VCA.{}".format(vca_index)] = None
- else: # timeout
- # TODO Should it be cancelled?!!
- task.cancel()
- failed_detail.append("VCA[application_name={}] Deletion timeout".format(application_name))
+ db_nsr["_admin.deployed.VCA.{}".format(vca_index)] = None
if failed_detail:
self.logger.error(logging_text + " ;".join(failed_detail))
db_nslcmop_update["detailed-status"] = "; ".join(failed_detail)
db_nslcmop_update["operationState"] = nslcmop_operation_state = "FAILED"
db_nslcmop_update["statusEnteredTime"] = time()
- elif db_nslcmop["operationParams"].get("autoremove"):
- self.db.del_one("nsrs", {"_id": nsr_id})
- db_nsr_update.clear()
- self.db.del_list("nslcmops", {"nsInstanceId": nsr_id})
- nslcmop_operation_state = "COMPLETED"
- db_nslcmop_update.clear()
- self.db.del_list("vnfrs", {"nsr-id-ref": nsr_id})
- self.db.set_list("pdus", {"_admin.usage.nsr_id": nsr_id},
- {"_admin.usageSate": "NOT_IN_USE", "_admin.usage": None})
- self.logger.debug(logging_text + "Delete from database")
else:
db_nsr_update["operational-status"] = "terminated"
db_nsr_update["detailed-status"] = "Done"
db_nslcmop_update["detailed-status"] = "Done"
db_nslcmop_update["operationState"] = nslcmop_operation_state = "COMPLETED"
db_nslcmop_update["statusEnteredTime"] = time()
+ if db_nslcmop["operationParams"].get("autoremove"):
+ autoremove = True
- except (ROclient.ROClientException, DbException) as e:
+ except (ROclient.ROClientException, DbException, LcmException) as e:
self.logger.error(logging_text + "Exit Exception {}".format(e))
exc = e
except asyncio.CancelledError:
db_nslcmop_update["detailed-status"] = "FAILED {}: {}".format(step, exc)
db_nslcmop_update["operationState"] = nslcmop_operation_state = "FAILED"
db_nslcmop_update["statusEnteredTime"] = time()
- if db_nslcmop_update:
- self.update_db_2("nslcmops", nslcmop_id, db_nslcmop_update)
- if db_nsr:
- db_nsr_update["_admin.nslcmop"] = None
- self.update_db_2("nsrs", nsr_id, db_nsr_update)
+ try:
+ if db_nslcmop and db_nslcmop_update:
+ self.update_db_2("nslcmops", nslcmop_id, db_nslcmop_update)
+ if db_nsr:
+ db_nsr_update["_admin.nslcmop"] = None
+ self.update_db_2("nsrs", nsr_id, db_nsr_update)
+ except DbException as e:
+ self.logger.error(logging_text + "Cannot update database: {}".format(e))
if nslcmop_operation_state:
try:
await self.msg.aiowrite("ns", "terminated", {"nsr_id": nsr_id, "nslcmop_id": nslcmop_id,
- "operationState": nslcmop_operation_state})
+ "operationState": nslcmop_operation_state,
+ "autoremove": autoremove},
+ loop=self.loop)
except Exception as e:
self.logger.error(logging_text + "kafka_write notification Exception {}".format(e))
self.logger.debug(logging_text + "Exit")
self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_terminate")
- async def _ns_execute_primitive(self, db_deployed, nsr_name, member_vnf_index, vdu_id, vdu_name, vdu_count_index,
- primitive, primitive_params):
-
- for vca_deployed in db_deployed["VCA"]:
- if not vca_deployed:
- continue
- if member_vnf_index != vca_deployed["member-vnf-index"] or vdu_id != vca_deployed["vdu_id"]:
- continue
- if vdu_name and vdu_name != vca_deployed["vdu_name"]:
- continue
- if vdu_count_index and vdu_count_index != vca_deployed["vdu_count_index"]:
- continue
- break
- else:
- raise LcmException("charm for member_vnf_index={} vdu_id={} vdu_name={} vdu_count_index={} is not deployed"
- .format(member_vnf_index, vdu_id, vdu_name, vdu_count_index))
- model_name = vca_deployed.get("model")
- application_name = vca_deployed.get("application")
- if not model_name or not application_name:
- raise LcmException("charm for member_vnf_index={} vdu_id={} vdu_name={} vdu_count_index={} has not model "
- "or application name" .format(member_vnf_index, vdu_id, vdu_name, vdu_count_index))
- if vca_deployed["operational-status"] != "active":
- raise LcmException("charm for member_vnf_index={} vdu_id={} operational_status={} not 'active'".format(
- member_vnf_index, vdu_id, vca_deployed["operational-status"]))
- callback = None # self.n2vc_callback
- callback_args = () # [db_nsr, db_nslcmop, member_vnf_index, None]
- await self.n2vc.login()
- task = asyncio.ensure_future(
- self.n2vc.ExecutePrimitive(
- model_name,
- application_name,
- primitive, callback,
- *callback_args,
- **primitive_params
- )
- )
- # task.add_done_callback(functools.partial(self.n2vc_callback, model_name, application_name, None,
- # db_nsr, db_nslcmop, member_vnf_index))
- # self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "action:" + primitive, task)
- # wait until completed with timeout
- await asyncio.wait((task,), timeout=600)
-
- result = "FAILED" # by default
- result_detail = ""
- if task.cancelled():
- result_detail = "Task has been cancelled"
- elif task.done():
- exc = task.exception()
- if exc:
- result_detail = str(exc)
+ @staticmethod
+ def _map_primitive_params(primitive_desc, params, instantiation_params):
+ """
+ Generates the params to be provided to the charm before executing a primitive. If the user does not provide a
+ parameter, the default-value is used. If the value is enclosed in < >, it is looked up in instantiation_params
+ :param primitive_desc: portion of VNFD/NSD that describes primitive
+ :param params: Params provided by user
+ :param instantiation_params: Instantiation params provided by user
+ :return: a dictionary with the calculated params
+ """
+ calculated_params = {}
+ for parameter in primitive_desc.get("parameter", ()):
+ param_name = parameter["name"]
+ if param_name in params:
+ calculated_params[param_name] = params[param_name]
+ elif "default-value" in parameter or "value" in parameter:
+ if "value" in parameter:
+ calculated_params[param_name] = parameter["value"]
+ else:
+ calculated_params[param_name] = parameter["default-value"]
+ if isinstance(calculated_params[param_name], str) and calculated_params[param_name].startswith("<") \
+ and calculated_params[param_name].endswith(">"):
+ if calculated_params[param_name][1:-1] in instantiation_params:
+ calculated_params[param_name] = instantiation_params[calculated_params[param_name][1:-1]]
+ else:
+ raise LcmException("Parameter {} needed to execute primitive {} not provided".
+ format(parameter["default-value"], primitive_desc["name"]))
+ else:
+ raise LcmException("Parameter {} needed to execute primitive {} not provided".
+ format(param_name, primitive_desc["name"]))
+
+ if isinstance(calculated_params[param_name], (dict, list, tuple)):
+ calculated_params[param_name] = yaml.safe_dump(calculated_params[param_name], default_flow_style=True,
+ width=256)
+ elif isinstance(calculated_params[param_name], str) and calculated_params[param_name].startswith("!!yaml "):
+ calculated_params[param_name] = calculated_params[param_name][7:]
+
+ # add always ns_config_info if primitive name is config
+ if primitive_desc["name"] == "config":
+ if "ns_config_info" in instantiation_params:
+ calculated_params["ns_config_info"] = instantiation_params["ns_config_info"]
+ return calculated_params
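+ # Example (illustrative sketch with a hypothetical descriptor and instantiation params):
+ #   primitive_desc = {"name": "touch", "parameter": [{"name": "filename", "default-value": "<touch_filename>"}]}
+ #   NsLcm._map_primitive_params(primitive_desc, {}, {"touch_filename": "/home/ubuntu/first-touch"})
+ #   # -> {"filename": "/home/ubuntu/first-touch"}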
+
+ async def _ns_execute_primitive(self, db_deployed, member_vnf_index, vdu_id, vdu_name, vdu_count_index,
+ primitive, primitive_params, retries=0, retries_interval=30):
+ start_primitive_time = time()
+ try:
+ for vca_deployed in db_deployed["VCA"]:
+ if not vca_deployed:
+ continue
+ if member_vnf_index != vca_deployed["member-vnf-index"] or vdu_id != vca_deployed["vdu_id"]:
+ continue
+ if vdu_name and vdu_name != vca_deployed["vdu_name"]:
+ continue
+ if vdu_count_index and vdu_count_index != vca_deployed["vdu_count_index"]:
+ continue
+ break
else:
- # TODO revise with Adam if action is finished and ok when task is done or callback is needed
- result = "COMPLETED"
- result_detail = "Done"
- else: # timeout
- # TODO Should it be cancelled?!!
- task.cancel()
- result_detail = "timeout"
- return result, result_detail
+ raise LcmException("charm for member_vnf_index={} vdu_id={} vdu_name={} vdu_count_index={} is not "
+ "deployed".format(member_vnf_index, vdu_id, vdu_name, vdu_count_index))
+ model_name = vca_deployed.get("model")
+ application_name = vca_deployed.get("application")
+ if not model_name or not application_name:
+ raise LcmException("charm for member_vnf_index={} vdu_id={} vdu_name={} vdu_count_index={} has not "
+ "model or application name" .format(member_vnf_index, vdu_id, vdu_name,
+ vdu_count_index))
+ # if vca_deployed["operational-status"] != "active":
+ # raise LcmException("charm for member_vnf_index={} vdu_id={} operational_status={} not 'active'".format(
+ # member_vnf_index, vdu_id, vca_deployed["operational-status"]))
+ callback = None # self.n2vc_callback
+ callback_args = () # [db_nsr, db_nslcmop, member_vnf_index, None]
+ await self.n2vc.login()
+ if primitive == "config":
+ primitive_params = {"params": primitive_params}
+ while retries >= 0:
+ primitive_id = await self.n2vc.ExecutePrimitive(
+ model_name,
+ application_name,
+ primitive,
+ callback,
+ *callback_args,
+ **primitive_params
+ )
+ while time() - start_primitive_time < self.timeout_primitive:
+ primitive_result_ = await self.n2vc.GetPrimitiveStatus(model_name, primitive_id)
+ if primitive_result_ in ("completed", "failed"):
+ primitive_result = "COMPLETED" if primitive_result_ == "completed" else "FAILED"
+ detailed_result = await self.n2vc.GetPrimitiveOutput(model_name, primitive_id)
+ break
+ elif primitive_result_ is None and primitive == "config":
+ primitive_result = "COMPLETED"
+ detailed_result = None
+ break
+ else: # ("running", "pending", None):
+ pass
+ await asyncio.sleep(5)
+ else:
+ raise LcmException("timeout after {} seconds".format(self.timeout_primitive))
+ if primitive_result == "COMPLETED":
+ break
+ retries -= 1
+ if retries >= 0:
+ await asyncio.sleep(retries_interval)
+
+ return primitive_result, detailed_result
+ except (N2VCPrimitiveExecutionFailed, LcmException) as e:
+ return "FAILED", str(e)
async def action(self, nsr_id, nslcmop_id):
+
+ # Try to lock HA task here
+ task_is_locked_by_me = self.lcm_tasks.lock_HA('ns', 'nslcmops', nslcmop_id)
+ if not task_is_locked_by_me:
+ return
+
logging_text = "Task ns={} action={} ".format(nsr_id, nslcmop_id)
self.logger.debug(logging_text + "Enter")
# get all needed from database
db_nsr_update = {"_admin.nslcmop": nslcmop_id}
db_nslcmop_update = {}
nslcmop_operation_state = None
+ nslcmop_operation_state_detail = None
exc = None
try:
+ # wait for any previous tasks in process
+ step = "Waiting for previous operations to terminate"
+ await self.lcm_tasks.waitfor_related_HA('ns', 'nslcmops', nslcmop_id)
+
step = "Getting information from database"
db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
+
nsr_deployed = db_nsr["_admin"].get("deployed")
- nsr_name = db_nsr["name"]
- vnf_index = db_nslcmop["operationParams"]["member_vnf_index"]
+ vnf_index = db_nslcmop["operationParams"].get("member_vnf_index")
vdu_id = db_nslcmop["operationParams"].get("vdu_id")
vdu_count_index = db_nslcmop["operationParams"].get("vdu_count_index")
vdu_name = db_nslcmop["operationParams"].get("vdu_name")
- # look if previous tasks in process
- task_name, task_dependency = self.lcm_tasks.lookfor_related("ns", nsr_id, nslcmop_id)
- if task_dependency:
- step = db_nslcmop_update["detailed-status"] = \
- "Waiting for related tasks to be completed: {}".format(task_name)
- self.logger.debug(logging_text + step)
- self.update_db_2("nslcmops", nslcmop_id, db_nslcmop_update)
- _, pending = await asyncio.wait(task_dependency, timeout=3600)
- if pending:
- raise LcmException("Timeout waiting related tasks to be completed")
+ if vnf_index:
+ step = "Getting vnfr from database"
+ db_vnfr = self.db.get_one("vnfrs", {"member-vnf-index-ref": vnf_index, "nsr-id-ref": nsr_id})
+ step = "Getting vnfd from database"
+ db_vnfd = self.db.get_one("vnfds", {"_id": db_vnfr["vnfd-id"]})
+ else:
+ if db_nsr.get("nsd"):
+ db_nsd = db_nsr.get("nsd") # TODO this will be removed
+ else:
+ step = "Getting nsd from database"
+ db_nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
# for backward compatibility
if nsr_deployed and isinstance(nsr_deployed.get("VCA"), dict):
db_nsr_update["_admin.deployed.VCA"] = nsr_deployed["VCA"]
self.update_db_2("nsrs", nsr_id, db_nsr_update)
- # TODO check if ns is in a proper status
primitive = db_nslcmop["operationParams"]["primitive"]
primitive_params = db_nslcmop["operationParams"]["primitive_params"]
- result, result_detail = await self._ns_execute_primitive(nsr_deployed, nsr_name, vnf_index, vdu_id,
- vdu_name, vdu_count_index, primitive,
- primitive_params)
- db_nslcmop_update["detailed-status"] = result_detail
+
+ # look for primitive
+ config_primitive_desc = None
+ if vdu_id:
+ for vdu in get_iterable(db_vnfd, "vdu"):
+ if vdu_id == vdu["id"]:
+ for config_primitive in vdu.get("vdu-configuration", {}).get("config-primitive", ()):
+ if config_primitive["name"] == primitive:
+ config_primitive_desc = config_primitive
+ break
+ elif vnf_index:
+ for config_primitive in db_vnfd.get("vnf-configuration", {}).get("config-primitive", ()):
+ if config_primitive["name"] == primitive:
+ config_primitive_desc = config_primitive
+ break
+ else:
+ for config_primitive in db_nsd.get("ns-configuration", {}).get("config-primitive", ()):
+ if config_primitive["name"] == primitive:
+ config_primitive_desc = config_primitive
+ break
+
+ if not config_primitive_desc:
+ raise LcmException("Primitive {} not found at [ns|vnf|vdu]-configuration:config-primitive ".
+ format(primitive))
+
+ desc_params = {}
+ if vnf_index:
+ if db_vnfr.get("additionalParamsForVnf"):
+ desc_params.update(db_vnfr["additionalParamsForVnf"])
+ else:
+ if db_nsr.get("additionalParamsForVnf"):
+ desc_params.update(db_nsr["additionalParamsForNs"])
+
+ # TODO check if ns is in a proper status
+ result, result_detail = await self._ns_execute_primitive(
+ nsr_deployed, vnf_index, vdu_id, vdu_name, vdu_count_index, primitive,
+ self._map_primitive_params(config_primitive_desc, primitive_params, desc_params))
+ db_nslcmop_update["detailed-status"] = nslcmop_operation_state_detail = result_detail
db_nslcmop_update["operationState"] = nslcmop_operation_state = result
db_nslcmop_update["statusEnteredTime"] = time()
self.logger.debug(logging_text + " task Done with result {} {}".format(result, result_detail))
self.logger.critical(logging_text + "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True)
finally:
if exc and db_nslcmop:
- db_nslcmop_update["detailed-status"] = "FAILED {}: {}".format(step, exc)
+ db_nslcmop_update["detailed-status"] = nslcmop_operation_state_detail = \
+ "FAILED {}: {}".format(step, exc)
db_nslcmop_update["operationState"] = nslcmop_operation_state = "FAILED"
db_nslcmop_update["statusEnteredTime"] = time()
- if db_nslcmop_update:
- self.update_db_2("nslcmops", nslcmop_id, db_nslcmop_update)
- if db_nsr:
- db_nsr_update["_admin.nslcmop"] = None
- self.update_db_2("nsrs", nsr_id, db_nsr_update)
+ try:
+ if db_nslcmop_update:
+ self.update_db_2("nslcmops", nslcmop_id, db_nslcmop_update)
+ if db_nsr:
+ db_nsr_update["_admin.nslcmop"] = None
+ self.update_db_2("nsrs", nsr_id, db_nsr_update)
+ except DbException as e:
+ self.logger.error(logging_text + "Cannot update database: {}".format(e))
self.logger.debug(logging_text + "Exit")
if nslcmop_operation_state:
try:
await self.msg.aiowrite("ns", "actioned", {"nsr_id": nsr_id, "nslcmop_id": nslcmop_id,
- "operationState": nslcmop_operation_state})
+ "operationState": nslcmop_operation_state},
+ loop=self.loop)
except Exception as e:
self.logger.error(logging_text + "kafka_write notification Exception {}".format(e))
self.logger.debug(logging_text + "Exit")
self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_action")
+ return nslcmop_operation_state, nslcmop_operation_state_detail
async def scale(self, nsr_id, nslcmop_id):
+
+ # Try to lock HA task here
+ task_is_locked_by_me = self.lcm_tasks.lock_HA('ns', 'nslcmops', nslcmop_id)
+ if not task_is_locked_by_me:
+ return
+
logging_text = "Task ns={} scale={} ".format(nsr_id, nslcmop_id)
self.logger.debug(logging_text + "Enter")
# get all needed from database
old_config_status = ""
vnfr_scaled = False
try:
+ # wait for any previous tasks in process
+ step = "Waiting for previous operations to terminate"
+ await self.lcm_tasks.waitfor_related_HA('ns', 'nslcmops', nslcmop_id)
+
step = "Getting nslcmop from database"
+ self.logger.debug(step + " after having waited for previous tasks to be completed")
db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
step = "Getting nsr from database"
db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
- nsr_name = db_nsr["name"]
+
old_operational_status = db_nsr["operational-status"]
old_config_status = db_nsr["config-status"]
-
- # look if previous tasks in process
- task_name, task_dependency = self.lcm_tasks.lookfor_related("ns", nsr_id, nslcmop_id)
- if task_dependency:
- step = db_nslcmop_update["detailed-status"] = \
- "Waiting for related tasks to be completed: {}".format(task_name)
- self.logger.debug(logging_text + step)
- self.update_db_2("nslcmops", nslcmop_id, db_nslcmop_update)
- _, pending = await asyncio.wait(task_dependency, timeout=3600)
- if pending:
- raise LcmException("Timeout waiting related tasks to be completed")
-
step = "Parsing scaling parameters"
+ # self.logger.debug(step)
db_nsr_update["operational-status"] = "scaling"
self.update_db_2("nsrs", nsr_id, db_nsr_update)
nsr_deployed = db_nsr["_admin"].get("deployed")
db_vnfr = self.db.get_one("vnfrs", {"member-vnf-index-ref": vnf_index, "nsr-id-ref": nsr_id})
step = "Getting vnfd from database"
db_vnfd = self.db.get_one("vnfds", {"_id": db_vnfr["vnfd-id"]})
+
step = "Getting scaling-group-descriptor"
for scaling_descriptor in db_vnfd["scaling-group-descriptor"]:
if scaling_descriptor["name"] == scaling_group:
else:
raise LcmException("input parameter 'scaleByStepData':'scaling-group-descriptor':'{}' is not present "
"at vnfd:scaling-group-descriptor".format(scaling_group))
+
# cooldown_time = 0
# for scaling_policy_descriptor in scaling_descriptor.get("scaling-policy", ()):
# cooldown_time = scaling_policy_descriptor.get("cooldown-time", 0)
# break
# TODO check if ns is in a proper status
- step = "Sending scale order to RO"
+ step = "Sending scale order to VIM"
nb_scale_op = 0
if not db_nsr["_admin"].get("scaling-group"):
self.update_db_2("nsrs", nsr_id, {"_admin.scaling-group": [{"name": scaling_group, "nb-scale-op": 0}]})
vdu_scaling_info = {"scaling_group_name": scaling_group, "vdu": []}
if scaling_type == "SCALE_OUT":
# count if max-instance-count is reached
- if "max-instance-count" in scaling_descriptor and scaling_descriptor["max-instance-count"] is not None:
- max_instance_count = int(scaling_descriptor["max-instance-count"])
- if nb_scale_op >= max_instance_count:
- raise LcmException("reached the limit of {} (max-instance-count) scaling-out operations for the"
- " scaling-group-descriptor '{}'".format(nb_scale_op, scaling_group))
- nb_scale_op = nb_scale_op + 1
+ max_instance_count = scaling_descriptor.get("max-instance-count", 10)
+ # self.logger.debug("MAX_INSTANCE_COUNT is {}".format(max_instance_count))
+ if nb_scale_op >= max_instance_count:
+ raise LcmException("reached the limit of {} (max-instance-count) "
+ "scaling-out operations for the "
+ "scaling-group-descriptor '{}'".format(nb_scale_op, scaling_group))
+
+ nb_scale_op += 1
vdu_scaling_info["scaling_direction"] = "OUT"
vdu_scaling_info["vdu-create"] = {}
for vdu_scale_info in scaling_descriptor["vdu"]:
RO_scaling_info.append({"osm_vdu_id": vdu_scale_info["vdu-id-ref"], "member-vnf-index": vnf_index,
"type": "create", "count": vdu_scale_info.get("count", 1)})
vdu_scaling_info["vdu-create"][vdu_scale_info["vdu-id-ref"]] = vdu_scale_info.get("count", 1)
+
elif scaling_type == "SCALE_IN":
# count if min-instance-count is reached
min_instance_count = 0
if nb_scale_op <= min_instance_count:
raise LcmException("reached the limit of {} (min-instance-count) scaling-in operations for the "
"scaling-group-descriptor '{}'".format(nb_scale_op, scaling_group))
- nb_scale_op = nb_scale_op - 1
+ nb_scale_op -= 1
vdu_scaling_info["scaling_direction"] = "IN"
vdu_scaling_info["vdu-delete"] = {}
for vdu_scale_info in scaling_descriptor["vdu"]:
})
vdu_delete = vdu_scaling_info.pop("vdu-delete")
- # execute primitive service PRE-SCALING
+ # PRE-SCALE BEGIN
step = "Executing pre-scale vnf-config-primitive"
if scaling_descriptor.get("scaling-config-action"):
for scaling_config_action in scaling_descriptor["scaling-config-action"]:
- if scaling_config_action.get("trigger") and scaling_config_action["trigger"] == "pre-scale-in" \
- and scaling_type == "SCALE_IN":
+ if (scaling_config_action.get("trigger") == "pre-scale-in" and scaling_type == "SCALE_IN") \
+ or (scaling_config_action.get("trigger") == "pre-scale-out" and scaling_type == "SCALE_OUT"):
vnf_config_primitive = scaling_config_action["vnf-config-primitive-name-ref"]
step = db_nslcmop_update["detailed-status"] = \
"executing pre-scale scaling-config-action '{}'".format(vnf_config_primitive)
+
# look for primitive
- primitive_params = {}
for config_primitive in db_vnfd.get("vnf-configuration", {}).get("config-primitive", ()):
if config_primitive["name"] == vnf_config_primitive:
- for parameter in config_primitive.get("parameter", ()):
- if 'default-value' in parameter and \
- parameter['default-value'] == "<VDU_SCALE_INFO>":
- primitive_params[parameter["name"]] = yaml.safe_dump(vdu_scaling_info,
- default_flow_style=True,
- width=256)
break
else:
raise LcmException(
"Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:scaling-config-action"
- "[vnf-config-primitive-name-ref='{}'] does not match any vnf-cnfiguration:config-"
+ "[vnf-config-primitive-name-ref='{}'] does not match any vnf-configuration:config-"
"primitive".format(scaling_group, config_primitive))
+
+ vnfr_params = {"VDU_SCALE_INFO": vdu_scaling_info}
+ if db_vnfr.get("additionalParamsForVnf"):
+ vnfr_params.update(db_vnfr["additionalParamsForVnf"])
scale_process = "VCA"
db_nsr_update["config-status"] = "configuring pre-scaling"
- result, result_detail = await self._ns_execute_primitive(nsr_deployed, nsr_name, vnf_index,
- None, None, None, vnf_config_primitive,
- primitive_params)
- self.logger.debug(logging_text + "vnf_config_primitive={} Done with result {} {}".format(
- vnf_config_primitive, result, result_detail))
+ primitive_params = self._map_primitive_params(config_primitive, {}, vnfr_params)
+
+ # Pre-scale reintent check: Check if this sub-operation has been executed before
+ op_index = self._check_or_add_scale_suboperation(
+ db_nslcmop, vnf_index, vnf_config_primitive, primitive_params, 'PRE-SCALE')
+ if (op_index == self.SUBOPERATION_STATUS_SKIP):
+ # Skip sub-operation
+ result = 'COMPLETED'
+ result_detail = 'Done'
+ self.logger.debug(logging_text +
+ "vnf_config_primitive={} Skipped sub-operation, result {} {}".format(
+ vnf_config_primitive, result, result_detail))
+ else:
+ if (op_index == self.SUBOPERATION_STATUS_NEW):
+ # New sub-operation: Get index of this sub-operation
+ op_index = len(db_nslcmop.get('_admin', {}).get('operations')) - 1
+ self.logger.debug(logging_text + "vnf_config_primitive={} New sub-operation".
+ format(vnf_config_primitive))
+ else:
+ # Reintent: Get registered params for this existing sub-operation
+ op = db_nslcmop.get('_admin', {}).get('operations', [])[op_index]
+ vnf_index = op.get('member_vnf_index')
+ vnf_config_primitive = op.get('primitive')
+ primitive_params = op.get('primitive_params')
+ self.logger.debug(logging_text + "vnf_config_primitive={} Sub-operation reintent".
+ format(vnf_config_primitive))
+ # Execute the primitive, either with new (first-time) or registered (reintent) args
+ result, result_detail = await self._ns_execute_primitive(
+ nsr_deployed, vnf_index, None, None, None, vnf_config_primitive, primitive_params)
+ self.logger.debug(logging_text + "vnf_config_primitive={} Done with result {} {}".format(
+ vnf_config_primitive, result, result_detail))
+ # Update operationState = COMPLETED | FAILED
+ self._update_suboperation_status(
+ db_nslcmop, op_index, result, result_detail)
+
if result == "FAILED":
raise LcmException(result_detail)
db_nsr_update["config-status"] = old_config_status
scale_process = None
+ # PRE-SCALE END
+ # SCALE RO - BEGIN
+ # Should this block be skipped if 'RO_nsr_id' == None ?
+ # if (RO_nsr_id and RO_scaling_info):
if RO_scaling_info:
scale_process = "RO"
- RO = ROclient.ROClient(self.loop, **self.ro_config)
- RO_desc = await RO.create_action("ns", RO_nsr_id, {"vdu-scaling": RO_scaling_info})
- db_nsr_update["_admin.scaling-group.{}.nb-scale-op".format(admin_scale_index)] = nb_scale_op
- db_nsr_update["_admin.scaling-group.{}.time".format(admin_scale_index)] = time()
- # wait until ready
- RO_nslcmop_id = RO_desc["instance_action_id"]
- db_nslcmop_update["_admin.deploy.RO"] = RO_nslcmop_id
-
- RO_task_done = False
- step = detailed_status = "Waiting RO_task_id={} to complete the scale action.".format(RO_nslcmop_id)
- detailed_status_old = None
- self.logger.debug(logging_text + step)
-
- deployment_timeout = 1 * 3600 # One hour
- while deployment_timeout > 0:
- if not RO_task_done:
- desc = await RO.show("ns", item_id_name=RO_nsr_id, extra_item="action",
- extra_item_id=RO_nslcmop_id)
- ns_status, ns_status_info = RO.check_action_status(desc)
- if ns_status == "ERROR":
- raise ROclient.ROClientException(ns_status_info)
- elif ns_status == "BUILD":
- detailed_status = step + "; {}".format(ns_status_info)
- elif ns_status == "ACTIVE":
- RO_task_done = True
- step = detailed_status = "Waiting ns ready at RO. RO_id={}".format(RO_nsr_id)
- self.logger.debug(logging_text + step)
- else:
- assert False, "ROclient.check_action_status returns unknown {}".format(ns_status)
+ # Scale RO reintent check: Check if this sub-operation has been executed before
+ op_index = self._check_or_add_scale_suboperation(
+ db_nslcmop, vnf_index, None, None, 'SCALE-RO', RO_nsr_id, RO_scaling_info)
+ if (op_index == self.SUBOPERATION_STATUS_SKIP):
+ # Skip sub-operation
+ result = 'COMPLETED'
+ result_detail = 'Done'
+ self.logger.debug(logging_text + "Skipped sub-operation RO, result {} {}".format(
+ result, result_detail))
+ else:
+ if (op_index == self.SUBOPERATION_STATUS_NEW):
+ # New sub-operation: Get index of this sub-operation
+ op_index = len(db_nslcmop.get('_admin', {}).get('operations')) - 1
+ self.logger.debug(logging_text + "New sub-operation RO")
else:
- desc = await RO.show("ns", RO_nsr_id)
- ns_status, ns_status_info = RO.check_ns_status(desc)
- if ns_status == "ERROR":
- raise ROclient.ROClientException(ns_status_info)
- elif ns_status == "BUILD":
- detailed_status = step + "; {}".format(ns_status_info)
- elif ns_status == "ACTIVE":
- step = detailed_status = \
- "Waiting for management IP address reported by the VIM. Updating VNFRs"
- if not vnfr_scaled:
- self.scale_vnfr(db_vnfr, vdu_create=vdu_create, vdu_delete=vdu_delete)
- vnfr_scaled = True
- try:
- desc = await RO.show("ns", RO_nsr_id)
- # nsr_deployed["nsr_ip"] = RO.get_ns_vnf_info(desc)
- self.ns_update_vnfr({db_vnfr["member-vnf-index-ref"]: db_vnfr}, desc)
- break
- except LcmExceptionNoMgmtIP:
- pass
+ # Reintent: Get registered params for this existing sub-operation
+ op = db_nslcmop.get('_admin', {}).get('operations', [])[op_index]
+ RO_nsr_id = op.get('RO_nsr_id')
+ RO_scaling_info = op.get('RO_scaling_info')
+ self.logger.debug(logging_text + "Sub-operation RO reintent".format(
+ vnf_config_primitive))
+
+ RO_desc = await self.RO.create_action("ns", RO_nsr_id, {"vdu-scaling": RO_scaling_info})
+ db_nsr_update["_admin.scaling-group.{}.nb-scale-op".format(admin_scale_index)] = nb_scale_op
+ db_nsr_update["_admin.scaling-group.{}.time".format(admin_scale_index)] = time()
+ # wait until ready
+ RO_nslcmop_id = RO_desc["instance_action_id"]
+ db_nslcmop_update["_admin.deploy.RO"] = RO_nslcmop_id
+
+ RO_task_done = False
+ step = detailed_status = "Waiting RO_task_id={} to complete the scale action.".format(RO_nslcmop_id)
+ detailed_status_old = None
+ self.logger.debug(logging_text + step)
+
+ deployment_timeout = 1 * 3600 # One hour
+ while deployment_timeout > 0:
+ if not RO_task_done:
+ desc = await self.RO.show("ns", item_id_name=RO_nsr_id, extra_item="action",
+ extra_item_id=RO_nslcmop_id)
+ ns_status, ns_status_info = self.RO.check_action_status(desc)
+ if ns_status == "ERROR":
+ raise ROclient.ROClientException(ns_status_info)
+ elif ns_status == "BUILD":
+ detailed_status = step + "; {}".format(ns_status_info)
+ elif ns_status == "ACTIVE":
+ RO_task_done = True
+ step = detailed_status = "Waiting ns ready at RO. RO_id={}".format(RO_nsr_id)
+ self.logger.debug(logging_text + step)
+ else:
+ assert False, "ROclient.check_action_status returns unknown {}".format(ns_status)
else:
- assert False, "ROclient.check_ns_status returns unknown {}".format(ns_status)
- if detailed_status != detailed_status_old:
- detailed_status_old = db_nslcmop_update["detailed-status"] = detailed_status
- self.update_db_2("nslcmops", nslcmop_id, db_nslcmop_update)
+ if ns_status == "ERROR":
+ raise ROclient.ROClientException(ns_status_info)
+ elif ns_status == "BUILD":
+ detailed_status = step + "; {}".format(ns_status_info)
+ elif ns_status == "ACTIVE":
+ step = detailed_status = \
+ "Waiting for management IP address reported by the VIM. Updating VNFRs"
+ if not vnfr_scaled:
+ self.scale_vnfr(db_vnfr, vdu_create=vdu_create, vdu_delete=vdu_delete)
+ vnfr_scaled = True
+ try:
+ desc = await self.RO.show("ns", RO_nsr_id)
+ # nsr_deployed["nsr_ip"] = RO.get_ns_vnf_info(desc)
+ self.ns_update_vnfr({db_vnfr["member-vnf-index-ref"]: db_vnfr}, desc)
+ break
+ except LcmExceptionNoMgmtIP:
+ pass
+ else:
+ assert False, "ROclient.check_ns_status returns unknown {}".format(ns_status)
+ if detailed_status != detailed_status_old:
+ self._update_suboperation_status(
+ db_nslcmop, op_index, 'COMPLETED', detailed_status)
+ detailed_status_old = db_nslcmop_update["detailed-status"] = detailed_status
+ self.update_db_2("nslcmops", nslcmop_id, db_nslcmop_update)
- await asyncio.sleep(5, loop=self.loop)
- deployment_timeout -= 5
- if deployment_timeout <= 0:
- raise ROclient.ROClientException("Timeout waiting ns to be ready")
-
- # update VDU_SCALING_INFO with the obtained ip_addresses
- if vdu_scaling_info["scaling_direction"] == "OUT":
- for vdur in reversed(db_vnfr["vdur"]):
- if vdu_scaling_info["vdu-create"].get(vdur["vdu-id-ref"]):
- vdu_scaling_info["vdu-create"][vdur["vdu-id-ref"]] -= 1
- vdu_scaling_info["vdu"].append({
- "name": vdur["name"],
- "vdu_id": vdur["vdu-id-ref"],
- "interface": []
- })
- for interface in vdur["interfaces"]:
- vdu_scaling_info["vdu"][-1]["interface"].append({
- "name": interface["name"],
- "ip_address": interface["ip-address"],
- "mac_address": interface.get("mac-address"),
+ await asyncio.sleep(5, loop=self.loop)
+ deployment_timeout -= 5
+ if deployment_timeout <= 0:
+ self._update_suboperation_status(
+ db_nslcmop, op_index, 'FAILED', "Timeout when waiting for ns to get ready")
+ raise ROclient.ROClientException("Timeout waiting ns to be ready")
+
+ # update VDU_SCALING_INFO with the obtained ip_addresses
+ if vdu_scaling_info["scaling_direction"] == "OUT":
+ for vdur in reversed(db_vnfr["vdur"]):
+ if vdu_scaling_info["vdu-create"].get(vdur["vdu-id-ref"]):
+ vdu_scaling_info["vdu-create"][vdur["vdu-id-ref"]] -= 1
+ vdu_scaling_info["vdu"].append({
+ "name": vdur["name"],
+ "vdu_id": vdur["vdu-id-ref"],
+ "interface": []
})
- del vdu_scaling_info["vdu-create"]
+ for interface in vdur["interfaces"]:
+ vdu_scaling_info["vdu"][-1]["interface"].append({
+ "name": interface["name"],
+ "ip_address": interface["ip-address"],
+ "mac_address": interface.get("mac-address"),
+ })
+ del vdu_scaling_info["vdu-create"]
+
+ self._update_suboperation_status(db_nslcmop, op_index, 'COMPLETED', 'Done')
+ # SCALE RO - END
scale_process = None
if db_nsr_update:
self.update_db_2("nsrs", nsr_id, db_nsr_update)
+ # POST-SCALE BEGIN
# execute primitive service POST-SCALING
step = "Executing post-scale vnf-config-primitive"
if scaling_descriptor.get("scaling-config-action"):
for scaling_config_action in scaling_descriptor["scaling-config-action"]:
- if scaling_config_action.get("trigger") and scaling_config_action["trigger"] == "post-scale-out" \
- and scaling_type == "SCALE_OUT":
+ if (scaling_config_action.get("trigger") == "post-scale-in" and scaling_type == "SCALE_IN") \
+ or (scaling_config_action.get("trigger") == "post-scale-out" and scaling_type == "SCALE_OUT"):
vnf_config_primitive = scaling_config_action["vnf-config-primitive-name-ref"]
step = db_nslcmop_update["detailed-status"] = \
"executing post-scale scaling-config-action '{}'".format(vnf_config_primitive)
+
+ vnfr_params = {"VDU_SCALE_INFO": vdu_scaling_info}
+ if db_vnfr.get("additionalParamsForVnf"):
+ vnfr_params.update(db_vnfr["additionalParamsForVnf"])
+
# look for primitive
- primitive_params = {}
for config_primitive in db_vnfd.get("vnf-configuration", {}).get("config-primitive", ()):
if config_primitive["name"] == vnf_config_primitive:
- for parameter in config_primitive.get("parameter", ()):
- if 'default-value' in parameter and \
- parameter['default-value'] == "<VDU_SCALE_INFO>":
- primitive_params[parameter["name"]] = yaml.safe_dump(vdu_scaling_info,
- default_flow_style=True,
- width=256)
break
else:
raise LcmException("Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:"
config_primitive))
scale_process = "VCA"
db_nsr_update["config-status"] = "configuring post-scaling"
+ primitive_params = self._map_primitive_params(config_primitive, {}, vnfr_params)
+
+ # Post-scale reintent check: Check if this sub-operation has been executed before
+ op_index = self._check_or_add_scale_suboperation(
+ db_nslcmop, vnf_index, vnf_config_primitive, primitive_params, 'POST-SCALE')
+ if (op_index == self.SUBOPERATION_STATUS_SKIP):
+ # Skip sub-operation
+ result = 'COMPLETED'
+ result_detail = 'Done'
+ self.logger.debug(logging_text +
+ "vnf_config_primitive={} Skipped sub-operation, result {} {}".
+ format(vnf_config_primitive, result, result_detail))
+ else:
+ if (op_index == self.SUBOPERATION_STATUS_NEW):
+ # New sub-operation: Get index of this sub-operation
+ op_index = len(db_nslcmop.get('_admin', {}).get('operations')) - 1
+ self.logger.debug(logging_text + "vnf_config_primitive={} New sub-operation".
+ format(vnf_config_primitive))
+ else:
+ # Reintent: Get registered params for this existing sub-operation
+ op = db_nslcmop.get('_admin', {}).get('operations', [])[op_index]
+ vnf_index = op.get('member_vnf_index')
+ vnf_config_primitive = op.get('primitive')
+ primitive_params = op.get('primitive_params')
+ self.logger.debug(logging_text + "vnf_config_primitive={} Sub-operation reintent".
+ format(vnf_config_primitive))
+ # Execute the primitive, either with new (first-time) or registered (reintent) args
+ result, result_detail = await self._ns_execute_primitive(
+ nsr_deployed, vnf_index, None, None, None, vnf_config_primitive, primitive_params)
+ self.logger.debug(logging_text + "vnf_config_primitive={} Done with result {} {}".format(
+ vnf_config_primitive, result, result_detail))
+ # Update operationState = COMPLETED | FAILED
+ self._update_suboperation_status(
+ db_nslcmop, op_index, result, result_detail)
- result, result_detail = await self._ns_execute_primitive(nsr_deployed, nsr_name, vnf_index,
- None, None, None, vnf_config_primitive,
- primitive_params)
- self.logger.debug(logging_text + "vnf_config_primitive={} Done with result {} {}".format(
- vnf_config_primitive, result, result_detail))
if result == "FAILED":
raise LcmException(result_detail)
db_nsr_update["config-status"] = old_config_status
scale_process = None
+ # POST-SCALE END
db_nslcmop_update["operationState"] = nslcmop_operation_state = "COMPLETED"
db_nslcmop_update["statusEnteredTime"] = time()
db_nslcmop_update["detailed-status"] = "done"
db_nsr_update["detailed-status"] = "" # "scaled {} {}".format(scaling_group, scaling_type)
- db_nsr_update["operational-status"] = old_operational_status
+ db_nsr_update["operational-status"] = "running" if old_operational_status == "failed" \
+ else old_operational_status
db_nsr_update["config-status"] = old_config_status
return
except (ROclient.ROClientException, DbException, LcmException) as e:
db_nsr_update["operational-status"] = "failed"
db_nsr_update["detailed-status"] = "FAILED scaling nslcmop={} {}: {}".format(nslcmop_id, step,
exc)
- if db_nslcmop_update:
- self.update_db_2("nslcmops", nslcmop_id, db_nslcmop_update)
- if db_nsr:
- db_nsr_update["_admin.nslcmop"] = None
- self.update_db_2("nsrs", nsr_id, db_nsr_update)
+ try:
+ if db_nslcmop and db_nslcmop_update:
+ self.update_db_2("nslcmops", nslcmop_id, db_nslcmop_update)
+ if db_nsr:
+ db_nsr_update["_admin.nslcmop"] = None
+ self.update_db_2("nsrs", nsr_id, db_nsr_update)
+ except DbException as e:
+ self.logger.error(logging_text + "Cannot update database: {}".format(e))
if nslcmop_operation_state:
try:
await self.msg.aiowrite("ns", "scaled", {"nsr_id": nsr_id, "nslcmop_id": nslcmop_id,
- "operationState": nslcmop_operation_state})
+ "operationState": nslcmop_operation_state},
+ loop=self.loop)
# if cooldown_time:
# await asyncio.sleep(cooldown_time)
# await self.msg.aiowrite("ns","scaled-cooldown-time", {"nsr_id": nsr_id, "nslcmop_id": nslcmop_id})