from osm_lcm.ng_ro import NgRoClient, NgRoException
from osm_lcm.lcm_utils import LcmException, LcmExceptionNoMgmtIP, LcmBase, deep_get, get_iterable, populate_dict
from osm_lcm.data_utils.nsd import get_vnf_profiles
-from osm_lcm.data_utils.vnfd import get_vnf_configuration, get_vdu_list, get_vdu_profile, \
+from osm_lcm.data_utils.vnfd import get_vdu_list, get_vdu_profile, \
get_ee_sorted_initial_config_primitive_list, get_ee_sorted_terminate_config_primitive_list, \
- get_kdu_list, get_virtual_link_profiles, get_vdu, get_vdu_configuration, get_kdu_configuration, \
- get_vdu_index, get_scaling_aspect, get_number_of_instances
+ get_kdu_list, get_virtual_link_profiles, get_vdu, get_configuration, \
+ get_vdu_index, get_scaling_aspect, get_number_of_instances, get_juju_ee_ref
from osm_lcm.data_utils.list_utils import find_in_list
-from osm_lcm.data_utils.vnfr import get_osm_params
+from osm_lcm.data_utils.vnfr import get_osm_params, get_vdur_index
from osm_lcm.data_utils.dict_utils import parse_yaml_strings
+from osm_lcm.data_utils.vim import get_vim_account
from osm_lcm.data_utils.database.vim_account import VimAccountDB
from n2vc.k8s_helm_conn import K8sHelmConnector
self.n2vc = N2VCJujuConnector(
log=self.logger,
loop=self.loop,
- url='{}:{}'.format(self.vca_config['host'], self.vca_config['port']),
- username=self.vca_config.get('user', None),
- vca_config=self.vca_config,
on_update_db=self._on_update_n2vc_db,
fs=self.fs,
db=self.db
self.conn_helm_ee = LCMHelmConn(
log=self.logger,
loop=self.loop,
- url=None,
- username=None,
vca_config=self.vca_config,
on_update_db=self._on_update_n2vc_db
)
juju_command=self.vca_config.get("jujupath"),
log=self.logger,
loop=self.loop,
- on_update_db=None,
- vca_config=self.vca_config,
+ on_update_db=self._on_update_k8s_db,
fs=self.fs,
db=self.db
)
except Exception as e:
self.logger.warn('Cannot write database RO deployment for ns={} -> {}'.format(nsrs_id, e))
- async def _on_update_n2vc_db(self, table, filter, path, updated_data):
+ async def _on_update_n2vc_db(self, table, filter, path, updated_data, vca_id=None):
# remove last dot from path (if exists)
if path.endswith('.'):
# self.logger.debug('_on_update_n2vc_db(table={}, filter={}, path={}, updated_data={}'
# .format(table, filter, path, updated_data))
-
try:
nsr_id = filter.get('_id')
current_ns_status = nsr.get('nsState')
# get vca status for NS
- status_dict = await self.n2vc.get_status(namespace='.' + nsr_id, yaml_format=False)
+ status_dict = await self.n2vc.get_status(namespace='.' + nsr_id, yaml_format=False, vca_id=vca_id)
# vcaStatus
db_dict = dict()
db_dict['vcaStatus'] = status_dict
+ await self.n2vc.update_vca_status(db_dict['vcaStatus'], vca_id=vca_id)
# update configurationStatus for this VCA
try:
except Exception as e:
self.logger.warn('Error updating NS state for ns={}: {}'.format(nsr_id, e))
+ async def _on_update_k8s_db(self, cluster_uuid, kdu_instance, filter=None, vca_id=None):
+ """
+ Updating vca status in NSR record
+ :param cluster_uuid: UUID of a k8s cluster
+ :param kdu_instance: The unique name of the KDU instance
+ :param filter: To get nsr_id
+ :return: none
+ """
+
+ # self.logger.debug("_on_update_k8s_db(cluster_uuid={}, kdu_instance={}, filter={}"
+ # .format(cluster_uuid, kdu_instance, filter))
+
+ try:
+            nsr_id = filter.get('_id') if filter else None
+
+ # get vca status for NS
+ vca_status = await self.k8sclusterjuju.status_kdu(
+ cluster_uuid,
+ kdu_instance,
+ complete_status=True,
+ yaml_format=False,
+ vca_id=vca_id,
+ )
+ # vcaStatus
+ db_dict = dict()
+ db_dict['vcaStatus'] = {nsr_id: vca_status}
+
+ await self.k8sclusterjuju.update_vca_status(
+ db_dict['vcaStatus'],
+ kdu_instance,
+ vca_id=vca_id,
+ )
+
+ # write to database
+ self.update_db_2("nsrs", nsr_id, db_dict)
+
+ except (asyncio.CancelledError, asyncio.TimeoutError):
+ raise
+ except Exception as e:
+ self.logger.warn('Error updating NS state for ns={}: {}'.format(nsr_id, e))
+
@staticmethod
def _parse_cloud_init(cloud_init_text, additional_params, vnfd_id, vdu_id):
try:
# remove unused by RO configuration, monitoring, scaling and internal keys
vnfd_RO.pop("_id", None)
vnfd_RO.pop("_admin", None)
- vnfd_RO.pop("vnf-configuration", None)
vnfd_RO.pop("monitoring-param", None)
vnfd_RO.pop("scaling-group-descriptor", None)
vnfd_RO.pop("kdu", None)
for param in ("vim-network-name", "vim-network-id"):
if vld_params.get(param):
if isinstance(vld_params[param], dict):
- for vim, vim_net in vld_params[param]:
+ for vim, vim_net in vld_params[param].items():
other_target_vim = "vim:" + vim
populate_dict(target_vld["vim_info"], (other_target_vim, param.replace("-", "_")), vim_net)
else: # isinstance str
}
# check if this network needs SDN assist
if vld.get("pci-interfaces"):
- db_vim = VimAccountDB.get_vim_account_with_id(target_vld["vim_info"][0]["vim_account_id"])
+ db_vim = get_vim_account(ns_params["vimAccountId"])
sdnc_id = db_vim["config"].get("sdn-controller")
if sdnc_id:
- target_vld["vim_info"].append({"sdnc_id": sdnc_id})
+ sdn_vld = "nsrs:{}:vld.{}".format(nsr_id, vld["id"])
+ target_sdn = "sdn:{}".format(sdnc_id)
+ target_vld["vim_info"][target_sdn] = {
+ "sdn": True, "target_vim": target_vim, "vlds": [sdn_vld], "type": vld.get("type")}
nsd_vnf_profiles = get_vnf_profiles(nsd)
for nsd_vnf_profile in nsd_vnf_profiles:
# check at nsd descriptor, if there is an ip-profile
vld_params = {}
- virtual_link_profiles = get_virtual_link_profiles(nsd)
+ nsd_vlp = find_in_list(
+ get_virtual_link_profiles(nsd),
+ lambda a_link_profile: a_link_profile["virtual-link-desc-id"] == vld["id"])
+ if nsd_vlp and nsd_vlp.get("virtual-link-protocol-data") and \
+ nsd_vlp["virtual-link-protocol-data"].get("l3-protocol-data"):
+ ip_profile_source_data = nsd_vlp["virtual-link-protocol-data"]["l3-protocol-data"]
+ ip_profile_dest_data = {}
+ if "ip-version" in ip_profile_source_data:
+ ip_profile_dest_data["ip-version"] = ip_profile_source_data["ip-version"]
+ if "cidr" in ip_profile_source_data:
+ ip_profile_dest_data["subnet-address"] = ip_profile_source_data["cidr"]
+ if "gateway-ip" in ip_profile_source_data:
+ ip_profile_dest_data["gateway-address"] = ip_profile_source_data["gateway-ip"]
+ if "dhcp-enabled" in ip_profile_source_data:
+ ip_profile_dest_data["dhcp-params"] = {
+ "enabled": ip_profile_source_data["dhcp-enabled"]
+ }
+ vld_params["ip-profile"] = ip_profile_dest_data
- for vlp in virtual_link_profiles:
- ip_profile = find_in_list(nsd["ip-profiles"],
- lambda profile: profile["name"] == vlp["ip-profile-ref"])
- vld_params["ip-profile"] = ip_profile["ip-profile-params"]
# update vld_params with instantiation params
vld_instantiation_params = find_in_list(get_iterable(ns_params, "vld"),
lambda a_vld: a_vld["name"] in (vld["name"], vld["id"]))
self.logger.debug("NS > ssh_keys > {}".format(ssh_keys_all))
if ssh_keys_all:
- vdu_configuration = get_vdu_configuration(vnfd, vdur["vdu-id-ref"])
- vnf_configuration = get_vnf_configuration(vnfd)
+ vdu_configuration = get_configuration(vnfd, vdur["vdu-id-ref"])
+ vnf_configuration = get_configuration(vnfd, vnfd["id"])
if vdu_configuration and vdu_configuration.get("config-access") and \
vdu_configuration.get("config-access").get("ssh-access"):
vdur["ssh-keys"] = ssh_keys_all
ns_flavor = target["flavor"][int(vdur["ns-flavor-id"])]
if target_vim not in ns_flavor["vim_info"]:
ns_flavor["vim_info"][target_vim] = {}
- # image
- ns_image = target["image"][int(vdur["ns-image-id"])]
+
+ # deal with images
+ # in case alternative images are provided we must check if they should be applied
+ # for the vim_type, modify the vim_type taking into account
+ ns_image_id = int(vdur["ns-image-id"])
+ if vdur.get("alt-image-ids"):
+ db_vim = get_vim_account(vnfr["vim-account-id"])
+ vim_type = db_vim["vim_type"]
+ for alt_image_id in vdur.get("alt-image-ids"):
+ ns_alt_image = target["image"][int(alt_image_id)]
+ if vim_type == ns_alt_image.get("vim-type"):
+ # must use alternative image
+ self.logger.debug("use alternative image id: {}".format(alt_image_id))
+ ns_image_id = alt_image_id
+ vdur["ns-image-id"] = ns_image_id
+ break
+ ns_image = target["image"][int(ns_image_id)]
if target_vim not in ns_image["vim_info"]:
ns_image["vim_info"][target_vim] = {}
raise LcmException("Configuration aborted because dependent charm/s timeout")
+ def get_vca_id(self, db_vnfr: dict, db_nsr: dict):
+ return (
+ deep_get(db_vnfr, ("vca-id",)) or
+ deep_get(db_nsr, ("instantiate_params", "vcaId"))
+ )
+
async def instantiate_N2VC(self, logging_text, vca_index, nsi_id, db_nsr, db_vnfr, vdu_id, kdu_name, vdu_index,
config_descriptor, deploy_params, base_folder, nslcmop_id, stage, vca_type, vca_name,
ee_config_descriptor):
if vnfr_id:
element_type = 'VNF'
element_under_configuration = vnfr_id
- namespace += ".{}".format(vnfr_id)
+ namespace += ".{}-{}".format(vnfr_id, vdu_index or 0)
if vdu_id:
namespace += ".{}-{}".format(vdu_id, vdu_index or 0)
element_type = 'VDU'
element_under_configuration = "{}-{}".format(vdu_id, vdu_index or 0)
osm_config["osm"]["vdu_id"] = vdu_id
elif kdu_name:
- namespace += ".{}".format(kdu_name)
+ namespace += ".{}.{}".format(kdu_name, vdu_index or 0)
element_type = 'KDU'
element_under_configuration = kdu_name
osm_config["osm"]["kdu_name"] = kdu_name
# find old ee_id if exists
ee_id = vca_deployed.get("ee_id")
- vim_account_id = (
- deep_get(db_vnfr, ("vim-account-id",)) or
- deep_get(deploy_params, ("OSM", "vim_account_id"))
- )
- vca_cloud, vca_cloud_credential = self.get_vca_cloud_and_credentials(vim_account_id)
- vca_k8s_cloud, vca_k8s_cloud_credential = self.get_vca_k8s_cloud_and_credentials(vim_account_id)
+ vca_id = self.get_vca_id(db_vnfr, db_nsr)
# create or register execution environment in VCA
if vca_type in ("lxc_proxy_charm", "k8s_proxy_charm", "helm", "helm-v3"):
)
step = "create execution environment"
- self.logger.debug(logging_text + step)
+ self.logger.debug(logging_text + step)
ee_id = None
credentials = None
namespace=namespace,
artifact_path=artifact_path,
db_dict=db_dict,
- cloud_name=vca_k8s_cloud,
- credential_name=vca_k8s_cloud_credential,
+ vca_id=vca_id,
)
- elif vca_type == "helm" or vca_type == "helm-v3":
+ elif vca_type == "helm" or vca_type == "helm-v3":
ee_id, credentials = await self.vca_map[vca_type].create_execution_environment(
namespace=namespace,
reuse_ee_id=ee_id,
db_dict=db_dict,
- cloud_name=vca_cloud,
- credential_name=vca_cloud_credential,
+ config=osm_config,
+ artifact_path=artifact_path,
+ vca_type=vca_type
)
- else:
+ else:
ee_id, credentials = await self.vca_map[vca_type].create_execution_environment(
namespace=namespace,
reuse_ee_id=ee_id,
db_dict=db_dict,
- cloud_name=vca_cloud,
- credential_name=vca_cloud_credential,
+ vca_id=vca_id,
)
elif vca_type == "native_charm":
credentials=credentials,
namespace=namespace,
db_dict=db_dict,
- cloud_name=vca_cloud,
- credential_name=vca_cloud_credential,
+ vca_id=vca_id,
)
# for compatibility with MON/POL modules, the need model and application name at database
db_dict=db_dict,
config=config,
num_units=num_units,
+ vca_id=vca_id,
)
# write in db flag of configuration_sw already installed
# add relations for this VCA (wait for other peers related with this VCA)
await self._add_vca_relations(logging_text=logging_text, nsr_id=nsr_id,
- vca_index=vca_index, vca_type=vca_type)
+ vca_index=vca_index, vca_id=vca_id, vca_type=vca_type)
# if SSH access is required, then get execution environment SSH public
# if native charm we have waited already to VM be UP
# Needed to inject a ssh key
user = deep_get(config_descriptor, ("config-access", "ssh-access", "default-user"))
step = "Install configuration Software, getting public ssh key"
- pub_key = await self.vca_map[vca_type].get_ee_ssh_public__key(ee_id=ee_id, db_dict=db_dict)
+ pub_key = await self.vca_map[vca_type].get_ee_ssh_public__key(
+ ee_id=ee_id,
+ db_dict=db_dict,
+ vca_id=vca_id
+ )
step = "Insert public key into VM user={} ssh_key={}".format(user, pub_key)
else:
ee_id=ee_id,
primitive_name=initial_config_primitive["name"],
params_dict=primitive_params_,
- db_dict=db_dict
+ db_dict=db_dict,
+ vca_id=vca_id,
)
# Once some primitive has been exec, check and write at db if it needs to exec terminated primitives
if check_if_terminated_needed:
# wait for any previous tasks in process
await self.lcm_tasks.waitfor_related_HA('ns', 'nslcmops', nslcmop_id)
- stage[1] = "Sync filesystem from database."
- self.fs.sync() # TODO, make use of partial sync, only for the needed packages
-
# STEP 0: Reading database (nslcmops, nsrs, nsds, vnfrs, vnfds)
stage[1] = "Reading from database."
# nsState="BUILDING", currentOperation="INSTANTIATING", currentOperationID=nslcmop_id
db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
stage[1] = "Getting nsd={} from db.".format(db_nsr["nsd-id"])
nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
+ self.fs.sync(db_nsr["nsd-id"])
db_nsr["nsd"] = nsd
# nsr_name = db_nsr["name"] # TODO short-name??
db_vnfrs[vnfr["member-vnf-index-ref"]] = vnfr
vnfd_id = vnfr["vnfd-id"]
vnfd_ref = vnfr["vnfd-ref"]
+ self.fs.sync(vnfd_id)
# if we haven't this vnfd, read it from db
if vnfd_id not in db_vnfds:
vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
# store vnfd
- db_vnfds.append(vnfd) # vnfd's indexed by id
+ db_vnfds.append(vnfd)
# Get or generates the _admin.deployed.VCA list
vca_deployed_list = None
if db_vnfr.get("additionalParamsForVnf"):
deploy_params.update(parse_yaml_strings(db_vnfr["additionalParamsForVnf"].copy()))
- descriptor_config = get_vnf_configuration(vnfd)
+ descriptor_config = get_configuration(vnfd, vnfd["id"])
if descriptor_config:
self._deploy_n2vc(
logging_text=logging_text + "member_vnf_index={} ".format(member_vnf_index),
# Deploy charms for each VDU that supports one.
for vdud in get_vdu_list(vnfd):
vdu_id = vdud["id"]
- descriptor_config = get_vdu_configuration(vnfd, vdu_id)
+ descriptor_config = get_configuration(vnfd, vdu_id)
vdur = find_in_list(db_vnfr["vdur"], lambda vdu: vdu["vdu-id-ref"] == vdu_id)
if vdur.get("additionalParams"):
)
for kdud in get_kdu_list(vnfd):
kdu_name = kdud["name"]
- descriptor_config = get_kdu_configuration(vnfd, kdu_name)
+ descriptor_config = get_configuration(vnfd, kdu_name)
if descriptor_config:
vdu_id = None
vdu_index = 0
vdu_name = None
# Get additional parameters
- deploy_params = {"OSM": get_osm_params(db_vnfr)}
+ deploy_params = {"OSM": {"vim_account_id": ns_params["vimAccountId"]}}
if db_nsr.get("additionalParamsForNs"):
deploy_params.update(parse_yaml_strings(db_nsr["additionalParamsForNs"].copy()))
base_folder = nsd["_admin"]["storage"]
self.logger.debug(logging_text + "Exit")
self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_instantiate")
- async def _add_vca_relations(self, logging_text, nsr_id, vca_index: int,
- timeout: int = 3600, vca_type: str = None) -> bool:
+ async def _add_vca_relations(
+ self,
+ logging_text,
+ nsr_id,
+ vca_index: int,
+ timeout: int = 3600,
+ vca_type: str = None,
+ vca_id: str = None,
+ ) -> bool:
# steps:
# 1. find all relations for this VCA
db_vnfd_list = db_nsr.get('vnfd-id')
if db_vnfd_list:
for vnfd in db_vnfd_list:
+ db_vnf_relations = None
db_vnfd = self.db.get_one("vnfds", {"_id": vnfd})
- db_vnf_relations = deep_get(db_vnfd, ('vnf-configuration', 'relation'))
+ db_vnf_configuration = get_configuration(db_vnfd, db_vnfd["id"])
+ if db_vnf_configuration:
+ db_vnf_relations = db_vnf_configuration.get("relation", [])
if db_vnf_relations:
for r in db_vnf_relations:
# check if this VCA is in the relation
ee_id_1=from_vca_ee_id,
ee_id_2=to_vca_ee_id,
endpoint_1=from_vca_endpoint,
- endpoint_2=to_vca_endpoint)
+ endpoint_2=to_vca_endpoint,
+ vca_id=vca_id,
+ )
# remove entry from relations list
ns_relations.remove(r)
else:
ee_id_1=from_vca_ee_id,
ee_id_2=to_vca_ee_id,
endpoint_1=from_vca_endpoint,
- endpoint_2=to_vca_endpoint)
+ endpoint_2=to_vca_endpoint,
+ vca_id=vca_id,
+ )
# remove entry from relations list
vnf_relations.remove(r)
else:
return False
async def _install_kdu(self, nsr_id: str, nsr_db_path: str, vnfr_data: dict, kdu_index: int, kdud: dict,
- vnfd: dict, k8s_instance_info: dict, k8params: dict = None, timeout: int = 600):
+ vnfd: dict, k8s_instance_info: dict, k8params: dict = None, timeout: int = 600,
+ vca_id: str = None):
try:
k8sclustertype = k8s_instance_info["k8scluster-type"]
"filter": {"_id": nsr_id},
"path": nsr_db_path}
- kdu_instance = await self.k8scluster_map[k8sclustertype].install(
+ kdu_instance = self.k8scluster_map[k8sclustertype].generate_kdu_instance_name(
+ db_dict=db_dict_install,
+ kdu_model=k8s_instance_info["kdu-model"],
+ kdu_name=k8s_instance_info["kdu-name"],
+ )
+ self.update_db_2("nsrs", nsr_id, {nsr_db_path + ".kdu-instance": kdu_instance})
+ await self.k8scluster_map[k8sclustertype].install(
cluster_uuid=k8s_instance_info["k8scluster-uuid"],
kdu_model=k8s_instance_info["kdu-model"],
atomic=True,
db_dict=db_dict_install,
timeout=timeout,
kdu_name=k8s_instance_info["kdu-name"],
- namespace=k8s_instance_info["namespace"])
+ namespace=k8s_instance_info["namespace"],
+ kdu_instance=kdu_instance,
+ vca_id=vca_id,
+ )
-        self.update_db_2("nsrs", nsr_id, {nsr_db_path + ".kdu-instance": kdu_instance})
# Obtain services to obtain management service ip
# Obtain management service info (if exists)
vnfr_update_dict = {}
+ kdu_config = get_configuration(vnfd, kdud["name"])
+ if kdu_config:
+ target_ee_list = kdu_config.get("execution-environment-list", [])
+ else:
+ target_ee_list = []
+
if services:
vnfr_update_dict["kdur.{}.services".format(kdu_index)] = services
mgmt_services = [service for service in kdud.get("service", []) if service.get("mgmt-service")]
if deep_get(vnfd, ("mgmt-interface", "cp")) == service_external_cp:
vnfr_update_dict["ip-address"] = ip
+ if find_in_list(
+ target_ee_list,
+ lambda ee: ee.get("external-connection-point-ref", "") == service_external_cp
+ ):
+ vnfr_update_dict["kdur.{}.ip-address".format(kdu_index)] = ip
break
else:
self.logger.warn("Mgmt service name: {} not found".format(mgmt_service["name"]))
vnfr_update_dict["kdur.{}.status".format(kdu_index)] = "READY"
self.update_db_2("vnfrs", vnfr_data.get("_id"), vnfr_update_dict)
- kdu_config = kdud.get("kdu-configuration")
- if kdu_config and kdu_config.get("initial-config-primitive") and kdu_config.get("juju") is None:
+ kdu_config = get_configuration(vnfd, k8s_instance_info["kdu-name"])
+ if kdu_config and kdu_config.get("initial-config-primitive") and \
+ get_juju_ee_ref(vnfd, k8s_instance_info["kdu-name"]) is None:
initial_config_primitive_list = kdu_config.get("initial-config-primitive")
initial_config_primitive_list.sort(key=lambda val: int(val["seq"]))
cluster_uuid=k8s_instance_info["k8scluster-uuid"],
kdu_instance=kdu_instance,
primitive_name=initial_config_primitive["name"],
- params=primitive_params_, db_dict={}),
- timeout=timeout)
+ params=primitive_params_, db_dict=db_dict_install,
+ vca_id=vca_id,
+ ),
+ timeout=timeout
+ )
except Exception as e:
# Prepare update db with error and raise exception
updated_v3_cluster_list = []
for vnfr_data in db_vnfrs.values():
+ vca_id = self.get_vca_id(vnfr_data, {})
for kdu_index, kdur in enumerate(get_iterable(vnfr_data, "kdur")):
# Step 0: Prepare and set parameters
desc_params = parse_yaml_strings(kdur.get("additionalParams"))
vnfd_id = vnfr_data.get('vnfd-id')
- kdud = next(kdud for kdud in db_vnfds[vnfd_id]["kdu"] if kdud["name"] == kdur["kdu-name"])
+ vnfd_with_id = find_in_list(db_vnfds, lambda vnfd: vnfd["_id"] == vnfd_id)
+ kdud = next(kdud for kdud in vnfd_with_id["kdu"] if kdud["name"] == kdur["kdu-name"])
namespace = kdur.get("k8s-namespace")
if kdur.get("helm-chart"):
kdumodel = kdur["helm-chart"]
format(vnfr_data["member-vnf-index-ref"], kdur["kdu-name"]))
# check if kdumodel is a file and exists
try:
- storage = deep_get(db_vnfds.get(vnfd_id), ('_admin', 'storage'))
+ vnfd_with_id = find_in_list(db_vnfds, lambda vnfd: vnfd["_id"] == vnfd_id)
+ storage = deep_get(vnfd_with_id, ('_admin', 'storage'))
if storage and storage.get('pkg-dir'): # may be not present if vnfd has not artifacts
# path format: /vnfdid/pkkdir/helm-charts|juju-bundles/kdumodel
filename = '{}/{}/{}s/{}'.format(storage["folder"], storage["pkg-dir"], k8sclustertype,
db_path = "_admin.deployed.K8s.{}".format(index)
db_nsr_update[db_path] = k8s_instance_info
self.update_db_2("nsrs", nsr_id, db_nsr_update)
-
+ vnfd_with_id = find_in_list(db_vnfds, lambda vnf: vnf["_id"] == vnfd_id)
task = asyncio.ensure_future(
- self._install_kdu(nsr_id, db_path, vnfr_data, kdu_index, kdud, db_vnfds[vnfd_id],
- k8s_instance_info, k8params=desc_params, timeout=600))
+ self._install_kdu(nsr_id, db_path, vnfr_data, kdu_index, kdud, vnfd_with_id,
+ k8s_instance_info, k8params=desc_params, timeout=600, vca_id=vca_id))
self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "instantiate_KDU-{}".format(index), task)
task_instantiation_info[task] = "Deploying KDU {}".format(kdur["kdu-name"])
# fill db_nsr._admin.deployed.VCA.<index>
self.logger.debug(logging_text + "_deploy_n2vc vnfd_id={}, vdu_id={}".format(vnfd_id, vdu_id))
- if descriptor_config.get("juju"): # There is one execution envioronment of type juju
- ee_list = [descriptor_config]
- elif descriptor_config.get("execution-environment-list"):
- ee_list = descriptor_config.get("execution-environment-list")
+ if "execution-environment-list" in descriptor_config:
+ ee_list = descriptor_config.get("execution-environment-list", [])
else: # other types as script are not supported
ee_list = []
if vca["member-vnf-index"] == vnf_index and vca["vdu_id"] == vdu_id:
return vca["ee_id"]
- async def destroy_N2VC(self, logging_text, db_nslcmop, vca_deployed, config_descriptor,
- vca_index, destroy_ee=True, exec_primitives=True):
+ async def destroy_N2VC(
+ self,
+ logging_text,
+ db_nslcmop,
+ vca_deployed,
+ config_descriptor,
+ vca_index,
+ destroy_ee=True,
+ exec_primitives=True,
+ scaling_in=False,
+ vca_id: str = None,
+ ):
"""
Execute the terminate primitives and destroy the execution environment (if destroy_ee=False
:param logging_text:
:param destroy_ee: False to do not destroy, because it will be destroyed all of then at once
:param exec_primitives: False to do not execute terminate primitives, because the config is not completed or has
not executed properly
+ :param scaling_in: True destroys the application, False destroys the model
:return: None or exception
"""
mapped_primitive_params)
# Sub-operations: Call _ns_execute_primitive() instead of action()
try:
- result, result_detail = await self._ns_execute_primitive(vca_deployed["ee_id"], primitive,
- mapped_primitive_params,
- vca_type=vca_type)
+ result, result_detail = await self._ns_execute_primitive(
+ vca_deployed["ee_id"], primitive,
+ mapped_primitive_params,
+ vca_type=vca_type,
+ vca_id=vca_id,
+ )
except LcmException:
# this happens when VCA is not deployed. In this case it is not needed to terminate
continue
await self.prometheus.update(remove_jobs=vca_deployed["prometheus_jobs"])
if destroy_ee:
- await self.vca_map[vca_type].delete_execution_environment(vca_deployed["ee_id"])
+ await self.vca_map[vca_type].delete_execution_environment(
+ vca_deployed["ee_id"],
+ scaling_in=scaling_in,
+ vca_id=vca_id,
+ )
- async def _delete_all_N2VC(self, db_nsr: dict):
+ async def _delete_all_N2VC(self, db_nsr: dict, vca_id: str = None):
self._write_all_config_status(db_nsr=db_nsr, status='TERMINATING')
namespace = "." + db_nsr["_id"]
try:
- await self.n2vc.delete_namespace(namespace=namespace, total_timeout=self.timeout_charm_delete)
+ await self.n2vc.delete_namespace(
+ namespace=namespace,
+ total_timeout=self.timeout_charm_delete,
+ vca_id=vca_id,
+ )
except N2VCNotFound: # already deleted. Skip
pass
self._write_all_config_status(db_nsr=db_nsr, status='DELETED')
stage[1] = "Getting vnf descriptors from db."
db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
+ db_vnfrs_dict = {db_vnfr["member-vnf-index-ref"]: db_vnfr for db_vnfr in db_vnfrs_list}
db_vnfds_from_id = {}
db_vnfds_from_member_index = {}
# Loop over VNFRs
for vca_index, vca in enumerate(get_iterable(nsr_deployed, "VCA")):
config_descriptor = None
+
+ vca_id = self.get_vca_id(db_vnfrs_dict[vca["member-vnf-index"]], db_nsr)
if not vca or not vca.get("ee_id"):
continue
if not vca.get("member-vnf-index"):
config_descriptor = db_nsr.get("ns-configuration")
elif vca.get("vdu_id"):
db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
- vdud = next((vdu for vdu in db_vnfd.get("vdu", ()) if vdu["id"] == vca.get("vdu_id")), None)
- if vdud:
- config_descriptor = vdud.get("vdu-configuration")
+ config_descriptor = get_configuration(db_vnfd, vca.get("vdu_id"))
elif vca.get("kdu_name"):
db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
- kdud = next((kdu for kdu in db_vnfd.get("kdu", ()) if kdu["name"] == vca.get("kdu_name")), None)
- if kdud:
- config_descriptor = kdud.get("kdu-configuration")
+ config_descriptor = get_configuration(db_vnfd, vca.get("kdu_name"))
else:
- config_descriptor = db_vnfds_from_member_index[vca["member-vnf-index"]].get("vnf-configuration")
+ db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
+ config_descriptor = get_configuration(db_vnfd, db_vnfd["id"])
vca_type = vca.get("type")
exec_terminate_primitives = (not operation_params.get("skip_terminate_primitives") and
vca.get("needed_terminate"))
# self.logger.debug(logging_text + "vca_index: {}, ee_id: {}, vca_type: {} destroy_ee: {}".format(
# vca_index, vca.get("ee_id"), vca_type, destroy_ee))
task = asyncio.ensure_future(
- self.destroy_N2VC(logging_text, db_nslcmop, vca, config_descriptor, vca_index,
- destroy_ee, exec_terminate_primitives))
+ self.destroy_N2VC(
+ logging_text,
+ db_nslcmop,
+ vca,
+ config_descriptor,
+ vca_index,
+ destroy_ee,
+ exec_terminate_primitives,
+ vca_id=vca_id,
+ )
+ )
tasks_dict_info[task] = "Terminating VCA {}".format(vca.get("ee_id"))
# wait for pending tasks of terminate primitives
if nsr_deployed.get("VCA"):
stage[1] = "Deleting all execution environments."
self.logger.debug(logging_text + stage[1])
- task_delete_ee = asyncio.ensure_future(asyncio.wait_for(self._delete_all_N2VC(db_nsr=db_nsr),
- timeout=self.timeout_charm_delete))
+ vca_id = self.get_vca_id({}, db_nsr)
+ task_delete_ee = asyncio.ensure_future(
+ asyncio.wait_for(
+ self._delete_all_N2VC(db_nsr=db_nsr, vca_id=vca_id),
+ timeout=self.timeout_charm_delete
+ )
+ )
# task_delete_ee = asyncio.ensure_future(self.n2vc.delete_namespace(namespace="." + nsr_id))
tasks_dict_info[task_delete_ee] = "Terminating all VCA"
continue
kdu_instance = kdu.get("kdu-instance")
if kdu.get("k8scluster-type") in self.k8scluster_map:
+ # TODO: Uninstall kdu instances taking into account they could be deployed in different VIMs
+ vca_id = self.get_vca_id({}, db_nsr)
task_delete_kdu_instance = asyncio.ensure_future(
self.k8scluster_map[kdu["k8scluster-type"]].uninstall(
cluster_uuid=kdu.get("k8scluster-uuid"),
- kdu_instance=kdu_instance))
+ kdu_instance=kdu_instance,
+ vca_id=vca_id,
+ )
+ )
else:
self.logger.error(logging_text + "Unknown k8s deployment type {}".
format(kdu.get("k8scluster-type")))
.format(member_vnf_index, vdu_id, kdu_name, vdu_count_index))
return ee_id, vca_type
- async def _ns_execute_primitive(self, ee_id, primitive, primitive_params, retries=0, retries_interval=30,
- timeout=None, vca_type=None, db_dict=None) -> (str, str):
+ async def _ns_execute_primitive(
+ self,
+ ee_id,
+ primitive,
+ primitive_params,
+ retries=0,
+ retries_interval=30,
+ timeout=None,
+ vca_type=None,
+ db_dict=None,
+ vca_id: str = None,
+ ) -> (str, str):
try:
if primitive == "config":
primitive_params = {"params": primitive_params}
params_dict=primitive_params,
progress_timeout=self.timeout_progress_primitive,
total_timeout=self.timeout_primitive,
- db_dict=db_dict),
+ db_dict=db_dict,
+ vca_id=vca_id,
+ ),
timeout=timeout or self.timeout_primitive)
# execution was OK
break
except Exception as e:
return 'FAIL', 'Error executing action {}: {}'.format(primitive, e)
+ async def vca_status_refresh(self, nsr_id, nslcmop_id):
+ """
+ Updating the vca_status with latest juju information in nsrs record
+ :param: nsr_id: Id of the nsr
+ :param: nslcmop_id: Id of the nslcmop
+ :return: None
+ """
+
+ self.logger.debug("Task ns={} action={} Enter".format(nsr_id, nslcmop_id))
+ db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
+ vca_id = self.get_vca_id({}, db_nsr)
+ if db_nsr['_admin']['deployed']['K8s']:
+ for k8s_index, k8s in enumerate(db_nsr['_admin']['deployed']['K8s']):
+ cluster_uuid, kdu_instance = k8s["k8scluster-uuid"], k8s["kdu-instance"]
+ await self._on_update_k8s_db(cluster_uuid, kdu_instance, filter={'_id': nsr_id}, vca_id=vca_id)
+ else:
+ for vca_index, _ in enumerate(db_nsr['_admin']['deployed']['VCA']):
+ table, filter = "nsrs", {"_id": nsr_id}
+ path = "_admin.deployed.VCA.{}.".format(vca_index)
+                await self._on_update_n2vc_db(table, filter, path, {}, vca_id=vca_id)
+
+ self.logger.debug("Task ns={} action={} Exit".format(nsr_id, nslcmop_id))
+ self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_vca_status_refresh")
+
async def action(self, nsr_id, nslcmop_id):
# Try to lock HA task here
task_is_locked_by_me = self.lcm_tasks.lock_HA('ns', 'nslcmops', nslcmop_id)
step = "Getting nsd from database"
db_nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
+ vca_id = self.get_vca_id(db_vnfr, db_nsr)
# for backward compatibility
if nsr_deployed and isinstance(nsr_deployed.get("VCA"), dict):
nsr_deployed["VCA"] = list(nsr_deployed["VCA"].values())
# look for primitive
config_primitive_desc = descriptor_configuration = None
if vdu_id:
- descriptor_configuration = get_vdu_configuration(db_vnfd, vdu_id)
+ descriptor_configuration = get_configuration(db_vnfd, vdu_id)
elif kdu_name:
- descriptor_configuration = get_kdu_configuration(db_vnfd, kdu_name)
+ descriptor_configuration = get_configuration(db_vnfd, kdu_name)
elif vnf_index:
- descriptor_configuration = get_vnf_configuration(db_vnfd)
+ descriptor_configuration = get_configuration(db_vnfd, db_vnfd["id"])
else:
descriptor_configuration = db_nsd.get("ns-configuration")
desc_params = parse_yaml_strings(db_vnfr.get("additionalParamsForVnf"))
else:
desc_params = parse_yaml_strings(db_nsr.get("additionalParamsForNs"))
-
- if kdu_name and get_kdu_configuration(db_vnfd):
- kdu_action = True if not get_kdu_configuration(db_vnfd)["juju"] else False
+ if kdu_name and get_configuration(db_vnfd, kdu_name):
+ kdu_configuration = get_configuration(db_vnfd, kdu_name)
+ actions = set()
+ for primitive in kdu_configuration.get("initial-config-primitive", []):
+ actions.add(primitive["name"])
+ for primitive in kdu_configuration.get("config-primitive", []):
+ actions.add(primitive["name"])
+            kdu_action = primitive_name in actions
# TODO check if ns is in a proper status
if kdu_name and (primitive_name in ("upgrade", "rollback", "status") or kdu_action):
detailed_status = await asyncio.wait_for(
self.k8scluster_map[kdu["k8scluster-type"]].status_kdu(
cluster_uuid=kdu.get("k8scluster-uuid"),
- kdu_instance=kdu.get("kdu-instance")),
- timeout=timeout_ns_action)
+ kdu_instance=kdu.get("kdu-instance"),
+ vca_id=vca_id,
+ ),
+ timeout=timeout_ns_action
+ )
else:
kdu_instance = kdu.get("kdu-instance") or "{}-{}".format(kdu["kdu-name"], nsr_id)
params = self._map_primitive_params(config_primitive_desc, primitive_params, desc_params)
kdu_instance=kdu_instance,
primitive_name=primitive_name,
params=params, db_dict=db_dict,
- timeout=timeout_ns_action),
- timeout=timeout_ns_action)
+ timeout=timeout_ns_action,
+ vca_id=vca_id,
+ ),
+ timeout=timeout_ns_action
+ )
if detailed_status:
nslcmop_operation_state = 'COMPLETED'
ee_id, vca_type = self._look_for_deployed_vca(nsr_deployed["VCA"], member_vnf_index=vnf_index,
vdu_id=vdu_id, vdu_count_index=vdu_count_index,
ee_descriptor_id=ee_descriptor_id)
- db_nslcmop_notif = {"collection": "nslcmops",
- "filter": {"_id": nslcmop_id},
- "path": "admin.VCA"}
+ for vca_index, vca_deployed in enumerate(db_nsr['_admin']['deployed']['VCA']):
+ if vca_deployed.get("member-vnf-index") == vnf_index:
+ db_dict = {"collection": "nsrs",
+ "filter": {"_id": nsr_id},
+ "path": "_admin.deployed.VCA.{}.".format(vca_index)}
+ break
nslcmop_operation_state, detailed_status = await self._ns_execute_primitive(
ee_id,
primitive=primitive_name,
primitive_params=self._map_primitive_params(config_primitive_desc, primitive_params, desc_params),
timeout=timeout_ns_action,
vca_type=vca_type,
- db_dict=db_nslcmop_notif)
+ db_dict=db_dict,
+ vca_id=vca_id,
+ )
db_nslcmop_update["detailed-status"] = detailed_status
error_description_nslcmop = detailed_status if nslcmop_operation_state == "FAILED" else ""
logging_text = "Task ns={} scale={} ".format(nsr_id, nslcmop_id)
stage = ['', '', '']
+ tasks_dict_info = {}
# ^ stage, step, VIM progress
self.logger.debug(logging_text + "Enter")
# get all needed from database
scale_process = None
old_operational_status = ""
old_config_status = ""
+ nsi_id = None
try:
# wait for any previous tasks in process
step = "Waiting for previous operations to terminate"
step = "Getting vnfr from database"
db_vnfr = self.db.get_one("vnfrs", {"member-vnf-index-ref": vnf_index, "nsr-id-ref": nsr_id})
+ vca_id = self.get_vca_id(db_vnfr, db_nsr)
+
step = "Getting vnfd from database"
db_vnfd = self.db.get_one("vnfds", {"_id": db_vnfr["vnfd-id"]})
+ base_folder = db_vnfd["_admin"]["storage"]
+
step = "Getting scaling-group-descriptor"
scaling_descriptor = find_in_list(
get_scaling_aspect(
admin_scale_index += 1
db_nsr_update["_admin.scaling-group.{}.name".format(admin_scale_index)] = scaling_group
RO_scaling_info = []
+ VCA_scaling_info = []
vdu_scaling_info = {"scaling_group_name": scaling_group, "vdu": []}
if scaling_type == "SCALE_OUT":
if "aspect-delta-details" not in scaling_descriptor:
for delta in deltas:
for vdu_delta in delta["vdu-delta"]:
vdud = get_vdu(db_vnfd, vdu_delta["id"])
- vdu_index = get_vdu_index(db_vnfr, vdu_delta["id"])
+ vdu_index = get_vdur_index(db_vnfr, vdu_delta)
cloud_init_text = self._get_vdu_cloud_init_content(vdud, db_vnfd)
if cloud_init_text:
additional_params = self._get_vdu_additional_params(db_vnfr, vdud["id"]) or {}
if vdu_profile and "max-number-of-instances" in vdu_profile:
max_instance_count = vdu_profile.get("max-number-of-instances", 10)
- deafult_instance_num = get_number_of_instances(db_vnfd, vdud["id"])
+ default_instance_num = get_number_of_instances(db_vnfd, vdud["id"])
nb_scale_op += vdu_delta.get("number-of-instances", 1)
- if nb_scale_op + deafult_instance_num > max_instance_count:
+ if nb_scale_op + default_instance_num > max_instance_count:
raise LcmException(
"reached the limit of {} (max-instance-count) "
"scaling-out operations for the "
vdud["id"]
)
)
+ VCA_scaling_info.append(
+ {
+ "osm_vdu_id": vdu_delta["id"],
+ "member-vnf-index": vnf_index,
+ "type": "create",
+ "vdu_index": vdu_index + x
+ }
+ )
RO_scaling_info.append(
{
"osm_vdu_id": vdu_delta["id"],
deltas = scaling_descriptor.get("aspect-delta-details")["deltas"]
for delta in deltas:
for vdu_delta in delta["vdu-delta"]:
+ vdu_index = get_vdur_index(db_vnfr, vdu_delta)
min_instance_count = 0
vdu_profile = get_vdu_profile(db_vnfd, vdu_delta["id"])
if vdu_profile and "min-number-of-instances" in vdu_profile:
min_instance_count = vdu_profile["min-number-of-instances"]
- deafult_instance_num = get_number_of_instances(db_vnfd, vdu_delta["id"])
+ default_instance_num = get_number_of_instances(db_vnfd, vdu_delta["id"])
nb_scale_op -= vdu_delta.get("number-of-instances", 1)
- if nb_scale_op + deafult_instance_num < min_instance_count:
+ if nb_scale_op + default_instance_num < min_instance_count:
raise LcmException(
"reached the limit of {} (min-instance-count) scaling-in operations for the "
"scaling-group-descriptor '{}'".format(nb_scale_op, scaling_group)
)
RO_scaling_info.append({"osm_vdu_id": vdu_delta["id"], "member-vnf-index": vnf_index,
- "type": "delete", "count": vdu_delta.get("number-of-instances", 1)})
+ "type": "delete", "count": vdu_delta.get("number-of-instances", 1),
+ "vdu_index": vdu_index - 1})
+ for x in range(vdu_delta.get("number-of-instances", 1)):
+ VCA_scaling_info.append(
+ {
+ "osm_vdu_id": vdu_delta["id"],
+ "member-vnf-index": vnf_index,
+ "type": "delete",
+ "vdu_index": vdu_index - 1 - x
+ }
+ )
vdu_scaling_info["vdu-delete"][vdu_delta["id"]] = vdu_delta.get("number-of-instances", 1)
# update VDU_SCALING_INFO with the VDUs to delete ip_addresses
"executing pre-scale scaling-config-action '{}'".format(vnf_config_primitive)
# look for primitive
- for config_primitive in db_vnfd.get("vnf-configuration", {}).get("config-primitive", ()):
+ for config_primitive in (get_configuration(
+ db_vnfd, db_vnfd["id"]
+ ) or {}).get("config-primitive", ()):
if config_primitive["name"] == vnf_config_primitive:
break
else:
vdu_count_index=None,
ee_descriptor_id=ee_descriptor_id)
result, result_detail = await self._ns_execute_primitive(
- ee_id, primitive_name, primitive_params, vca_type)
+ ee_id, primitive_name,
+ primitive_params,
+ vca_type=vca_type,
+ vca_id=vca_id,
+ )
self.logger.debug(logging_text + "vnf_config_primitive={} Done with result {} {}".format(
vnf_config_primitive, result, result_detail))
# Update operationState = COMPLETED | FAILED
db_nsr_update["_admin.scaling-group.{}.nb-scale-op".format(admin_scale_index)] = nb_scale_op
db_nsr_update["_admin.scaling-group.{}.time".format(admin_scale_index)] = time()
+ # SCALE-IN VCA - BEGIN
+ if VCA_scaling_info:
+ step = db_nslcmop_update["detailed-status"] = \
+ "Deleting the execution environments"
+ scale_process = "VCA"
+ for vdu_info in VCA_scaling_info:
+ if vdu_info["type"] == "delete":
+ member_vnf_index = str(vdu_info["member-vnf-index"])
+ self.logger.debug(logging_text + "vdu info: {}".format(vdu_info))
+ vdu_id = vdu_info["osm_vdu_id"]
+ vdu_index = int(vdu_info["vdu_index"])
+ stage[1] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
+ member_vnf_index, vdu_id, vdu_index)
+ stage[2] = step = "Scaling in VCA"
+ self._write_op_status(
+ op_id=nslcmop_id,
+ stage=stage
+ )
+ vca_update = db_nsr["_admin"]["deployed"]["VCA"]
+ config_update = db_nsr["configurationStatus"]
+ for vca_index, vca in enumerate(vca_update):
+                        if vca and vca["member-vnf-index"] == member_vnf_index and \
+ vca["vdu_count_index"] == vdu_index:
+ if vca.get("vdu_id"):
+ config_descriptor = get_configuration(db_vnfd, vca.get("vdu_id"))
+ elif vca.get("kdu_name"):
+ config_descriptor = get_configuration(db_vnfd, vca.get("kdu_name"))
+ else:
+ config_descriptor = get_configuration(db_vnfd, db_vnfd["id"])
+ operation_params = db_nslcmop.get("operationParams") or {}
+ exec_terminate_primitives = (not operation_params.get("skip_terminate_primitives") and
+ vca.get("needed_terminate"))
+ task = asyncio.ensure_future(
+ asyncio.wait_for(
+ self.destroy_N2VC(
+ logging_text,
+ db_nslcmop,
+ vca,
+ config_descriptor,
+ vca_index,
+ destroy_ee=True,
+ exec_primitives=exec_terminate_primitives,
+ scaling_in=True,
+ vca_id=vca_id,
+ ),
+ timeout=self.timeout_charm_delete
+ )
+ )
+ tasks_dict_info[task] = "Terminating VCA {}".format(vca.get("ee_id"))
+                            del vca_update[vca_index]
+                            del config_update[vca_index]
+                            # stop scanning: both lists were just mutated, so the
+                            # running enumerate() indices are no longer valid
+                            break
+ # wait for pending tasks of terminate primitives
+ if tasks_dict_info:
+ self.logger.debug(logging_text +
+ 'Waiting for tasks {}'.format(list(tasks_dict_info.keys())))
+ error_list = await self._wait_for_tasks(logging_text, tasks_dict_info,
+ min(self.timeout_charm_delete,
+ self.timeout_ns_terminate),
+ stage, nslcmop_id)
+ tasks_dict_info.clear()
+ if error_list:
+ raise LcmException("; ".join(error_list))
+
+ db_vca_and_config_update = {
+ "_admin.deployed.VCA": vca_update,
+ "configurationStatus": config_update
+ }
+ self.update_db_2("nsrs", db_nsr["_id"], db_vca_and_config_update)
+ scale_process = None
+ # SCALE-IN VCA - END
+
# SCALE RO - BEGIN
if RO_scaling_info:
scale_process = "RO"
scale_process = None
if db_nsr_update:
self.update_db_2("nsrs", nsr_id, db_nsr_update)
+ # SCALE RO - END
+
+ # SCALE-UP VCA - BEGIN
+ if VCA_scaling_info:
+ step = db_nslcmop_update["detailed-status"] = \
+ "Creating new execution environments"
+ scale_process = "VCA"
+ for vdu_info in VCA_scaling_info:
+ if vdu_info["type"] == "create":
+ member_vnf_index = str(vdu_info["member-vnf-index"])
+ self.logger.debug(logging_text + "vdu info: {}".format(vdu_info))
+ vnfd_id = db_vnfr["vnfd-ref"]
+ vdu_index = int(vdu_info["vdu_index"])
+ deploy_params = {"OSM": get_osm_params(db_vnfr)}
+ if db_vnfr.get("additionalParamsForVnf"):
+ deploy_params.update(parse_yaml_strings(db_vnfr["additionalParamsForVnf"].copy()))
+ descriptor_config = get_configuration(db_vnfd, db_vnfd["id"])
+ if descriptor_config:
+ vdu_id = None
+ vdu_name = None
+ kdu_name = None
+ self._deploy_n2vc(
+ logging_text=logging_text + "member_vnf_index={} ".format(member_vnf_index),
+ db_nsr=db_nsr,
+ db_vnfr=db_vnfr,
+ nslcmop_id=nslcmop_id,
+ nsr_id=nsr_id,
+ nsi_id=nsi_id,
+ vnfd_id=vnfd_id,
+ vdu_id=vdu_id,
+ kdu_name=kdu_name,
+ member_vnf_index=member_vnf_index,
+ vdu_index=vdu_index,
+ vdu_name=vdu_name,
+ deploy_params=deploy_params,
+ descriptor_config=descriptor_config,
+ base_folder=base_folder,
+ task_instantiation_info=tasks_dict_info,
+ stage=stage
+ )
+ vdu_id = vdu_info["osm_vdu_id"]
+ vdur = find_in_list(db_vnfr["vdur"], lambda vdu: vdu["vdu-id-ref"] == vdu_id)
+ descriptor_config = get_configuration(db_vnfd, vdu_id)
+ if vdur.get("additionalParams"):
+ deploy_params_vdu = parse_yaml_strings(vdur["additionalParams"])
+ else:
+ deploy_params_vdu = deploy_params
+ deploy_params_vdu["OSM"] = get_osm_params(db_vnfr, vdu_id, vdu_count_index=vdu_index)
+ if descriptor_config:
+ vdu_name = None
+ kdu_name = None
+ stage[1] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
+ member_vnf_index, vdu_id, vdu_index)
+ stage[2] = step = "Scaling out VCA"
+ self._write_op_status(
+ op_id=nslcmop_id,
+ stage=stage
+ )
+ self._deploy_n2vc(
+ logging_text=logging_text + "member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
+ member_vnf_index, vdu_id, vdu_index),
+ db_nsr=db_nsr,
+ db_vnfr=db_vnfr,
+ nslcmop_id=nslcmop_id,
+ nsr_id=nsr_id,
+ nsi_id=nsi_id,
+ vnfd_id=vnfd_id,
+ vdu_id=vdu_id,
+ kdu_name=kdu_name,
+ member_vnf_index=member_vnf_index,
+ vdu_index=vdu_index,
+ vdu_name=vdu_name,
+ deploy_params=deploy_params_vdu,
+ descriptor_config=descriptor_config,
+ base_folder=base_folder,
+ task_instantiation_info=tasks_dict_info,
+ stage=stage
+ )
+ # SCALE-UP VCA - END
+ scale_process = None
# POST-SCALE BEGIN
# execute primitive service POST-SCALING
vnfr_params.update(db_vnfr["additionalParamsForVnf"])
# look for primitive
- for config_primitive in db_vnfd.get("vnf-configuration", {}).get("config-primitive", ()):
+ for config_primitive in (
+ get_configuration(db_vnfd, db_vnfd["id"]) or {}
+ ).get("config-primitive", ()):
if config_primitive["name"] == vnf_config_primitive:
break
else:
vdu_count_index=None,
ee_descriptor_id=ee_descriptor_id)
result, result_detail = await self._ns_execute_primitive(
- ee_id, primitive_name, primitive_params, vca_type)
+ ee_id,
+ primitive_name,
+ primitive_params,
+ vca_type=vca_type,
+ vca_id=vca_id,
+ )
self.logger.debug(logging_text + "vnf_config_primitive={} Done with result {} {}".format(
vnf_config_primitive, result, result_detail))
# Update operationState = COMPLETED | FAILED
self.logger.critical(logging_text + "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True)
finally:
self._write_ns_status(nsr_id=nsr_id, ns_state=None, current_operation="IDLE", current_operation_id=None)
+ if tasks_dict_info:
+ stage[1] = "Waiting for instantiate pending tasks."
+ self.logger.debug(logging_text + stage[1])
+ exc = await self._wait_for_tasks(logging_text, tasks_dict_info, self.timeout_ns_deploy,
+ stage, nslcmop_id, nsr_id=nsr_id)
if exc:
db_nslcmop_update["detailed-status"] = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
nslcmop_operation_state = "FAILED"