##
import asyncio
+from typing import Any, Dict
import yaml
import logging
import logging.handlers
)
from osm_lcm import ROclient
+from osm_lcm.data_utils.nsr import (
+ get_deployed_kdu,
+ get_deployed_vca,
+ get_deployed_vca_list,
+ get_nsd,
+)
+from osm_lcm.data_utils.vca import (
+ DeployedComponent,
+ DeployedK8sResource,
+ DeployedVCA,
+ EELevel,
+ Relation,
+ EERelation,
+ safe_get_ee_relation,
+)
from osm_lcm.ng_ro import NgRoClient, NgRoException
from osm_lcm.lcm_utils import (
LcmException,
get_iterable,
populate_dict,
)
-from osm_lcm.data_utils.nsd import get_vnf_profiles
+from osm_lcm.data_utils.nsd import (
+ get_ns_configuration_relation_list,
+ get_vnf_profile,
+ get_vnf_profiles,
+)
from osm_lcm.data_utils.vnfd import (
+ get_relation_list,
get_vdu_list,
get_vdu_profile,
get_ee_sorted_initial_config_primitive_list,
get_scaling_aspect,
get_number_of_instances,
get_juju_ee_ref,
+ get_kdu_resource_profile,
)
from osm_lcm.data_utils.list_utils import find_in_list
-from osm_lcm.data_utils.vnfr import get_osm_params, get_vdur_index
+from osm_lcm.data_utils.vnfr import get_osm_params, get_vdur_index, get_kdur
from osm_lcm.data_utils.dict_utils import parse_yaml_strings
from osm_lcm.data_utils.database.vim_account import VimAccountDB
+from n2vc.definitions import RelationEndpoint
from n2vc.k8s_helm_conn import K8sHelmConnector
from n2vc.k8s_helm3_conn import K8sHelm3Connector
from n2vc.k8s_juju_conn import K8sJujuConnector
try:
if vdu.get("cloud-init-file"):
base_folder = vnfd["_admin"]["storage"]
- cloud_init_file = "{}/{}/cloud_init/{}".format(
- base_folder["folder"],
- base_folder["pkg-dir"],
- vdu["cloud-init-file"],
- )
+ if base_folder["pkg-dir"]:
+ cloud_init_file = "{}/{}/cloud_init/{}".format(
+ base_folder["folder"],
+ base_folder["pkg-dir"],
+ vdu["cloud-init-file"],
+ )
+ else:
+ cloud_init_file = "{}/Scripts/cloud_init/{}".format(
+ base_folder["folder"],
+ vdu["cloud-init-file"],
+ )
with self.fs.file_open(cloud_init_file, "r") as ci_file:
cloud_init_content = ci_file.read()
elif vdu.get("cloud-init"):
# read file and put content at target.cloul_init_content. Avoid ng_ro to use shared package system
if vdur["cloud-init"] not in target["cloud_init_content"]:
base_folder = vnfd["_admin"]["storage"]
- cloud_init_file = "{}/{}/cloud_init/{}".format(
- base_folder["folder"],
- base_folder["pkg-dir"],
- vdud.get("cloud-init-file"),
- )
+ if base_folder["pkg-dir"]:
+ cloud_init_file = "{}/{}/cloud_init/{}".format(
+ base_folder["folder"],
+ base_folder["pkg-dir"],
+ vdud.get("cloud-init-file"),
+ )
+ else:
+ cloud_init_file = "{}/Scripts/cloud_init/{}".format(
+ base_folder["folder"],
+ vdud.get("cloud-init-file"),
+ )
with self.fs.file_open(cloud_init_file, "r") as ci_file:
target["cloud_init_content"][
vdur["cloud-init"]
raise LcmException("Configuration aborted because dependent charm/s timeout")
    def get_vca_id(self, db_vnfr: dict, db_nsr: dict):
        """Return the VCA id to be used for a VNF (or for the whole NS).

        Preference order: the "vca-id" stored in the VNF record; otherwise the
        VCA associated with the VIM account used to instantiate the NS.

        :param db_vnfr: VNF record, may be None/empty
        :param db_nsr: NS record, may be None/empty
        :return: the VCA id, or None when none is configured
        """
        vca_id = None
        if db_vnfr:
            vca_id = deep_get(db_vnfr, ("vca-id",))
        elif db_nsr:
            # Fall back to the VCA attached to the NS's VIM account.
            vim_account_id = deep_get(db_nsr, ("instantiate_params", "vimAccountId"))
            # NOTE(review): assumes get_vim_account_with_id never returns None
            # here -- confirm, otherwise the .get below raises AttributeError.
            vca_id = VimAccountDB.get_vim_account_with_id(vim_account_id).get("vca")
        return vca_id
async def instantiate_N2VC(
self,
namespace = "{nsi}.{ns}".format(nsi=nsi_id if nsi_id else "", ns=nsr_id)
+ if vca_type == "native_charm":
+ index_number = 0
+ else:
+ index_number = vdu_index or 0
+
if vnfr_id:
element_type = "VNF"
element_under_configuration = vnfr_id
- namespace += ".{}-{}".format(vnfr_id, vdu_index or 0)
+ namespace += ".{}-{}".format(vnfr_id, index_number)
if vdu_id:
- namespace += ".{}-{}".format(vdu_id, vdu_index or 0)
+ namespace += ".{}-{}".format(vdu_id, index_number)
element_type = "VDU"
- element_under_configuration = "{}-{}".format(vdu_id, vdu_index or 0)
+ element_under_configuration = "{}-{}".format(vdu_id, index_number)
osm_config["osm"]["vdu_id"] = vdu_id
elif kdu_name:
- namespace += ".{}.{}".format(kdu_name, vdu_index or 0)
+ namespace += ".{}".format(kdu_name)
element_type = "KDU"
element_under_configuration = kdu_name
osm_config["osm"]["kdu_name"] = kdu_name
# Get artifact path
- artifact_path = "{}/{}/{}/{}".format(
- base_folder["folder"],
- base_folder["pkg-dir"],
- "charms"
- if vca_type in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
- else "helm-charts",
- vca_name,
- )
+ if base_folder["pkg-dir"]:
+ artifact_path = "{}/{}/{}/{}".format(
+ base_folder["folder"],
+ base_folder["pkg-dir"],
+ "charms"
+ if vca_type in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
+ else "helm-charts",
+ vca_name,
+ )
+ else:
+ artifact_path = "{}/Scripts/{}/{}/".format(
+ base_folder["folder"],
+ "charms"
+ if vca_type in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
+ else "helm-charts",
+ vca_name,
+ )
self.logger.debug("Artifact path > {}".format(artifact_path))
config=config,
num_units=num_units,
vca_id=vca_id,
+ vca_type=vca_type,
)
# write in db flag of configuration_sw already installed
await self._add_vca_relations(
logging_text=logging_text,
nsr_id=nsr_id,
- vca_index=vca_index,
- vca_id=vca_id,
vca_type=vca_type,
+ vca_index=vca_index,
)
# if SSH access is required, then get execution environment SSH public
params_dict=primitive_params_,
db_dict=db_dict,
vca_id=vca_id,
+ vca_type=vca_type,
)
# Once some primitive has been exec, check and write at db if it needs to exec terminated primitives
if check_if_terminated_needed:
deploy_params_vdu["OSM"] = get_osm_params(
db_vnfr, vdu_id, vdu_count_index=0
)
- vdud_count = get_vdu_profile(vnfd, vdu_id).get(
- "max-number-of-instances", 1
- )
+ vdud_count = get_number_of_instances(vnfd, vdu_id)
self.logger.debug("VDUD > {}".format(vdud))
self.logger.debug(
self.logger.debug(logging_text + "Exit")
self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_instantiate")
+ def _get_vnfd(self, vnfd_id: str, cached_vnfds: Dict[str, Any]):
+ if vnfd_id not in cached_vnfds:
+ cached_vnfds[vnfd_id] = self.db.get_one("vnfds", {"id": vnfd_id})
+ return cached_vnfds[vnfd_id]
+
+ def _get_vnfr(self, nsr_id: str, vnf_profile_id: str, cached_vnfrs: Dict[str, Any]):
+ if vnf_profile_id not in cached_vnfrs:
+ cached_vnfrs[vnf_profile_id] = self.db.get_one(
+ "vnfrs",
+ {
+ "member-vnf-index-ref": vnf_profile_id,
+ "nsr-id-ref": nsr_id,
+ },
+ )
+ return cached_vnfrs[vnf_profile_id]
+
    def _is_deployed_vca_in_relation(
        self, vca: DeployedVCA, relation: Relation
    ) -> bool:
        """Tell whether *vca* is one of the two endpoints of *relation*.

        KDU-resource endpoints are skipped: they are not backed by a VCA and
        can therefore never match a deployed VCA.

        :param vca: deployed VCA under inspection
        :param relation: candidate relation (provider/requirer endpoints)
        :return: True when vca matches either endpoint
        """
        found = False
        for endpoint in (relation.provider, relation.requirer):
            # NOTE(review): subscript access here vs attribute access below on
            # the same endpoint object -- assumes EERelation supports both;
            # confirm against osm_lcm.data_utils.vca.
            if endpoint["kdu-resource-profile-id"]:
                continue
            # A match requires VNF profile, VDU profile and execution
            # environment reference to all coincide.
            found = (
                vca.vnf_profile_id == endpoint.vnf_profile_id
                and vca.vdu_profile_id == endpoint.vdu_profile_id
                and vca.execution_environment_ref == endpoint.execution_environment_ref
            )
            if found:
                break
        return found
+
+ def _update_ee_relation_data_with_implicit_data(
+ self, nsr_id, nsd, ee_relation_data, cached_vnfds, vnf_profile_id: str = None
+ ):
+ ee_relation_data = safe_get_ee_relation(
+ nsr_id, ee_relation_data, vnf_profile_id=vnf_profile_id
+ )
+ ee_relation_level = EELevel.get_level(ee_relation_data)
+ if (ee_relation_level in (EELevel.VNF, EELevel.VDU)) and not ee_relation_data[
+ "execution-environment-ref"
+ ]:
+ vnf_profile = get_vnf_profile(nsd, ee_relation_data["vnf-profile-id"])
+ vnfd_id = vnf_profile["vnfd-id"]
+ db_vnfd = self._get_vnfd(vnfd_id, cached_vnfds)
+ entity_id = (
+ vnfd_id
+ if ee_relation_level == EELevel.VNF
+ else ee_relation_data["vdu-profile-id"]
+ )
+ ee = get_juju_ee_ref(db_vnfd, entity_id)
+ if not ee:
+ raise Exception(
+ f"not execution environments found for ee_relation {ee_relation_data}"
+ )
+ ee_relation_data["execution-environment-ref"] = ee["id"]
+ return ee_relation_data
+
+ def _get_ns_relations(
+ self,
+ nsr_id: str,
+ nsd: Dict[str, Any],
+ vca: DeployedVCA,
+ cached_vnfds: Dict[str, Any],
+ ):
+ relations = []
+ db_ns_relations = get_ns_configuration_relation_list(nsd)
+ for r in db_ns_relations:
+ relation_provider = self._update_ee_relation_data_with_implicit_data(
+ nsr_id, nsd, r["provider"], cached_vnfds
+ )
+ relation_requirer = self._update_ee_relation_data_with_implicit_data(
+ nsr_id, nsd, r["requirer"], cached_vnfds
+ )
+ provider = EERelation(relation_provider)
+ requirer = EERelation(relation_requirer)
+ relation = Relation(r["name"], provider, requirer)
+ vca_in_relation = self._is_deployed_vca_in_relation(vca, relation)
+ if vca_in_relation:
+ relations.append(relation)
+ return relations
+
+ def _get_vnf_relations(
+ self,
+ nsr_id: str,
+ nsd: Dict[str, Any],
+ vca: DeployedVCA,
+ cached_vnfds: Dict[str, Any],
+ ):
+ relations = []
+ vnf_profile = get_vnf_profile(nsd, vca.vnf_profile_id)
+ vnf_profile_id = vnf_profile["id"]
+ vnfd_id = vnf_profile["vnfd-id"]
+ db_vnfd = self._get_vnfd(vnfd_id, cached_vnfds)
+ db_vnf_relations = get_relation_list(db_vnfd, vnfd_id)
+ for r in db_vnf_relations:
+ relation_provider = self._update_ee_relation_data_with_implicit_data(
+ nsr_id, nsd, r["provider"], cached_vnfds, vnf_profile_id=vnf_profile_id
+ )
+ relation_requirer = self._update_ee_relation_data_with_implicit_data(
+ nsr_id, nsd, r["requirer"], cached_vnfds, vnf_profile_id=vnf_profile_id
+ )
+ provider = EERelation(relation_provider)
+ requirer = EERelation(relation_requirer)
+ relation = Relation(r["name"], provider, requirer)
+ vca_in_relation = self._is_deployed_vca_in_relation(vca, relation)
+ if vca_in_relation:
+ relations.append(relation)
+ return relations
+
+ def _get_kdu_resource_data(
+ self,
+ ee_relation: EERelation,
+ db_nsr: Dict[str, Any],
+ cached_vnfds: Dict[str, Any],
+ ) -> DeployedK8sResource:
+ nsd = get_nsd(db_nsr)
+ vnf_profiles = get_vnf_profiles(nsd)
+ vnfd_id = find_in_list(
+ vnf_profiles,
+ lambda vnf_profile: vnf_profile["id"] == ee_relation.vnf_profile_id,
+ )["vnfd-id"]
+ db_vnfd = self._get_vnfd(vnfd_id, cached_vnfds)
+ kdu_resource_profile = get_kdu_resource_profile(
+ db_vnfd, ee_relation.kdu_resource_profile_id
+ )
+ kdu_name = kdu_resource_profile["kdu-name"]
+ deployed_kdu, _ = get_deployed_kdu(
+ db_nsr.get("_admin", ()).get("deployed", ()),
+ kdu_name,
+ ee_relation.vnf_profile_id,
+ )
+ deployed_kdu.update({"resource-name": kdu_resource_profile["resource-name"]})
+ return deployed_kdu
+
    def _get_deployed_component(
        self,
        ee_relation: EERelation,
        db_nsr: Dict[str, Any],
        cached_vnfds: Dict[str, Any],
    ) -> DeployedComponent:
        """Resolve the deployed component (VCA or K8s resource) behind an endpoint.

        Dispatches on the endpoint's level (NS / VNF / VDU / KDU) and looks up
        the matching deployed entity inside the NS record.

        :param ee_relation: relation endpoint to resolve
        :param db_nsr: NS record
        :param cached_vnfds: VNFD cache, consulted for KDU endpoints
        :return: the deployed component, or None when nothing matches
        """
        nsr_id = db_nsr["_id"]
        deployed_component = None
        ee_level = EELevel.get_level(ee_relation)
        if ee_level == EELevel.NS:
            # NS-level charm: neither vdu_id nor member-vnf-index are set.
            vca = get_deployed_vca(db_nsr, {"vdu_id": None, "member-vnf-index": None})
            if vca:
                deployed_component = DeployedVCA(nsr_id, vca)
        elif ee_level == EELevel.VNF:
            # VNF-level charm: match by member-vnf-index and EE descriptor.
            vca = get_deployed_vca(
                db_nsr,
                {
                    "vdu_id": None,
                    "member-vnf-index": ee_relation.vnf_profile_id,
                    "ee_descriptor_id": ee_relation.execution_environment_ref,
                },
            )
            if vca:
                deployed_component = DeployedVCA(nsr_id, vca)
        elif ee_level == EELevel.VDU:
            # VDU-level charm: additionally match the VDU profile.
            vca = get_deployed_vca(
                db_nsr,
                {
                    "vdu_id": ee_relation.vdu_profile_id,
                    "member-vnf-index": ee_relation.vnf_profile_id,
                    "ee_descriptor_id": ee_relation.execution_environment_ref,
                },
            )
            if vca:
                deployed_component = DeployedVCA(nsr_id, vca)
        elif ee_level == EELevel.KDU:
            # KDU endpoint: resolve a deployed K8s resource instead of a VCA.
            kdu_resource_data = self._get_kdu_resource_data(
                ee_relation, db_nsr, cached_vnfds
            )
            if kdu_resource_data:
                deployed_component = DeployedK8sResource(kdu_resource_data)
        return deployed_component
+
    async def _add_relation(
        self,
        relation: Relation,
        vca_type: str,
        db_nsr: Dict[str, Any],
        cached_vnfds: Dict[str, Any],
        cached_vnfrs: Dict[str, Any],
    ) -> bool:
        """Try to establish *relation* between its two deployed endpoints.

        The relation is only added when both endpoints are deployed and their
        configuration software is already installed; otherwise the caller is
        expected to retry later.

        :param relation: relation (name + provider/requirer endpoints)
        :param vca_type: key into self.vca_map selecting the VCA connector
        :param db_nsr: NS record
        :param cached_vnfds: VNFD cache (mutated by lookups)
        :param cached_vnfrs: VNFR cache (mutated by lookups)
        :return: True when the relation was added, False when not (yet) possible
        """
        deployed_provider = self._get_deployed_component(
            relation.provider, db_nsr, cached_vnfds
        )
        deployed_requirer = self._get_deployed_component(
            relation.requirer, db_nsr, cached_vnfds
        )
        if (
            deployed_provider
            and deployed_requirer
            and deployed_provider.config_sw_installed
            and deployed_requirer.config_sw_installed
        ):
            # NS-level endpoints carry no vnf_profile_id, hence no VNFR lookup.
            provider_db_vnfr = (
                self._get_vnfr(
                    relation.provider.nsr_id,
                    relation.provider.vnf_profile_id,
                    cached_vnfrs,
                )
                if relation.provider.vnf_profile_id
                else None
            )
            requirer_db_vnfr = (
                self._get_vnfr(
                    relation.requirer.nsr_id,
                    relation.requirer.vnf_profile_id,
                    cached_vnfrs,
                )
                if relation.requirer.vnf_profile_id
                else None
            )
            provider_vca_id = self.get_vca_id(provider_db_vnfr, db_nsr)
            requirer_vca_id = self.get_vca_id(requirer_db_vnfr, db_nsr)
            provider_relation_endpoint = RelationEndpoint(
                deployed_provider.ee_id,
                provider_vca_id,
                relation.provider.endpoint,
            )
            requirer_relation_endpoint = RelationEndpoint(
                deployed_requirer.ee_id,
                requirer_vca_id,
                relation.requirer.endpoint,
            )
            # Delegate the actual juju relation creation to the VCA connector.
            await self.vca_map[vca_type].add_relation(
                provider=provider_relation_endpoint,
                requirer=requirer_relation_endpoint,
            )
            # remove entry from relations list
            return True
        return False
+
async def _add_vca_relations(
self,
logging_text,
nsr_id,
+ vca_type: str,
vca_index: int,
timeout: int = 3600,
- vca_type: str = None,
- vca_id: str = None,
) -> bool:
# steps:
# 3. add relations
try:
- vca_type = vca_type or "lxc_proxy_charm"
-
# STEP 1: find all relations for this VCA
# read nsr record
db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
- nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
+ nsd = get_nsd(db_nsr)
# this VCA data
- my_vca = deep_get(db_nsr, ("_admin", "deployed", "VCA"))[vca_index]
-
- # read all ns-configuration relations
- ns_relations = list()
- db_ns_relations = deep_get(nsd, ("ns-configuration", "relation"))
- if db_ns_relations:
- for r in db_ns_relations:
- # check if this VCA is in the relation
- if my_vca.get("member-vnf-index") in (
- r.get("entities")[0].get("id"),
- r.get("entities")[1].get("id"),
- ):
- ns_relations.append(r)
-
- # read all vnf-configuration relations
- vnf_relations = list()
- db_vnfd_list = db_nsr.get("vnfd-id")
- if db_vnfd_list:
- for vnfd in db_vnfd_list:
- db_vnf_relations = None
- db_vnfd = self.db.get_one("vnfds", {"_id": vnfd})
- db_vnf_configuration = get_configuration(db_vnfd, db_vnfd["id"])
- if db_vnf_configuration:
- db_vnf_relations = db_vnf_configuration.get("relation", [])
- if db_vnf_relations:
- for r in db_vnf_relations:
- # check if this VCA is in the relation
- if my_vca.get("vdu_id") in (
- r.get("entities")[0].get("id"),
- r.get("entities")[1].get("id"),
- ):
- vnf_relations.append(r)
+ deployed_vca_dict = get_deployed_vca_list(db_nsr)[vca_index]
+ my_vca = DeployedVCA(nsr_id, deployed_vca_dict)
+
+ cached_vnfds = {}
+ cached_vnfrs = {}
+ relations = []
+ relations.extend(self._get_ns_relations(nsr_id, nsd, my_vca, cached_vnfds))
+ relations.extend(self._get_vnf_relations(nsr_id, nsd, my_vca, cached_vnfds))
# if no relations, terminate
- if not ns_relations and not vnf_relations:
+ if not relations:
self.logger.debug(logging_text + " No relations")
return True
- self.logger.debug(
- logging_text
- + " adding relations\n {}\n {}".format(
- ns_relations, vnf_relations
- )
- )
+ self.logger.debug(logging_text + " adding relations {}".format(relations))
# add all relations
start = time()
self.logger.error(logging_text + " : timeout adding relations")
return False
- # reload nsr from database (we need to update record: _admin.deloyed.VCA)
+ # reload nsr from database (we need to update record: _admin.deployed.VCA)
db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
- # for each defined NS relation, find the VCA's related
- for r in ns_relations.copy():
- from_vca_ee_id = None
- to_vca_ee_id = None
- from_vca_endpoint = None
- to_vca_endpoint = None
- vca_list = deep_get(db_nsr, ("_admin", "deployed", "VCA"))
- for vca in vca_list:
- if vca.get("member-vnf-index") == r.get("entities")[0].get(
- "id"
- ) and vca.get("config_sw_installed"):
- from_vca_ee_id = vca.get("ee_id")
- from_vca_endpoint = r.get("entities")[0].get("endpoint")
- if vca.get("member-vnf-index") == r.get("entities")[1].get(
- "id"
- ) and vca.get("config_sw_installed"):
- to_vca_ee_id = vca.get("ee_id")
- to_vca_endpoint = r.get("entities")[1].get("endpoint")
- if from_vca_ee_id and to_vca_ee_id:
- # add relation
- await self.vca_map[vca_type].add_relation(
- ee_id_1=from_vca_ee_id,
- ee_id_2=to_vca_ee_id,
- endpoint_1=from_vca_endpoint,
- endpoint_2=to_vca_endpoint,
- vca_id=vca_id,
- )
- # remove entry from relations list
- ns_relations.remove(r)
- else:
- # check failed peers
- try:
- vca_status_list = db_nsr.get("configurationStatus")
- if vca_status_list:
- for i in range(len(vca_list)):
- vca = vca_list[i]
- vca_status = vca_status_list[i]
- if vca.get("member-vnf-index") == r.get("entities")[
- 0
- ].get("id"):
- if vca_status.get("status") == "BROKEN":
- # peer broken: remove relation from list
- ns_relations.remove(r)
- if vca.get("member-vnf-index") == r.get("entities")[
- 1
- ].get("id"):
- if vca_status.get("status") == "BROKEN":
- # peer broken: remove relation from list
- ns_relations.remove(r)
- except Exception:
- # ignore
- pass
-
- # for each defined VNF relation, find the VCA's related
- for r in vnf_relations.copy():
- from_vca_ee_id = None
- to_vca_ee_id = None
- from_vca_endpoint = None
- to_vca_endpoint = None
- vca_list = deep_get(db_nsr, ("_admin", "deployed", "VCA"))
- for vca in vca_list:
- key_to_check = "vdu_id"
- if vca.get("vdu_id") is None:
- key_to_check = "vnfd_id"
- if vca.get(key_to_check) == r.get("entities")[0].get(
- "id"
- ) and vca.get("config_sw_installed"):
- from_vca_ee_id = vca.get("ee_id")
- from_vca_endpoint = r.get("entities")[0].get("endpoint")
- if vca.get(key_to_check) == r.get("entities")[1].get(
- "id"
- ) and vca.get("config_sw_installed"):
- to_vca_ee_id = vca.get("ee_id")
- to_vca_endpoint = r.get("entities")[1].get("endpoint")
- if from_vca_ee_id and to_vca_ee_id:
- # add relation
- await self.vca_map[vca_type].add_relation(
- ee_id_1=from_vca_ee_id,
- ee_id_2=to_vca_ee_id,
- endpoint_1=from_vca_endpoint,
- endpoint_2=to_vca_endpoint,
- vca_id=vca_id,
- )
- # remove entry from relations list
- vnf_relations.remove(r)
- else:
- # check failed peers
- try:
- vca_status_list = db_nsr.get("configurationStatus")
- if vca_status_list:
- for i in range(len(vca_list)):
- vca = vca_list[i]
- vca_status = vca_status_list[i]
- if vca.get("vdu_id") == r.get("entities")[0].get(
- "id"
- ):
- if vca_status.get("status") == "BROKEN":
- # peer broken: remove relation from list
- vnf_relations.remove(r)
- if vca.get("vdu_id") == r.get("entities")[1].get(
- "id"
- ):
- if vca_status.get("status") == "BROKEN":
- # peer broken: remove relation from list
- vnf_relations.remove(r)
- except Exception:
- # ignore
- pass
-
- # wait for next try
- await asyncio.sleep(5.0)
+ # for each relation, find the VCA's related
+ for relation in relations.copy():
+ added = await self._add_relation(
+ relation,
+ vca_type,
+ db_nsr,
+ cached_vnfds,
+ cached_vnfrs,
+ )
+ if added:
+ relations.remove(relation)
- if not ns_relations and not vnf_relations:
+ if not relations:
self.logger.debug("Relations added")
break
+ await asyncio.sleep(5.0)
return True
"path": nsr_db_path,
}
- kdu_instance = self.k8scluster_map[
- k8sclustertype
- ].generate_kdu_instance_name(
- db_dict=db_dict_install,
- kdu_model=k8s_instance_info["kdu-model"],
- kdu_name=k8s_instance_info["kdu-name"],
- )
+ if k8s_instance_info.get("kdu-deployment-name"):
+ kdu_instance = k8s_instance_info.get("kdu-deployment-name")
+ else:
+ kdu_instance = self.k8scluster_map[
+ k8sclustertype
+ ].generate_kdu_instance_name(
+ db_dict=db_dict_install,
+ kdu_model=k8s_instance_info["kdu-model"],
+ kdu_name=k8s_instance_info["kdu-name"],
+ )
self.update_db_2(
"nsrs", nsr_id, {nsr_db_path + ".kdu-instance": kdu_instance}
)
if kdud["name"] == kdur["kdu-name"]
)
namespace = kdur.get("k8s-namespace")
+ kdu_deployment_name = kdur.get("kdu-deployment-name")
if kdur.get("helm-chart"):
kdumodel = kdur["helm-chart"]
# Default version: helm3, if helm-version is v2 assign v2
db_vnfds, lambda vnfd: vnfd["_id"] == vnfd_id
)
storage = deep_get(vnfd_with_id, ("_admin", "storage"))
- if storage and storage.get(
- "pkg-dir"
- ): # may be not present if vnfd has not artifacts
+ if storage: # may be not present if vnfd has not artifacts
# path format: /vnfdid/pkkdir/helm-charts|juju-bundles/kdumodel
- filename = "{}/{}/{}s/{}".format(
- storage["folder"],
- storage["pkg-dir"],
- k8sclustertype,
- kdumodel,
- )
+ if storage["pkg-dir"]:
+ filename = "{}/{}/{}s/{}".format(
+ storage["folder"],
+ storage["pkg-dir"],
+ k8sclustertype,
+ kdumodel,
+ )
+ else:
+ filename = "{}/Scripts/{}s/{}".format(
+ storage["folder"],
+ k8sclustertype,
+ kdumodel,
+ )
if self.fs.file_exists(
filename, mode="file"
) or self.fs.file_exists(filename, mode="dir"):
"kdu-name": kdur["kdu-name"],
"kdu-model": kdumodel,
"namespace": namespace,
+ "kdu-deployment-name": kdu_deployment_name,
}
db_path = "_admin.deployed.K8s.{}".format(index)
db_nsr_update[db_path] = k8s_instance_info
)
if "execution-environment-list" in descriptor_config:
ee_list = descriptor_config.get("execution-environment-list", [])
+ elif "juju" in descriptor_config:
+ ee_list = [descriptor_config] # ns charms
else: # other types as script are not supported
ee_list = []
await self.vca_map[vca_type].delete_execution_environment(
vca_deployed["ee_id"],
scaling_in=scaling_in,
+ vca_type=vca_type,
vca_id=vca_id,
)
for vca_index, vca in enumerate(get_iterable(nsr_deployed, "VCA")):
config_descriptor = None
-
- vca_id = self.get_vca_id(db_vnfrs_dict[vca["member-vnf-index"]], db_nsr)
+ vca_member_vnf_index = vca.get("member-vnf-index")
+ vca_id = self.get_vca_id(
+ db_vnfrs_dict.get(vca_member_vnf_index)
+ if vca_member_vnf_index
+ else None,
+ db_nsr,
+ )
if not vca or not vca.get("ee_id"):
continue
if not vca.get("member-vnf-index"):
total_timeout=self.timeout_primitive,
db_dict=db_dict,
vca_id=vca_id,
+ vca_type=vca_type,
),
timeout=timeout or self.timeout_primitive,
)
)
step = "Getting vnfd from database"
db_vnfd = self.db.get_one("vnfds", {"_id": db_vnfr["vnfd-id"]})
+
+ # Sync filesystem before running a primitive
+ self.fs.sync(db_vnfr["vnfd-id"])
else:
step = "Getting nsd from database"
db_nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
self.update_db_2("nsrs", nsr_id, db_nsr_update)
nsr_deployed = db_nsr["_admin"].get("deployed")
- #######
- nsr_deployed = db_nsr["_admin"].get("deployed")
- vnf_index = db_nslcmop["operationParams"].get("member_vnf_index")
- # vdu_id = db_nslcmop["operationParams"].get("vdu_id")
- # vdu_count_index = db_nslcmop["operationParams"].get("vdu_count_index")
- # vdu_name = db_nslcmop["operationParams"].get("vdu_name")
- #######
-
vnf_index = db_nslcmop["operationParams"]["scaleVnfData"][
"scaleByStepData"
]["member-vnf-index"]
db_nsr_update[
"_admin.scaling-group.{}.name".format(admin_scale_index)
] = scaling_group
- RO_scaling_info = []
- VCA_scaling_info = []
- vdu_scaling_info = {"scaling_group_name": scaling_group, "vdu": []}
+
+ vca_scaling_info = []
+ scaling_info = {"scaling_group_name": scaling_group, "vdu": [], "kdu": []}
if scaling_type == "SCALE_OUT":
if "aspect-delta-details" not in scaling_descriptor:
raise LcmException(
# count if max-instance-count is reached
deltas = scaling_descriptor.get("aspect-delta-details")["deltas"]
- vdu_scaling_info["scaling_direction"] = "OUT"
- vdu_scaling_info["vdu-create"] = {}
+ scaling_info["scaling_direction"] = "OUT"
+ scaling_info["vdu-create"] = {}
+ scaling_info["kdu-create"] = {}
for delta in deltas:
- for vdu_delta in delta["vdu-delta"]:
+ for vdu_delta in delta.get("vdu-delta", {}):
vdud = get_vdu(db_vnfd, vdu_delta["id"])
- vdu_index = get_vdur_index(db_vnfr, vdu_delta)
+ # vdu_index also provides the number of instance of the targeted vdu
+ vdu_count = vdu_index = get_vdur_index(db_vnfr, vdu_delta)
cloud_init_text = self._get_vdu_cloud_init_content(
vdud, db_vnfd
)
default_instance_num = get_number_of_instances(
db_vnfd, vdud["id"]
)
+ instances_number = vdu_delta.get("number-of-instances", 1)
+ nb_scale_op += instances_number
+
+ new_instance_count = nb_scale_op + default_instance_num
+ # Control if new count is over max and vdu count is less than max.
+ # Then assign new instance count
+ if new_instance_count > max_instance_count > vdu_count:
+ instances_number = new_instance_count - max_instance_count
+ else:
+ instances_number = instances_number
- nb_scale_op += vdu_delta.get("number-of-instances", 1)
-
- if nb_scale_op + default_instance_num > max_instance_count:
+ if new_instance_count > max_instance_count:
raise LcmException(
"reached the limit of {} (max-instance-count) "
"scaling-out operations for the "
vdud["id"],
)
)
- VCA_scaling_info.append(
+ vca_scaling_info.append(
{
"osm_vdu_id": vdu_delta["id"],
"member-vnf-index": vnf_index,
"vdu_index": vdu_index + x,
}
)
- RO_scaling_info.append(
+ scaling_info["vdu-create"][vdu_delta["id"]] = instances_number
+ for kdu_delta in delta.get("kdu-resource-delta", {}):
+ kdu_profile = get_kdu_resource_profile(db_vnfd, kdu_delta["id"])
+ kdu_name = kdu_profile["kdu-name"]
+ resource_name = kdu_profile["resource-name"]
+
+ # Might have different kdus in the same delta
+ # Should have list for each kdu
+ if not scaling_info["kdu-create"].get(kdu_name, None):
+ scaling_info["kdu-create"][kdu_name] = []
+
+ kdur = get_kdur(db_vnfr, kdu_name)
+ if kdur.get("helm-chart"):
+ k8s_cluster_type = "helm-chart-v3"
+ self.logger.debug("kdur: {}".format(kdur))
+ if (
+ kdur.get("helm-version")
+ and kdur.get("helm-version") == "v2"
+ ):
+ k8s_cluster_type = "helm-chart"
+ raise NotImplementedError
+ elif kdur.get("juju-bundle"):
+ k8s_cluster_type = "juju-bundle"
+ else:
+ raise LcmException(
+ "kdu type for kdu='{}.{}' is neither helm-chart nor "
+ "juju-bundle. Maybe an old NBI version is running".format(
+ db_vnfr["member-vnf-index-ref"], kdu_name
+ )
+ )
+
+ max_instance_count = 10
+ if kdu_profile and "max-number-of-instances" in kdu_profile:
+ max_instance_count = kdu_profile.get(
+ "max-number-of-instances", 10
+ )
+
+ nb_scale_op += kdu_delta.get("number-of-instances", 1)
+ deployed_kdu, _ = get_deployed_kdu(
+ nsr_deployed, kdu_name, vnf_index
+ )
+ if deployed_kdu is None:
+ raise LcmException(
+ "KDU '{}' for vnf '{}' not deployed".format(
+ kdu_name, vnf_index
+ )
+ )
+ kdu_instance = deployed_kdu.get("kdu-instance")
+ instance_num = await self.k8scluster_map[
+ k8s_cluster_type
+ ].get_scale_count(resource_name, kdu_instance, vca_id=vca_id)
+ kdu_replica_count = instance_num + kdu_delta.get(
+ "number-of-instances", 1
+ )
+
+ # Control if new count is over max and instance_num is less than max.
+ # Then assign max instance number to kdu replica count
+ if kdu_replica_count > max_instance_count > instance_num:
+ kdu_replica_count = max_instance_count
+ if kdu_replica_count > max_instance_count:
+ raise LcmException(
+ "reached the limit of {} (max-instance-count) "
+ "scaling-out operations for the "
+ "scaling-group-descriptor '{}'".format(
+ instance_num, scaling_group
+ )
+ )
+
+ for x in range(kdu_delta.get("number-of-instances", 1)):
+ vca_scaling_info.append(
+ {
+ "osm_kdu_id": kdu_name,
+ "member-vnf-index": vnf_index,
+ "type": "create",
+ "kdu_index": instance_num + x - 1,
+ }
+ )
+ scaling_info["kdu-create"][kdu_name].append(
{
- "osm_vdu_id": vdu_delta["id"],
"member-vnf-index": vnf_index,
"type": "create",
- "count": vdu_delta.get("number-of-instances", 1),
+ "k8s-cluster-type": k8s_cluster_type,
+ "resource-name": resource_name,
+ "scale": kdu_replica_count,
}
)
- if cloud_init_list:
- RO_scaling_info[-1]["cloud_init"] = cloud_init_list
- vdu_scaling_info["vdu-create"][vdu_delta["id"]] = vdu_delta.get(
- "number-of-instances", 1
- )
-
elif scaling_type == "SCALE_IN":
- if (
- "min-instance-count" in scaling_descriptor
- and scaling_descriptor["min-instance-count"] is not None
- ):
- min_instance_count = int(scaling_descriptor["min-instance-count"])
-
- vdu_scaling_info["scaling_direction"] = "IN"
- vdu_scaling_info["vdu-delete"] = {}
deltas = scaling_descriptor.get("aspect-delta-details")["deltas"]
+
+ scaling_info["scaling_direction"] = "IN"
+ scaling_info["vdu-delete"] = {}
+ scaling_info["kdu-delete"] = {}
+
for delta in deltas:
- for vdu_delta in delta["vdu-delta"]:
- vdu_index = get_vdur_index(db_vnfr, vdu_delta)
+ for vdu_delta in delta.get("vdu-delta", {}):
+ vdu_count = vdu_index = get_vdur_index(db_vnfr, vdu_delta)
min_instance_count = 0
vdu_profile = get_vdu_profile(db_vnfd, vdu_delta["id"])
if vdu_profile and "min-number-of-instances" in vdu_profile:
default_instance_num = get_number_of_instances(
db_vnfd, vdu_delta["id"]
)
+ instance_num = vdu_delta.get("number-of-instances", 1)
+ nb_scale_op -= instance_num
+
+ new_instance_count = nb_scale_op + default_instance_num
- nb_scale_op -= vdu_delta.get("number-of-instances", 1)
- if nb_scale_op + default_instance_num < min_instance_count:
+ if new_instance_count < min_instance_count < vdu_count:
+ instances_number = min_instance_count - new_instance_count
+ else:
+ instances_number = instance_num
+
+ if new_instance_count < min_instance_count:
raise LcmException(
"reached the limit of {} (min-instance-count) scaling-in operations for the "
"scaling-group-descriptor '{}'".format(
nb_scale_op, scaling_group
)
)
- RO_scaling_info.append(
- {
- "osm_vdu_id": vdu_delta["id"],
- "member-vnf-index": vnf_index,
- "type": "delete",
- "count": vdu_delta.get("number-of-instances", 1),
- "vdu_index": vdu_index - 1,
- }
- )
for x in range(vdu_delta.get("number-of-instances", 1)):
- VCA_scaling_info.append(
+ vca_scaling_info.append(
{
"osm_vdu_id": vdu_delta["id"],
"member-vnf-index": vnf_index,
"vdu_index": vdu_index - 1 - x,
}
)
- vdu_scaling_info["vdu-delete"][vdu_delta["id"]] = vdu_delta.get(
+ scaling_info["vdu-delete"][vdu_delta["id"]] = instances_number
+ for kdu_delta in delta.get("kdu-resource-delta", {}):
+ kdu_profile = get_kdu_resource_profile(db_vnfd, kdu_delta["id"])
+ kdu_name = kdu_profile["kdu-name"]
+ resource_name = kdu_profile["resource-name"]
+
+ if not scaling_info["kdu-delete"].get(kdu_name, None):
+ scaling_info["kdu-delete"][kdu_name] = []
+
+ kdur = get_kdur(db_vnfr, kdu_name)
+ if kdur.get("helm-chart"):
+ k8s_cluster_type = "helm-chart-v3"
+ self.logger.debug("kdur: {}".format(kdur))
+ if (
+ kdur.get("helm-version")
+ and kdur.get("helm-version") == "v2"
+ ):
+ k8s_cluster_type = "helm-chart"
+ raise NotImplementedError
+ elif kdur.get("juju-bundle"):
+ k8s_cluster_type = "juju-bundle"
+ else:
+ raise LcmException(
+ "kdu type for kdu='{}.{}' is neither helm-chart nor "
+ "juju-bundle. Maybe an old NBI version is running".format(
+ db_vnfr["member-vnf-index-ref"], kdur["kdu-name"]
+ )
+ )
+
+ min_instance_count = 0
+ if kdu_profile and "min-number-of-instances" in kdu_profile:
+ min_instance_count = kdu_profile["min-number-of-instances"]
+
+ nb_scale_op -= kdu_delta.get("number-of-instances", 1)
+ deployed_kdu, _ = get_deployed_kdu(
+ nsr_deployed, kdu_name, vnf_index
+ )
+ if deployed_kdu is None:
+ raise LcmException(
+ "KDU '{}' for vnf '{}' not deployed".format(
+ kdu_name, vnf_index
+ )
+ )
+ kdu_instance = deployed_kdu.get("kdu-instance")
+ instance_num = await self.k8scluster_map[
+ k8s_cluster_type
+ ].get_scale_count(resource_name, kdu_instance, vca_id=vca_id)
+ kdu_replica_count = instance_num - kdu_delta.get(
"number-of-instances", 1
)
+ if kdu_replica_count < min_instance_count < instance_num:
+ kdu_replica_count = min_instance_count
+ if kdu_replica_count < min_instance_count:
+ raise LcmException(
+ "reached the limit of {} (min-instance-count) scaling-in operations for the "
+ "scaling-group-descriptor '{}'".format(
+ instance_num, scaling_group
+ )
+ )
+
+ for x in range(kdu_delta.get("number-of-instances", 1)):
+ vca_scaling_info.append(
+ {
+ "osm_kdu_id": kdu_name,
+ "member-vnf-index": vnf_index,
+ "type": "delete",
+ "kdu_index": instance_num - x - 1,
+ }
+ )
+ scaling_info["kdu-delete"][kdu_name].append(
+ {
+ "member-vnf-index": vnf_index,
+ "type": "delete",
+ "k8s-cluster-type": k8s_cluster_type,
+ "resource-name": resource_name,
+ "scale": kdu_replica_count,
+ }
+ )
+
# update VDU_SCALING_INFO with the VDUs to delete ip_addresses
- vdu_delete = copy(vdu_scaling_info.get("vdu-delete"))
- if vdu_scaling_info["scaling_direction"] == "IN":
+ vdu_delete = copy(scaling_info.get("vdu-delete"))
+ if scaling_info["scaling_direction"] == "IN":
for vdur in reversed(db_vnfr["vdur"]):
if vdu_delete.get(vdur["vdu-id-ref"]):
vdu_delete[vdur["vdu-id-ref"]] -= 1
- vdu_scaling_info["vdu"].append(
+ scaling_info["vdu"].append(
{
"name": vdur.get("name") or vdur.get("vdu-name"),
"vdu_id": vdur["vdu-id-ref"],
}
)
for interface in vdur["interfaces"]:
- vdu_scaling_info["vdu"][-1]["interface"].append(
+ scaling_info["vdu"][-1]["interface"].append(
{
"name": interface["name"],
"ip_address": interface["ip-address"],
"primitive".format(scaling_group, vnf_config_primitive)
)
- vnfr_params = {"VDU_SCALE_INFO": vdu_scaling_info}
+ vnfr_params = {"VDU_SCALE_INFO": scaling_info}
if db_vnfr.get("additionalParamsForVnf"):
vnfr_params.update(db_vnfr["additionalParamsForVnf"])
# Pre-scale retry check: Check if this sub-operation has been executed before
op_index = self._check_or_add_scale_suboperation(
db_nslcmop,
- nslcmop_id,
vnf_index,
vnf_config_primitive,
primitive_params,
] = time()
# SCALE-IN VCA - BEGIN
- if VCA_scaling_info:
+ if vca_scaling_info:
step = db_nslcmop_update[
"detailed-status"
] = "Deleting the execution environments"
scale_process = "VCA"
- for vdu_info in VCA_scaling_info:
- if vdu_info["type"] == "delete":
- member_vnf_index = str(vdu_info["member-vnf-index"])
+ for vca_info in vca_scaling_info:
+ if vca_info["type"] == "delete":
+ member_vnf_index = str(vca_info["member-vnf-index"])
self.logger.debug(
- logging_text + "vdu info: {}".format(vdu_info)
- )
- vdu_id = vdu_info["osm_vdu_id"]
- vdu_index = int(vdu_info["vdu_index"])
- stage[
- 1
- ] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
- member_vnf_index, vdu_id, vdu_index
+ logging_text + "vdu info: {}".format(vca_info)
)
+ if vca_info.get("osm_vdu_id"):
+ vdu_id = vca_info["osm_vdu_id"]
+ vdu_index = int(vca_info["vdu_index"])
+ stage[
+ 1
+ ] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
+ member_vnf_index, vdu_id, vdu_index
+ )
+ else:
+ vdu_index = 0
+ kdu_id = vca_info["osm_kdu_id"]
+ stage[
+ 1
+ ] = "Scaling member_vnf_index={}, kdu_id={}, vdu_index={} ".format(
+ member_vnf_index, kdu_id, vdu_index
+ )
stage[2] = step = "Scaling in VCA"
self._write_op_status(op_id=nslcmop_id, stage=stage)
vca_update = db_nsr["_admin"]["deployed"]["VCA"]
# SCALE-IN VCA - END
# SCALE RO - BEGIN
- if RO_scaling_info:
+ if scaling_info.get("vdu-create") or scaling_info.get("vdu-delete"):
scale_process = "RO"
if self.ro_config.get("ng"):
await self._scale_ng_ro(
- logging_text,
- db_nsr,
- db_nslcmop,
- db_vnfr,
- vdu_scaling_info,
- stage,
+ logging_text, db_nsr, db_nslcmop, db_vnfr, scaling_info, stage
)
- vdu_scaling_info.pop("vdu-create", None)
- vdu_scaling_info.pop("vdu-delete", None)
+ scaling_info.pop("vdu-create", None)
+ scaling_info.pop("vdu-delete", None)
scale_process = None
+ # SCALE RO - END
+
+ # SCALE KDU - BEGIN
+ if scaling_info.get("kdu-create") or scaling_info.get("kdu-delete"):
+ scale_process = "KDU"
+ await self._scale_kdu(
+ logging_text, nsr_id, nsr_deployed, db_vnfd, vca_id, scaling_info
+ )
+ scaling_info.pop("kdu-create", None)
+ scaling_info.pop("kdu-delete", None)
+
+ scale_process = None
+ # SCALE KDU - END
+
if db_nsr_update:
self.update_db_2("nsrs", nsr_id, db_nsr_update)
- # SCALE RO - END
# SCALE-UP VCA - BEGIN
- if VCA_scaling_info:
+ if vca_scaling_info:
step = db_nslcmop_update[
"detailed-status"
] = "Creating new execution environments"
scale_process = "VCA"
- for vdu_info in VCA_scaling_info:
- if vdu_info["type"] == "create":
- member_vnf_index = str(vdu_info["member-vnf-index"])
+ for vca_info in vca_scaling_info:
+ if vca_info["type"] == "create":
+ member_vnf_index = str(vca_info["member-vnf-index"])
self.logger.debug(
- logging_text + "vdu info: {}".format(vdu_info)
+ logging_text + "vdu info: {}".format(vca_info)
)
vnfd_id = db_vnfr["vnfd-ref"]
- vdu_index = int(vdu_info["vdu_index"])
- deploy_params = {"OSM": get_osm_params(db_vnfr)}
- if db_vnfr.get("additionalParamsForVnf"):
- deploy_params.update(
- parse_yaml_strings(
- db_vnfr["additionalParamsForVnf"].copy()
+ if vca_info.get("osm_vdu_id"):
+ vdu_index = int(vca_info["vdu_index"])
+ deploy_params = {"OSM": get_osm_params(db_vnfr)}
+ if db_vnfr.get("additionalParamsForVnf"):
+ deploy_params.update(
+ parse_yaml_strings(
+ db_vnfr["additionalParamsForVnf"].copy()
+ )
)
+ descriptor_config = get_configuration(
+ db_vnfd, db_vnfd["id"]
)
- descriptor_config = get_configuration(db_vnfd, db_vnfd["id"])
- if descriptor_config:
- vdu_id = None
- vdu_name = None
- kdu_name = None
- self._deploy_n2vc(
- logging_text=logging_text
- + "member_vnf_index={} ".format(member_vnf_index),
- db_nsr=db_nsr,
- db_vnfr=db_vnfr,
- nslcmop_id=nslcmop_id,
- nsr_id=nsr_id,
- nsi_id=nsi_id,
- vnfd_id=vnfd_id,
- vdu_id=vdu_id,
- kdu_name=kdu_name,
- member_vnf_index=member_vnf_index,
- vdu_index=vdu_index,
- vdu_name=vdu_name,
- deploy_params=deploy_params,
- descriptor_config=descriptor_config,
- base_folder=base_folder,
- task_instantiation_info=tasks_dict_info,
- stage=stage,
- )
- vdu_id = vdu_info["osm_vdu_id"]
- vdur = find_in_list(
- db_vnfr["vdur"], lambda vdu: vdu["vdu-id-ref"] == vdu_id
- )
- descriptor_config = get_configuration(db_vnfd, vdu_id)
- if vdur.get("additionalParams"):
- deploy_params_vdu = parse_yaml_strings(
- vdur["additionalParams"]
+ if descriptor_config:
+ vdu_id = None
+ vdu_name = None
+ kdu_name = None
+ self._deploy_n2vc(
+ logging_text=logging_text
+ + "member_vnf_index={} ".format(member_vnf_index),
+ db_nsr=db_nsr,
+ db_vnfr=db_vnfr,
+ nslcmop_id=nslcmop_id,
+ nsr_id=nsr_id,
+ nsi_id=nsi_id,
+ vnfd_id=vnfd_id,
+ vdu_id=vdu_id,
+ kdu_name=kdu_name,
+ member_vnf_index=member_vnf_index,
+ vdu_index=vdu_index,
+ vdu_name=vdu_name,
+ deploy_params=deploy_params,
+ descriptor_config=descriptor_config,
+ base_folder=base_folder,
+ task_instantiation_info=tasks_dict_info,
+ stage=stage,
+ )
+ vdu_id = vca_info["osm_vdu_id"]
+ vdur = find_in_list(
+ db_vnfr["vdur"], lambda vdu: vdu["vdu-id-ref"] == vdu_id
)
- else:
- deploy_params_vdu = deploy_params
- deploy_params_vdu["OSM"] = get_osm_params(
- db_vnfr, vdu_id, vdu_count_index=vdu_index
- )
- if descriptor_config:
- vdu_name = None
- kdu_name = None
- stage[
- 1
- ] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
- member_vnf_index, vdu_id, vdu_index
+ descriptor_config = get_configuration(db_vnfd, vdu_id)
+ if vdur.get("additionalParams"):
+ deploy_params_vdu = parse_yaml_strings(
+ vdur["additionalParams"]
+ )
+ else:
+ deploy_params_vdu = deploy_params
+ deploy_params_vdu["OSM"] = get_osm_params(
+ db_vnfr, vdu_id, vdu_count_index=vdu_index
)
- stage[2] = step = "Scaling out VCA"
- self._write_op_status(op_id=nslcmop_id, stage=stage)
- self._deploy_n2vc(
- logging_text=logging_text
- + "member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
+ if descriptor_config:
+ vdu_name = None
+ kdu_name = None
+ stage[
+ 1
+ ] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
member_vnf_index, vdu_id, vdu_index
- ),
- db_nsr=db_nsr,
- db_vnfr=db_vnfr,
- nslcmop_id=nslcmop_id,
- nsr_id=nsr_id,
- nsi_id=nsi_id,
- vnfd_id=vnfd_id,
- vdu_id=vdu_id,
- kdu_name=kdu_name,
- member_vnf_index=member_vnf_index,
- vdu_index=vdu_index,
- vdu_name=vdu_name,
- deploy_params=deploy_params_vdu,
- descriptor_config=descriptor_config,
- base_folder=base_folder,
- task_instantiation_info=tasks_dict_info,
- stage=stage,
- )
+ )
+ stage[2] = step = "Scaling out VCA"
+ self._write_op_status(op_id=nslcmop_id, stage=stage)
+ self._deploy_n2vc(
+ logging_text=logging_text
+ + "member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
+ member_vnf_index, vdu_id, vdu_index
+ ),
+ db_nsr=db_nsr,
+ db_vnfr=db_vnfr,
+ nslcmop_id=nslcmop_id,
+ nsr_id=nsr_id,
+ nsi_id=nsi_id,
+ vnfd_id=vnfd_id,
+ vdu_id=vdu_id,
+ kdu_name=kdu_name,
+ member_vnf_index=member_vnf_index,
+ vdu_index=vdu_index,
+ vdu_name=vdu_name,
+ deploy_params=deploy_params_vdu,
+ descriptor_config=descriptor_config,
+ base_folder=base_folder,
+ task_instantiation_info=tasks_dict_info,
+ stage=stage,
+ )
+ else:
+ kdu_name = vca_info["osm_kdu_id"]
+ descriptor_config = get_configuration(db_vnfd, kdu_name)
+ if descriptor_config:
+ vdu_id = None
+ kdu_index = int(vca_info["kdu_index"])
+ vdu_name = None
+ kdur = next(
+ x
+ for x in db_vnfr["kdur"]
+ if x["kdu-name"] == kdu_name
+ )
+ deploy_params_kdu = {"OSM": get_osm_params(db_vnfr)}
+ if kdur.get("additionalParams"):
+ deploy_params_kdu = parse_yaml_strings(
+ kdur["additionalParams"]
+ )
+
+ self._deploy_n2vc(
+ logging_text=logging_text,
+ db_nsr=db_nsr,
+ db_vnfr=db_vnfr,
+ nslcmop_id=nslcmop_id,
+ nsr_id=nsr_id,
+ nsi_id=nsi_id,
+ vnfd_id=vnfd_id,
+ vdu_id=vdu_id,
+ kdu_name=kdu_name,
+ member_vnf_index=member_vnf_index,
+ vdu_index=kdu_index,
+ vdu_name=vdu_name,
+ deploy_params=deploy_params_kdu,
+ descriptor_config=descriptor_config,
+ base_folder=base_folder,
+ task_instantiation_info=tasks_dict_info,
+ stage=stage,
+ )
# SCALE-UP VCA - END
scale_process = None
vnf_config_primitive
)
- vnfr_params = {"VDU_SCALE_INFO": vdu_scaling_info}
+ vnfr_params = {"VDU_SCALE_INFO": scaling_info}
if db_vnfr.get("additionalParamsForVnf"):
vnfr_params.update(db_vnfr["additionalParamsForVnf"])
# Post-scale retry check: Check if this sub-operation has been executed before
op_index = self._check_or_add_scale_suboperation(
db_nslcmop,
- nslcmop_id,
vnf_index,
vnf_config_primitive,
primitive_params,
self.logger.debug(logging_text + "Exit")
self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_scale")
+ async def _scale_kdu(
+ self, logging_text, nsr_id, nsr_deployed, db_vnfd, vca_id, scaling_info
+ ):
+ """Scale the KDUs referenced in scaling_info up or down.
+
+ :param logging_text: prefix for log messages
+ :param nsr_id: NS record id, used to address the "nsrs" DB document
+ :param nsr_deployed: _admin.deployed section of the NS record
+ :param db_vnfd: VNF descriptor owning the KDUs (used to look up config primitives)
+ :param vca_id: VCA id forwarded to the K8s cluster connectors
+ :param scaling_info: dict holding "kdu-create" and/or "kdu-delete" maps of
+ kdu_name -> list of per-KDU scaling entries
+ """
+ # NOTE(review): with "or", when both kdu-create and kdu-delete were present
+ # only kdu-create would be processed; presumably a single scaling operation
+ # is one-directional — confirm against the caller.
+ _scaling_info = scaling_info.get("kdu-create") or scaling_info.get("kdu-delete")
+ for kdu_name in _scaling_info:
+ for kdu_scaling_info in _scaling_info[kdu_name]:
+ # Locate the deployed KDU record and its index in _admin.deployed.K8s.
+ # deployed_kdu is assumed non-None here (entries come from deployed KDUs).
+ deployed_kdu, index = get_deployed_kdu(
+ nsr_deployed, kdu_name, kdu_scaling_info["member-vnf-index"]
+ )
+ cluster_uuid = deployed_kdu["k8scluster-uuid"]
+ kdu_instance = deployed_kdu["kdu-instance"]
+ scale = int(kdu_scaling_info["scale"])
+ k8s_cluster_type = kdu_scaling_info["k8s-cluster-type"]
+
+ # DB location where the K8s connector reports operation progress.
+ db_dict = {
+ "collection": "nsrs",
+ "filter": {"_id": nsr_id},
+ "path": "_admin.deployed.K8s.{}".format(index),
+ }
+
+ step = "scaling application {}".format(
+ kdu_scaling_info["resource-name"]
+ )
+ self.logger.debug(logging_text + step)
+
+ if kdu_scaling_info["type"] == "delete":
+ kdu_config = get_configuration(db_vnfd, kdu_name)
+ # Before scaling in, run the KDU's terminate config primitives —
+ # but only when no juju execution environment owns the config
+ # (otherwise the EE handles termination itself).
+ if (
+ kdu_config
+ and kdu_config.get("terminate-config-primitive")
+ and get_juju_ee_ref(db_vnfd, kdu_name) is None
+ ):
+ terminate_config_primitive_list = kdu_config.get(
+ "terminate-config-primitive"
+ )
+ # Primitives must run in ascending "seq" order.
+ terminate_config_primitive_list.sort(
+ key=lambda val: int(val["seq"])
+ )
+
+ for (
+ terminate_config_primitive
+ ) in terminate_config_primitive_list:
+ primitive_params_ = self._map_primitive_params(
+ terminate_config_primitive, {}, {}
+ )
+ step = "execute terminate config primitive"
+ self.logger.debug(logging_text + step)
+ # NOTE(review): hard-coded 10-minute timeout per primitive —
+ # consider a configurable timeout.
+ await asyncio.wait_for(
+ self.k8scluster_map[k8s_cluster_type].exec_primitive(
+ cluster_uuid=cluster_uuid,
+ kdu_instance=kdu_instance,
+ primitive_name=terminate_config_primitive["name"],
+ params=primitive_params_,
+ db_dict=db_dict,
+ vca_id=vca_id,
+ ),
+ timeout=600,
+ )
+
+ # Perform the actual scale via the matching K8s connector
+ # (helm-chart, helm-chart-v3 or juju-bundle).
+ await asyncio.wait_for(
+ self.k8scluster_map[k8s_cluster_type].scale(
+ kdu_instance,
+ scale,
+ kdu_scaling_info["resource-name"],
+ vca_id=vca_id,
+ ),
+ timeout=self.timeout_vca_on_error,
+ )
+
+ if kdu_scaling_info["type"] == "create":
+ kdu_config = get_configuration(db_vnfd, kdu_name)
+ # After scaling out, run the KDU's initial config primitives —
+ # again only when no juju EE is in charge of configuration.
+ if (
+ kdu_config
+ and kdu_config.get("initial-config-primitive")
+ and get_juju_ee_ref(db_vnfd, kdu_name) is None
+ ):
+ initial_config_primitive_list = kdu_config.get(
+ "initial-config-primitive"
+ )
+ # Primitives must run in ascending "seq" order.
+ initial_config_primitive_list.sort(
+ key=lambda val: int(val["seq"])
+ )
+
+ for initial_config_primitive in initial_config_primitive_list:
+ primitive_params_ = self._map_primitive_params(
+ initial_config_primitive, {}, {}
+ )
+ step = "execute initial config primitive"
+ self.logger.debug(logging_text + step)
+ # NOTE(review): same hard-coded 10-minute per-primitive timeout.
+ await asyncio.wait_for(
+ self.k8scluster_map[k8s_cluster_type].exec_primitive(
+ cluster_uuid=cluster_uuid,
+ kdu_instance=kdu_instance,
+ primitive_name=initial_config_primitive["name"],
+ params=primitive_params_,
+ db_dict=db_dict,
+ vca_id=vca_id,
+ ),
+ timeout=600,
+ )
+
async def _scale_ng_ro(
self, logging_text, db_nsr, db_nslcmop, db_vnfr, vdu_scaling_info, stage
):