from osm_lcm.frontend_pb2 import PrimitiveRequest
from osm_lcm.frontend_pb2 import SshKeyRequest, SshKeyReply
from osm_lcm.frontend_grpc import FrontendExecutorStub
-from osm_lcm.lcm_utils import LcmBase
+from osm_lcm.lcm_utils import LcmBase, get_ee_id_parts
from osm_lcm.data_utils.database.database import Database
from osm_lcm.data_utils.filesystem.filesystem import Filesystem
self.log.error("Error deploying chart ee: {}".format(e), exc_info=True)
raise N2VCException("Error deploying chart ee: {}".format(e))
+ async def upgrade_execution_environment(
+ self,
+ namespace: str,
+ db_dict: dict,
+ helm_id: str,
+ progress_timeout: float = None,
+ total_timeout: float = None,
+ config: dict = None,
+ artifact_path: str = None,
+ vca_type: str = None,
+ *kargs,
+ **kwargs,
+ ) -> (str, dict):
+ """
+ Creates a new helm execution environment deploying the helm-chat indicated in the
+ attifact_path
+ :param str namespace: This param is not used, all helm charts are deployed in the osm
+ system namespace
+ :param dict db_dict: where to write to database when the status changes.
+ It contains a dictionary with {collection: str, filter: {}, path: str},
+ e.g. {collection: "nsrs", filter: {_id: <nsd-id>, path:
+ "_admin.deployed.VCA.3"}
+ :param helm_id: unique name of the Helm release to upgrade
+ :param float progress_timeout:
+ :param float total_timeout:
+ :param dict config: General variables to instantiate KDU
+ :param str artifact_path: path of package content
+ :param str vca_type: Type of vca, must be type helm or helm-v3
+ :returns str, dict: id of the new execution environment including namespace.helm_id
+ and credentials object set to None as all credentials should be osm kubernetes .kubeconfig
+ """
+
+ self.log.info(
+ "upgrade_execution_environment: namespace: {}, artifact_path: {}, db_dict: {}".format(
+ namespace, artifact_path, db_dict
+ )
+ )
+
+ # Validate helm_id is provided
+ if helm_id is None or len(helm_id) == 0:
+ raise N2VCBadArgumentsException(
+ message="helm_id is mandatory", bad_args=["helm_id"]
+ )
+
+ # Validate artifact-path is provided
+ if artifact_path is None or len(artifact_path) == 0:
+ raise N2VCBadArgumentsException(
+ message="artifact_path is mandatory", bad_args=["artifact_path"]
+ )
+
+ # Validate artifact-path exists and sync path
+ from_path = os.path.split(artifact_path)[0]
+ self.fs.sync(from_path)
+
+ # collapse duplicate slashes in the charm path
+ while artifact_path.find("//") >= 0:
+ artifact_path = artifact_path.replace("//", "/")
+
+ # check charm path
+ if self.fs.file_exists(artifact_path):
+ helm_chart_path = artifact_path
+ else:
+ msg = "artifact path does not exist: {}".format(artifact_path)
+ raise N2VCBadArgumentsException(message=msg, bad_args=["artifact_path"])
+
+ if artifact_path.startswith("/"):
+ full_path = self.fs.path + helm_chart_path
+ else:
+ full_path = self.fs.path + "/" + helm_chart_path
+
+ while full_path.find("//") >= 0:
+ full_path = full_path.replace("//", "/")
+
+ try:
+ # Call helm conn upgrade
+ # Obtain system cluster id from database
+ system_cluster_uuid = await self._get_system_cluster_id()
+ # Add the osm parameter block, if present, to the global section
+ if config and config.get("osm"):
+ if not config.get("global"):
+ config["global"] = {}
+ config["global"]["osm"] = config.get("osm")
+
+ self.log.debug("Ugrade helm chart: {}".format(full_path))
+ if vca_type == "helm":
+ await self._k8sclusterhelm2.upgrade(
+ system_cluster_uuid,
+ kdu_model=full_path,
+ kdu_instance=helm_id,
+ namespace=namespace,
+ params=config,
+ db_dict=db_dict,
+ timeout=progress_timeout,
+ force=True,
+ )
+ else:
+ await self._k8sclusterhelm3.upgrade(
+ system_cluster_uuid,
+ kdu_model=full_path,
+ kdu_instance=helm_id,
+ namespace=namespace,
+ params=config,
+ db_dict=db_dict,
+ timeout=progress_timeout,
+ force=True,
+ )
+
+ except N2VCException:
+ raise
+ except Exception as e:
+ self.log.error("Error upgrading chart ee: {}".format(e), exc_info=True)
+ raise N2VCException("Error upgrading chart ee: {}".format(e))
+
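+ # A minimal usage sketch of the method above (hypothetical values; the
+ # release name and namespace normally come from get_ee_id_parts(ee_id)):
+ #
+ # await n2vc.upgrade_execution_environment(
+ #     namespace="osm",
+ #     helm_id="eechart-0123456789",
+ #     db_dict={
+ #         "collection": "nsrs",
+ #         "filter": {"_id": nsr_id},
+ #         "path": "_admin.deployed.VCA.3",
+ #     },
+ #     artifact_path="/path/to/package/charms/helm-v3/mychart",
+ #     vca_type="helm-v3",
+ # )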
async def register_execution_environment(
self,
namespace: str,
try:
# Obtain ip_addr for the ee service, it is resolved by dns from the ee name by kubernetes
- version, namespace, helm_id = self._get_ee_id_parts(ee_id)
+ version, namespace, helm_id = get_ee_id_parts(ee_id)
ip_addr = socket.gethostbyname(helm_id)
# Obtain ssh_key from the ee, this method will implement retries to allow the ee
params_dict = dict()
try:
- version, namespace, helm_id = self._get_ee_id_parts(ee_id)
+ version, namespace, helm_id = get_ee_id_parts(ee_id)
ip_addr = socket.gethostbyname(helm_id)
except Exception as e:
self.log.error("Error getting ee ip ee: {}".format(e))
system_cluster_uuid = await self._get_system_cluster_id()
# Get helm_id
- version, namespace, helm_id = self._get_ee_id_parts(ee_id)
+ version, namespace, helm_id = get_ee_id_parts(ee_id)
# Uninstall chart, for backward compatibility we must assume that if there is no
# version it is helm-v2
)
self._system_cluster_id = k8s_hc_id
return self._system_cluster_id
-
- def _get_ee_id_parts(self, ee_id):
- """
- Parses ee_id stored at database that can be either 'version:namespace.helm_id' or only
- namespace.helm_id for backward compatibility
- If exists helm version can be helm-v3 or helm (helm-v2 old version)
- """
- version, _, part_id = ee_id.rpartition(":")
- namespace, _, helm_id = part_id.rpartition(".")
- return version, namespace, helm_id
target_dict[key_list[-1]] = value
+def get_ee_id_parts(ee_id):
+ """
+ Parses ee_id stored at database that can be either 'version:namespace.helm_id' or only
+ namespace.helm_id for backward compatibility
+ If exists helm version can be helm-v3 or helm (helm-v2 old version)
+ """
+ version, _, part_id = ee_id.rpartition(":")
+ namespace, _, helm_id = part_id.rpartition(".")
+ return version, namespace, helm_id
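+
+# Illustrative behaviour of get_ee_id_parts (hypothetical ids):
+#   get_ee_id_parts("helm-v3:osm.eechart-0123") -> ("helm-v3", "osm", "eechart-0123")
+#   get_ee_id_parts("osm.eechart-0123") -> ("", "osm", "eechart-0123")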
+
+
class LcmBase:
def __init__(self, msg, logger):
"""
populate_dict,
check_juju_bundle_existence,
get_charm_artifact_path,
+ get_ee_id_parts,
)
from osm_lcm.data_utils.nsd import (
get_ns_configuration_relation_list,
current_charm_artifact_path,
target_charm_artifact_path,
charm_artifact_paths,
- ) = ([], [], [])
+ helm_artifacts,
+ ) = ([], [], [], [])
step = "Checking if revision has changed in VNFD"
if current_vnf_revision != latest_vnfd_revision:
step = (
"Get the charm-type, charm-id, ee-id if there is deployed VCA"
)
- base_folder = latest_vnfd["_admin"]["storage"]
+ current_base_folder = current_vnfd["_admin"]["storage"]
+ latest_base_folder = latest_vnfd["_admin"]["storage"]
- for charm_index, charm_deployed in enumerate(
+ for vca_index, vca_deployed in enumerate(
get_iterable(nsr_deployed, "VCA")
):
vnf_index = db_vnfr.get("member-vnf-index-ref")
# Getting charm-id and charm-type
- if charm_deployed.get("member-vnf-index") == vnf_index:
- charm_id = self.get_vca_id(db_vnfr, db_nsr)
- charm_type = charm_deployed.get("type")
+ if vca_deployed.get("member-vnf-index") == vnf_index:
+ vca_id = self.get_vca_id(db_vnfr, db_nsr)
+ vca_type = vca_deployed.get("type")
+ vdu_count_index = vca_deployed.get("vdu_count_index")
# Getting ee-id
- ee_id = charm_deployed.get("ee_id")
+ ee_id = vca_deployed.get("ee_id")
step = "Getting descriptor config"
descriptor_config = get_configuration(
step = "Setting Charm artifact paths"
current_charm_artifact_path.append(
get_charm_artifact_path(
- base_folder,
+ current_base_folder,
charm_name,
- charm_type,
+ vca_type,
current_vnf_revision,
)
)
target_charm_artifact_path.append(
get_charm_artifact_path(
- base_folder,
+ latest_base_folder,
charm_name,
- charm_type,
+ vca_type,
latest_vnfd_revision,
)
)
+ elif ee_item.get("helm-chart"):
+ # add chart to list and all parameters
+ step = "Getting helm chart name"
+ chart_name = ee_item.get("helm-chart")
+ if ee_item.get("helm-version") and ee_item.get("helm-version") == "v2":
+ vca_type = "helm"
+ else:
+ vca_type = "helm-v3"
+ step = "Setting Helm chart artifact paths"
+
+ helm_artifacts.append({
+ "current_artifact_path": get_charm_artifact_path(
+ current_base_folder,
+ chart_name,
+ vca_type,
+ current_vnf_revision,
+ ),
+ "target_artifact_path": get_charm_artifact_path(
+ latest_base_folder,
+ chart_name,
+ vca_type,
+ latest_vnfd_revision,
+ ),
+ "ee_id": ee_id,
+ "vca_index": vca_index,
+ "vdu_index": vdu_count_index,
+ })
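+ # Each helm_artifacts entry ends up shaped like this (illustrative values):
+ # {
+ #     "current_artifact_path": "vnfd-pkg:4/charms/helm-v3/mychart",
+ #     "target_artifact_path": "vnfd-pkg:5/charms/helm-v3/mychart",
+ #     "ee_id": "helm-v3:osm.eechart-0123",
+ #     "vca_index": 3,
+ #     "vdu_index": 0,
+ # }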
charm_artifact_paths = zip(
current_charm_artifact_path, target_charm_artifact_path
detailed_status,
) = await self._ns_charm_upgrade(
ee_id=ee_id,
- charm_id=charm_id,
- charm_type=charm_type,
+ charm_id=vca_id,
+ charm_type=vca_type,
path=self.fs.path + target_charm_path,
timeout=timeout_seconds,
)
detailed_status = "Done"
db_nslcmop_update["detailed-status"] = "Done"
+ # helm base EE
+ for item in helm_artifacts:
+ if not (
+ item["current_artifact_path"]
+ and item["target_artifact_path"]
+ and self.check_charm_hash_changed(
+ item["current_artifact_path"], item["target_artifact_path"]
+ )
+ ):
+ # skip entries with a missing artifact path or an unchanged chart
+ continue
+ db_update_entry = "_admin.deployed.VCA.{}.".format(item["vca_index"])
+ vnfr_id = db_vnfr["_id"]
+ osm_config = {"osm": {"ns_id": nsr_id, "vnf_id": vnfr_id}}
+ db_dict = {
+ "collection": "nsrs",
+ "filter": {"_id": nsr_id},
+ "path": db_update_entry,
+ }
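+ # The ee_id version prefix ("helm" or "helm-v3") doubles as the vca_map
+ # key, so the parsed version is reused below as vca_type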
+ vca_type, namespace, helm_id = get_ee_id_parts(item["ee_id"])
+ await self.vca_map[vca_type].upgrade_execution_environment(
+ namespace=namespace,
+ helm_id=helm_id,
+ db_dict=db_dict,
+ config=osm_config,
+ artifact_path=item["target_artifact_path"],
+ vca_type=vca_type,
+ )
+ vnf_id = db_vnfr.get("vnfd-ref")
+ config_descriptor = get_configuration(latest_vnfd, vnf_id)
+ self.logger.debug("get ssh key block")
+ rw_mgmt_ip = None
+ if deep_get(
+ config_descriptor, ("config-access", "ssh-access", "required")
+ ):
+ # Needed to inject a ssh key
+ user = deep_get(
+ config_descriptor,
+ ("config-access", "ssh-access", "default-user"),
+ )
+ step = "Install configuration Software, getting public ssh key"
+ pub_key = await self.vca_map[vca_type].get_ee_ssh_public__key(
+ ee_id=ee_id, db_dict=db_dict, vca_id=vca_id
+ )
+
+ step = "Insert public key into VM user={} ssh_key={}".format(
+ user, pub_key
+ )
+ self.logger.debug(logging_text + step)
+
+ # wait for RO (ip-address) Insert pub_key into VM
+ rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
+ logging_text,
+ nsr_id,
+ vnfr_id,
+ None,
+ item["vdu_index"],
+ user=user,
+ pub_key=pub_key,
+ )
+
+ initial_config_primitive_list = (
+ config_descriptor.get("initial-config-primitive") or []
+ )
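+ # Re-run only the initial primitive named "config" so the upgraded EE is
+ # reconfigured with the current deploy params (e.g. rw_mgmt_ip)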
+ config_primitive = next(
+ (p for p in initial_config_primitive_list if p["name"] == "config"),
+ None,
+ )
+ if not config_primitive:
+ continue
+
+ deploy_params = {"OSM": get_osm_params(db_vnfr)}
+ if rw_mgmt_ip:
+ deploy_params["rw_mgmt_ip"] = rw_mgmt_ip
+ if db_vnfr.get("additionalParamsForVnf"):
+ deploy_params.update(
+ parse_yaml_strings(db_vnfr["additionalParamsForVnf"].copy())
+ )
+ primitive_params_ = self._map_primitive_params(
+ config_primitive, {}, deploy_params
+ )
+
+ step = "execute primitive '{}' params '{}'".format(
+ config_primitive["name"], primitive_params_
+ )
+ self.logger.debug(logging_text + step)
+ await self.vca_map[vca_type].exec_primitive(
+ ee_id=ee_id,
+ primitive_name=config_primitive["name"],
+ params_dict=primitive_params_,
+ db_dict=db_dict,
+ vca_id=vca_id,
+ vca_type=vca_type,
+ )
+
+ step = "Updating policies"
+ member_vnf_index = db_vnfr["member-vnf-index-ref"]
+ detailed_status = "Done"
+ db_nslcmop_update["detailed-status"] = "Done"
+
# If nslcmop_operation_state is None, so any operation is not failed.
if not nslcmop_operation_state:
nslcmop_operation_state = "COMPLETED"