from osm_lcm.data_utils.filesystem.filesystem import Filesystem
from n2vc.n2vc_conn import N2VCConnector
-from n2vc.k8s_helm_conn import K8sHelmConnector
from n2vc.k8s_helm3_conn import K8sHelm3Connector
from n2vc.exceptions import (
N2VCBadArgumentsException,
self._max_retry_time = self.vca_config.helm_max_retry_time
self.log.debug("Retry time: {}".format(self._max_retry_time))
- # initialize helm connector for helmv2 and helmv3
- self._k8sclusterhelm2 = K8sHelmConnector(
- kubectl_command=self.vca_config.kubectlpath,
- helm_command=self.vca_config.helmpath,
- fs=self.fs,
- db=self.db,
- log=self.log,
- on_update_db=None,
- )
-
+ # initialize helm connector for helmv3
self._k8sclusterhelm3 = K8sHelm3Connector(
kubectl_command=self.vca_config.kubectlpath,
helm_command=self.vca_config.helm3path,
(e.g. stable/openldap, stable/openldap:1.2.4)
- a path to a packaged chart (e.g. mychart.tgz)
- a path to an unpacked chart directory or a URL (e.g. mychart)
- :param str vca_type: Type of vca, must be type helm or helm-v3
+ :param str vca_type: Type of vca, must be type helm-v3
:returns str, dict: id of the new execution environment including namespace.helm_id
- and credentials object set to None as all credentials should be osm kubernetes .kubeconfig
+ and a credentials object set to None, since all credentials come from the OSM Kubernetes .kubeconfig
"""
config["global"]["osm"] = config.get("osm")
self.log.debug("install helm chart: {}".format(full_path))
- if vca_type == "helm":
- helm_id = self._k8sclusterhelm2.generate_kdu_instance_name(
- db_dict=db_dict,
- kdu_model=kdu_model,
- )
- await self._k8sclusterhelm2.install(
- system_cluster_uuid,
- kdu_model=kdu_model,
- kdu_instance=helm_id,
- namespace=namespace,
- params=config,
- db_dict=db_dict,
- timeout=progress_timeout,
- )
- else:
- helm_id = self._k8sclusterhelm2.generate_kdu_instance_name(
- db_dict=db_dict,
- kdu_model=kdu_model,
- )
- await self._k8sclusterhelm3.install(
- system_cluster_uuid,
- kdu_model=kdu_model,
- kdu_instance=helm_id,
- namespace=namespace,
- params=config,
- db_dict=db_dict,
- timeout=progress_timeout,
- )
+ helm_id = self._k8sclusterhelm3.generate_kdu_instance_name(
+ db_dict=db_dict,
+ kdu_model=kdu_model,
+ )
+ await self._k8sclusterhelm3.install(
+ system_cluster_uuid,
+ kdu_model=kdu_model,
+ kdu_instance=helm_id,
+ namespace=namespace,
+ params=config,
+ db_dict=db_dict,
+ timeout=progress_timeout,
+ )
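+ # ee_id encodes the deployment as "<vca_type>:<namespace>.<helm_id>",
+ # e.g. "helm-v3:osm.mychart-0001" (illustrative values)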
ee_id = "{}:{}.{}".format(vca_type, namespace, helm_id)
return ee_id, None
:param float total_timeout:
:param dict config: General variables to instantiate KDU
:param str artifact_path: path of package content
- :param str vca_type: Type of vca, must be type helm or helm-v3
+ :param str vca_type: Type of vca, must be type helm-v3
:returns str, dict: id of the new execution environment including namespace.helm_id
- and credentials object set to None as all credentials should be osm kubernetes .kubeconfig
+ and a credentials object set to None, since all credentials come from the OSM Kubernetes .kubeconfig
"""
config["global"]["osm"] = config.get("osm")
self.log.debug("Ugrade helm chart: {}".format(full_path))
- if vca_type == "helm":
- await self._k8sclusterhelm2.upgrade(
- system_cluster_uuid,
- kdu_model=full_path,
- kdu_instance=helm_id,
- namespace=namespace,
- params=config,
- db_dict=db_dict,
- timeout=progress_timeout,
- force=True,
- )
- else:
- await self._k8sclusterhelm3.upgrade(
- system_cluster_uuid,
- kdu_model=full_path,
- kdu_instance=helm_id,
- namespace=namespace,
- params=config,
- db_dict=db_dict,
- timeout=progress_timeout,
- force=True,
- )
+ await self._k8sclusterhelm3.upgrade(
+ system_cluster_uuid,
+ kdu_model=full_path,
+ kdu_instance=helm_id,
+ namespace=namespace,
+ params=config,
+ db_dict=db_dict,
+ timeout=progress_timeout,
+ force=True,
+ )
except N2VCException:
raise
# Get helm_id
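+ # version is the vca_type prefix encoded in ee_id at creation time (e.g. "helm-v3")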
version, namespace, helm_id = get_ee_id_parts(ee_id)
- # Uninstall chart, for backward compatibility we must assume that if there is no
- # version it is helm-v2
- if version == "helm-v3":
- await self._k8sclusterhelm3.uninstall(system_cluster_uuid, helm_id)
- else:
- await self._k8sclusterhelm2.uninstall(system_cluster_uuid, helm_id)
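+ # with helm v2 support removed, uninstall always goes through the v3 connector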
+ await self._k8sclusterhelm3.uninstall(system_cluster_uuid, helm_id)
self.log.info("ee_id: {} deleted".format(ee_id))
except N2VCException:
raise
from osm_lcm.data_utils.dict_utils import parse_yaml_strings
from osm_lcm.data_utils.database.vim_account import VimAccountDB
from n2vc.definitions import RelationEndpoint
-from n2vc.k8s_helm_conn import K8sHelmConnector
from n2vc.k8s_helm3_conn import K8sHelm3Connector
from n2vc.k8s_juju_conn import K8sJujuConnector
on_update_db=self._on_update_n2vc_db,
)
- self.k8sclusterhelm2 = K8sHelmConnector(
- kubectl_command=self.vca_config.kubectlpath,
- helm_command=self.vca_config.helmpath,
- log=self.logger,
- on_update_db=None,
- fs=self.fs,
- db=self.db,
- )
-
self.k8sclusterhelm3 = K8sHelm3Connector(
kubectl_command=self.vca_config.kubectlpath,
helm_command=self.vca_config.helm3path,
)
self.k8scluster_map = {
- "helm-chart": self.k8sclusterhelm2,
"helm-chart-v3": self.k8sclusterhelm3,
"chart": self.k8sclusterhelm3,
"juju-bundle": self.k8sclusterjuju,
vca_id = self.get_vca_id(db_vnfr, db_nsr)
# create or register execution environment in VCA
- if vca_type in ("lxc_proxy_charm", "k8s_proxy_charm", "helm", "helm-v3"):
+ if vca_type in ("lxc_proxy_charm", "k8s_proxy_charm", "helm-v3"):
self._write_configuration_status(
nsr_id=nsr_id,
vca_index=vca_index,
db_dict=db_dict,
vca_id=vca_id,
)
- elif vca_type == "helm" or vca_type == "helm-v3":
+ elif vca_type == "helm-v3":
ee_id, credentials = await self.vca_map[
vca_type
].create_execution_environment(
# if SSH access is required, then get execution environment SSH public
- # if native charm we have waited already to VM be UP
+ # for native charms we have already waited for the VM to be up
- if vca_type in ("k8s_proxy_charm", "lxc_proxy_charm", "helm", "helm-v3"):
+ if vca_type in ("k8s_proxy_charm", "lxc_proxy_charm", "helm-v3"):
pub_key = None
user = None
# self.logger.debug("get ssh key block")
# TODO register in database that primitive is done
# STEP 7 Configure metrics
- if vca_type == "helm" or vca_type == "helm-v3":
+ if vca_type == "helm-v3":
# TODO: review for those cases where the helm chart is a reference and
# is not part of the NF package
prometheus_jobs = await self.extract_prometheus_scrape_jobs(
k8scluster_id_2_uuic = {
"helm-chart-v3": {},
- "helm-chart": {},
"juju-bundle": {},
}
- # Default version: helm3, if helm-version is v2 assign v2
+ # Only helm-chart-v3 is supported
k8sclustertype = "helm-chart-v3"
self.logger.debug("kdur: {}".format(kdur))
- if (
- kdur.get("helm-version")
- and kdur.get("helm-version") == "v2"
- ):
- k8sclustertype = "helm-chart"
elif kdur.get("juju-bundle"):
kdumodel = kdur["juju-bundle"]
k8sclustertype = "juju-bundle"
vca_type = "native_charm"
elif ee_item.get("helm-chart"):
vca_name = ee_item["helm-chart"]
- if ee_item.get("helm-version") and ee_item.get("helm-version") == "v2":
- vca_type = "helm"
- else:
- vca_type = "helm-v3"
+ vca_type = "helm-v3"
else:
self.logger.debug(
logging_text + "skipping non juju neither charm configuration"
) and vca.get("needed_terminate")
# For helm we must destroy_ee. Also for native_charm, as juju_model cannot be deleted if there are
# pending native charms
- destroy_ee = (
- True if vca_type in ("helm", "helm-v3", "native_charm") else False
- )
+ destroy_ee = vca_type in ("helm-v3", "native_charm")
# self.logger.debug(logging_text + "vca_index: {}, ee_id: {}, vca_type: {} destroy_ee: {}".format(
# vca_index, vca.get("ee_id"), vca_type, destroy_ee))
task = asyncio.ensure_future(
kdu_action = (
True
if primitive_name in actions
- and kdu["k8scluster-type"] not in ("helm-chart", "helm-chart-v3")
+ and kdu["k8scluster-type"] != "helm-chart-v3"
else False
)
# add chart to list and all parameters
step = "Getting helm chart name"
chart_name = ee_item.get("helm-chart")
- if (
- ee_item.get("helm-version")
- and ee_item.get("helm-version") == "v2"
- ):
- vca_type = "helm"
- else:
- vca_type = "helm-v3"
+ vca_type = "helm-v3"
step = "Setting Helm chart artifact paths"
helm_artifacts.append(
if kdur.get("helm-chart"):
k8s_cluster_type = "helm-chart-v3"
self.logger.debug("kdur: {}".format(kdur))
- if (
- kdur.get("helm-version")
- and kdur.get("helm-version") == "v2"
- ):
- k8s_cluster_type = "helm-chart"
elif kdur.get("juju-bundle"):
k8s_cluster_type = "juju-bundle"
else:
if kdur.get("helm-chart"):
k8s_cluster_type = "helm-chart-v3"
self.logger.debug("kdur: {}".format(kdur))
- if (
- kdur.get("helm-version")
- and kdur.get("helm-version") == "v2"
- ):
- k8s_cluster_type = "helm-chart"
elif kdur.get("juju-bundle"):
k8s_cluster_type = "juju-bundle"
else:
vca_type = "native_charm"
elif ee_item.get("helm-chart"):
vca_name = ee_item["helm-chart"]
- if ee_item.get("helm-version") and ee_item.get("helm-version") == "v2":
- vca_type = "helm"
- else:
- vca_type = "helm-v3"
+ vca_type = "helm-v3"
else:
self.logger.debug(
logging_text + "skipping non juju neither charm configuration"
# if SSH access is required, then get execution environment SSH public
- # if native charm we have waited already to VM be UP
+ # for native charms we have already waited for the VM to be up
- if vca_type in ("k8s_proxy_charm", "lxc_proxy_charm", "helm", "helm-v3"):
+ if vca_type in ("k8s_proxy_charm", "lxc_proxy_charm", "helm-v3"):
pub_key = None
user = None
# self.logger.debug("get ssh key block")
"helm3path": "/usr/local/bin/helm3",
"kubectlpath": "/usr/bin/kubectl",
}
- lcm_helm_conn.K8sHelmConnector = asynctest.Mock(lcm_helm_conn.K8sHelmConnector)
lcm_helm_conn.K8sHelm3Connector = asynctest.Mock(
lcm_helm_conn.K8sHelm3Connector
)
self.helm_conn._k8sclusterhelm3.generate_kdu_instance_name.return_value = (
helm_chart_id
)
- self.helm_conn._k8sclusterhelm2.generate_kdu_instance_name = Mock()
- self.helm_conn._k8sclusterhelm2.generate_kdu_instance_name.return_value = (
- helm_chart_id
- )
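+ # the mocked cluster record only needs the helm-chart-v3 registration now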
self.db.get_one.return_value = {"_admin": {"helm-chart-v3": {"id": "myk8s_id"}}}
ee_id, _ = await self.helm_conn.create_execution_environment(
def mock_vca_k8s(self):
if not getenv("OSMLCMTEST_VCA_K8s_NOMOCK"):
ns.K8sJujuConnector = asynctest.MagicMock(ns.K8sJujuConnector)
- ns.K8sHelmConnector = asynctest.MagicMock(ns.K8sHelmConnector)
ns.K8sHelm3Connector = asynctest.MagicMock(ns.K8sHelm3Connector)
if not getenv("OSMLCMTEST_VCA_NOMOCK"):
class TestK8SClusterLcm(TestCase):
- @patch("osm_lcm.vim_sdn.K8sHelmConnector")
@patch("osm_lcm.vim_sdn.K8sHelm3Connector")
@patch("osm_lcm.vim_sdn.K8sJujuConnector")
@patch("osm_lcm.lcm_utils.Database")
mock_database,
juju_connector,
helm3_connector,
- helm_connector,
):
self.loop = asyncio.get_event_loop()
self.msg = Mock(msgbase.MsgBase())
self.config = {"database": {"driver": "mongo"}}
self.vca_config = {
"VCA": {
- "helmpath": "/usr/local/bin/helm",
"helm3path": "/usr/local/bin/helm3",
"kubectlpath": "/usr/bin/kubectl",
}
import logging.handlers
from osm_lcm import ROclient
from osm_lcm.lcm_utils import LcmException, LcmBase, deep_get
-from n2vc.k8s_helm_conn import K8sHelmConnector
from n2vc.k8s_helm3_conn import K8sHelm3Connector
from n2vc.k8s_juju_conn import K8sJujuConnector
from n2vc.n2vc_juju_conn import N2VCJujuConnector
super().__init__(msg, self.logger)
- self.helm2_k8scluster = K8sHelmConnector(
- kubectl_command=self.vca_config.get("kubectlpath"),
- helm_command=self.vca_config.get("helmpath"),
- log=self.logger,
- on_update_db=None,
- db=self.db,
- fs=self.fs,
- )
-
self.helm3_k8scluster = K8sHelm3Connector(
kubectl_command=self.vca_config.get("kubectlpath"),
helm_command=self.vca_config.get("helm3path"),
)
self.k8s_map = {
- "helm-chart": self.helm2_k8scluster,
"helm-chart-v3": self.helm3_k8scluster,
"juju-bundle": self.juju_k8scluster,
}
# for backwards compatibility and all-false case
if not any(k8s_deploy_methods.values()):
k8s_deploy_methods = {
- "helm-chart": True,
"juju-bundle": True,
"helm-chart-v3": True,
}
step = "Getting k8scluster='{}' from db".format(k8scluster_id)
self.logger.debug(logging_text + step)
db_k8scluster = self.db.get_one("k8sclusters", {"_id": k8scluster_id})
- k8s_hc_id = deep_get(db_k8scluster, ("_admin", "helm-chart", "id"))
k8s_h3c_id = deep_get(db_k8scluster, ("_admin", "helm-chart-v3", "id"))
k8s_jb_id = deep_get(db_k8scluster, ("_admin", "juju-bundle", "id"))
db_k8scluster_update["_admin.juju-bundle.id"] = None
db_k8scluster_update["_admin.juju-bundle.operationalState"] = "DISABLED"
- if k8s_hc_id:
- step = "Removing helm-chart '{}'".format(k8s_hc_id)
- uninstall_sw = (
- deep_get(db_k8scluster, ("_admin", "helm-chart", "created"))
- or False
- )
- cluster_removed = await self.helm2_k8scluster.reset(
- cluster_uuid=k8s_hc_id, uninstall_sw=uninstall_sw
- )
- db_k8scluster_update["_admin.helm-chart.id"] = None
- db_k8scluster_update["_admin.helm-chart.operationalState"] = "DISABLED"
-
if k8s_h3c_id:
step = "Removing helm-chart-v3 '{}'".format(k8s_h3c_id)
uninstall_sw = (
] = "DISABLED"
# Try to remove from cluster_inserted to clean old versions
- if k8s_hc_id and cluster_removed:
+ if k8s_h3c_id and cluster_removed:
step = "Removing k8scluster='{}' from k8srepos".format(k8scluster_id)
self.logger.debug(logging_text + step)
db_k8srepo_list = self.db.get_list(
- "k8srepos", {"_admin.cluster-inserted": k8s_hc_id}
+ "k8srepos", {"_admin.cluster-inserted": k8s_h3c_id}
)
for k8srepo in db_k8srepo_list:
try:
cluster_list = k8srepo["_admin"]["cluster-inserted"]
- cluster_list.remove(k8s_hc_id)
+ cluster_list.remove(k8s_h3c_id)
self.update_db_2(
"k8srepos",
k8srepo["_id"],
super().__init__(msg, self.logger)
- self.k8srepo = K8sHelmConnector(
+ self.k8srepo = K8sHelm3Connector(
kubectl_command=self.vca_config.get("kubectlpath"),
- helm_command=self.vca_config.get("helmpath"),
+ helm_command=self.vca_config.get("helm3path"),
fs=self.fs,