Feature 11002: Deprecate helmv2 48/13948/2
author Luis Vega <lvega@whitestack.com>
Tue, 10 Oct 2023 22:36:33 +0000 (22:36 +0000)
committer cubag <gcuba@whitestack.com>
Mon, 13 Nov 2023 23:07:29 +0000 (00:07 +0100)
Change-Id: Ica328f2d806203a22b4d47442de20ec0fa851f88
Signed-off-by: Luis Vega <lvega@whitestack.com>
osm_lcm/lcm_helm_conn.py
osm_lcm/ns.py
osm_lcm/tests/test_lcm_helm_conn.py
osm_lcm/tests/test_ns.py
osm_lcm/tests/test_vim_sdn.py
osm_lcm/vim_sdn.py

osm_lcm/lcm_helm_conn.py
index d7db639..930ec60 100644
@@ -34,7 +34,6 @@ from osm_lcm.data_utils.database.database import Database
 from osm_lcm.data_utils.filesystem.filesystem import Filesystem
 
 from n2vc.n2vc_conn import N2VCConnector
-from n2vc.k8s_helm_conn import K8sHelmConnector
 from n2vc.k8s_helm3_conn import K8sHelm3Connector
 from n2vc.exceptions import (
     N2VCBadArgumentsException,
@@ -123,16 +122,7 @@ class LCMHelmConn(N2VCConnector, LcmBase):
         self._max_retry_time = self.vca_config.helm_max_retry_time
         self.log.debug("Retry time: {}".format(self._max_retry_time))
 
-        # initialize helm connector for helmv2 and helmv3
-        self._k8sclusterhelm2 = K8sHelmConnector(
-            kubectl_command=self.vca_config.kubectlpath,
-            helm_command=self.vca_config.helmpath,
-            fs=self.fs,
-            db=self.db,
-            log=self.log,
-            on_update_db=None,
-        )
-
+        # initialize helm connector for helmv3
         self._k8sclusterhelm3 = K8sHelm3Connector(
             kubectl_command=self.vca_config.kubectlpath,
             helm_command=self.vca_config.helm3path,
@@ -180,7 +170,7 @@ class LCMHelmConn(N2VCConnector, LcmBase):
               (e.g. stable/openldap, stable/openldap:1.2.4)
             - a path to a packaged chart (e.g. mychart.tgz)
             - a path to an unpacked chart directory or a URL (e.g. mychart)
-        :param str vca_type:  Type of vca, must be type helm or helm-v3
+        :param str vca_type:  Type of vca, must be type helm-v3
         :returns str, dict: id of the new execution environment including namespace.helm_id
         and credentials object set to None as all credentials should be osm kubernetes .kubeconfig
         """
@@ -242,34 +232,19 @@ class LCMHelmConn(N2VCConnector, LcmBase):
                 config["global"]["osm"] = config.get("osm")
 
             self.log.debug("install helm chart: {}".format(full_path))
-            if vca_type == "helm":
-                helm_id = self._k8sclusterhelm2.generate_kdu_instance_name(
-                    db_dict=db_dict,
-                    kdu_model=kdu_model,
-                )
-                await self._k8sclusterhelm2.install(
-                    system_cluster_uuid,
-                    kdu_model=kdu_model,
-                    kdu_instance=helm_id,
-                    namespace=namespace,
-                    params=config,
-                    db_dict=db_dict,
-                    timeout=progress_timeout,
-                )
-            else:
-                helm_id = self._k8sclusterhelm2.generate_kdu_instance_name(
-                    db_dict=db_dict,
-                    kdu_model=kdu_model,
-                )
-                await self._k8sclusterhelm3.install(
-                    system_cluster_uuid,
-                    kdu_model=kdu_model,
-                    kdu_instance=helm_id,
-                    namespace=namespace,
-                    params=config,
-                    db_dict=db_dict,
-                    timeout=progress_timeout,
-                )
+            helm_id = self._k8sclusterhelm3.generate_kdu_instance_name(
+                db_dict=db_dict,
+                kdu_model=kdu_model,
+            )
+            await self._k8sclusterhelm3.install(
+                system_cluster_uuid,
+                kdu_model=kdu_model,
+                kdu_instance=helm_id,
+                namespace=namespace,
+                params=config,
+                db_dict=db_dict,
+                timeout=progress_timeout,
+            )
 
             ee_id = "{}:{}.{}".format(vca_type, namespace, helm_id)
             return ee_id, None
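The unchanged context line at the top of this hunk copies the osm section of the instantiation parameters under global, so that subcharts see the same data. A rough sketch of what the values dictionary looks like after that copy, assuming an illustrative parameter set:

    # Illustrative instantiation parameters before the copy.
    config = {
        "osm": {"ns_id": "1234", "vnf_id": "vnf1"},
        "replicaCount": 2,
    }
    config.setdefault("global", {})
    config["global"]["osm"] = config.get("osm")
    # The chart can now read the data both as .Values.osm and, from any
    # subchart, as .Values.global.osm.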
@@ -306,7 +281,7 @@ class LCMHelmConn(N2VCConnector, LcmBase):
         :param float total_timeout:
         :param dict config:  General variables to instantiate KDU
         :param str artifact_path:  path of package content
-        :param str vca_type:  Type of vca, must be type helm or helm-v3
+        :param str vca_type:  Type of vca, must be type helm-v3
         :returns str, dict: id of the new execution environment including namespace.helm_id
         and credentials object set to None as all credentials should be osm kubernetes .kubeconfig
         """
@@ -361,28 +336,16 @@ class LCMHelmConn(N2VCConnector, LcmBase):
                 config["global"]["osm"] = config.get("osm")
 
             self.log.debug("Ugrade helm chart: {}".format(full_path))
-            if vca_type == "helm":
-                await self._k8sclusterhelm2.upgrade(
-                    system_cluster_uuid,
-                    kdu_model=full_path,
-                    kdu_instance=helm_id,
-                    namespace=namespace,
-                    params=config,
-                    db_dict=db_dict,
-                    timeout=progress_timeout,
-                    force=True,
-                )
-            else:
-                await self._k8sclusterhelm3.upgrade(
-                    system_cluster_uuid,
-                    kdu_model=full_path,
-                    kdu_instance=helm_id,
-                    namespace=namespace,
-                    params=config,
-                    db_dict=db_dict,
-                    timeout=progress_timeout,
-                    force=True,
-                )
+            await self._k8sclusterhelm3.upgrade(
+                system_cluster_uuid,
+                kdu_model=full_path,
+                kdu_instance=helm_id,
+                namespace=namespace,
+                params=config,
+                db_dict=db_dict,
+                timeout=progress_timeout,
+                force=True,
+            )
 
         except N2VCException:
             raise
@@ -706,12 +669,7 @@ class LCMHelmConn(N2VCConnector, LcmBase):
             # Get helm_id
             version, namespace, helm_id = get_ee_id_parts(ee_id)
 
-            # Uninstall chart, for backward compatibility we must assume that if there is no
-            # version it is helm-v2
-            if version == "helm-v3":
-                await self._k8sclusterhelm3.uninstall(system_cluster_uuid, helm_id)
-            else:
-                await self._k8sclusterhelm2.uninstall(system_cluster_uuid, helm_id)
+            await self._k8sclusterhelm3.uninstall(system_cluster_uuid, helm_id)
             self.log.info("ee_id: {} deleted".format(ee_id))
         except N2VCException:
             raise
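The uninstall path relies on get_ee_id_parts() to recover the three components of the ee_id built at creation time. A hedged sketch of that split, assuming the helper simply reverses the "vca_type:namespace.helm_id" composition (the real implementation in the LCM utilities may handle more corner cases):

    def get_ee_id_parts_sketch(ee_id):
        # Assumed behaviour: everything before the last ":" is the version
        # (vca_type); the remainder is "namespace.helm_id".
        version, _, rest = ee_id.rpartition(":")
        namespace, _, helm_id = rest.partition(".")
        return version, namespace, helm_id

    get_ee_id_parts_sketch("helm-v3:osm-ns-1234.eechart-0016477")
    # -> ("helm-v3", "osm-ns-1234", "eechart-0016477")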
osm_lcm/ns.py
index fad3972..3628e50 100644
@@ -97,7 +97,6 @@ from osm_lcm.data_utils.vnfr import (
 from osm_lcm.data_utils.dict_utils import parse_yaml_strings
 from osm_lcm.data_utils.database.vim_account import VimAccountDB
 from n2vc.definitions import RelationEndpoint
-from n2vc.k8s_helm_conn import K8sHelmConnector
 from n2vc.k8s_helm3_conn import K8sHelm3Connector
 from n2vc.k8s_juju_conn import K8sJujuConnector
 
@@ -172,15 +171,6 @@ class NsLcm(LcmBase):
             on_update_db=self._on_update_n2vc_db,
         )
 
-        self.k8sclusterhelm2 = K8sHelmConnector(
-            kubectl_command=self.vca_config.kubectlpath,
-            helm_command=self.vca_config.helmpath,
-            log=self.logger,
-            on_update_db=None,
-            fs=self.fs,
-            db=self.db,
-        )
-
         self.k8sclusterhelm3 = K8sHelm3Connector(
             kubectl_command=self.vca_config.kubectlpath,
             helm_command=self.vca_config.helm3path,
@@ -200,7 +190,6 @@ class NsLcm(LcmBase):
         )
 
         self.k8scluster_map = {
-            "helm-chart": self.k8sclusterhelm2,
             "helm-chart-v3": self.k8sclusterhelm3,
             "chart": self.k8sclusterhelm3,
             "juju-bundle": self.k8sclusterjuju,
@@ -1647,7 +1636,7 @@ class NsLcm(LcmBase):
 
             vca_id = self.get_vca_id(db_vnfr, db_nsr)
             # create or register execution environment in VCA
-            if vca_type in ("lxc_proxy_charm", "k8s_proxy_charm", "helm", "helm-v3"):
+            if vca_type in ("lxc_proxy_charm", "k8s_proxy_charm", "helm-v3"):
                 self._write_configuration_status(
                     nsr_id=nsr_id,
                     vca_index=vca_index,
@@ -1669,7 +1658,7 @@ class NsLcm(LcmBase):
                         db_dict=db_dict,
                         vca_id=vca_id,
                     )
-                elif vca_type == "helm" or vca_type == "helm-v3":
+                elif vca_type == "helm-v3":
                     ee_id, credentials = await self.vca_map[
                         vca_type
                     ].create_execution_environment(
@@ -1815,7 +1804,7 @@ class NsLcm(LcmBase):
 
             # if SSH access is required, then get execution environment SSH public
             # if native charm we have waited already to VM be UP
-            if vca_type in ("k8s_proxy_charm", "lxc_proxy_charm", "helm", "helm-v3"):
+            if vca_type in ("k8s_proxy_charm", "lxc_proxy_charm", "helm-v3"):
                 pub_key = None
                 user = None
                 # self.logger.debug("get ssh key block")
@@ -1957,7 +1946,7 @@ class NsLcm(LcmBase):
                 # TODO register in database that primitive is done
 
             # STEP 7 Configure metrics
-            if vca_type == "helm" or vca_type == "helm-v3":
+            if vca_type == "helm-v3":
                 # TODO: review for those cases where the helm chart is a reference and
                 # is not part of the NF package
                 prometheus_jobs = await self.extract_prometheus_scrape_jobs(
@@ -3651,7 +3640,6 @@ class NsLcm(LcmBase):
 
         k8scluster_id_2_uuic = {
             "helm-chart-v3": {},
-            "helm-chart": {},
             "juju-bundle": {},
         }
 
@@ -3750,11 +3738,6 @@ class NsLcm(LcmBase):
                         # Default version: helm3, if helm-version is v2 assign v2
                         k8sclustertype = "helm-chart-v3"
                         self.logger.debug("kdur: {}".format(kdur))
-                        if (
-                            kdur.get("helm-version")
-                            and kdur.get("helm-version") == "v2"
-                        ):
-                            k8sclustertype = "helm-chart"
                     elif kdur.get("juju-bundle"):
                         kdumodel = kdur["juju-bundle"]
                         k8sclustertype = "juju-bundle"
@@ -3975,10 +3958,7 @@ class NsLcm(LcmBase):
                     vca_type = "native_charm"
             elif ee_item.get("helm-chart"):
                 vca_name = ee_item["helm-chart"]
-                if ee_item.get("helm-version") and ee_item.get("helm-version") == "v2":
-                    vca_type = "helm"
-                else:
-                    vca_type = "helm-v3"
+                vca_type = "helm-v3"
             else:
                 self.logger.debug(
                     logging_text + "skipping non juju neither charm configuration"
@@ -4494,9 +4474,7 @@ class NsLcm(LcmBase):
                 ) and vca.get("needed_terminate")
                 # For helm we must destroy_ee. Also for native_charm, as juju_model cannot be deleted if there are
                 # pending native charms
-                destroy_ee = (
-                    True if vca_type in ("helm", "helm-v3", "native_charm") else False
-                )
+                destroy_ee = True if vca_type in ("helm-v3", "native_charm") else False
                 # self.logger.debug(logging_text + "vca_index: {}, ee_id: {}, vca_type: {} destroy_ee: {}".format(
                 #     vca_index, vca.get("ee_id"), vca_type, destroy_ee))
                 task = asyncio.ensure_future(
@@ -5163,7 +5141,7 @@ class NsLcm(LcmBase):
                 kdu_action = (
                     True
                     if primitive_name in actions
-                    and kdu["k8scluster-type"] not in ("helm-chart", "helm-chart-v3")
+                    and kdu["k8scluster-type"] != "helm-chart-v3"
                     else False
                 )
 
@@ -5853,13 +5831,7 @@ class NsLcm(LcmBase):
                                     # add chart to list and all parameters
                                     step = "Getting helm chart name"
                                     chart_name = ee_item.get("helm-chart")
-                                    if (
-                                        ee_item.get("helm-version")
-                                        and ee_item.get("helm-version") == "v2"
-                                    ):
-                                        vca_type = "helm"
-                                    else:
-                                        vca_type = "helm-v3"
+                                    vca_type = "helm-v3"
                                     step = "Setting Helm chart artifact paths"
 
                                     helm_artifacts.append(
@@ -6426,11 +6398,6 @@ class NsLcm(LcmBase):
                         if kdur.get("helm-chart"):
                             k8s_cluster_type = "helm-chart-v3"
                             self.logger.debug("kdur: {}".format(kdur))
-                            if (
-                                kdur.get("helm-version")
-                                and kdur.get("helm-version") == "v2"
-                            ):
-                                k8s_cluster_type = "helm-chart"
                         elif kdur.get("juju-bundle"):
                             k8s_cluster_type = "juju-bundle"
                         else:
@@ -6559,11 +6526,6 @@ class NsLcm(LcmBase):
                         if kdur.get("helm-chart"):
                             k8s_cluster_type = "helm-chart-v3"
                             self.logger.debug("kdur: {}".format(kdur))
-                            if (
-                                kdur.get("helm-version")
-                                and kdur.get("helm-version") == "v2"
-                            ):
-                                k8s_cluster_type = "helm-chart"
                         elif kdur.get("juju-bundle"):
                             k8s_cluster_type = "juju-bundle"
                         else:
@@ -8183,10 +8145,7 @@ class NsLcm(LcmBase):
                     vca_type = "native_charm"
             elif ee_item.get("helm-chart"):
                 vca_name = ee_item["helm-chart"]
-                if ee_item.get("helm-version") and ee_item.get("helm-version") == "v2":
-                    vca_type = "helm"
-                else:
-                    vca_type = "helm-v3"
+                vca_type = "helm-v3"
             else:
                 self.logger.debug(
                     logging_text + "skipping non juju neither charm configuration"
@@ -8518,7 +8477,7 @@ class NsLcm(LcmBase):
 
             # if SSH access is required, then get execution environment SSH public
             # if native charm we have waited already to VM be UP
-            if vca_type in ("k8s_proxy_charm", "lxc_proxy_charm", "helm", "helm-v3"):
+            if vca_type in ("k8s_proxy_charm", "lxc_proxy_charm", "helm-v3"):
                 pub_key = None
                 user = None
                 # self.logger.debug("get ssh key block")
osm_lcm/tests/test_lcm_helm_conn.py
index b4af5a3..051fff8 100644
@@ -51,7 +51,6 @@ class TestLcmHelmConn(asynctest.TestCase):
             "helm3path": "/usr/local/bin/helm3",
             "kubectlpath": "/usr/bin/kubectl",
         }
-        lcm_helm_conn.K8sHelmConnector = asynctest.Mock(lcm_helm_conn.K8sHelmConnector)
         lcm_helm_conn.K8sHelm3Connector = asynctest.Mock(
             lcm_helm_conn.K8sHelm3Connector
         )
@@ -72,10 +71,6 @@ class TestLcmHelmConn(asynctest.TestCase):
         self.helm_conn._k8sclusterhelm3.generate_kdu_instance_name.return_value = (
             helm_chart_id
         )
-        self.helm_conn._k8sclusterhelm2.generate_kdu_instance_name = Mock()
-        self.helm_conn._k8sclusterhelm2.generate_kdu_instance_name.return_value = (
-            helm_chart_id
-        )
 
         self.db.get_one.return_value = {"_admin": {"helm-chart-v3": {"id": "myk8s_id"}}}
         ee_id, _ = await self.helm_conn.create_execution_environment(
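The rest of this test (outside the hunk) only needs the Helm 3 mock now. A couple of follow-up checks one could keep, shown here as an illustration rather than as the patch's actual assertions:

    # Illustrative assertions: with helm v2 gone, only the Helm 3 connector
    # should have been exercised by create_execution_environment().
    self.helm_conn._k8sclusterhelm3.generate_kdu_instance_name.assert_called_once()
    self.helm_conn._k8sclusterhelm3.install.assert_called_once()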
osm_lcm/tests/test_ns.py
index 91ad6a3..d2062d3 100644
@@ -245,7 +245,6 @@ class TestBaseNS(asynctest.TestCase):
     def mock_vca_k8s(self):
         if not getenv("OSMLCMTEST_VCA_K8s_NOMOCK"):
             ns.K8sJujuConnector = asynctest.MagicMock(ns.K8sJujuConnector)
-            ns.K8sHelmConnector = asynctest.MagicMock(ns.K8sHelmConnector)
             ns.K8sHelm3Connector = asynctest.MagicMock(ns.K8sHelm3Connector)
 
         if not getenv("OSMLCMTEST_VCA_NOMOCK"):
osm_lcm/tests/test_vim_sdn.py
index 7bd6c65..6ee3fd8 100644
@@ -319,7 +319,6 @@ class TestVcaLcm(TestCase):
 
 
 class TestK8SClusterLcm(TestCase):
-    @patch("osm_lcm.vim_sdn.K8sHelmConnector")
     @patch("osm_lcm.vim_sdn.K8sHelm3Connector")
     @patch("osm_lcm.vim_sdn.K8sJujuConnector")
     @patch("osm_lcm.lcm_utils.Database")
@@ -330,7 +329,6 @@ class TestK8SClusterLcm(TestCase):
         mock_database,
         juju_connector,
         helm3_connector,
-        helm_connector,
     ):
         self.loop = asyncio.get_event_loop()
         self.msg = Mock(msgbase.MsgBase())
@@ -338,7 +336,6 @@ class TestK8SClusterLcm(TestCase):
         self.config = {"database": {"driver": "mongo"}}
         self.vca_config = {
             "VCA": {
-                "helmpath": "/usr/local/bin/helm",
                 "helm3path": "/usr/local/bin/helm3",
                 "kubectlpath": "/usr/bin/kubectl",
             }
osm_lcm/vim_sdn.py
index 47015c0..a464993 100644
@@ -22,7 +22,6 @@ import logging
 import logging.handlers
 from osm_lcm import ROclient
 from osm_lcm.lcm_utils import LcmException, LcmBase, deep_get
-from n2vc.k8s_helm_conn import K8sHelmConnector
 from n2vc.k8s_helm3_conn import K8sHelm3Connector
 from n2vc.k8s_juju_conn import K8sJujuConnector
 from n2vc.n2vc_juju_conn import N2VCJujuConnector
@@ -1078,15 +1077,6 @@ class K8sClusterLcm(LcmBase):
 
         super().__init__(msg, self.logger)
 
-        self.helm2_k8scluster = K8sHelmConnector(
-            kubectl_command=self.vca_config.get("kubectlpath"),
-            helm_command=self.vca_config.get("helmpath"),
-            log=self.logger,
-            on_update_db=None,
-            db=self.db,
-            fs=self.fs,
-        )
-
         self.helm3_k8scluster = K8sHelm3Connector(
             kubectl_command=self.vca_config.get("kubectlpath"),
             helm_command=self.vca_config.get("helm3path"),
@@ -1106,7 +1096,6 @@ class K8sClusterLcm(LcmBase):
         )
 
         self.k8s_map = {
-            "helm-chart": self.helm2_k8scluster,
             "helm-chart-v3": self.helm3_k8scluster,
             "juju-bundle": self.juju_k8scluster,
         }
@@ -1144,7 +1133,6 @@ class K8sClusterLcm(LcmBase):
             # for backwards compatibility and all-false case
             if not any(k8s_deploy_methods.values()):
                 k8s_deploy_methods = {
-                    "helm-chart": True,
                     "juju-bundle": True,
                     "helm-chart-v3": True,
                 }
@@ -1329,7 +1317,6 @@ class K8sClusterLcm(LcmBase):
             step = "Getting k8scluster='{}' from db".format(k8scluster_id)
             self.logger.debug(logging_text + step)
             db_k8scluster = self.db.get_one("k8sclusters", {"_id": k8scluster_id})
-            k8s_hc_id = deep_get(db_k8scluster, ("_admin", "helm-chart", "id"))
             k8s_h3c_id = deep_get(db_k8scluster, ("_admin", "helm-chart-v3", "id"))
             k8s_jb_id = deep_get(db_k8scluster, ("_admin", "juju-bundle", "id"))
 
@@ -1348,18 +1335,6 @@ class K8sClusterLcm(LcmBase):
                 db_k8scluster_update["_admin.juju-bundle.id"] = None
                 db_k8scluster_update["_admin.juju-bundle.operationalState"] = "DISABLED"
 
-            if k8s_hc_id:
-                step = "Removing helm-chart '{}'".format(k8s_hc_id)
-                uninstall_sw = (
-                    deep_get(db_k8scluster, ("_admin", "helm-chart", "created"))
-                    or False
-                )
-                cluster_removed = await self.helm2_k8scluster.reset(
-                    cluster_uuid=k8s_hc_id, uninstall_sw=uninstall_sw
-                )
-                db_k8scluster_update["_admin.helm-chart.id"] = None
-                db_k8scluster_update["_admin.helm-chart.operationalState"] = "DISABLED"
-
             if k8s_h3c_id:
                 step = "Removing helm-chart-v3 '{}'".format(k8s_h3c_id)
                 uninstall_sw = (
@@ -1375,16 +1350,16 @@ class K8sClusterLcm(LcmBase):
                 ] = "DISABLED"
 
             # Try to remove from cluster_inserted to clean old versions
-            if k8s_hc_id and cluster_removed:
+            if k8s_h3c_id and cluster_removed:
                 step = "Removing k8scluster='{}' from k8srepos".format(k8scluster_id)
                 self.logger.debug(logging_text + step)
                 db_k8srepo_list = self.db.get_list(
-                    "k8srepos", {"_admin.cluster-inserted": k8s_hc_id}
+                    "k8srepos", {"_admin.cluster-inserted": k8s_h3c_id}
                 )
                 for k8srepo in db_k8srepo_list:
                     try:
                         cluster_list = k8srepo["_admin"]["cluster-inserted"]
-                        cluster_list.remove(k8s_hc_id)
+                        cluster_list.remove(k8s_h3c_id)
                         self.update_db_2(
                             "k8srepos",
                             k8srepo["_id"],
@@ -1692,7 +1667,7 @@ class K8sRepoLcm(LcmBase):
 
         super().__init__(msg, self.logger)
 
-        self.k8srepo = K8sHelmConnector(
+        self.k8srepo = K8sHelm3Connector(
             kubectl_command=self.vca_config.get("kubectlpath"),
             helm_command=self.vca_config.get("helmpath"),
             fs=self.fs,