&& mv linux-amd64/helm /usr/local/bin/helm \
&& rm -r linux-amd64/
+RUN curl https://get.helm.sh/helm-v3.3.4-linux-amd64.tar.gz --output helm-v3.3.4.tar.gz \
+ && tar -zxvf helm-v3.3.4.tar.gz \
+ && mv linux-amd64/helm /usr/local/bin/helm3 \
+ && rm -r linux-amd64/
+
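+ # Illustrative sanity check, not part of the patch: after this stage the image
+ # should carry both clients side by side:
+ #   /usr/local/bin/helm version --client    # helm v2
+ #   /usr/local/bin/helm3 version            # helm v3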
RUN curl -L https://launchpad.net/juju/2.7/2.7.6/+download/juju-2.7.6-k8s.tar.xz --output juju-2.7.6-k8s.tar.xz \
&& tar -xvf juju-2.7.6-k8s.tar.xz \
&& mv juju /usr/local/bin/juju
cloud: localhost
k8s_cloud: k8scloud
helmpath: /usr/local/bin/helm
+ helm3path: /usr/local/bin/helm3
kubectlpath: /usr/bin/kubectl
jujupath: /usr/local/bin/juju
# pubkey: pubkey
from osm_lcm.frontend_pb2 import PrimitiveRequest
from osm_lcm.frontend_pb2 import SshKeyRequest, SshKeyReply
from osm_lcm.frontend_grpc import FrontendExecutorStub
+from osm_lcm.lcm_utils import LcmBase
from n2vc.n2vc_conn import N2VCConnector
from n2vc.k8s_helm_conn import K8sHelmConnector
+from n2vc.k8s_helm3_conn import K8sHelm3Connector
from n2vc.exceptions import N2VCBadArgumentsException, N2VCException, N2VCExecutionException
from osm_lcm.lcm_utils import deep_get
return wrapper
-class LCMHelmConn(N2VCConnector):
+class LCMHelmConn(N2VCConnector, LcmBase):
_KUBECTL_OSM_NAMESPACE = "osm"
_KUBECTL_OSM_CLUSTER_NAME = "_system-osm-k8s"
_EE_SERVICE_PORT = 50050
self._max_retry_time = self._MAX_RETRY_TIME
self._initial_retry_time = self._MAX_INITIAL_RETRY_TIME
- # initialize helm connector
- self._k8sclusterhelm = K8sHelmConnector(
+ # initialize helm connectors for helm v2 and helm v3
+ self._k8sclusterhelm2 = K8sHelmConnector(
kubectl_command=self.vca_config.get("kubectlpath"),
helm_command=self.vca_config.get("helmpath"),
fs=self.fs,
on_update_db=None,
)
+ self._k8sclusterhelm3 = K8sHelm3Connector(
+ kubectl_command=self.vca_config.get("kubectlpath"),
+ helm_command=self.vca_config.get("helm3path"),
+ fs=self.fs,
+ log=self.log,
+ db=self.db,
+ on_update_db=None,
+ )
+
self._system_cluster_id = None
self.log.info("Helm N2VC connector initialized")
:param float total_timeout:
:param dict config: General variables to instantiate KDU
:param str artifact_path: path of package content
- :param str vca_type: Type of vca, not used as assumed of type helm
+ :param str vca_type: Type of vca; must be "helm" or "helm-v3"
:returns str, dict: id of the new execution environment including namespace.helm_id,
and a credentials object set to None, as all credentials are taken from the osm kubernetes .kubeconfig
"""
try:
# Call helm conn install
# Obtain system cluster id from database
- system_cluster_uuid = self._get_system_cluster_id()
+ system_cluster_uuid = await self._get_system_cluster_id()
# Add the osm parameter to global, if it exists
if config and config.get("osm"):
    if not config.get("global"):
        config["global"] = {}
    config["global"]["osm"] = config.get("osm")
self.log.debug("install helm chart: {}".format(full_path))
- helm_id = await self._k8sclusterhelm.install(system_cluster_uuid, kdu_model=full_path,
- namespace=self._KUBECTL_OSM_NAMESPACE,
- params=config,
- db_dict=db_dict,
- timeout=progress_timeout)
+ if vca_type == "helm":
+ helm_id = await self._k8sclusterhelm2.install(system_cluster_uuid, kdu_model=full_path,
+ namespace=self._KUBECTL_OSM_NAMESPACE,
+ params=config,
+ db_dict=db_dict,
+ timeout=progress_timeout)
+ else:
+ helm_id = await self._k8sclusterhelm3.install(system_cluster_uuid, kdu_model=full_path,
+ namespace=self._KUBECTL_OSM_NAMESPACE,
+ params=config,
+ db_dict=db_dict,
+ timeout=progress_timeout)
- ee_id = "{}.{}".format(self._KUBECTL_OSM_NAMESPACE, helm_id)
+ ee_id = "{}:{}.{}".format(vca_type, self._KUBECTL_OSM_NAMESPACE, helm_id)
return ee_id, None
except N2VCException:
raise
try:
# Obtain ip_addr for the ee service; it is resolved by DNS from the ee name by kubernetes
- namespace, helm_id = self._get_ee_id_parts(ee_id)
+ version, namespace, helm_id = self._get_ee_id_parts(ee_id)
ip_addr = socket.gethostbyname(helm_id)
# Obtain ssh_key from the ee, this method will implement retries to allow the ee
params_dict = dict()
try:
- namespace, helm_id = self._get_ee_id_parts(ee_id)
+ version, namespace, helm_id = self._get_ee_id_parts(ee_id)
ip_addr = socket.gethostbyname(helm_id)
except Exception as e:
self.log.error("Error getting ee ip ee: {}".format(e))
try:
# Obtain cluster_uuid
- system_cluster_uuid = self._get_system_cluster_id()
+ system_cluster_uuid = await self._get_system_cluster_id()
# Get helm_id
- namespace, helm_id = self._get_ee_id_parts(ee_id)
+ version, namespace, helm_id = self._get_ee_id_parts(ee_id)
- # Uninstall chart
- await self._k8sclusterhelm.uninstall(system_cluster_uuid, helm_id)
+ # Uninstall chart; for backward compatibility, assume helm-v2 when the
+ # ee_id carries no version prefix
+ if version == "helm-v3":
+ await self._k8sclusterhelm3.uninstall(system_cluster_uuid, helm_id)
+ else:
+ await self._k8sclusterhelm2.uninstall(system_cluster_uuid, helm_id)
self.log.info("ee_id: {} deleted".format(ee_id))
except N2VCException:
raise
except Exception as e:
self.log.error("Error writing detailedStatus to database: {}".format(e))
- def _get_system_cluster_id(self):
+ async def _get_system_cluster_id(self):
if not self._system_cluster_id:
db_k8cluster = self.db.get_one("k8sclusters", {"name": self._KUBECTL_OSM_CLUSTER_NAME})
- k8s_hc_id = deep_get(db_k8cluster, ("_admin", "helm-chart", "id"))
+ k8s_hc_id = deep_get(db_k8cluster, ("_admin", "helm-chart-v3", "id"))
if not k8s_hc_id:
- self.log.error("osm system cluster has not been properly initialized for helm connector, "
- "helm-chart id is not defined")
- raise N2VCException("osm system cluster has not been properly initialized for helm connector")
+ try:
+ # backward compatibility for existing clusters that have not been initialized for helm v3
+ cluster_id = db_k8cluster.get("_id")
+ k8s_credentials = yaml.safe_dump(db_k8cluster.get("credentials"))
+ k8s_hc_id, uninstall_sw = await self._k8sclusterhelm3.init_env(k8s_credentials,
+ reuse_cluster_uuid=cluster_id)
+ db_k8scluster_update = {"_admin.helm-chart-v3.error_msg": None,
+ "_admin.helm-chart-v3.id": k8s_hc_id,
+ "_admin.helm-chart-v3}.created": uninstall_sw,
+ "_admin.helm-chart-v3.operationalState": "ENABLED"}
+ self.update_db_2("k8sclusters", cluster_id, db_k8scluster_update)
+ except Exception as e:
+ self.log.error("error initializing helm-v3 cluster: {}".format(str(e)))
+ raise N2VCException("K8s system cluster '{}' has not been initialized for helm-chart-v3".format(
+ cluster_id))
self._system_cluster_id = k8s_hc_id
return self._system_cluster_id
def _get_ee_id_parts(self, ee_id):
- namespace, _, helm_id = ee_id.partition('.')
- return namespace, helm_id
+ """
+ Parses ee_id stored at database that can be either 'version:namespace.helm_id' or only
+ namespace.helm_id for backward compatibility
+ If exists helm version can be helm-v3 or helm (helm-v2 old version)
+ """
+ version, _, part_id = ee_id.rpartition(':')
+ namespace, _, helm_id = part_id.rpartition('.')
+ return version, namespace, helm_id
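+ # Illustrative (not part of the patch), given str.rpartition semantics:
+ #   _get_ee_id_parts("helm-v3:osm.helm_sample_charm_0001")
+ #     -> ("helm-v3", "osm", "helm_sample_charm_0001")
+ #   _get_ee_id_parts("osm.helm_sample_charm_0001")  # legacy helm-v2 record
+ #     -> ("", "osm", "helm_sample_charm_0001")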
from osm_lcm.ng_ro import NgRoClient, NgRoException
from osm_lcm.lcm_utils import LcmException, LcmExceptionNoMgmtIP, LcmBase, deep_get, get_iterable, populate_dict
from n2vc.k8s_helm_conn import K8sHelmConnector
+from n2vc.k8s_helm3_conn import K8sHelm3Connector
from n2vc.k8s_juju_conn import K8sJujuConnector
from osm_common.dbbase import DbException
on_update_db=self._on_update_n2vc_db
)
- self.k8sclusterhelm = K8sHelmConnector(
+ self.k8sclusterhelm2 = K8sHelmConnector(
kubectl_command=self.vca_config.get("kubectlpath"),
helm_command=self.vca_config.get("helmpath"),
fs=self.fs,
on_update_db=None,
)
+ self.k8sclusterhelm3 = K8sHelm3Connector(
+ kubectl_command=self.vca_config.get("kubectlpath"),
+ helm_command=self.vca_config.get("helm3path"),
+ fs=self.fs,
+ log=self.logger,
+ db=self.db,
+ on_update_db=None,
+ )
+
self.k8sclusterjuju = K8sJujuConnector(
kubectl_command=self.vca_config.get("kubectlpath"),
juju_command=self.vca_config.get("jujupath"),
)
self.k8scluster_map = {
- "helm-chart": self.k8sclusterhelm,
- "chart": self.k8sclusterhelm,
+ "helm-chart": self.k8sclusterhelm2,
+ "helm-chart-v3": self.k8sclusterhelm3,
+ "chart": self.k8sclusterhelm3,
"juju-bundle": self.k8sclusterjuju,
"juju": self.k8sclusterjuju,
}
"lxc_proxy_charm": self.n2vc,
"native_charm": self.n2vc,
"k8s_proxy_charm": self.n2vc,
- "helm": self.conn_helm_ee
+ "helm": self.conn_helm_ee,
+ "helm-v3": self.conn_helm_ee
}
self.prometheus = prometheus
ee_id = vca_deployed.get("ee_id")
# create or register execution environment in VCA
- if vca_type in ("lxc_proxy_charm", "k8s_proxy_charm", "helm"):
+ if vca_type in ("lxc_proxy_charm", "k8s_proxy_charm", "helm", "helm-v3"):
self._write_configuration_status(
nsr_id=nsr_id,
# if SSH access is required, then get execution environment SSH public
# if native charm we have waited already to VM be UP
- if vca_type in ("k8s_proxy_charm", "lxc_proxy_charm", "helm"):
+ if vca_type in ("k8s_proxy_charm", "lxc_proxy_charm", "helm", "helm-v3"):
pub_key = None
user = None
# self.logger.debug("get ssh key block")
# TODO register in database that primitive is done
# STEP 7 Configure metrics
- if vca_type == "helm":
+ if vca_type == "helm" or vca_type == "helm-v3":
prometheus_jobs = await self.add_prometheus_metrics(
ee_id=ee_id,
artifact_path=artifact_path,
async def deploy_kdus(self, logging_text, nsr_id, nslcmop_id, db_vnfrs, db_vnfds, task_instantiation_info):
# Launch kdus if present in the descriptor
- k8scluster_id_2_uuic = {"helm-chart": {}, "juju-bundle": {}}
+ k8scluster_id_2_uuic = {"helm-chart-v3": {}, "helm-chart": {}, "juju-bundle": {}}
async def _get_cluster_id(cluster_id, cluster_type):
nonlocal k8scluster_id_2_uuic
k8s_id = deep_get(db_k8scluster, ("_admin", cluster_type, "id"))
if not k8s_id:
- raise LcmException("K8s cluster '{}' has not been initialized for '{}'".format(cluster_id,
- cluster_type))
+ if cluster_type == "helm-chart-v3":
+ try:
+ # backward compatibility for existing clusters that have not been initialized for helm v3
+ k8s_credentials = yaml.safe_dump(db_k8scluster.get("credentials"))
+ k8s_id, uninstall_sw = await self.k8sclusterhelm3.init_env(k8s_credentials,
+ reuse_cluster_uuid=cluster_id)
+ db_k8scluster_update = {}
+ db_k8scluster_update["_admin.helm-chart-v3.error_msg"] = None
+ db_k8scluster_update["_admin.helm-chart-v3.id"] = k8s_id
+ db_k8scluster_update["_admin.helm-chart-v3.created"] = uninstall_sw
+ db_k8scluster_update["_admin.helm-chart-v3.operationalState"] = "ENABLED"
+ self.update_db_2("k8sclusters", cluster_id, db_k8scluster_update)
+ except Exception as e:
+ self.logger.error(logging_text + "error initializing helm-v3 cluster: {}".format(str(e)))
+ raise LcmException("K8s cluster '{}' has not been initialized for '{}'".format(cluster_id,
+ cluster_type))
+ else:
+ raise LcmException("K8s cluster '{}' has not been initialized for '{}'".
+ format(cluster_id, cluster_type))
k8scluster_id_2_uuic[cluster_type][cluster_id] = k8s_id
return k8s_id
index = 0
updated_cluster_list = []
+ updated_v3_cluster_list = []
for vnfr_data in db_vnfrs.values():
for kdu_index, kdur in enumerate(get_iterable(vnfr_data, "kdur")):
namespace = kdur.get("k8s-namespace")
if kdur.get("helm-chart"):
kdumodel = kdur["helm-chart"]
- k8sclustertype = "helm-chart"
+ # Default to helm-chart-v3; use helm-chart (v2) only when helm-version is "v2"
+ k8sclustertype = "helm-chart-v3"
+ self.logger.debug("kdur: {}".format(kdur))
+ if kdur.get("helm-version") == "v2":
+     k8sclustertype = "helm-chart"
elif kdur.get("juju-bundle"):
kdumodel = kdur["juju-bundle"]
k8sclustertype = "juju-bundle"
cluster_uuid = await _get_cluster_id(k8s_cluster_id, k8sclustertype)
# Synchronize repos
- if k8sclustertype == "helm-chart" and cluster_uuid not in updated_cluster_list:
+ if (k8sclustertype == "helm-chart" and cluster_uuid not in updated_cluster_list)\
+ or (k8sclustertype == "helm-chart-v3" and cluster_uuid not in updated_v3_cluster_list):
del_repo_list, added_repo_dict = await asyncio.ensure_future(
- self.k8sclusterhelm.synchronize_repos(cluster_uuid=cluster_uuid))
+ self.k8scluster_map[k8sclustertype].synchronize_repos(cluster_uuid=cluster_uuid))
if del_repo_list or added_repo_dict:
- unset = {'_admin.helm_charts_added.' + item: None for item in del_repo_list}
- updated = {'_admin.helm_charts_added.' +
- item: name for item, name in added_repo_dict.items()}
- self.logger.debug(logging_text + "repos synchronized on k8s cluster '{}' to_delete: {}, "
- "to_add: {}".format(k8s_cluster_id, del_repo_list,
- added_repo_dict))
+ if k8sclustertype == "helm-chart":
+ unset = {'_admin.helm_charts_added.' + item: None for item in del_repo_list}
+ updated = {'_admin.helm_charts_added.' +
+ item: name for item, name in added_repo_dict.items()}
+ updated_cluster_list.append(cluster_uuid)
+ elif k8sclustertype == "helm-chart-v3":
+ unset = {'_admin.helm_charts_v3_added.' + item: None for item in del_repo_list}
+ updated = {'_admin.helm_charts_v3_added.' +
+ item: name for item, name in added_repo_dict.items()}
+ updated_v3_cluster_list.append(cluster_uuid)
+ self.logger.debug(logging_text + "repos synchronized on k8s cluster "
+ "'{}' to_delete: {}, to_add: {}".
+ format(k8s_cluster_id, del_repo_list, added_repo_dict))
self.db.set_one("k8sclusters", {"_id": k8s_cluster_id}, updated, unset=unset)
- updated_cluster_list.append(cluster_uuid)
# Instantiate kdu
step = "Instantiating KDU {}.{} in k8s cluster {}".format(vnfr_data["member-vnf-index-ref"],
vca_type = "native_charm"
elif ee_item.get("helm-chart"):
vca_name = ee_item['helm-chart']
- vca_type = "helm"
+ if ee_item.get("helm-version") and ee_item.get("helm-version") == "v2":
+ vca_type = "helm"
+ else:
+ vca_type = "helm-v3"
else:
self.logger.debug(logging_text + "skipping non juju neither charm configuration")
continue
vca.get("needed_terminate"))
# For helm we must destroy_ee. Also for native_charm, as juju_model cannot be deleted if there are
# pending native charms
- destroy_ee = True if vca_type in ("helm", "native_charm") else False
+ destroy_ee = vca_type in ("helm", "helm-v3", "native_charm")
# self.logger.debug(logging_text + "vca_index: {}, ee_id: {}, vca_type: {} destroy_ee: {}".format(
# vca_index, vca.get("ee_id"), vca_type, destroy_ee))
task = asyncio.ensure_future(
helm-chart:
id: 73d96432-d692-40d2-8440-e0c73aee209c
created: True
+ helm-chart-v3:
+ id: 73d96432-d692-40d2-8440-e0c73aee209c
+ created: True
_id: e7169dab-f71a-4f1f-b82b-432605e8c4b3
credentials:
apiVersion: v1
self.db = Mock(DbMemory())
self.fs = asynctest.Mock(FsLocal())
self.fs.path = "/app/storage"
- vca_config = {}
+ vca_config = {
+ "helmpath": "/usr/local/bin/helm",
+ "helm3path": "/usr/local/bin/helm3",
+ "kubectlpath": "/usr/bin/kubectl"
+ }
lcm_helm_conn.K8sHelmConnector = asynctest.Mock(lcm_helm_conn.K8sHelmConnector)
+ lcm_helm_conn.K8sHelm3Connector = asynctest.Mock(lcm_helm_conn.K8sHelm3Connector)
self.helm_conn = LCMHelmConn(self.db, self.fs, loop=self.loop, vca_config=vca_config, log=self.logger)
@asynctest.fail_on(active_handles=True)
db_dict = {}
artifact_path = "helm_sample_charm"
helm_chart_id = "helm_sample_charm_0001"
- self.helm_conn._k8sclusterhelm.install = asynctest.CoroutineMock(return_value=helm_chart_id)
- self.db.get_one.return_value = {"_admin": {"helm-chart": {"id": "myk8s_id"}}}
- ee_id, _ = await self.helm_conn.create_execution_environment(namespace, db_dict, artifact_path=artifact_path)
- self.assertEqual(ee_id, "{}.{}".format("osm", helm_chart_id),
- "Check ee_id format: <default namespace>.<helm_chart-id>")
- self.helm_conn._k8sclusterhelm.install.assert_called_once_with("myk8s_id",
- kdu_model="/app/storage/helm_sample_charm",
- namespace="osm", db_dict=db_dict,
- params=None, timeout=None)
+ self.helm_conn._k8sclusterhelm3.install = asynctest.CoroutineMock(return_value=helm_chart_id)
+ self.db.get_one.return_value = {"_admin": {"helm-chart-v3": {"id": "myk8s_id"}}}
+ ee_id, _ = await self.helm_conn.create_execution_environment(namespace,
+ db_dict,
+ artifact_path=artifact_path,
+ vca_type="helm-v3")
+ self.assertEqual(ee_id, "{}:{}.{}".format("helm-v3", "osm", helm_chart_id),
+ "Check ee_id format: <helm-version>:<default namespace>.<helm_chart-id>")
+ self.helm_conn._k8sclusterhelm3.install.assert_called_once_with("myk8s_id",
+ kdu_model="/app/storage/helm_sample_charm",
+ namespace="osm", db_dict=db_dict,
+ params=None, timeout=None)
@asynctest.fail_on(active_handles=True)
async def test_get_ee_ssh_public__key(self):
@asynctest.fail_on(active_handles=True)
async def test_delete_execution_environment(self):
- ee_id = "osm.helm_sample_charm_0001"
- self.db.get_one.return_value = {"_admin": {"helm-chart": {"id": "myk8s_id"}}}
- self.helm_conn._k8sclusterhelm.uninstall = asynctest.CoroutineMock()
+ ee_id = "helm-v3:osm.helm_sample_charm_0001"
+ self.db.get_one.return_value = {"_admin": {"helm-chart-v3": {"id": "myk8s_id"}}}
+ self.helm_conn._k8sclusterhelm3.uninstall = asynctest.CoroutineMock(return_value="")
await self.helm_conn.delete_execution_environment(ee_id)
- self.helm_conn._k8sclusterhelm.uninstall.assert_called_once_with("myk8s_id", "helm_sample_charm_0001")
+ self.helm_conn._k8sclusterhelm3.uninstall.assert_called_once_with("myk8s_id", "helm_sample_charm_0001")
if __name__ == '__main__':
from osm_lcm.lcm_utils import TaskRegistry
from osm_lcm.ROclient import ROClient
from uuid import uuid4
-# from asynctest.mock import patch
from osm_lcm.tests import test_db_descriptors as descriptors
"secret": getenv("OSMLCM_VCA_SECRET", "vca"),
"public_key": getenv("OSMLCM_VCA_PUBKEY", None),
'ca_cert': getenv("OSMLCM_VCA_CACERT", None),
- 'apiproxy': getenv("OSMLCM_VCA_APIPROXY", "192.168.1.1")
+ 'apiproxy': getenv("OSMLCM_VCA_APIPROXY", "192.168.1.1"),
},
"ro_config": {
"uri": "http://{}:{}/openmano".format(getenv("OSMLCM_RO_HOST", "ro"),
if not getenv("OSMLCMTEST_VCA_K8s_NOMOCK"):
ns.K8sJujuConnector = asynctest.MagicMock(ns.K8sJujuConnector)
ns.K8sHelmConnector = asynctest.MagicMock(ns.K8sHelmConnector)
+ ns.K8sHelm3Connector = asynctest.MagicMock(ns.K8sHelm3Connector)
if not getenv("OSMLCMTEST_VCA_NOMOCK"):
ns.N2VCJujuConnector = asynctest.MagicMock(ns.N2VCJujuConnector)
self.my_ns.n2vc.install_configuration_sw = asynctest.CoroutineMock(return_value=pub_key)
self.my_ns.n2vc.get_ee_ssh_public__key = asynctest.CoroutineMock(return_value=pub_key)
self.my_ns.n2vc.exec_primitive = asynctest.CoroutineMock(side_effect=self._return_uuid)
self.my_ns.n2vc.GetPrimitiveStatus = asynctest.CoroutineMock(return_value="completed")
self.my_ns.n2vc.GetPrimitiveOutput = asynctest.CoroutineMock(return_value={"result": "ok",
"pubkey": pub_key})
db_vnfds = {db_vnfd["_id"]: db_vnfd}
task_register = {}
logging_text = "KDU"
- self.my_ns.k8sclusterhelm.install = asynctest.CoroutineMock(return_value="k8s_id")
- self.my_ns.k8sclusterhelm.synchronize_repos = asynctest.CoroutineMock(return_value=("", ""))
- self.my_ns.k8sclusterhelm.get_services = asynctest.CoroutineMock(return_value=([]))
+ self.my_ns.k8sclusterhelm3.install = asynctest.CoroutineMock(return_value="k8s_id")
+ self.my_ns.k8sclusterhelm3.synchronize_repos = asynctest.CoroutineMock(return_value=("", ""))
+ self.my_ns.k8sclusterhelm3.get_services = asynctest.CoroutineMock(return_value=([]))
await self.my_ns.deploy_kdus(logging_text, nsr_id, nslcmop_id, db_vnfrs, db_vnfds, task_register)
await asyncio.wait(list(task_register.keys()), timeout=100)
db_nsr = self.db.get_list("nsrs")[1]
self.assertIsInstance(db_nsr["_admin"]["deployed"]["K8s"], list, "K8s entry is not of type list")
self.assertEqual(len(db_nsr["_admin"]["deployed"]["K8s"]), 2, "K8s entry should contain two deployed KDUs")
k8s_instace_info = {"kdu-instance": "k8s_id", "k8scluster-uuid": "73d96432-d692-40d2-8440-e0c73aee209c",
- "k8scluster-type": "helm-chart",
+ "k8scluster-type": "helm-chart-v3",
"kdu-name": "ldap", "kdu-model": "stable/openldap:1.2.1",
"member-vnf-index": "multikdu", "namespace": None}
from osm_lcm import ROclient
from osm_lcm.lcm_utils import LcmException, LcmBase, deep_get
from n2vc.k8s_helm_conn import K8sHelmConnector
+from n2vc.k8s_helm3_conn import K8sHelm3Connector
from n2vc.k8s_juju_conn import K8sJujuConnector
from n2vc.exceptions import K8sException, N2VCException
from osm_common.dbbase import DbException
self.fs = fs
self.db = db
- self.helm_k8scluster = K8sHelmConnector(
+ self.helm2_k8scluster = K8sHelmConnector(
kubectl_command=self.vca_config.get("kubectlpath"),
helm_command=self.vca_config.get("helmpath"),
fs=self.fs,
on_update_db=None
)
+ self.helm3_k8scluster = K8sHelm3Connector(
+ kubectl_command=self.vca_config.get("kubectlpath"),
+ helm_command=self.vca_config.get("helm3path"),
+ fs=self.fs,
+ log=self.logger,
+ db=self.db,
+ on_update_db=None
+ )
+
self.juju_k8scluster = K8sJujuConnector(
kubectl_command=self.vca_config.get("kubectlpath"),
juju_command=self.vca_config.get("jujupath"),
vca_config=self.vca_config,
)
self.k8s_map = {
- "helm-chart": self.helm_k8scluster,
+ "helm-chart": self.helm2_k8scluster,
+ "helm-chart-v3": self.helm3_k8scluster,
"juju-bundle": self.juju_k8scluster,
}
task2name = {}
init_target = deep_get(db_k8scluster, ("_admin", "init"))
step = "Launching k8scluster init tasks"
- for task_name in ("helm-chart", "juju-bundle"):
+ for task_name in ("helm-chart", "juju-bundle", "helm-chart-v3"):
if init_target and task_name not in init_target:
continue
task = asyncio.ensure_future(self.k8s_map[task_name].init_env(k8s_credentials,
self.logger.debug(logging_text + step)
db_k8scluster = self.db.get_one("k8sclusters", {"_id": k8scluster_id})
k8s_hc_id = deep_get(db_k8scluster, ("_admin", "helm-chart", "id"))
+ k8s_h3c_id = deep_get(db_k8scluster, ("_admin", "helm-chart-v3", "id"))
k8s_jb_id = deep_get(db_k8scluster, ("_admin", "juju-bundle", "id"))
cluster_removed = True
if k8s_hc_id:
step = "Removing helm-chart '{}'".format(k8s_hc_id)
uninstall_sw = deep_get(db_k8scluster, ("_admin", "helm-chart", "created")) or False
- cluster_removed = await self.helm_k8scluster.reset(cluster_uuid=k8s_hc_id, uninstall_sw=uninstall_sw)
+ cluster_removed = await self.helm2_k8scluster.reset(cluster_uuid=k8s_hc_id, uninstall_sw=uninstall_sw)
db_k8scluster_update["_admin.helm-chart.id"] = None
db_k8scluster_update["_admin.helm-chart.operationalState"] = "DISABLED"
+ if k8s_h3c_id:
+     step = "Removing helm-chart-v3 '{}'".format(k8s_h3c_id)
+     uninstall_sw = deep_get(db_k8scluster, ("_admin", "helm-chart-v3", "created")) or False
+     cluster_removed = await self.helm3_k8scluster.reset(cluster_uuid=k8s_h3c_id, uninstall_sw=uninstall_sw)
+     db_k8scluster_update["_admin.helm-chart-v3.id"] = None
+     db_k8scluster_update["_admin.helm-chart-v3.operationalState"] = "DISABLED"
+
# Try to remove from cluster_inserted to clean old versions
if k8s_hc_id and cluster_removed:
step = "Removing k8scluster='{}' from k8srepos".format(k8scluster_id)
self.fs = fs
self.db = db
- self.k8srepo = K8sHelmConnector(
- kubectl_command=self.vca_config.get("kubectlpath"),
- helm_command=self.vca_config.get("helmpath"),
- fs=self.fs,
- log=self.logger,
- db=self.db,
- on_update_db=None
- )
-
super().__init__(db, msg, fs, self.logger)
async def create(self, k8srepo_content, order_id):