From 5f069332606e512f74791c6497af40326b611344 Mon Sep 17 00:00:00 2001 From: Gabriel Cuba Date: Tue, 25 Apr 2023 19:26:19 -0500 Subject: [PATCH 01/16] Feature 10957: Add methods for creation of namespace, secret and RBAC Change-Id: Iada9d3caa9e8a926e421fe96894be618f36fb37e Signed-off-by: Gabriel Cuba --- n2vc/k8s_helm3_conn.py | 2 + n2vc/k8s_helm_base_conn.py | 119 ++++++++++++++ n2vc/kubectl.py | 179 +++++++++++++++++++- n2vc/tests/unit/test_kubectl.py | 282 ++++++++++++++++++++++++++++++++ 4 files changed, 579 insertions(+), 3 deletions(-) diff --git a/n2vc/k8s_helm3_conn.py b/n2vc/k8s_helm3_conn.py index 037ed66..4baadae 100644 --- a/n2vc/k8s_helm3_conn.py +++ b/n2vc/k8s_helm3_conn.py @@ -118,6 +118,7 @@ class K8sHelm3Connector(K8sHelmBaseConnector): if namespace and namespace != "kube-system": if not await self._namespace_exists(cluster_uuid, namespace): try: + # TODO: refactor to use kubernetes API client await self._create_namespace(cluster_uuid, namespace) except Exception as e: if not await self._namespace_exists(cluster_uuid, namespace): @@ -314,6 +315,7 @@ class K8sHelm3Connector(K8sHelmBaseConnector): if namespace != "kube-system": namespaces = await self._get_namespaces(cluster_id) if namespace not in namespaces: + # TODO: refactor to use kubernetes API client await self._create_namespace(cluster_id, namespace) repo_list = await self.repo_list(cluster_id) diff --git a/n2vc/k8s_helm_base_conn.py b/n2vc/k8s_helm_base_conn.py index 5588c3d..34d3129 100644 --- a/n2vc/k8s_helm_base_conn.py +++ b/n2vc/k8s_helm_base_conn.py @@ -2113,3 +2113,122 @@ class K8sHelmBaseConnector(K8sConnector): ) kubectl = Kubectl(config_file=paths["kube_config"]) await kubectl.delete_certificate(namespace, certificate_name) + + async def create_namespace( + self, + namespace, + cluster_uuid, + ): + """ + Create a namespace in a specific cluster + + :param namespace: namespace to be created + :param cluster_uuid: K8s cluster uuid used to retrieve kubeconfig + :returns: None + """ + paths, env = self._init_paths_env( + cluster_name=cluster_uuid, create_if_not_exist=True + ) + kubectl = Kubectl(config_file=paths["kube_config"]) + await kubectl.create_namespace( + name=namespace, + ) + + async def delete_namespace( + self, + namespace, + cluster_uuid, + ): + """ + Delete a namespace in a specific cluster + + :param namespace: namespace to be deleted + :param cluster_uuid: K8s cluster uuid used to retrieve kubeconfig + :returns: None + """ + paths, env = self._init_paths_env( + cluster_name=cluster_uuid, create_if_not_exist=True + ) + kubectl = Kubectl(config_file=paths["kube_config"]) + await kubectl.delete_namespace( + name=namespace, + ) + + async def copy_secret_data( + self, + src_secret: str, + dst_secret: str, + cluster_uuid: str, + data_key: str, + src_namespace: str = "osm", + dst_namespace: str = "osm", + ): + """ + Copy a single key and value from an existing secret to a new one + + :param src_secret: name of the existing secret + :param dst_secret: name of the new secret + :param cluster_uuid: K8s cluster uuid used to retrieve kubeconfig + :param data_key: key of the existing secret to be copied + :param src_namespace: Namespace of the existing secret + :param dst_namespace: Namespace of the new secret + :returns: None + """ + paths, env = self._init_paths_env( + cluster_name=cluster_uuid, create_if_not_exist=True + ) + kubectl = Kubectl(config_file=paths["kube_config"]) + secret_data = await kubectl.get_secret_content( + name=src_secret, + namespace=src_namespace, + ) + # Only the corresponding 
data_key value needs to be copy + data = {data_key: secret_data.get(data_key)} + await kubectl.create_secret( + name=dst_secret, + data=data, + namespace=dst_namespace, + secret_type="Opaque", + ) + + async def setup_default_rbac( + self, + name, + namespace, + cluster_uuid, + api_groups, + resources, + verbs, + service_account, + ): + """ + Create a basic RBAC for a new namespace. + + :param name: name of both Role and Role Binding + :param namespace: K8s namespace + :param cluster_uuid: K8s cluster uuid used to retrieve kubeconfig + :param api_groups: Api groups to be allowed in Policy Rule + :param resources: Resources to be allowed in Policy Rule + :param verbs: Verbs to be allowed in Policy Rule + :param service_account: Service Account name used to bind the Role + :returns: None + """ + paths, env = self._init_paths_env( + cluster_name=cluster_uuid, create_if_not_exist=True + ) + kubectl = Kubectl(config_file=paths["kube_config"]) + await kubectl.create_role( + name=name, + labels={}, + namespace=namespace, + api_groups=api_groups, + resources=resources, + verbs=verbs, + ) + await kubectl.create_role_binding( + name=name, + labels={}, + namespace=namespace, + role_name=name, + sa_name=service_account, + ) diff --git a/n2vc/kubectl.py b/n2vc/kubectl.py index 3fe6b53..7cc6ac2 100644 --- a/n2vc/kubectl.py +++ b/n2vc/kubectl.py @@ -25,14 +25,17 @@ from kubernetes import client, config from kubernetes.client.api import VersionApi from kubernetes.client.models import ( V1ClusterRole, + V1Role, V1ObjectMeta, V1PolicyRule, V1ServiceAccount, V1ClusterRoleBinding, + V1RoleBinding, V1RoleRef, V1Subject, V1Secret, V1SecretReference, + V1Namespace, ) from kubernetes.client.rest import ApiException from n2vc.libjuju import retry_callback @@ -163,9 +166,7 @@ class Kubectl: ) if len(cluster_roles.items) > 0: - raise Exception( - "Cluster role with metadata.name={} already exists".format(name) - ) + raise Exception("Role with metadata.name={} already exists".format(name)) metadata = V1ObjectMeta(name=name, labels=labels, namespace=namespace) # Cluster role @@ -179,6 +180,46 @@ class Kubectl: self.clients[RBAC_CLIENT].create_cluster_role(cluster_role) + async def create_role( + self, + name: str, + labels: Dict[str, str], + api_groups: list, + resources: list, + verbs: list, + namespace: str, + ): + """ + Create a role with one PolicyRule + + :param: name: Name of the namespaced Role + :param: labels: Labels for namespaced Role metadata + :param: api_groups: List with api-groups allowed in the policy rule + :param: resources: List with resources allowed in the policy rule + :param: verbs: List with verbs allowed in the policy rule + :param: namespace: Kubernetes namespace for Role metadata + + :return: None + """ + + roles = self.clients[RBAC_CLIENT].list_namespaced_role( + namespace, field_selector="metadata.name={}".format(name) + ) + + if len(roles.items) > 0: + raise Exception("Role with metadata.name={} already exists".format(name)) + + metadata = V1ObjectMeta(name=name, labels=labels, namespace=namespace) + + role = V1Role( + metadata=metadata, + rules=[ + V1PolicyRule(api_groups=api_groups, resources=resources, verbs=verbs), + ], + ) + + self.clients[RBAC_CLIENT].create_namespaced_role(namespace, role) + def delete_cluster_role(self, name: str): """ Delete a cluster role @@ -308,6 +349,44 @@ class Kubectl: ) self.clients[RBAC_CLIENT].create_cluster_role_binding(role_binding) + async def create_role_binding( + self, + name: str, + role_name: str, + sa_name: str, + labels: Dict[str, str], + 
namespace: str, + ): + """ + Create a cluster role binding + + :param: name: Name of the namespaced Role Binding + :param: role_name: Name of the namespaced Role to be bound + :param: sa_name: Name of the Service Account to be bound + :param: labels: Labels for Role Binding metadata + :param: namespace: Kubernetes namespace for Role Binding metadata + + :return: None + """ + role_bindings = self.clients[RBAC_CLIENT].list_namespaced_role_binding( + namespace, field_selector="metadata.name={}".format(name) + ) + if len(role_bindings.items) > 0: + raise Exception( + "Role Binding with metadata.name={} already exists".format(name) + ) + + role_binding = V1RoleBinding( + metadata=V1ObjectMeta(name=name, labels=labels), + role_ref=V1RoleRef(kind="Role", name=role_name, api_group=""), + subjects=[ + V1Subject(kind="ServiceAccount", name=sa_name, namespace=namespace) + ], + ) + self.clients[RBAC_CLIENT].create_namespaced_role_binding( + namespace, role_binding + ) + def delete_cluster_role_binding(self, name: str): """ Delete a cluster role binding @@ -351,6 +430,7 @@ class Kubectl: raise Exception( "Failed getting the secret from service account {}".format(name) ) + # TODO: refactor to use get_secret_content secret = v1_core.list_namespaced_secret( namespace, field_selector="metadata.name={}".format(secret_name) ).items[0] @@ -363,6 +443,53 @@ class Kubectl: base64.b64decode(client_certificate_data).decode("utf-8"), ) + @retry( + attempts=10, + delay=1, + fallback=Exception("Failed getting data from the secret"), + ) + async def get_secret_content( + self, + name: str, + namespace: str, + ) -> dict: + """ + Get secret data + + :param: name: Name of the secret + :param: namespace: Name of the namespace where the secret is stored + + :return: Dictionary with secret's data + """ + v1_core = self.clients[CORE_CLIENT] + + secret = v1_core.read_namespaced_secret(name, namespace) + + return secret.data + + @retry( + attempts=10, + delay=1, + fallback=Exception("Failed creating the secret"), + ) + async def create_secret( + self, name: str, data: dict, namespace: str, secret_type: str + ): + """ + Get secret data + + :param: name: Name of the secret + :param: data: Dict with data content. 
Values must be already base64 encoded + :param: namespace: Name of the namespace where the secret will be stored + :param: secret_type: Type of the secret, e.g., Opaque, kubernetes.io/service-account-token, kubernetes.io/tls + + :return: None + """ + v1_core = self.clients[CORE_CLIENT] + metadata = V1ObjectMeta(name=name, namespace=namespace) + secret = V1Secret(metadata=metadata, data=data, type=secret_type) + v1_core.create_namespaced_secret(namespace, secret) + async def create_certificate( self, namespace: str, @@ -441,3 +568,49 @@ class Kubectl: self.logger.warning("Certificate already deleted: {}".format(e)) else: raise e + + @retry( + attempts=10, + delay=1, + fallback=Exception("Failed creating the namespace"), + ) + async def create_namespace(self, name: str): + """ + Create a namespace + + :param: name: Name of the namespace to be created + + """ + v1_core = self.clients[CORE_CLIENT] + metadata = V1ObjectMeta(name=name) + namespace = V1Namespace( + metadata=metadata, + ) + + try: + v1_core.create_namespace(namespace) + self.logger.debug("Namespace created: {}".format(name)) + except ApiException as e: + info = json.loads(e.body) + if info.get("reason").lower() == "alreadyexists": + self.logger.warning("Namespace already exists: {}".format(e)) + else: + raise e + + @retry( + attempts=10, + delay=1, + fallback=Exception("Failed deleting the namespace"), + ) + async def delete_namespace(self, name: str): + """ + Delete a namespace + + :param: name: Name of the namespace to be deleted + + """ + try: + self.clients[CORE_CLIENT].delete_namespace(name) + except ApiException as e: + if e.reason == "Not Found": + self.logger.warning("Namespace already deleted: {}".format(e)) diff --git a/n2vc/tests/unit/test_kubectl.py b/n2vc/tests/unit/test_kubectl.py index d970bf0..6ba074f 100644 --- a/n2vc/tests/unit/test_kubectl.py +++ b/n2vc/tests/unit/test_kubectl.py @@ -24,6 +24,12 @@ from kubernetes.client import ( V1Secret, V1ServiceAccount, V1SecretReference, + V1Role, + V1RoleBinding, + V1RoleRef, + V1Subject, + V1PolicyRule, + V1Namespace, ) @@ -93,6 +99,24 @@ class FakeK8sSecretList: return self._items +class FakeK8sRoleList: + def __init__(self, items=[]): + self._items = items + + @property + def items(self): + return self._items + + +class FakeK8sRoleBindingList: + def __init__(self, items=[]): + self._items = items + + @property + def items(self): + return self._items + + class FakeK8sVersionApiCode: def __init__(self, major: str, minor: str): self._major = major @@ -555,3 +579,261 @@ class DeleteCertificateClass(asynctest.TestCase): namespace=self.namespace, object_name=self.object_name, ) + + +@mock.patch("kubernetes.client.RbacAuthorizationV1Api.create_namespaced_role") +@mock.patch("kubernetes.client.RbacAuthorizationV1Api.list_namespaced_role") +class CreateRoleClass(asynctest.TestCase): + @mock.patch("kubernetes.config.load_kube_config") + def setUp(self, mock_load_kube_config): + super(CreateRoleClass, self).setUp() + self.name = "role" + self.namespace = "osm" + self.resources = ["*"] + self.api_groups = ["*"] + self.verbs = ["*"] + self.labels = {} + self.kubectl = Kubectl() + + @asynctest.fail_on(active_handles=True) + async def assert_create_role(self, mock_create_role): + metadata = V1ObjectMeta( + name=self.name, labels=self.labels, namespace=self.namespace + ) + role = V1Role( + metadata=metadata, + rules=[ + V1PolicyRule( + api_groups=self.api_groups, + resources=self.resources, + verbs=self.verbs, + ), + ], + ) + await self.kubectl.create_role( + namespace=self.namespace, 
+ api_groups=self.api_groups, + name=self.name, + resources=self.resources, + verbs=self.verbs, + labels=self.labels, + ) + mock_create_role.assert_called_once_with(self.namespace, role) + + @asynctest.fail_on(active_handles=True) + async def test_raise_exception_if_role_already_exists( + self, + mock_list_role, + mock_create_role, + ): + mock_list_role.return_value = FakeK8sRoleList(items=[1]) + with self.assertRaises(Exception) as context: + await self.kubectl.create_role( + self.name, + self.labels, + self.api_groups, + self.resources, + self.verbs, + self.namespace, + ) + self.assertTrue( + "Role with metadata.name={} already exists".format(self.name) + in str(context.exception) + ) + mock_create_role.assert_not_called() + + +@mock.patch("kubernetes.client.RbacAuthorizationV1Api.create_namespaced_role_binding") +@mock.patch("kubernetes.client.RbacAuthorizationV1Api.list_namespaced_role_binding") +class CreateRoleBindingClass(asynctest.TestCase): + @mock.patch("kubernetes.config.load_kube_config") + def setUp(self, mock_load_kube_config): + super(CreateRoleBindingClass, self).setUp() + self.name = "rolebinding" + self.namespace = "osm" + self.role_name = "role" + self.sa_name = "Default" + self.labels = {} + self.kubectl = Kubectl() + + @asynctest.fail_on(active_handles=True) + async def assert_create_role_binding(self, mock_create_role_binding): + role_binding = V1RoleBinding( + metadata=V1ObjectMeta(name=self.name, labels=self.labels), + role_ref=V1RoleRef(kind="Role", name=self.role_name, api_group=""), + subjects=[ + V1Subject( + kind="ServiceAccount", + name=self.sa_name, + namespace=self.namespace, + ) + ], + ) + await self.kubectl.create_role_binding( + namespace=self.namespace, + role_name=self.role_name, + name=self.name, + sa_name=self.sa_name, + labels=self.labels, + ) + mock_create_role_binding.assert_called_once_with(self.namespace, role_binding) + + @asynctest.fail_on(active_handles=True) + async def test_raise_exception_if_role_binding_already_exists( + self, + mock_list_role_binding, + mock_create_role_binding, + ): + mock_list_role_binding.return_value = FakeK8sRoleBindingList(items=[1]) + with self.assertRaises(Exception) as context: + await self.kubectl.create_role_binding( + self.name, + self.role_name, + self.sa_name, + self.labels, + self.namespace, + ) + self.assertTrue( + "Role Binding with metadata.name={} already exists".format(self.name) + in str(context.exception) + ) + mock_create_role_binding.assert_not_called() + + +@mock.patch("kubernetes.client.CoreV1Api.create_namespaced_secret") +class CreateSecretClass(asynctest.TestCase): + @mock.patch("kubernetes.config.load_kube_config") + def setUp(self, mock_load_kube_config): + super(CreateSecretClass, self).setUp() + self.name = "secret" + self.namespace = "osm" + self.data = {"test": "1234"} + self.secret_type = "Opaque" + self.kubectl = Kubectl() + + @asynctest.fail_on(active_handles=True) + async def assert_create_secret(self, mock_create_secret): + secret_metadata = V1ObjectMeta(name=self.name, namespace=self.namespace) + secret = V1Secret( + metadata=secret_metadata, + data=self.data, + type=self.secret_type, + ) + await self.kubectl.create_secret( + namespace=self.namespace, + data=self.data, + name=self.name, + secret_type=self.secret_type, + ) + mock_create_secret.assert_called_once_with(self.namespace, secret) + + +@mock.patch("kubernetes.client.CoreV1Api.create_namespace") +class CreateNamespaceClass(asynctest.TestCase): + @mock.patch("kubernetes.config.load_kube_config") + def setUp(self, 
mock_load_kube_config): + super(CreateNamespaceClass, self).setUp() + self.namespace = "osm" + self.kubectl = Kubectl() + + @asynctest.fail_on(active_handles=True) + async def test_namespace_is_created( + self, + mock_create_namespace, + ): + metadata = V1ObjectMeta(name=self.namespace) + namespace = V1Namespace( + metadata=metadata, + ) + await self.kubectl.create_namespace( + name=self.namespace, + ) + mock_create_namespace.assert_called_once_with(namespace) + + @asynctest.fail_on(active_handles=True) + async def test_no_exception_if_alreadyexists( + self, + mock_create_namespace, + ): + api_exception = ApiException() + api_exception.body = '{"reason": "AlreadyExists"}' + self.kubectl.clients[CORE_CLIENT].create_namespace.side_effect = api_exception + raised = False + try: + await self.kubectl.create_namespace( + name=self.namespace, + ) + except Exception: + raised = True + self.assertFalse(raised, "An exception was raised") + + @asynctest.fail_on(active_handles=True) + async def test_other_exceptions( + self, + mock_create_namespace, + ): + self.kubectl.clients[CORE_CLIENT].create_namespace.side_effect = Exception() + with self.assertRaises(Exception): + await self.kubectl.create_namespace( + name=self.namespace, + ) + + +@mock.patch("kubernetes.client.CoreV1Api.delete_namespace") +class DeleteNamespaceClass(asynctest.TestCase): + @mock.patch("kubernetes.config.load_kube_config") + def setUp(self, mock_load_kube_config): + super(DeleteNamespaceClass, self).setUp() + self.namespace = "osm" + self.kubectl = Kubectl() + + @asynctest.fail_on(active_handles=True) + async def test_no_exception_if_notfound( + self, + mock_delete_namespace, + ): + api_exception = ApiException() + api_exception.body = '{"reason": "NotFound"}' + self.kubectl.clients[CORE_CLIENT].delete_namespace.side_effect = api_exception + raised = False + try: + await self.kubectl.delete_namespace( + name=self.namespace, + ) + except Exception: + raised = True + self.assertFalse(raised, "An exception was raised") + + @asynctest.fail_on(active_handles=True) + async def test_other_exceptions( + self, + mock_delete_namespace, + ): + self.kubectl.clients[CORE_CLIENT].delete_namespace.side_effect = Exception() + with self.assertRaises(Exception): + await self.kubectl.delete_namespace( + name=self.namespace, + ) + + +@mock.patch("kubernetes.client.CoreV1Api.read_namespaced_secret") +class GetSecretContentClass(asynctest.TestCase): + @mock.patch("kubernetes.config.load_kube_config") + def setUp(self, mock_load_kube_config): + super(GetSecretContentClass, self).setUp() + self.name = "my_secret" + self.namespace = "osm" + self.data = {"my_key": "my_value"} + self.type = "Opaque" + self.kubectl = Kubectl() + + @asynctest.fail_on(active_handles=True) + async def test_return_type_is_dict( + self, + mock_read_namespaced_secret, + ): + metadata = V1ObjectMeta(name=self.name, namespace=self.namespace) + secret = V1Secret(metadata=metadata, data=self.data, type=self.type) + mock_read_namespaced_secret.return_value = secret + content = await self.kubectl.get_secret_content(self.name, self.namespace) + assert type(content) is dict -- 2.17.1 From d21509c14b0bef94d5ace71a4b5c7592851b453d Mon Sep 17 00:00:00 2001 From: Gabriel Cuba Date: Wed, 17 May 2023 01:30:15 -0500 Subject: [PATCH 02/16] Feature 10948: Set labels to new namespaces Change-Id: I0f40d198d398c79f3e9badd8def8f884f60fb7f8 Signed-off-by: Gabriel Cuba --- n2vc/k8s_helm_base_conn.py | 5 ++++- n2vc/kubectl.py | 5 +++-- n2vc/tests/unit/test_kubectl.py | 17 ++++++++++++++++- 3 files 
changed, 23 insertions(+), 4 deletions(-) diff --git a/n2vc/k8s_helm_base_conn.py b/n2vc/k8s_helm_base_conn.py index 34d3129..2a588b4 100644 --- a/n2vc/k8s_helm_base_conn.py +++ b/n2vc/k8s_helm_base_conn.py @@ -2118,12 +2118,14 @@ class K8sHelmBaseConnector(K8sConnector): self, namespace, cluster_uuid, + labels, ): """ Create a namespace in a specific cluster - :param namespace: namespace to be created + :param namespace: Namespace to be created :param cluster_uuid: K8s cluster uuid used to retrieve kubeconfig + :param labels: Dictionary with labels for the new namespace :returns: None """ paths, env = self._init_paths_env( @@ -2132,6 +2134,7 @@ class K8sHelmBaseConnector(K8sConnector): kubectl = Kubectl(config_file=paths["kube_config"]) await kubectl.create_namespace( name=namespace, + labels=labels, ) async def delete_namespace( diff --git a/n2vc/kubectl.py b/n2vc/kubectl.py index 7cc6ac2..c16c95a 100644 --- a/n2vc/kubectl.py +++ b/n2vc/kubectl.py @@ -574,15 +574,16 @@ class Kubectl: delay=1, fallback=Exception("Failed creating the namespace"), ) - async def create_namespace(self, name: str): + async def create_namespace(self, name: str, labels: dict = None): """ Create a namespace :param: name: Name of the namespace to be created + :param: labels: Dictionary with labels for the new namespace """ v1_core = self.clients[CORE_CLIENT] - metadata = V1ObjectMeta(name=name) + metadata = V1ObjectMeta(name=name, labels=labels) namespace = V1Namespace( metadata=metadata, ) diff --git a/n2vc/tests/unit/test_kubectl.py b/n2vc/tests/unit/test_kubectl.py index 6ba074f..a6d02ff 100644 --- a/n2vc/tests/unit/test_kubectl.py +++ b/n2vc/tests/unit/test_kubectl.py @@ -734,6 +734,7 @@ class CreateNamespaceClass(asynctest.TestCase): def setUp(self, mock_load_kube_config): super(CreateNamespaceClass, self).setUp() self.namespace = "osm" + self.labels = {"key": "value"} self.kubectl = Kubectl() @asynctest.fail_on(active_handles=True) @@ -741,7 +742,21 @@ class CreateNamespaceClass(asynctest.TestCase): self, mock_create_namespace, ): - metadata = V1ObjectMeta(name=self.namespace) + metadata = V1ObjectMeta(name=self.namespace, labels=self.labels) + namespace = V1Namespace( + metadata=metadata, + ) + await self.kubectl.create_namespace( + name=self.namespace, + labels=self.labels, + ) + mock_create_namespace.assert_called_once_with(namespace) + + async def test_namespace_is_created_default_labels( + self, + mock_create_namespace, + ): + metadata = V1ObjectMeta(name=self.namespace, labels=None) namespace = V1Namespace( metadata=metadata, ) -- 2.17.1 From 21852a087bec9102e8ba762d7298e46bb8452e0e Mon Sep 17 00:00:00 2001 From: "selvi.j" Date: Thu, 27 Apr 2023 06:53:45 +0000 Subject: [PATCH 03/16] Coverity-CWE 330: Use of Insufficiently Random Values Added fix for CWE 330: Use of Insufficiently Random Value (Cryptographically weak PRNG) Change-Id: I927c9d22ca42b01f6ec1fe9fc41e4b962167b72f Signed-off-by: selvi.j --- n2vc/k8s_helm_base_conn.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/n2vc/k8s_helm_base_conn.py b/n2vc/k8s_helm_base_conn.py index 2a588b4..a897e0e 100644 --- a/n2vc/k8s_helm_base_conn.py +++ b/n2vc/k8s_helm_base_conn.py @@ -1962,7 +1962,7 @@ class K8sHelmBaseConnector(K8sConnector): self._init_paths_env(cluster_name=cluster_id, create_if_not_exist=True) def get_random_number(): - r = random.randrange(start=1, stop=99999999) + r = random.SystemRandom().randint(1, 99999999) s = str(r) while len(s) < 10: s = "0" + s @@ -2028,7 +2028,7 @@ class K8sHelmBaseConnector(K8sConnector): 
name += "-" def get_random_number(): - r = random.randrange(start=1, stop=99999999) + r = random.SystemRandom().randint(1, 99999999) s = str(r) s = s.rjust(10, "0") return s -- 2.17.1 From afde3be17b3f596a3d7996b1ebaf50c027bf624e Mon Sep 17 00:00:00 2001 From: "selvi.j" Date: Fri, 28 Apr 2023 06:17:26 +0000 Subject: [PATCH 04/16] Coverity-CWE 22: Improper Limitation of a Pathname to a Restricted Directory ('Path Traversal') Added fix for CWE 22: Improper Limitation of a Pathname to a Restricted Directory ('Path Traversal') Change-Id: I6e39b16dc2cc796eb91485ff6dcecef38b29377b Signed-off-by: selvi.j --- n2vc/n2vc_conn.py | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/n2vc/n2vc_conn.py b/n2vc/n2vc_conn.py index 4fa7e36..9e91a10 100644 --- a/n2vc/n2vc_conn.py +++ b/n2vc/n2vc_conn.py @@ -115,19 +115,27 @@ class N2VCConnector(abc.ABC, Loggable): self.log.warning("No HOME environment variable, using /tmp") homedir = "/tmp" sshdir = "{}/.ssh".format(homedir) + sshdir = os.path.realpath(os.path.normpath(os.path.abspath(sshdir))) if not os.path.exists(sshdir): os.mkdir(sshdir) self.private_key_path = "{}/id_n2vc_rsa".format(sshdir) + self.private_key_path = os.path.realpath( + os.path.normpath(os.path.abspath(self.private_key_path)) + ) self.public_key_path = "{}.pub".format(self.private_key_path) + self.public_key_path = os.path.realpath( + os.path.normpath(os.path.abspath(self.public_key_path)) + ) # If we don't have a key generated, then we have to generate it using ssh-keygen if not os.path.exists(self.private_key_path): - cmd = "ssh-keygen -t {} -b {} -N '' -f {}".format( + command = "ssh-keygen -t {} -b {} -N '' -f {}".format( "rsa", "4096", self.private_key_path ) # run command with arguments - subprocess.check_output(shlex.split(cmd)) + args = shlex.split(command) + subprocess.run(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE) # Read the public key. Only one public key (one line) in the file with open(self.public_key_path, "r") as file: -- 2.17.1 From 49fc01f07c72caa744ce95275462ac2e76a914f8 Mon Sep 17 00:00:00 2001 From: Guillermo Calvino Date: Mon, 26 Jun 2023 13:47:46 +0200 Subject: [PATCH 05/16] Update python-libjuju version to 2.9.42.4 Change-Id: I9770e1647ea715d205d06cd07fda1b4d244a673f Signed-off-by: Guillermo Calvino --- requirements.in | 2 +- requirements.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/requirements.in b/requirements.in index 0195351..68d077a 100644 --- a/requirements.in +++ b/requirements.in @@ -13,7 +13,7 @@ # limitations under the License. 
charset-normalizer google-auth<2.18.0 -juju==3.0.0 +juju==2.9.42.4 kubernetes motor pyasn1 diff --git a/requirements.txt b/requirements.txt index 53808d1..be6032c 100644 --- a/requirements.txt +++ b/requirements.txt @@ -42,7 +42,7 @@ google-auth==2.17.3 # kubernetes idna==3.4 # via requests -juju==3.0.0 +juju==2.9.42.4 # via -r requirements.in jujubundlelib==0.5.7 # via theblues -- 2.17.1 From de6984b39684c3a8587ea4111c757ab878942aba Mon Sep 17 00:00:00 2001 From: Daniel Arndt Date: Tue, 27 Jun 2023 16:42:41 -0300 Subject: [PATCH 06/16] Fix bug 2088 by quoting inputs for commands CVE-2022-35503 Change-Id: I392eda9138d399b956dd8072a082e15edab142b7 Signed-off-by: Daniel Arndt --- n2vc/k8s_helm3_conn.py | 46 ++++++++++++++++--------- n2vc/k8s_helm_base_conn.py | 59 ++++++++++++++++---------------- n2vc/k8s_helm_conn.py | 69 +++++++++++++++++++++----------------- n2vc/n2vc_conn.py | 9 +++-- 4 files changed, 103 insertions(+), 80 deletions(-) diff --git a/n2vc/k8s_helm3_conn.py b/n2vc/k8s_helm3_conn.py index 4baadae..675c851 100644 --- a/n2vc/k8s_helm3_conn.py +++ b/n2vc/k8s_helm3_conn.py @@ -20,6 +20,7 @@ # contact with: nfvlabs@tid.es ## from typing import Union +from shlex import quote import os import yaml @@ -258,7 +259,7 @@ class K8sHelm3Connector(K8sHelmBaseConnector): ) command = "{} --kubeconfig={} get namespaces -o=yaml".format( - self.kubectl_command, paths["kube_config"] + self.kubectl_command, quote(paths["kube_config"]) ) output, _rc = await self._local_async_exec( command=command, raise_exception_on_error=True, env=env @@ -279,7 +280,7 @@ class K8sHelm3Connector(K8sHelmBaseConnector): ) command = "{} --kubeconfig={} create namespace {}".format( - self.kubectl_command, paths["kube_config"], namespace + self.kubectl_command, quote(paths["kube_config"]), quote(namespace) ) _, _rc = await self._local_async_exec( command=command, raise_exception_on_error=True, env=env @@ -297,9 +298,11 @@ class K8sHelm3Connector(K8sHelmBaseConnector): ) command1 = "env KUBECONFIG={} {} get manifest {} --namespace={}".format( - kubeconfig, self._helm_command, kdu_instance, namespace + kubeconfig, self._helm_command, quote(kdu_instance), quote(namespace) + ) + command2 = "{} get --namespace={} -f -".format( + self.kubectl_command, quote(namespace) ) - command2 = "{} get --namespace={} -f -".format(self.kubectl_command, namespace) output, _rc = await self._local_async_exec_pipe( command1, command2, env=env, raise_exception_on_error=True ) @@ -364,7 +367,7 @@ class K8sHelm3Connector(K8sHelmBaseConnector): """ inspect_command = "{} show {} {}{} {}".format( - self._helm_command, show_command, kdu_model, repo_str, version + self._helm_command, show_command, quote(kdu_model), repo_str, version ) return inspect_command @@ -373,7 +376,11 @@ class K8sHelm3Connector(K8sHelmBaseConnector): ): get_command = ( "env KUBECONFIG={} {} get {} {} --namespace={} --output yaml".format( - kubeconfig, self._helm_command, get_command, kdu_instance, namespace + kubeconfig, + self._helm_command, + get_command, + quote(kdu_instance), + quote(namespace), ) ) return get_command @@ -398,7 +405,10 @@ class K8sHelm3Connector(K8sHelmBaseConnector): cluster_name=cluster_id, create_if_not_exist=True ) command = "env KUBECONFIG={} {} status {} --namespace={} --output yaml".format( - paths["kube_config"], self._helm_command, kdu_instance, namespace + paths["kube_config"], + self._helm_command, + quote(kdu_instance), + quote(namespace), ) output, rc = await self._local_async_exec( @@ -455,7 +465,7 @@ class 
K8sHelm3Connector(K8sHelmBaseConnector): # namespace namespace_str = "" if namespace: - namespace_str = "--namespace {}".format(namespace) + namespace_str = "--namespace {}".format(quote(namespace)) # version version_str = "" @@ -467,12 +477,12 @@ class K8sHelm3Connector(K8sHelmBaseConnector): "{params} {timeout} {ns} {model} {ver}".format( kubeconfig=kubeconfig, helm=self._helm_command, - name=kdu_instance, + name=quote(kdu_instance), atomic=atomic_str, params=params_str, timeout=timeout_str, ns=namespace_str, - model=kdu_model, + model=quote(kdu_model), ver=version_str, ) ) @@ -575,12 +585,12 @@ class K8sHelm3Connector(K8sHelmBaseConnector): # version version_str = "" if version: - version_str = "--version {}".format(version) + version_str = "--version {}".format(quote(version)) # namespace namespace_str = "" if namespace: - namespace_str = "--namespace {}".format(namespace) + namespace_str = "--namespace {}".format(quote(namespace)) command = ( "env KUBECONFIG={kubeconfig} {helm} upgrade {name} {model} {namespace} {atomic} {force}" @@ -588,13 +598,13 @@ class K8sHelm3Connector(K8sHelmBaseConnector): ).format( kubeconfig=kubeconfig, helm=self._helm_command, - name=kdu_instance, + name=quote(kdu_instance), namespace=namespace_str, atomic=atomic_str, force=force_str, params=params_str, timeout=timeout_str, - model=kdu_model, + model=quote(kdu_model), ver=version_str, ) return command @@ -603,14 +613,18 @@ class K8sHelm3Connector(K8sHelmBaseConnector): self, kdu_instance: str, namespace: str, revision: float, kubeconfig: str ) -> str: return "env KUBECONFIG={} {} rollback {} {} --namespace={} --wait".format( - kubeconfig, self._helm_command, kdu_instance, revision, namespace + kubeconfig, + self._helm_command, + quote(kdu_instance), + revision, + quote(namespace), ) def _get_uninstall_command( self, kdu_instance: str, namespace: str, kubeconfig: str ) -> str: return "env KUBECONFIG={} {} uninstall {} --namespace={}".format( - kubeconfig, self._helm_command, kdu_instance, namespace + kubeconfig, self._helm_command, quote(kdu_instance), quote(namespace) ) def _get_helm_chart_repos_ids(self, cluster_uuid) -> list: diff --git a/n2vc/k8s_helm_base_conn.py b/n2vc/k8s_helm_base_conn.py index a897e0e..383ce7d 100644 --- a/n2vc/k8s_helm_base_conn.py +++ b/n2vc/k8s_helm_base_conn.py @@ -22,6 +22,7 @@ import abc import asyncio from typing import Union +from shlex import quote import random import time import shlex @@ -113,7 +114,7 @@ class K8sHelmBaseConnector(K8sConnector): namespace: str = "kube-system", reuse_cluster_uuid=None, **kwargs, - ) -> (str, bool): + ) -> tuple[str, bool]: """ It prepares a given K8s cluster environment to run Charts @@ -181,7 +182,7 @@ class K8sHelmBaseConnector(K8sConnector): # helm repo add name url command = ("env KUBECONFIG={} {} repo add {} {}").format( - paths["kube_config"], self._helm_command, name, url + paths["kube_config"], self._helm_command, quote(name), quote(url) ) if cert: @@ -191,13 +192,13 @@ class K8sHelmBaseConnector(K8sConnector): os.makedirs(os.path.dirname(temp_cert_file), exist_ok=True) with open(temp_cert_file, "w") as the_cert: the_cert.write(cert) - command += " --ca-file {}".format(temp_cert_file) + command += " --ca-file {}".format(quote(temp_cert_file)) if user: - command += " --username={}".format(user) + command += " --username={}".format(quote(user)) if password: - command += " --password={}".format(password) + command += " --password={}".format(quote(password)) self.log.debug("adding repo: {}".format(command)) await 
self._local_async_exec( @@ -206,7 +207,7 @@ class K8sHelmBaseConnector(K8sConnector): # helm repo update command = "env KUBECONFIG={} {} repo update {}".format( - paths["kube_config"], self._helm_command, name + paths["kube_config"], self._helm_command, quote(name) ) self.log.debug("updating repo: {}".format(command)) await self._local_async_exec( @@ -232,7 +233,7 @@ class K8sHelmBaseConnector(K8sConnector): self.fs.sync(from_path=cluster_uuid) # helm repo update - command = "{} repo update {}".format(self._helm_command, name) + command = "{} repo update {}".format(self._helm_command, quote(name)) self.log.debug("updating repo: {}".format(command)) await self._local_async_exec( command=command, raise_exception_on_error=False, env=env @@ -294,7 +295,7 @@ class K8sHelmBaseConnector(K8sConnector): self.fs.sync(from_path=cluster_uuid) command = "env KUBECONFIG={} {} repo remove {}".format( - paths["kube_config"], self._helm_command, name + paths["kube_config"], self._helm_command, quote(name) ) await self._local_async_exec( command=command, raise_exception_on_error=True, env=env @@ -1538,7 +1539,7 @@ class K8sHelmBaseConnector(K8sConnector): show_error_log: bool = True, encode_utf8: bool = False, env: dict = None, - ) -> (str, int): + ) -> tuple[str, int]: command = K8sHelmBaseConnector._remove_multiple_spaces(command) self.log.debug( "Executing async local command: {}, env: {}".format(command, env) @@ -1704,7 +1705,10 @@ class K8sHelmBaseConnector(K8sConnector): ) command = "{} --kubeconfig={} --namespace={} get service {} -o=yaml".format( - self.kubectl_command, paths["kube_config"], namespace, service_name + self.kubectl_command, + paths["kube_config"], + quote(namespace), + quote(service_name), ) output, _rc = await self._local_async_exec( @@ -1755,20 +1759,20 @@ class K8sHelmBaseConnector(K8sConnector): repo_str = "" if repo_url: - repo_str = " --repo {}".format(repo_url) + repo_str = " --repo {}".format(quote(repo_url)) # Obtain the Chart's name and store it in the var kdu_model kdu_model, _ = self._split_repo(kdu_model=kdu_model) kdu_model, version = self._split_version(kdu_model) if version: - version_str = "--version {}".format(version) + version_str = "--version {}".format(quote(version)) else: version_str = "" full_command = self._get_inspect_command( show_command=inspect_command, - kdu_model=kdu_model, + kdu_model=quote(kdu_model), repo_str=repo_str, version=version_str, ) @@ -1782,7 +1786,7 @@ class K8sHelmBaseConnector(K8sConnector): kdu_model: str, repo_url: str = None, resource_name: str = None, - ) -> (int, str): + ) -> tuple[int, str]: """Get the replica count value in the Helm Chart Values. 
Args: @@ -1957,7 +1961,7 @@ class K8sHelmBaseConnector(K8sConnector): # params for use in -f file # returns values file option and filename (in order to delete it at the end) - def _params_to_file_option(self, cluster_id: str, params: dict) -> (str, str): + def _params_to_file_option(self, cluster_id: str, params: dict) -> tuple[str, str]: if params and len(params) > 0: self._init_paths_env(cluster_name=cluster_id, create_if_not_exist=True) @@ -1986,19 +1990,14 @@ class K8sHelmBaseConnector(K8sConnector): # params for use in --set option @staticmethod def _params_to_set_option(params: dict) -> str: - params_str = "" - if params and len(params) > 0: - start = True - for key in params: - value = params.get(key, None) - if value is not None: - if start: - params_str += "--set " - start = False - else: - params_str += "," - params_str += "{}={}".format(key, value) - return params_str + pairs = [ + f"{quote(str(key))}={quote(str(value))}" + for key, value in params.items() + if value is not None + ] + if not pairs: + return "" + return "--set " + ",".join(pairs) @staticmethod def generate_kdu_instance_name(**kwargs): @@ -2036,7 +2035,7 @@ class K8sHelmBaseConnector(K8sConnector): name = name + get_random_number() return name.lower() - def _split_version(self, kdu_model: str) -> (str, str): + def _split_version(self, kdu_model: str) -> tuple[str, str]: version = None if not self._is_helm_chart_a_file(kdu_model) and ":" in kdu_model: parts = kdu_model.split(sep=":") @@ -2045,7 +2044,7 @@ class K8sHelmBaseConnector(K8sConnector): kdu_model = parts[0] return kdu_model, version - def _split_repo(self, kdu_model: str) -> (str, str): + def _split_repo(self, kdu_model: str) -> tuple[str, str]: """Obtain the Helm Chart's repository and Chart's names from the KDU model Args: diff --git a/n2vc/k8s_helm_conn.py b/n2vc/k8s_helm_conn.py index bbe4c48..17e960f 100644 --- a/n2vc/k8s_helm_conn.py +++ b/n2vc/k8s_helm_conn.py @@ -21,6 +21,7 @@ ## import asyncio from typing import Union +from shlex import quote import os import yaml @@ -73,7 +74,7 @@ class K8sHelmConnector(K8sHelmBaseConnector): self.log.debug("Initializing helm client-only...") command = "{} init --client-only {} ".format( self._helm_command, - "--stable-repo-url {}".format(self._stable_repo_url) + "--stable-repo-url {}".format(quote(self._stable_repo_url)) if self._stable_repo_url else "--skip-repos", ) @@ -237,9 +238,11 @@ class K8sHelmConnector(K8sHelmBaseConnector): ) command1 = "env KUBECONFIG={} {} get manifest {} ".format( - kubeconfig, self._helm_command, kdu_instance + kubeconfig, self._helm_command, quote(kdu_instance) + ) + command2 = "{} get --namespace={} -f -".format( + self.kubectl_command, quote(namespace) ) - command2 = "{} get --namespace={} -f -".format(self.kubectl_command, namespace) output, _rc = await self._local_async_exec_pipe( command1, command2, env=env, raise_exception_on_error=True ) @@ -257,7 +260,7 @@ class K8sHelmConnector(K8sHelmBaseConnector): # check if tiller pod is up in cluster command = "{} --kubeconfig={} --namespace={} get deployments".format( - self.kubectl_command, paths["kube_config"], namespace + self.kubectl_command, paths["kube_config"], quote(namespace) ) output, _rc = await self._local_async_exec( command=command, raise_exception_on_error=True, env=env @@ -282,7 +285,7 @@ class K8sHelmConnector(K8sHelmBaseConnector): "Initializing helm in client and server: {}".format(cluster_id) ) command = "{} --kubeconfig={} --namespace kube-system create serviceaccount {}".format( - self.kubectl_command, 
paths["kube_config"], self.service_account + self.kubectl_command, paths["kube_config"], quote(self.service_account) ) _, _rc = await self._local_async_exec( command=command, raise_exception_on_error=False, env=env @@ -291,7 +294,9 @@ class K8sHelmConnector(K8sHelmBaseConnector): command = ( "{} --kubeconfig={} create clusterrolebinding osm-tiller-cluster-rule " "--clusterrole=cluster-admin --serviceaccount=kube-system:{}" - ).format(self.kubectl_command, paths["kube_config"], self.service_account) + ).format( + self.kubectl_command, paths["kube_config"], quote(self.service_account) + ) _, _rc = await self._local_async_exec( command=command, raise_exception_on_error=False, env=env ) @@ -302,10 +307,10 @@ class K8sHelmConnector(K8sHelmBaseConnector): ).format( self._helm_command, paths["kube_config"], - namespace, - paths["helm_dir"], - self.service_account, - "--stable-repo-url {}".format(self._stable_repo_url) + quote(namespace), + quote(paths["helm_dir"]), + quote(self.service_account), + "--stable-repo-url {}".format(quote(self._stable_repo_url)) if self._stable_repo_url else "--skip-repos", ) @@ -326,9 +331,9 @@ class K8sHelmConnector(K8sHelmBaseConnector): ).format( self._helm_command, paths["kube_config"], - namespace, - paths["helm_dir"], - "--stable-repo-url {}".format(self._stable_repo_url) + quote(namespace), + quote(paths["helm_dir"]), + "--stable-repo-url {}".format(quote(self._stable_repo_url)) if self._stable_repo_url else "--skip-repos", ) @@ -362,7 +367,7 @@ class K8sHelmConnector(K8sHelmBaseConnector): if not namespace: # find namespace for tiller pod command = "{} --kubeconfig={} get deployments --all-namespaces".format( - self.kubectl_command, paths["kube_config"] + self.kubectl_command, quote(paths["kube_config"]) ) output, _rc = await self._local_async_exec( command=command, raise_exception_on_error=False, env=env @@ -386,7 +391,9 @@ class K8sHelmConnector(K8sHelmBaseConnector): # uninstall tiller from cluster self.log.debug("Uninstalling tiller from cluster {}".format(cluster_id)) command = "{} --kubeconfig={} --home={} reset".format( - self._helm_command, paths["kube_config"], paths["helm_dir"] + self._helm_command, + quote(paths["kube_config"]), + quote(paths["helm_dir"]), ) self.log.debug("resetting: {}".format(command)) output, _rc = await self._local_async_exec( @@ -397,16 +404,16 @@ class K8sHelmConnector(K8sHelmBaseConnector): command = ( "{} --kubeconfig={} delete clusterrolebinding.rbac.authorization.k8s." 
"io/osm-tiller-cluster-rule" - ).format(self.kubectl_command, paths["kube_config"]) + ).format(self.kubectl_command, quote(paths["kube_config"])) output, _rc = await self._local_async_exec( command=command, raise_exception_on_error=False, env=env ) command = ( "{} --kubeconfig={} --namespace {} delete serviceaccount/{}".format( self.kubectl_command, - paths["kube_config"], - namespace, - self.service_account, + quote(paths["kube_config"]), + quote(namespace), + quote(self.service_account), ) ) output, _rc = await self._local_async_exec( @@ -443,7 +450,7 @@ class K8sHelmConnector(K8sHelmBaseConnector): self, show_command: str, kdu_model: str, repo_str: str, version: str ): inspect_command = "{} inspect {} {}{} {}".format( - self._helm_command, show_command, kdu_model, repo_str, version + self._helm_command, show_command, quote(kdu_model), repo_str, version ) return inspect_command @@ -451,7 +458,7 @@ class K8sHelmConnector(K8sHelmBaseConnector): self, get_command: str, kdu_instance: str, namespace: str, kubeconfig: str ): get_command = "env KUBECONFIG={} {} get {} {} --output yaml".format( - kubeconfig, self._helm_command, get_command, kdu_instance + kubeconfig, self._helm_command, get_command, quote(kdu_instance) ) return get_command @@ -472,7 +479,7 @@ class K8sHelmConnector(K8sHelmBaseConnector): cluster_name=cluster_id, create_if_not_exist=True ) command = ("env KUBECONFIG={} {} status {} --output yaml").format( - paths["kube_config"], self._helm_command, kdu_instance + paths["kube_config"], self._helm_command, quote(kdu_instance) ) output, rc = await self._local_async_exec( command=command, @@ -610,7 +617,7 @@ class K8sHelmConnector(K8sHelmBaseConnector): # namespace namespace_str = "" if namespace: - namespace_str = "--namespace {}".format(namespace) + namespace_str = "--namespace {}".format(quote(namespace)) # version version_str = "" @@ -625,9 +632,9 @@ class K8sHelmConnector(K8sHelmBaseConnector): atomic=atomic_str, params=params_str, timeout=timeout_str, - name=kdu_instance, + name=quote(kdu_instance), ns=namespace_str, - model=kdu_model, + model=quote(kdu_model), ver=version_str, ) ) @@ -730,12 +737,12 @@ class K8sHelmConnector(K8sHelmBaseConnector): # version version_str = "" if version: - version_str = "--version {}".format(version) + version_str = "--version {}".format(quote(version)) # namespace namespace_str = "" if namespace: - namespace_str = "--namespace {}".format(namespace) + namespace_str = "--namespace {}".format(quote(namespace)) command = ( "env KUBECONFIG={kubeconfig} {helm} upgrade {namespace} {atomic} --output yaml {params} {timeout} {force}" @@ -748,8 +755,8 @@ class K8sHelmConnector(K8sHelmBaseConnector): force=force_str, params=params_str, timeout=timeout_str, - name=kdu_instance, - model=kdu_model, + name=quote(kdu_instance), + model=quote(kdu_model), ver=version_str, ) return command @@ -758,12 +765,12 @@ class K8sHelmConnector(K8sHelmBaseConnector): self, kdu_instance, namespace, revision, kubeconfig ) -> str: return "env KUBECONFIG={} {} rollback {} {} --wait".format( - kubeconfig, self._helm_command, kdu_instance, revision + kubeconfig, self._helm_command, quote(kdu_instance), revision ) def _get_uninstall_command( self, kdu_instance: str, namespace: str, kubeconfig: str ) -> str: return "env KUBECONFIG={} {} delete --purge {}".format( - kubeconfig, self._helm_command, kdu_instance + kubeconfig, self._helm_command, quote(kdu_instance) ) diff --git a/n2vc/n2vc_conn.py b/n2vc/n2vc_conn.py index 9e91a10..01d7df8 100644 --- a/n2vc/n2vc_conn.py +++ 
b/n2vc/n2vc_conn.py @@ -24,6 +24,7 @@ import abc import asyncio from http import HTTPStatus +from shlex import quote import os import shlex import subprocess @@ -131,7 +132,7 @@ class N2VCConnector(abc.ABC, Loggable): # If we don't have a key generated, then we have to generate it using ssh-keygen if not os.path.exists(self.private_key_path): command = "ssh-keygen -t {} -b {} -N '' -f {}".format( - "rsa", "4096", self.private_key_path + "rsa", "4096", quote(self.private_key_path) ) # run command with arguments args = shlex.split(command) @@ -151,7 +152,7 @@ class N2VCConnector(abc.ABC, Loggable): reuse_ee_id: str = None, progress_timeout: float = None, total_timeout: float = None, - ) -> (str, dict): + ) -> tuple[str, dict]: """Create an Execution Environment. Returns when it is created or raises an exception on failing @@ -398,7 +399,9 @@ class N2VCConnector(abc.ABC, Loggable): #################################################################################### """ - def _get_namespace_components(self, namespace: str) -> (str, str, str, str, str): + def _get_namespace_components( + self, namespace: str + ) -> tuple[str, str, str, str, str]: """ Split namespace components -- 2.17.1 From 4ab954c482f704ac8b7291bc80c3f362f3c13388 Mon Sep 17 00:00:00 2001 From: Dario Faccin Date: Fri, 16 Jun 2023 10:21:38 +0200 Subject: [PATCH 07/16] Remove EE Charms when VNF has only day-1 operations Add paramter in EE deletion method to allow deletion of a single application instead of the whole model Change-Id: I4d1ebdd0c44c21a01c4d1e0e1f10b63ac983d787 Signed-off-by: Dario Faccin --- n2vc/n2vc_juju_conn.py | 31 +++++++++++-- n2vc/tests/unit/test_n2vc_juju_conn.py | 63 ++++++++++++++++++++++++++ 2 files changed, 89 insertions(+), 5 deletions(-) diff --git a/n2vc/n2vc_juju_conn.py b/n2vc/n2vc_juju_conn.py index 9d0cdfa..f28a9bd 100644 --- a/n2vc/n2vc_juju_conn.py +++ b/n2vc/n2vc_juju_conn.py @@ -836,6 +836,7 @@ class N2VCJujuConnector(N2VCConnector): scaling_in: bool = False, vca_type: str = None, vca_id: str = None, + application_to_delete: str = None, ): """ Delete an execution environment @@ -845,10 +846,11 @@ class N2VCJujuConnector(N2VCConnector): {collection: , filter: {}, path: }, e.g. 
{collection: "nsrs", filter: {_id: , path: "_admin.deployed.VCA.3"} - :param: total_timeout: Total timeout - :param: scaling_in: Boolean to indicate if it is a scaling in operation - :param: vca_type: VCA type - :param: vca_id: VCA ID + :param total_timeout: Total timeout + :param scaling_in: Boolean to indicate if it is a scaling in operation + :param vca_type: VCA type + :param vca_id: VCA ID + :param application_to_delete: name of the single application to be deleted """ self.log.info("Deleting execution environment ee_id={}".format(ee_id)) libjuju = await self._get_libjuju(vca_id) @@ -863,7 +865,26 @@ class N2VCJujuConnector(N2VCConnector): ee_id=ee_id ) try: - if not scaling_in: + if application_to_delete == application_name: + # destroy the application + await libjuju.destroy_application( + model_name=model_name, + application_name=application_name, + total_timeout=total_timeout, + ) + # if model is empty delete it + controller = await libjuju.get_controller() + model = await libjuju.get_model( + controller=controller, + model_name=model_name, + ) + if not model.applications: + self.log.info("Model {} is empty, deleting it".format(model_name)) + await libjuju.destroy_model( + model_name=model_name, + total_timeout=total_timeout, + ) + elif not scaling_in: # destroy the model await libjuju.destroy_model( model_name=model_name, total_timeout=total_timeout diff --git a/n2vc/tests/unit/test_n2vc_juju_conn.py b/n2vc/tests/unit/test_n2vc_juju_conn.py index 456ec1e..2ce5024 100644 --- a/n2vc/tests/unit/test_n2vc_juju_conn.py +++ b/n2vc/tests/unit/test_n2vc_juju_conn.py @@ -1430,3 +1430,66 @@ class GenerateApplicationNameTest(N2VCJujuConnTestCase): self.assertLess(len(application_name), 50) mock_vnf_count_and_record.assert_called_once_with("ns-level", None) self.db.get_one.assert_called_once() + + +class DeleteExecutionEnvironmentTest(N2VCJujuConnTestCase): + def setUp(self): + super(DeleteExecutionEnvironmentTest, self).setUp() + self.n2vc.libjuju.get_controller = AsyncMock() + self.n2vc.libjuju.destroy_model = AsyncMock() + self.n2vc.libjuju.destroy_application = AsyncMock() + + def test_remove_ee__target_application_exists__model_is_deleted(self): + get_ee_id_components = MagicMock() + get_ee_id_components.return_value = ("my_model", "my_app", None) + model = MagicMock(create_autospec=True) + model.applications = {} + self.n2vc.libjuju.get_model = AsyncMock() + self.n2vc.libjuju.get_model.return_value = model + with patch.object(self.n2vc, "_get_ee_id_components", get_ee_id_components): + self.loop.run_until_complete( + self.n2vc.delete_execution_environment( + "my_ee", application_to_delete="my_app" + ) + ) + self.n2vc.libjuju.destroy_application.assert_called_with( + model_name="my_model", + application_name="my_app", + total_timeout=None, + ) + self.n2vc.libjuju.destroy_model.assert_called_with( + model_name="my_model", + total_timeout=None, + ) + + def test_remove_ee__multiple_applications_exist__model_is_not_deleted(self): + get_ee_id_components = MagicMock() + get_ee_id_components.return_value = ("my_model", "my_app", None) + model = MagicMock(create_autospec=True) + model.applications = {MagicMock(create_autospec=True)} + self.n2vc.libjuju.get_model = AsyncMock() + self.n2vc.libjuju.get_model.return_value = model + with patch.object(self.n2vc, "_get_ee_id_components", get_ee_id_components): + self.loop.run_until_complete( + self.n2vc.delete_execution_environment( + "my_ee", application_to_delete="my_app" + ) + ) + self.n2vc.libjuju.destroy_application.assert_called_with( + 
model_name="my_model", + application_name="my_app", + total_timeout=None, + ) + self.n2vc.libjuju.destroy_model.assert_not_called() + + def test_remove_ee__target_application_does_not_exist__model_is_deleted(self): + get_ee_id_components = MagicMock() + get_ee_id_components.return_value = ("my_model", "my_app", None) + with patch.object(self.n2vc, "_get_ee_id_components", get_ee_id_components): + self.loop.run_until_complete( + self.n2vc.delete_execution_environment("my_ee") + ) + self.n2vc.libjuju.destroy_model.assert_called_with( + model_name="my_model", + total_timeout=None, + ) -- 2.17.1 From cbdf61d9604fdf5ba1fe28de3142323b5babc0ed Mon Sep 17 00:00:00 2001 From: Mark Beierl Date: Fri, 21 Jul 2023 17:13:24 +0000 Subject: [PATCH 08/16] Update version of PyYAML Change-Id: Ic2a9611e6ee3a34e8c0c0d51243c6b95e6c11491 Signed-off-by: Mark Beierl --- requirements-dev.txt | 2 +- requirements.in | 4 ++-- requirements.txt | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/requirements-dev.txt b/requirements-dev.txt index 46fdb1f..9f8bd6d 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -44,5 +44,5 @@ pymongo==4.3.3 # via # -r https://osm.etsi.org/gitweb/?p=osm/common.git;a=blob_plain;f=requirements.txt;hb=master # motor -pyyaml==5.4.1 +pyyaml==6.0.1 # via -r https://osm.etsi.org/gitweb/?p=osm/common.git;a=blob_plain;f=requirements.txt;hb=master diff --git a/requirements.in b/requirements.in index 68d077a..bd7abb5 100644 --- a/requirements.in +++ b/requirements.in @@ -13,9 +13,9 @@ # limitations under the License. charset-normalizer google-auth<2.18.0 -juju==2.9.42.4 +juju==2.9.44.0 kubernetes motor pyasn1 -pyyaml==5.4.1 +pyyaml>6 retrying-async diff --git a/requirements.txt b/requirements.txt index be6032c..0b4d370 100644 --- a/requirements.txt +++ b/requirements.txt @@ -42,7 +42,7 @@ google-auth==2.17.3 # kubernetes idna==3.4 # via requests -juju==2.9.42.4 +juju==2.9.44.0 # via -r requirements.in jujubundlelib==0.5.7 # via theblues @@ -91,7 +91,7 @@ python-dateutil==2.8.2 # via kubernetes pytz==2023.3 # via pyrfc3339 -pyyaml==5.4.1 +pyyaml==6.0.1 # via # -r requirements.in # juju -- 2.17.1 From 5a8c7f28bd01a01e344494b33c1fb59912a42733 Mon Sep 17 00:00:00 2001 From: garciadeblas Date: Tue, 18 Apr 2023 15:07:58 +0200 Subject: [PATCH 09/16] Clean stage-archive.sh Change-Id: Ibfebb96815f05a7493de9c8866b8da299b1042fb Signed-off-by: garciadeblas --- devops-stages/stage-archive.sh | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/devops-stages/stage-archive.sh b/devops-stages/stage-archive.sh index 662616c..eead613 100755 --- a/devops-stages/stage-archive.sh +++ b/devops-stages/stage-archive.sh @@ -18,7 +18,4 @@ rm -rf pool rm -rf dists mkdir -p pool/$MDG mv deb_dist/*.deb pool/$MDG/ -mkdir -p dists/unstable/$MDG/binary-amd64/ -apt-ftparchive packages pool/$MDG > dists/unstable/$MDG/binary-amd64/Packages -gzip -9fk dists/unstable/$MDG/binary-amd64/Packages -echo "dists/**,pool/$MDG/*.deb" + -- 2.17.1 From fe88c32ad2b54fec84611f3063fecde2ab1cc664 Mon Sep 17 00:00:00 2001 From: garciadeblas Date: Thu, 28 Sep 2023 23:31:11 +0200 Subject: [PATCH 10/16] Update pip requirements to pass stage2 and stage3 in all modules Change-Id: Ib3e14922e3e40cdb423896caabc4915e78ac3ae2 Signed-off-by: garciadeblas --- requirements-dev.txt | 12 ++++++------ requirements-test.txt | 14 +++++++------- requirements.txt | 39 +++++++++++++++++++-------------------- tox.ini | 4 ++-- 4 files changed, 34 insertions(+), 35 deletions(-) diff --git a/requirements-dev.txt 
b/requirements-dev.txt index 9f8bd6d..cc6daf0 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -14,15 +14,15 @@ # See the License for the specific language governing permissions and # limitations under the License. ####################################################################################### -aiokafka==0.8.0 +aiokafka==0.8.1 # via -r https://osm.etsi.org/gitweb/?p=osm/common.git;a=blob_plain;f=requirements.txt;hb=master -async-timeout==4.0.2 +async-timeout==4.0.3 # via # -r https://osm.etsi.org/gitweb/?p=osm/common.git;a=blob_plain;f=requirements.txt;hb=master # aiokafka dataclasses==0.6 # via -r https://osm.etsi.org/gitweb/?p=osm/common.git;a=blob_plain;f=requirements.txt;hb=master -dnspython==2.3.0 +dnspython==2.4.2 # via # -r https://osm.etsi.org/gitweb/?p=osm/common.git;a=blob_plain;f=requirements.txt;hb=master # pymongo @@ -30,7 +30,7 @@ kafka-python==2.0.2 # via # -r https://osm.etsi.org/gitweb/?p=osm/common.git;a=blob_plain;f=requirements.txt;hb=master # aiokafka -motor==3.1.2 +motor==3.3.1 # via -r https://osm.etsi.org/gitweb/?p=osm/common.git;a=blob_plain;f=requirements.txt;hb=master osm-common @ git+https://osm.etsi.org/gerrit/osm/common.git@master # via -r requirements-dev.in @@ -38,9 +38,9 @@ packaging==23.1 # via # -r https://osm.etsi.org/gitweb/?p=osm/common.git;a=blob_plain;f=requirements.txt;hb=master # aiokafka -pycryptodome==3.17 +pycryptodome==3.19.0 # via -r https://osm.etsi.org/gitweb/?p=osm/common.git;a=blob_plain;f=requirements.txt;hb=master -pymongo==4.3.3 +pymongo==4.5.0 # via # -r https://osm.etsi.org/gitweb/?p=osm/common.git;a=blob_plain;f=requirements.txt;hb=master # motor diff --git a/requirements-test.txt b/requirements-test.txt index 0704348..57e30a7 100644 --- a/requirements-test.txt +++ b/requirements-test.txt @@ -16,13 +16,13 @@ ####################################################################################### asynctest==0.13.0 # via -r requirements-test.in -certifi==2023.5.7 +certifi==2023.7.22 # via requests -charset-normalizer==3.1.0 +charset-normalizer==3.2.0 # via # -r requirements-test.in # requests -coverage==7.2.5 +coverage==7.3.1 # via -r requirements-test.in flake8==4.0.1 # via -r requirements-test.in @@ -30,7 +30,7 @@ idna==3.4 # via requests mccabe==0.6.1 # via flake8 -mock==5.0.2 +mock==5.1.0 # via -r requirements-test.in nose2==0.13.0 # via -r requirements-test.in @@ -38,11 +38,11 @@ pycodestyle==2.8.0 # via flake8 pyflakes==2.4.0 # via flake8 -requests==2.30.0 +requests==2.31.0 # via requests-mock -requests-mock==1.10.0 +requests-mock==1.11.0 # via -r requirements-test.in six==1.16.0 # via requests-mock -urllib3==2.0.2 +urllib3==2.0.5 # via requests diff --git a/requirements.txt b/requirements.txt index 0b4d370..b9ecc7d 100644 --- a/requirements.txt +++ b/requirements.txt @@ -14,27 +14,27 @@ # See the License for the specific language governing permissions and # limitations under the License. 
####################################################################################### -async-timeout==4.0.2 +async-timeout==4.0.3 # via retrying-async bcrypt==4.0.1 # via paramiko -cachetools==5.3.0 +cachetools==5.3.1 # via google-auth -certifi==2023.5.7 +certifi==2023.7.22 # via # kubernetes # requests -cffi==1.15.1 +cffi==1.16.0 # via # cryptography # pynacl -charset-normalizer==3.1.0 +charset-normalizer==3.2.0 # via # -r requirements.in # requests -cryptography==40.0.2 +cryptography==41.0.4 # via paramiko -dnspython==2.3.0 +dnspython==2.4.2 # via pymongo google-auth==2.17.3 # via @@ -46,7 +46,7 @@ juju==2.9.44.0 # via -r requirements.in jujubundlelib==0.5.7 # via theblues -kubernetes==26.1.0 +kubernetes==28.1.0 # via # -r requirements.in # juju @@ -54,12 +54,14 @@ macaroonbakery==1.3.1 # via # juju # theblues -motor==3.1.2 +motor==3.3.1 # via -r requirements.in mypy-extensions==1.0.0 # via typing-inspect oauthlib==3.2.2 - # via requests-oauthlib + # via + # kubernetes + # requests-oauthlib paramiko==2.12.0 # via juju protobuf==3.20.3 @@ -76,7 +78,7 @@ pycparser==2.21 # via cffi pymacaroons==0.13.0 # via macaroonbakery -pymongo==4.3.3 +pymongo==4.5.0 # via motor pynacl==1.5.0 # via @@ -89,7 +91,7 @@ pyrfc3339==1.1 # macaroonbakery python-dateutil==2.8.2 # via kubernetes -pytz==2023.3 +pytz==2023.3.post1 # via pyrfc3339 pyyaml==6.0.1 # via @@ -97,7 +99,7 @@ pyyaml==6.0.1 # juju # jujubundlelib # kubernetes -requests==2.30.0 +requests==2.31.0 # via # kubernetes # macaroonbakery @@ -121,18 +123,15 @@ theblues==0.5.2 # via juju toposort==1.10 # via juju -typing-extensions==4.5.0 +typing-extensions==4.8.0 # via typing-inspect -typing-inspect==0.8.0 +typing-inspect==0.9.0 # via juju -urllib3==2.0.2 +urllib3==1.26.16 # via # kubernetes # requests -websocket-client==1.5.1 +websocket-client==1.6.3 # via kubernetes websockets==11.0.3 # via juju - -# The following packages are considered to be unsafe in a requirements file: -# setuptools diff --git a/tox.ini b/tox.ini index d4e8594..704aa6c 100644 --- a/tox.ini +++ b/tox.ini @@ -83,7 +83,7 @@ commands = ####################################################################################### [testenv:pip-compile] -deps = pip-tools==6.6.2 +deps = pip-tools==6.13.0 skip_install = true allowlist_externals = bash [ @@ -91,7 +91,7 @@ commands = - bash -c "for file in requirements*.in ; do \ UNSAFE="" ; \ if [[ $file =~ 'dist' ]] ; then UNSAFE='--allow-unsafe' ; fi ; \ - pip-compile -rU --no-header $UNSAFE $file ;\ + pip-compile --resolver=backtracking -rU --no-header $UNSAFE $file ;\ out=`echo $file | sed 's/.in/.txt/'` ; \ sed -i -e '1 e head -16 tox.ini' $out ;\ done" -- 2.17.1 From 0571e0ef3ff495bce12dd26d3843fe2ff76a3132 Mon Sep 17 00:00:00 2001 From: garciadeblas Date: Fri, 29 Sep 2023 09:54:53 +0200 Subject: [PATCH 11/16] Update pip requirements to pin kubernetes version to 26.1.0 Change-Id: I1c5ffa2baee938a36ea126b30becbc1e9c7d5885 Signed-off-by: garciadeblas --- requirements.in | 2 +- requirements.txt | 11 ++++++----- 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/requirements.in b/requirements.in index bd7abb5..95605f5 100644 --- a/requirements.in +++ b/requirements.in @@ -14,7 +14,7 @@ charset-normalizer google-auth<2.18.0 juju==2.9.44.0 -kubernetes +kubernetes==26.1.0 motor pyasn1 pyyaml>6 diff --git a/requirements.txt b/requirements.txt index b9ecc7d..215ac92 100644 --- a/requirements.txt +++ b/requirements.txt @@ -46,7 +46,7 @@ juju==2.9.44.0 # via -r requirements.in jujubundlelib==0.5.7 # via theblues -kubernetes==28.1.0 
+kubernetes==26.1.0 # via # -r requirements.in # juju @@ -59,9 +59,7 @@ motor==3.3.1 mypy-extensions==1.0.0 # via typing-inspect oauthlib==3.2.2 - # via - # kubernetes - # requests-oauthlib + # via requests-oauthlib paramiko==2.12.0 # via juju protobuf==3.20.3 @@ -127,7 +125,7 @@ typing-extensions==4.8.0 # via typing-inspect typing-inspect==0.9.0 # via juju -urllib3==1.26.16 +urllib3==2.0.5 # via # kubernetes # requests @@ -135,3 +133,6 @@ websocket-client==1.6.3 # via kubernetes websockets==11.0.3 # via juju + +# The following packages are considered to be unsafe in a requirements file: +# setuptools -- 2.17.1 From c81293be8ba0656a5bc7994e4333fedf73b527ff Mon Sep 17 00:00:00 2001 From: Luis Vega Date: Fri, 13 Oct 2023 14:44:26 +0000 Subject: [PATCH 12/16] Feature 11002: Deprecate helmv2 Change-Id: I639916d8aeef5a984a4fda2643b68e5bb589b19e Signed-off-by: Luis Vega --- n2vc/k8s_helm_conn.py | 776 -------------------------- n2vc/tests/unit/test_k8s_helm_conn.py | 740 ------------------------ 2 files changed, 1516 deletions(-) delete mode 100644 n2vc/k8s_helm_conn.py delete mode 100644 n2vc/tests/unit/test_k8s_helm_conn.py diff --git a/n2vc/k8s_helm_conn.py b/n2vc/k8s_helm_conn.py deleted file mode 100644 index 17e960f..0000000 --- a/n2vc/k8s_helm_conn.py +++ /dev/null @@ -1,776 +0,0 @@ -## -# Copyright 2019 Telefonica Investigacion y Desarrollo, S.A.U. -# This file is part of OSM -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -# For those usages not covered by the Apache License, Version 2.0 please -# contact with: nfvlabs@tid.es -## -import asyncio -from typing import Union -from shlex import quote -import os -import yaml - -from n2vc.k8s_helm_base_conn import K8sHelmBaseConnector -from n2vc.exceptions import K8sException - - -class K8sHelmConnector(K8sHelmBaseConnector): - - """ - #################################################################################### - ################################### P U B L I C #################################### - #################################################################################### - """ - - def __init__( - self, - fs: object, - db: object, - kubectl_command: str = "/usr/bin/kubectl", - helm_command: str = "/usr/bin/helm", - log: object = None, - on_update_db=None, - ): - """ - Initializes helm connector for helm v2 - - :param fs: file system for kubernetes and helm configuration - :param db: database object to write current operation status - :param kubectl_command: path to kubectl executable - :param helm_command: path to helm executable - :param log: logger - :param on_update_db: callback called when k8s connector updates database - """ - - # parent class - K8sHelmBaseConnector.__init__( - self, - db=db, - log=log, - fs=fs, - kubectl_command=kubectl_command, - helm_command=helm_command, - on_update_db=on_update_db, - ) - - self.log.info("Initializing K8S Helm2 connector") - - # initialize helm client-only - self.log.debug("Initializing helm client-only...") - command = "{} init --client-only {} ".format( - self._helm_command, - "--stable-repo-url {}".format(quote(self._stable_repo_url)) - if self._stable_repo_url - else "--skip-repos", - ) - try: - asyncio.create_task( - self._local_async_exec(command=command, raise_exception_on_error=False) - ) - except Exception as e: - self.warning( - msg="helm init failed (it was already initialized): {}".format(e) - ) - - self.log.info("K8S Helm2 connector initialized") - - async def install( - self, - cluster_uuid: str, - kdu_model: str, - kdu_instance: str, - atomic: bool = True, - timeout: float = 300, - params: dict = None, - db_dict: dict = None, - kdu_name: str = None, - namespace: str = None, - **kwargs, - ): - """ - Deploys of a new KDU instance. It would implicitly rely on the `install` call - to deploy the Chart/Bundle properly parametrized (in practice, this call would - happen before any _initial-config-primitive_of the VNF is called). - - :param cluster_uuid: UUID of a K8s cluster known by OSM - :param kdu_model: chart/reference (string), which can be either - of these options: - - a name of chart available via the repos known by OSM - (e.g. stable/openldap, stable/openldap:1.2.4) - - a path to a packaged chart (e.g. mychart.tgz) - - a path to an unpacked chart directory or a URL (e.g. mychart) - :param kdu_instance: Kdu instance name - :param atomic: If set, installation process purges chart/bundle on fail, also - will wait until all the K8s objects are active - :param timeout: Time in seconds to wait for the install of the chart/bundle - (defaults to Helm default timeout: 300s) - :param params: dictionary of key-value pairs for instantiation parameters - (overriding default values) - :param dict db_dict: where to write into database when the status changes. - It contains a dict with {collection: , filter: {}, - path: }, - e.g. 
{collection: "nsrs", filter: - {_id: , path: "_admin.deployed.K8S.3"} - :param kdu_name: Name of the KDU instance to be installed - :param namespace: K8s namespace to use for the KDU instance - :param kwargs: Additional parameters (None yet) - :return: True if successful - """ - self.log.debug("installing {} in cluster {}".format(kdu_model, cluster_uuid)) - - # sync local dir - self.fs.sync(from_path=cluster_uuid) - - # init env, paths - paths, env = self._init_paths_env( - cluster_name=cluster_uuid, create_if_not_exist=True - ) - - await self._install_impl( - cluster_uuid, - kdu_model, - paths, - env, - kdu_instance, - atomic=atomic, - timeout=timeout, - params=params, - db_dict=db_dict, - kdu_name=kdu_name, - namespace=namespace, - ) - - # sync fs - self.fs.reverse_sync(from_path=cluster_uuid) - - self.log.debug("Returning kdu_instance {}".format(kdu_instance)) - return True - - async def inspect_kdu(self, kdu_model: str, repo_url: str = None) -> str: - self.log.debug( - "inspect kdu_model {} from (optional) repo: {}".format(kdu_model, repo_url) - ) - - return await self._exec_inspect_command( - inspect_command="", kdu_model=kdu_model, repo_url=repo_url - ) - - """ - #################################################################################### - ################################### P R I V A T E ################################## - #################################################################################### - """ - - def _init_paths_env(self, cluster_name: str, create_if_not_exist: bool = True): - """ - Creates and returns base cluster and kube dirs and returns them. - Also created helm3 dirs according to new directory specification, paths are - returned and also environment variables that must be provided to execute commands - - Helm 2 directory specification uses helm_home dir: - - The variables assigned for this paths are: - - Helm hone: $HELM_HOME - - helm kubeconfig: $KUBECONFIG - - :param cluster_name: cluster_name - :return: Dictionary with config_paths and dictionary with helm environment variables - """ - base = self.fs.path - if base.endswith("/") or base.endswith("\\"): - base = base[:-1] - - # base dir for cluster - cluster_dir = base + "/" + cluster_name - - # kube dir - kube_dir = cluster_dir + "/" + ".kube" - if create_if_not_exist and not os.path.exists(kube_dir): - self.log.debug("Creating dir {}".format(kube_dir)) - os.makedirs(kube_dir) - - # helm home dir - helm_dir = cluster_dir + "/" + ".helm" - if create_if_not_exist and not os.path.exists(helm_dir): - self.log.debug("Creating dir {}".format(helm_dir)) - os.makedirs(helm_dir) - - config_filename = kube_dir + "/config" - - # 2 - Prepare dictionary with paths - paths = { - "kube_dir": kube_dir, - "kube_config": config_filename, - "cluster_dir": cluster_dir, - "helm_dir": helm_dir, - } - - for file_name, file in paths.items(): - if "dir" in file_name and not os.path.exists(file): - err_msg = "{} dir does not exist".format(file) - self.log.error(err_msg) - raise K8sException(err_msg) - - # 3 - Prepare environment variables - env = {"HELM_HOME": helm_dir, "KUBECONFIG": config_filename} - - return paths, env - - async def _get_services(self, cluster_id, kdu_instance, namespace, kubeconfig): - # init config, env - paths, env = self._init_paths_env( - cluster_name=cluster_id, create_if_not_exist=True - ) - - command1 = "env KUBECONFIG={} {} get manifest {} ".format( - kubeconfig, self._helm_command, quote(kdu_instance) - ) - command2 = "{} get --namespace={} -f -".format( - self.kubectl_command, 
quote(namespace) - ) - output, _rc = await self._local_async_exec_pipe( - command1, command2, env=env, raise_exception_on_error=True - ) - services = self._parse_services(output) - - return services - - async def _cluster_init( - self, cluster_id: str, namespace: str, paths: dict, env: dict - ): - """ - Implements the helm version dependent cluster initialization: - For helm2 it initialized tiller environment if needed - """ - - # check if tiller pod is up in cluster - command = "{} --kubeconfig={} --namespace={} get deployments".format( - self.kubectl_command, paths["kube_config"], quote(namespace) - ) - output, _rc = await self._local_async_exec( - command=command, raise_exception_on_error=True, env=env - ) - - output_table = self._output_to_table(output=output) - - # find 'tiller' pod in all pods - already_initialized = False - try: - for row in output_table: - if row[0].startswith("tiller-deploy"): - already_initialized = True - break - except Exception: - pass - - # helm init - n2vc_installed_sw = False - if not already_initialized: - self.log.info( - "Initializing helm in client and server: {}".format(cluster_id) - ) - command = "{} --kubeconfig={} --namespace kube-system create serviceaccount {}".format( - self.kubectl_command, paths["kube_config"], quote(self.service_account) - ) - _, _rc = await self._local_async_exec( - command=command, raise_exception_on_error=False, env=env - ) - - command = ( - "{} --kubeconfig={} create clusterrolebinding osm-tiller-cluster-rule " - "--clusterrole=cluster-admin --serviceaccount=kube-system:{}" - ).format( - self.kubectl_command, paths["kube_config"], quote(self.service_account) - ) - _, _rc = await self._local_async_exec( - command=command, raise_exception_on_error=False, env=env - ) - - command = ( - "{} --kubeconfig={} --tiller-namespace={} --home={} --service-account {} " - " {} init" - ).format( - self._helm_command, - paths["kube_config"], - quote(namespace), - quote(paths["helm_dir"]), - quote(self.service_account), - "--stable-repo-url {}".format(quote(self._stable_repo_url)) - if self._stable_repo_url - else "--skip-repos", - ) - _, _rc = await self._local_async_exec( - command=command, raise_exception_on_error=True, env=env - ) - n2vc_installed_sw = True - else: - # check client helm installation - check_file = paths["helm_dir"] + "/repository/repositories.yaml" - if not self._check_file_exists( - filename=check_file, exception_if_not_exists=False - ): - self.log.info("Initializing helm in client: {}".format(cluster_id)) - command = ( - "{} --kubeconfig={} --tiller-namespace={} " - "--home={} init --client-only {} " - ).format( - self._helm_command, - paths["kube_config"], - quote(namespace), - quote(paths["helm_dir"]), - "--stable-repo-url {}".format(quote(self._stable_repo_url)) - if self._stable_repo_url - else "--skip-repos", - ) - output, _rc = await self._local_async_exec( - command=command, raise_exception_on_error=True, env=env - ) - else: - self.log.info("Helm client already initialized") - - repo_list = await self.repo_list(cluster_id) - for repo in repo_list: - if repo["name"] == "stable" and repo["url"] != self._stable_repo_url: - self.log.debug("Add new stable repo url: {}") - await self.repo_remove(cluster_id, "stable") - if self._stable_repo_url: - await self.repo_add(cluster_id, "stable", self._stable_repo_url) - break - - return n2vc_installed_sw - - async def _uninstall_sw(self, cluster_id: str, namespace: str): - # uninstall Tiller if necessary - - self.log.debug("Uninstalling tiller from cluster 
{}".format(cluster_id)) - - # init paths, env - paths, env = self._init_paths_env( - cluster_name=cluster_id, create_if_not_exist=True - ) - - if not namespace: - # find namespace for tiller pod - command = "{} --kubeconfig={} get deployments --all-namespaces".format( - self.kubectl_command, quote(paths["kube_config"]) - ) - output, _rc = await self._local_async_exec( - command=command, raise_exception_on_error=False, env=env - ) - output_table = self._output_to_table(output=output) - namespace = None - for r in output_table: - try: - if "tiller-deploy" in r[1]: - namespace = r[0] - break - except Exception: - pass - else: - msg = "Tiller deployment not found in cluster {}".format(cluster_id) - self.log.error(msg) - - self.log.debug("namespace for tiller: {}".format(namespace)) - - if namespace: - # uninstall tiller from cluster - self.log.debug("Uninstalling tiller from cluster {}".format(cluster_id)) - command = "{} --kubeconfig={} --home={} reset".format( - self._helm_command, - quote(paths["kube_config"]), - quote(paths["helm_dir"]), - ) - self.log.debug("resetting: {}".format(command)) - output, _rc = await self._local_async_exec( - command=command, raise_exception_on_error=True, env=env - ) - # Delete clusterrolebinding and serviceaccount. - # Ignore if errors for backward compatibility - command = ( - "{} --kubeconfig={} delete clusterrolebinding.rbac.authorization.k8s." - "io/osm-tiller-cluster-rule" - ).format(self.kubectl_command, quote(paths["kube_config"])) - output, _rc = await self._local_async_exec( - command=command, raise_exception_on_error=False, env=env - ) - command = ( - "{} --kubeconfig={} --namespace {} delete serviceaccount/{}".format( - self.kubectl_command, - quote(paths["kube_config"]), - quote(namespace), - quote(self.service_account), - ) - ) - output, _rc = await self._local_async_exec( - command=command, raise_exception_on_error=False, env=env - ) - - else: - self.log.debug("namespace not found") - - async def _instances_list(self, cluster_id): - # init paths, env - paths, env = self._init_paths_env( - cluster_name=cluster_id, create_if_not_exist=True - ) - - command = "{} list --output yaml".format(self._helm_command) - - output, _rc = await self._local_async_exec( - command=command, raise_exception_on_error=True, env=env - ) - - if output and len(output) > 0: - # parse yaml and update keys to lower case to unify with helm3 - instances = yaml.load(output, Loader=yaml.SafeLoader).get("Releases") - new_instances = [] - for instance in instances: - new_instance = dict((k.lower(), v) for k, v in instance.items()) - new_instances.append(new_instance) - return new_instances - else: - return [] - - def _get_inspect_command( - self, show_command: str, kdu_model: str, repo_str: str, version: str - ): - inspect_command = "{} inspect {} {}{} {}".format( - self._helm_command, show_command, quote(kdu_model), repo_str, version - ) - return inspect_command - - def _get_get_command( - self, get_command: str, kdu_instance: str, namespace: str, kubeconfig: str - ): - get_command = "env KUBECONFIG={} {} get {} {} --output yaml".format( - kubeconfig, self._helm_command, get_command, quote(kdu_instance) - ) - return get_command - - async def _status_kdu( - self, - cluster_id: str, - kdu_instance: str, - namespace: str = None, - yaml_format: bool = False, - show_error_log: bool = False, - ) -> Union[str, dict]: - self.log.debug( - "status of kdu_instance: {}, namespace: {} ".format(kdu_instance, namespace) - ) - - # init config, env - paths, env = self._init_paths_env( - 
cluster_name=cluster_id, create_if_not_exist=True - ) - command = ("env KUBECONFIG={} {} status {} --output yaml").format( - paths["kube_config"], self._helm_command, quote(kdu_instance) - ) - output, rc = await self._local_async_exec( - command=command, - raise_exception_on_error=True, - show_error_log=show_error_log, - env=env, - ) - - if yaml_format: - return str(output) - - if rc != 0: - return None - - data = yaml.load(output, Loader=yaml.SafeLoader) - - # remove field 'notes' - try: - del data.get("info").get("status")["notes"] - except KeyError: - pass - - # parse the manifest to a list of dictionaries - if "manifest" in data: - manifest_str = data.get("manifest") - manifest_docs = yaml.load_all(manifest_str, Loader=yaml.SafeLoader) - - data["manifest"] = [] - for doc in manifest_docs: - data["manifest"].append(doc) - - # parse field 'resources' - try: - resources = str(data.get("info").get("status").get("resources")) - resource_table = self._output_to_table(resources) - data.get("info").get("status")["resources"] = resource_table - except Exception: - pass - - # set description to lowercase (unify with helm3) - try: - data.get("info")["description"] = data.get("info").pop("Description") - except KeyError: - pass - - return data - - def _get_helm_chart_repos_ids(self, cluster_uuid) -> list: - repo_ids = [] - cluster_filter = {"_admin.helm-chart.id": cluster_uuid} - cluster = self.db.get_one("k8sclusters", cluster_filter) - if cluster: - repo_ids = cluster.get("_admin").get("helm_chart_repos") or [] - return repo_ids - else: - raise K8sException( - "k8cluster with helm-id : {} not found".format(cluster_uuid) - ) - - async def _is_install_completed(self, cluster_id: str, kdu_instance: str) -> bool: - # init config, env - paths, env = self._init_paths_env( - cluster_name=cluster_id, create_if_not_exist=True - ) - - status = await self._status_kdu( - cluster_id=cluster_id, kdu_instance=kdu_instance, yaml_format=False - ) - - # extract info.status.resources-> str - # format: - # ==> v1/Deployment - # NAME READY UP-TO-DATE AVAILABLE AGE - # halting-horse-mongodb 0/1 1 0 0s - # halting-petit-mongodb 1/1 1 0 0s - # blank line - resources = K8sHelmBaseConnector._get_deep( - status, ("info", "status", "resources") - ) - - # convert to table - resources = K8sHelmBaseConnector._output_to_table(resources) - - num_lines = len(resources) - index = 0 - ready = True - while index < num_lines: - try: - line1 = resources[index] - index += 1 - # find '==>' in column 0 - if line1[0] == "==>": - line2 = resources[index] - index += 1 - # find READY in column 1 - if line2[1] == "READY": - # read next lines - line3 = resources[index] - index += 1 - while len(line3) > 1 and index < num_lines: - ready_value = line3[1] - parts = ready_value.split(sep="/") - current = int(parts[0]) - total = int(parts[1]) - if current < total: - self.log.debug("NOT READY:\n {}".format(line3)) - ready = False - line3 = resources[index] - index += 1 - - except Exception: - pass - - return ready - - def _get_install_command( - self, - kdu_model, - kdu_instance, - namespace, - params_str, - version, - atomic, - timeout, - kubeconfig, - ) -> str: - timeout_str = "" - if timeout: - timeout_str = "--timeout {}".format(timeout) - - # atomic - atomic_str = "" - if atomic: - atomic_str = "--atomic" - # namespace - namespace_str = "" - if namespace: - namespace_str = "--namespace {}".format(quote(namespace)) - - # version - version_str = "" - if version: - version_str = "--version {}".format(version) - - command = ( - "env 
KUBECONFIG={kubeconfig} {helm} install {atomic} --output yaml " - "{params} {timeout} --name={name} {ns} {model} {ver}".format( - kubeconfig=kubeconfig, - helm=self._helm_command, - atomic=atomic_str, - params=params_str, - timeout=timeout_str, - name=quote(kdu_instance), - ns=namespace_str, - model=quote(kdu_model), - ver=version_str, - ) - ) - return command - - def _get_upgrade_scale_command( - self, - kdu_model: str, - kdu_instance: str, - namespace: str, - scale: int, - version: str, - atomic: bool, - replica_str: str, - timeout: float, - resource_name: str, - kubeconfig: str, - ) -> str: - """Generates the command to scale a Helm Chart release - - Args: - kdu_model (str): Kdu model name, corresponding to the Helm local location or repository - kdu_instance (str): KDU instance, corresponding to the Helm Chart release in question - namespace (str): Namespace where this KDU instance is deployed - scale (int): Scale count - version (str): Constraint with specific version of the Chart to use - atomic (bool): If set, upgrade process rolls back changes made in case of failed upgrade. - The --wait flag will be set automatically if --atomic is used - replica_str (str): The key under resource_name key where the scale count is stored - timeout (float): The time, in seconds, to wait - resource_name (str): The KDU's resource to scale - kubeconfig (str): Kubeconfig file path - - Returns: - str: command to scale a Helm Chart release - """ - - # scale - if resource_name: - scale_dict = {"{}.{}".format(resource_name, replica_str): scale} - else: - scale_dict = {replica_str: scale} - - scale_str = self._params_to_set_option(scale_dict) - - return self._get_upgrade_command( - kdu_model=kdu_model, - kdu_instance=kdu_instance, - namespace=namespace, - params_str=scale_str, - version=version, - atomic=atomic, - timeout=timeout, - kubeconfig=kubeconfig, - ) - - def _get_upgrade_command( - self, - kdu_model, - kdu_instance, - namespace, - params_str, - version, - atomic, - timeout, - kubeconfig, - force: bool = False, - ) -> str: - """Generates the command to upgrade a Helm Chart release - - Args: - kdu_model (str): Kdu model name, corresponding to the Helm local location or repository - kdu_instance (str): KDU instance, corresponding to the Helm Chart release in question - namespace (str): Namespace where this KDU instance is deployed - params_str (str): Params used to upgrade the Helm Chart release - version (str): Constraint with specific version of the Chart to use - atomic (bool): If set, upgrade process rolls back changes made in case of failed upgrade. - The --wait flag will be set automatically if --atomic is used - timeout (float): The time, in seconds, to wait - kubeconfig (str): Kubeconfig file path - force (bool): If set, helm forces resource updates through a replacement strategy. This may recreate pods. 
- Returns: - str: command to upgrade a Helm Chart release - """ - - timeout_str = "" - if timeout: - timeout_str = "--timeout {}".format(timeout) - - # atomic - atomic_str = "" - if atomic: - atomic_str = "--atomic" - - # force - force_str = "" - if force: - force_str = "--force " - - # version - version_str = "" - if version: - version_str = "--version {}".format(quote(version)) - - # namespace - namespace_str = "" - if namespace: - namespace_str = "--namespace {}".format(quote(namespace)) - - command = ( - "env KUBECONFIG={kubeconfig} {helm} upgrade {namespace} {atomic} --output yaml {params} {timeout} {force}" - "--reuse-values {name} {model} {ver}" - ).format( - kubeconfig=kubeconfig, - helm=self._helm_command, - namespace=namespace_str, - atomic=atomic_str, - force=force_str, - params=params_str, - timeout=timeout_str, - name=quote(kdu_instance), - model=quote(kdu_model), - ver=version_str, - ) - return command - - def _get_rollback_command( - self, kdu_instance, namespace, revision, kubeconfig - ) -> str: - return "env KUBECONFIG={} {} rollback {} {} --wait".format( - kubeconfig, self._helm_command, quote(kdu_instance), revision - ) - - def _get_uninstall_command( - self, kdu_instance: str, namespace: str, kubeconfig: str - ) -> str: - return "env KUBECONFIG={} {} delete --purge {}".format( - kubeconfig, self._helm_command, quote(kdu_instance) - ) diff --git a/n2vc/tests/unit/test_k8s_helm_conn.py b/n2vc/tests/unit/test_k8s_helm_conn.py deleted file mode 100644 index 161471a..0000000 --- a/n2vc/tests/unit/test_k8s_helm_conn.py +++ /dev/null @@ -1,740 +0,0 @@ -## -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# -# For those usages not covered by the Apache License, Version 2.0 please -# contact: alfonso.tiernosepulveda@telefonica.com -## - -import asynctest -import logging - -from asynctest.mock import Mock -from osm_common.dbmemory import DbMemory -from osm_common.fslocal import FsLocal -from n2vc.k8s_helm_conn import K8sHelmConnector - -__author__ = "Isabel Lloret " - - -class TestK8sHelmConn(asynctest.TestCase): - logging.basicConfig(level=logging.DEBUG) - logger = logging.getLogger(__name__) - logger.setLevel(logging.DEBUG) - - async def setUp(self): - self.db = Mock(DbMemory()) - self.fs = asynctest.Mock(FsLocal()) - self.fs.path = "./tmp/" - self.namespace = "testk8s" - self.service_account = "osm" - self.cluster_id = "helm_cluster_id" - self.cluster_uuid = self.cluster_id - # pass fake kubectl and helm commands to make sure it does not call actual commands - K8sHelmConnector._check_file_exists = asynctest.Mock(return_value=True) - K8sHelmConnector._local_async_exec = asynctest.CoroutineMock( - return_value=(0, "") - ) - cluster_dir = self.fs.path + self.cluster_id - self.kube_config = self.fs.path + self.cluster_id + "/.kube/config" - self.helm_home = self.fs.path + self.cluster_id + "/.helm" - self.env = { - "HELM_HOME": "{}/.helm".format(cluster_dir), - "KUBECONFIG": "{}/.kube/config".format(cluster_dir), - } - self.helm_conn = K8sHelmConnector(self.fs, self.db, log=self.logger) - self.logger.debug("Set up executed") - - @asynctest.fail_on(active_handles=True) - async def test_init_env(self): - # TODO - pass - - @asynctest.fail_on(active_handles=True) - async def test_repo_add(self): - repo_name = "bitnami" - repo_url = "https://charts.bitnami.com/bitnami" - self.helm_conn._local_async_exec = asynctest.CoroutineMock(return_value=("", 0)) - - await self.helm_conn.repo_add(self.cluster_uuid, repo_name, repo_url) - - self.helm_conn.fs.sync.assert_called_once_with(from_path=self.cluster_id) - self.helm_conn.fs.reverse_sync.assert_called_once_with( - from_path=self.cluster_id - ) - self.assertEqual( - self.helm_conn._local_async_exec.call_count, - 2, - "local_async_exec expected 2 calls, called {}".format( - self.helm_conn._local_async_exec.call_count - ), - ) - - repo_update_command = ( - "env KUBECONFIG=./tmp/helm_cluster_id/.kube/config /usr/bin/helm repo update {}" - ).format(repo_name) - repo_add_command = ( - "env KUBECONFIG=./tmp/helm_cluster_id/.kube/config /usr/bin/helm repo add {} {}" - ).format(repo_name, repo_url) - calls = self.helm_conn._local_async_exec.call_args_list - call0_kargs = calls[0][1] - self.assertEqual( - call0_kargs.get("command"), - repo_add_command, - "Invalid repo add command: {}".format(call0_kargs.get("command")), - ) - self.assertEqual( - call0_kargs.get("env"), - self.env, - "Invalid env for add command: {}".format(call0_kargs.get("env")), - ) - call1_kargs = calls[1][1] - self.assertEqual( - call1_kargs.get("command"), - repo_update_command, - "Invalid repo update command: {}".format(call1_kargs.get("command")), - ) - self.assertEqual( - call1_kargs.get("env"), - self.env, - "Invalid env for update command: {}".format(call1_kargs.get("env")), - ) - - @asynctest.fail_on(active_handles=True) - async def test_repo_list(self): - self.helm_conn._local_async_exec = asynctest.CoroutineMock(return_value=("", 0)) - - await self.helm_conn.repo_list(self.cluster_uuid) - - self.helm_conn.fs.sync.assert_called_once_with(from_path=self.cluster_id) - self.helm_conn.fs.reverse_sync.assert_called_once_with( - from_path=self.cluster_id - ) - command = "env 
KUBECONFIG=./tmp/helm_cluster_id/.kube/config /usr/bin/helm repo list --output yaml" - self.helm_conn._local_async_exec.assert_called_with( - command=command, env=self.env, raise_exception_on_error=False - ) - - @asynctest.fail_on(active_handles=True) - async def test_repo_remove(self): - self.helm_conn._local_async_exec = asynctest.CoroutineMock(return_value=("", 0)) - repo_name = "bitnami" - await self.helm_conn.repo_remove(self.cluster_uuid, repo_name) - - self.helm_conn.fs.sync.assert_called_once_with(from_path=self.cluster_id) - self.helm_conn.fs.reverse_sync.assert_called_once_with( - from_path=self.cluster_id - ) - command = "env KUBECONFIG=./tmp/helm_cluster_id/.kube/config /usr/bin/helm repo remove {}".format( - repo_name - ) - self.helm_conn._local_async_exec.assert_called_once_with( - command=command, env=self.env, raise_exception_on_error=True - ) - - @asynctest.fail_on(active_handles=True) - async def test_install(self): - kdu_model = "stable/openldap:1.2.2" - kdu_instance = "stable-openldap-0005399828" - db_dict = {} - self.helm_conn._local_async_exec = asynctest.CoroutineMock(return_value=("", 0)) - self.helm_conn._status_kdu = asynctest.CoroutineMock(return_value=None) - self.helm_conn._store_status = asynctest.CoroutineMock() - self.helm_conn.generate_kdu_instance_name = Mock(return_value=kdu_instance) - - await self.helm_conn.install( - self.cluster_uuid, - kdu_model, - kdu_instance, - atomic=True, - namespace=self.namespace, - db_dict=db_dict, - ) - - self.helm_conn.fs.sync.assert_has_calls( - [ - asynctest.call(from_path=self.cluster_id), - asynctest.call(from_path=self.cluster_id), - ] - ) - self.helm_conn.fs.reverse_sync.assert_has_calls( - [ - asynctest.call(from_path=self.cluster_id), - asynctest.call(from_path=self.cluster_id), - ] - ) - self.helm_conn._store_status.assert_called_with( - cluster_id=self.cluster_id, - kdu_instance=kdu_instance, - namespace=self.namespace, - db_dict=db_dict, - operation="install", - ) - command = ( - "env KUBECONFIG=./tmp/helm_cluster_id/.kube/config /usr/bin/helm install " - "--atomic --output yaml --timeout 300 " - "--name=stable-openldap-0005399828 --namespace testk8s stable/openldap " - "--version 1.2.2" - ) - self.helm_conn._local_async_exec.assert_called_with( - command=command, env=self.env, raise_exception_on_error=False - ) - - @asynctest.fail_on(active_handles=True) - async def test_upgrade_force_true(self): - kdu_model = "stable/openldap:1.2.3" - kdu_instance = "stable-openldap-0005399828" - db_dict = {} - instance_info = { - "chart": "openldap-1.2.2", - "name": kdu_instance, - "namespace": self.namespace, - "revision": 1, - "status": "DEPLOYED", - } - self.helm_conn._local_async_exec = asynctest.CoroutineMock(return_value=("", 0)) - self.helm_conn._store_status = asynctest.CoroutineMock() - self.helm_conn.get_instance_info = asynctest.CoroutineMock( - return_value=instance_info - ) - # TEST-1 (--force true) - await self.helm_conn.upgrade( - self.cluster_uuid, - kdu_instance, - kdu_model, - atomic=True, - db_dict=db_dict, - force=True, - ) - self.helm_conn.fs.sync.assert_called_with(from_path=self.cluster_id) - self.helm_conn.fs.reverse_sync.assert_has_calls( - [ - asynctest.call(from_path=self.cluster_id), - asynctest.call(from_path=self.cluster_id), - ] - ) - self.helm_conn._store_status.assert_called_with( - cluster_id=self.cluster_id, - kdu_instance=kdu_instance, - namespace=self.namespace, - db_dict=db_dict, - operation="upgrade", - ) - command = ( - "env KUBECONFIG=./tmp/helm_cluster_id/.kube/config /usr/bin/helm 
upgrade --namespace testk8s " - "--atomic --output yaml --timeout 300 --force --reuse-values stable-openldap-0005399828 stable/openldap " - "--version 1.2.3" - ) - self.helm_conn._local_async_exec.assert_called_with( - command=command, env=self.env, raise_exception_on_error=False - ) - # TEST-2 (--force false) - await self.helm_conn.upgrade( - self.cluster_uuid, - kdu_instance, - kdu_model, - atomic=True, - db_dict=db_dict, - ) - self.helm_conn.fs.sync.assert_called_with(from_path=self.cluster_id) - self.helm_conn.fs.reverse_sync.assert_has_calls( - [ - asynctest.call(from_path=self.cluster_id), - asynctest.call(from_path=self.cluster_id), - ] - ) - self.helm_conn._store_status.assert_called_with( - cluster_id=self.cluster_id, - kdu_instance=kdu_instance, - namespace=self.namespace, - db_dict=db_dict, - operation="upgrade", - ) - command = ( - "env KUBECONFIG=./tmp/helm_cluster_id/.kube/config /usr/bin/helm upgrade --namespace testk8s " - "--atomic --output yaml --timeout 300 --reuse-values stable-openldap-0005399828 stable/openldap " - "--version 1.2.3" - ) - self.helm_conn._local_async_exec.assert_called_with( - command=command, env=self.env, raise_exception_on_error=False - ) - - @asynctest.fail_on(active_handles=True) - async def test_upgrade_namespace(self): - kdu_model = "stable/openldap:1.2.3" - kdu_instance = "stable-openldap-0005399828" - db_dict = {} - instance_info = { - "chart": "openldap-1.2.2", - "name": kdu_instance, - "namespace": self.namespace, - "revision": 1, - "status": "DEPLOYED", - } - self.helm_conn._local_async_exec = asynctest.CoroutineMock(return_value=("", 0)) - self.helm_conn._store_status = asynctest.CoroutineMock() - self.helm_conn.get_instance_info = asynctest.CoroutineMock( - return_value=instance_info - ) - - await self.helm_conn.upgrade( - self.cluster_uuid, - kdu_instance, - kdu_model, - atomic=True, - db_dict=db_dict, - namespace="default", - ) - self.helm_conn.fs.sync.assert_called_with(from_path=self.cluster_id) - self.helm_conn.fs.reverse_sync.assert_has_calls( - [ - asynctest.call(from_path=self.cluster_id), - asynctest.call(from_path=self.cluster_id), - ] - ) - self.helm_conn._store_status.assert_called_with( - cluster_id=self.cluster_id, - kdu_instance=kdu_instance, - namespace="default", - db_dict=db_dict, - operation="upgrade", - ) - command = ( - "env KUBECONFIG=./tmp/helm_cluster_id/.kube/config /usr/bin/helm upgrade --namespace default " - "--atomic --output yaml --timeout 300 --reuse-values stable-openldap-0005399828 stable/openldap " - "--version 1.2.3" - ) - self.helm_conn._local_async_exec.assert_called_with( - command=command, env=self.env, raise_exception_on_error=False - ) - - @asynctest.fail_on(active_handles=True) - async def test_scale(self): - kdu_model = "stable/openldap:1.2.3" - kdu_instance = "stable-openldap-0005399828" - db_dict = {} - instance_info = { - "chart": "openldap-1.2.3", - "name": kdu_instance, - "namespace": self.namespace, - "revision": 1, - "status": "DEPLOYED", - } - repo_list = [ - { - "name": "stable", - "url": "https://kubernetes-charts.storage.googleapis.com/", - } - ] - kdu_values = """ - # Default values for openldap. - # This is a YAML-formatted file. - # Declare variables to be passed into your templates. 
- - replicaCount: 1 - dummy-app: - replicas: 2 - """ - - self.helm_conn.repo_list = asynctest.CoroutineMock(return_value=repo_list) - self.helm_conn.values_kdu = asynctest.CoroutineMock(return_value=kdu_values) - self.helm_conn._local_async_exec = asynctest.CoroutineMock(return_value=("", 0)) - self.helm_conn._store_status = asynctest.CoroutineMock() - self.helm_conn.get_instance_info = asynctest.CoroutineMock( - return_value=instance_info - ) - - # TEST-1 - await self.helm_conn.scale( - kdu_instance, - 2, - "", - kdu_model=kdu_model, - cluster_uuid=self.cluster_uuid, - atomic=True, - db_dict=db_dict, - ) - command = ( - "env KUBECONFIG=./tmp/helm_cluster_id/.kube/config " - "/usr/bin/helm upgrade --namespace testk8s --atomic --output yaml --set replicaCount=2 " - "--timeout 1800 --reuse-values stable-openldap-0005399828 stable/openldap " - "--version 1.2.3" - ) - self.helm_conn._local_async_exec.assert_called_once_with( - command=command, env=self.env, raise_exception_on_error=False - ) - - # TEST-2 - await self.helm_conn.scale( - kdu_instance, - 3, - "dummy-app", - kdu_model=kdu_model, - cluster_uuid=self.cluster_uuid, - atomic=True, - db_dict=db_dict, - ) - command = ( - "env KUBECONFIG=./tmp/helm_cluster_id/.kube/config " - "/usr/bin/helm upgrade --namespace testk8s --atomic --output yaml --set dummy-app.replicas=3 " - "--timeout 1800 --reuse-values stable-openldap-0005399828 stable/openldap " - "--version 1.2.3" - ) - self.helm_conn._local_async_exec.assert_called_with( - command=command, env=self.env, raise_exception_on_error=False - ) - self.helm_conn.fs.reverse_sync.assert_called_with(from_path=self.cluster_id) - self.helm_conn._store_status.assert_called_with( - cluster_id=self.cluster_id, - kdu_instance=kdu_instance, - namespace=self.namespace, - db_dict=db_dict, - operation="scale", - ) - - @asynctest.fail_on(active_handles=True) - async def test_rollback(self): - kdu_instance = "stable-openldap-0005399828" - db_dict = {} - instance_info = { - "chart": "openldap-1.2.3", - "name": kdu_instance, - "namespace": self.namespace, - "revision": 2, - "status": "DEPLOYED", - } - self.helm_conn._local_async_exec = asynctest.CoroutineMock(return_value=("", 0)) - self.helm_conn._store_status = asynctest.CoroutineMock() - self.helm_conn.get_instance_info = asynctest.CoroutineMock( - return_value=instance_info - ) - - await self.helm_conn.rollback( - self.cluster_uuid, kdu_instance=kdu_instance, revision=1, db_dict=db_dict - ) - self.helm_conn.fs.sync.assert_called_with(from_path=self.cluster_id) - self.helm_conn.fs.reverse_sync.assert_called_once_with( - from_path=self.cluster_id - ) - self.helm_conn._store_status.assert_called_with( - cluster_id=self.cluster_id, - kdu_instance=kdu_instance, - namespace=self.namespace, - db_dict=db_dict, - operation="rollback", - ) - command = ( - "env KUBECONFIG=./tmp/helm_cluster_id/.kube/config " - "/usr/bin/helm rollback stable-openldap-0005399828 1 --wait" - ) - self.helm_conn._local_async_exec.assert_called_once_with( - command=command, env=self.env, raise_exception_on_error=False - ) - - @asynctest.fail_on(active_handles=True) - async def test_uninstall(self): - kdu_instance = "stable-openldap-0005399828" - instance_info = { - "chart": "openldap-1.2.2", - "name": kdu_instance, - "namespace": self.namespace, - "revision": 3, - "status": "DEPLOYED", - } - self.helm_conn._local_async_exec = asynctest.CoroutineMock(return_value=("", 0)) - self.helm_conn._store_status = asynctest.CoroutineMock() - self.helm_conn.get_instance_info = asynctest.CoroutineMock( 
- return_value=instance_info - ) - - await self.helm_conn.uninstall(self.cluster_uuid, kdu_instance) - self.helm_conn.fs.sync.assert_called_with(from_path=self.cluster_id) - self.helm_conn.fs.reverse_sync.assert_called_once_with( - from_path=self.cluster_id - ) - command = "env KUBECONFIG=./tmp/helm_cluster_id/.kube/config /usr/bin/helm delete --purge {}".format( - kdu_instance - ) - self.helm_conn._local_async_exec.assert_called_once_with( - command=command, env=self.env, raise_exception_on_error=True - ) - - @asynctest.fail_on(active_handles=True) - async def test_get_services(self): - kdu_instance = "test_services_1" - service = {"name": "testservice", "type": "LoadBalancer"} - self.helm_conn._local_async_exec_pipe = asynctest.CoroutineMock( - return_value=("", 0) - ) - self.helm_conn._parse_services = Mock(return_value=["testservice"]) - self.helm_conn._get_service = asynctest.CoroutineMock(return_value=service) - - services = await self.helm_conn.get_services( - self.cluster_uuid, kdu_instance, self.namespace - ) - self.helm_conn.fs.sync.assert_called_once_with(from_path=self.cluster_id) - self.helm_conn.fs.reverse_sync.assert_called_once_with( - from_path=self.cluster_id - ) - self.helm_conn._parse_services.assert_called_once() - command1 = "env KUBECONFIG=./tmp/helm_cluster_id/.kube/config /usr/bin/helm get manifest {} ".format( - kdu_instance - ) - command2 = "/usr/bin/kubectl get --namespace={} -f -".format(self.namespace) - self.helm_conn._local_async_exec_pipe.assert_called_once_with( - command1, command2, env=self.env, raise_exception_on_error=True - ) - self.assertEqual( - services, [service], "Invalid service returned from get_service" - ) - - @asynctest.fail_on(active_handles=True) - async def test_get_service(self): - service_name = "service1" - - self.helm_conn._local_async_exec = asynctest.CoroutineMock(return_value=("", 0)) - await self.helm_conn.get_service( - self.cluster_uuid, service_name, self.namespace - ) - - self.helm_conn.fs.sync.assert_called_once_with(from_path=self.cluster_id) - self.helm_conn.fs.reverse_sync.assert_called_once_with( - from_path=self.cluster_id - ) - command = ( - "/usr/bin/kubectl --kubeconfig=./tmp/helm_cluster_id/.kube/config " - "--namespace=testk8s get service service1 -o=yaml" - ) - self.helm_conn._local_async_exec.assert_called_once_with( - command=command, env=self.env, raise_exception_on_error=True - ) - - @asynctest.fail_on(active_handles=True) - async def test_inspect_kdu(self): - self.helm_conn._local_async_exec = asynctest.CoroutineMock(return_value=("", 0)) - - kdu_model = "stable/openldap:1.2.4" - repo_url = "https://kubernetes-charts.storage.googleapis.com/" - await self.helm_conn.inspect_kdu(kdu_model, repo_url) - - command = ( - "/usr/bin/helm inspect openldap --repo " - "https://kubernetes-charts.storage.googleapis.com/ " - "--version 1.2.4" - ) - self.helm_conn._local_async_exec.assert_called_with(command=command) - - @asynctest.fail_on(active_handles=True) - async def test_help_kdu(self): - self.helm_conn._local_async_exec = asynctest.CoroutineMock(return_value=("", 0)) - - kdu_model = "stable/openldap:1.2.4" - repo_url = "https://kubernetes-charts.storage.googleapis.com/" - await self.helm_conn.help_kdu(kdu_model, repo_url) - - command = ( - "/usr/bin/helm inspect readme openldap --repo " - "https://kubernetes-charts.storage.googleapis.com/ " - "--version 1.2.4" - ) - self.helm_conn._local_async_exec.assert_called_with(command=command) - - @asynctest.fail_on(active_handles=True) - async def test_values_kdu(self): - 
self.helm_conn._local_async_exec = asynctest.CoroutineMock(return_value=("", 0)) - - kdu_model = "stable/openldap:1.2.4" - repo_url = "https://kubernetes-charts.storage.googleapis.com/" - await self.helm_conn.values_kdu(kdu_model, repo_url) - - command = ( - "/usr/bin/helm inspect values openldap --repo " - "https://kubernetes-charts.storage.googleapis.com/ " - "--version 1.2.4" - ) - self.helm_conn._local_async_exec.assert_called_with(command=command) - - @asynctest.fail_on(active_handles=True) - async def test_get_values_kdu(self): - self.helm_conn._local_async_exec = asynctest.CoroutineMock(return_value=("", 0)) - - kdu_instance = "stable-openldap-0005399828" - await self.helm_conn.get_values_kdu( - kdu_instance, self.namespace, self.env["KUBECONFIG"] - ) - - command = ( - "env KUBECONFIG=./tmp/helm_cluster_id/.kube/config /usr/bin/helm get values " - "stable-openldap-0005399828 --output yaml" - ) - self.helm_conn._local_async_exec.assert_called_with(command=command) - - @asynctest.fail_on(active_handles=True) - async def test_instances_list(self): - self.helm_conn._local_async_exec = asynctest.CoroutineMock(return_value=("", 0)) - - await self.helm_conn.instances_list(self.cluster_uuid) - self.helm_conn.fs.sync.assert_called_once_with(from_path=self.cluster_id) - self.helm_conn.fs.reverse_sync.assert_called_once_with( - from_path=self.cluster_id - ) - command = "/usr/bin/helm list --output yaml" - self.helm_conn._local_async_exec.assert_called_once_with( - command=command, env=self.env, raise_exception_on_error=True - ) - - @asynctest.fail_on(active_handles=True) - async def test_status_kdu(self): - kdu_instance = "stable-openldap-0005399828" - self.helm_conn._local_async_exec = asynctest.CoroutineMock(return_value=("", 0)) - - await self.helm_conn._status_kdu( - self.cluster_id, kdu_instance, self.namespace, yaml_format=True - ) - command = ( - "env KUBECONFIG=./tmp/helm_cluster_id/.kube/config /usr/bin/helm status {} --output yaml" - ).format(kdu_instance) - self.helm_conn._local_async_exec.assert_called_once_with( - command=command, - env=self.env, - raise_exception_on_error=True, - show_error_log=False, - ) - - @asynctest.fail_on(active_handles=True) - async def test_store_status(self): - kdu_instance = "stable-openldap-0005399828" - db_dict = {} - status = { - "info": { - "description": "Install complete", - "status": { - "code": "1", - "notes": "The openldap helm chart has been installed", - }, - } - } - self.helm_conn._status_kdu = asynctest.CoroutineMock(return_value=status) - self.helm_conn.write_app_status_to_db = asynctest.CoroutineMock( - return_value=status - ) - - await self.helm_conn._store_status( - cluster_id=self.cluster_id, - kdu_instance=kdu_instance, - namespace=self.namespace, - db_dict=db_dict, - operation="install", - ) - self.helm_conn._status_kdu.assert_called_once_with( - cluster_id=self.cluster_id, - kdu_instance=kdu_instance, - namespace=self.namespace, - yaml_format=False, - ) - self.helm_conn.write_app_status_to_db.assert_called_once_with( - db_dict=db_dict, - status="Install complete", - detailed_status=str(status), - operation="install", - ) - - @asynctest.fail_on(active_handles=True) - async def test_reset_uninstall_false(self): - self.helm_conn._uninstall_sw = asynctest.CoroutineMock() - - await self.helm_conn.reset(self.cluster_uuid, force=False, uninstall_sw=False) - self.helm_conn.fs.sync.assert_called_once_with(from_path=self.cluster_id) - self.helm_conn.fs.file_delete.assert_called_once_with( - self.cluster_id, ignore_non_exist=True - ) - 
self.helm_conn._uninstall_sw.assert_not_called() - - @asynctest.fail_on(active_handles=True) - async def test_reset_uninstall(self): - kdu_instance = "stable-openldap-0021099429" - instances = [ - { - "app_version": "2.4.48", - "chart": "openldap-1.2.3", - "name": kdu_instance, - "namespace": self.namespace, - "revision": "1", - "status": "deployed", - "updated": "2020-10-30 11:11:20.376744191 +0000 UTC", - } - ] - self.helm_conn._get_namespace = Mock(return_value=self.namespace) - self.helm_conn._uninstall_sw = asynctest.CoroutineMock() - self.helm_conn.instances_list = asynctest.CoroutineMock(return_value=instances) - self.helm_conn.uninstall = asynctest.CoroutineMock() - - await self.helm_conn.reset(self.cluster_uuid, force=True, uninstall_sw=True) - self.helm_conn.fs.sync.assert_called_once_with(from_path=self.cluster_id) - self.helm_conn.fs.file_delete.assert_called_once_with( - self.cluster_id, ignore_non_exist=True - ) - self.helm_conn._get_namespace.assert_called_once_with( - cluster_uuid=self.cluster_uuid - ) - self.helm_conn.instances_list.assert_called_once_with( - cluster_uuid=self.cluster_uuid - ) - self.helm_conn.uninstall.assert_called_once_with( - cluster_uuid=self.cluster_uuid, kdu_instance=kdu_instance - ) - self.helm_conn._uninstall_sw.assert_called_once_with( - cluster_id=self.cluster_id, namespace=self.namespace - ) - - @asynctest.fail_on(active_handles=True) - async def test_uninstall_sw_namespace(self): - self.helm_conn._local_async_exec = asynctest.CoroutineMock(return_value=("", 0)) - - await self.helm_conn._uninstall_sw(self.cluster_id, self.namespace) - calls = self.helm_conn._local_async_exec.call_args_list - self.assertEqual( - len(calls), 3, "To uninstall should have executed three commands" - ) - call0_kargs = calls[0][1] - command_0 = "/usr/bin/helm --kubeconfig={} --home={} reset".format( - self.kube_config, self.helm_home - ) - self.assertEqual( - call0_kargs, - {"command": command_0, "raise_exception_on_error": True, "env": self.env}, - "Invalid args for first call to local_exec", - ) - call1_kargs = calls[1][1] - command_1 = ( - "/usr/bin/kubectl --kubeconfig={} delete " - "clusterrolebinding.rbac.authorization.k8s.io/osm-tiller-cluster-rule".format( - self.kube_config - ) - ) - self.assertEqual( - call1_kargs, - {"command": command_1, "raise_exception_on_error": False, "env": self.env}, - "Invalid args for second call to local_exec", - ) - call2_kargs = calls[2][1] - command_2 = ( - "/usr/bin/kubectl --kubeconfig={} --namespace {} delete " - "serviceaccount/{}".format( - self.kube_config, self.namespace, self.service_account - ) - ) - self.assertEqual( - call2_kargs, - {"command": command_2, "raise_exception_on_error": False, "env": self.env}, - "Invalid args for third call to local_exec", - ) -- 2.17.1 From dfb624e236597b96658da80fe7436b0f92416cc3 Mon Sep 17 00:00:00 2001 From: cubag Date: Wed, 29 Nov 2023 23:07:12 +0200 Subject: [PATCH 13/16] Revert "Feature 11002: Deprecate helmv2" This reverts commit c81293be8ba0656a5bc7994e4333fedf73b527ff. 
Change-Id: I89c7d1009c4f059ba497a76557f045434a1d2186 Signed-off-by: Gabriel Cuba --- n2vc/k8s_helm_conn.py | 776 ++++++++++++++++++++++++++ n2vc/tests/unit/test_k8s_helm_conn.py | 740 ++++++++++++++++++++++++ 2 files changed, 1516 insertions(+) create mode 100644 n2vc/k8s_helm_conn.py create mode 100644 n2vc/tests/unit/test_k8s_helm_conn.py diff --git a/n2vc/k8s_helm_conn.py b/n2vc/k8s_helm_conn.py new file mode 100644 index 0000000..17e960f --- /dev/null +++ b/n2vc/k8s_helm_conn.py @@ -0,0 +1,776 @@ +## +# Copyright 2019 Telefonica Investigacion y Desarrollo, S.A.U. +# This file is part of OSM +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# For those usages not covered by the Apache License, Version 2.0 please +# contact with: nfvlabs@tid.es +## +import asyncio +from typing import Union +from shlex import quote +import os +import yaml + +from n2vc.k8s_helm_base_conn import K8sHelmBaseConnector +from n2vc.exceptions import K8sException + + +class K8sHelmConnector(K8sHelmBaseConnector): + + """ + #################################################################################### + ################################### P U B L I C #################################### + #################################################################################### + """ + + def __init__( + self, + fs: object, + db: object, + kubectl_command: str = "/usr/bin/kubectl", + helm_command: str = "/usr/bin/helm", + log: object = None, + on_update_db=None, + ): + """ + Initializes helm connector for helm v2 + + :param fs: file system for kubernetes and helm configuration + :param db: database object to write current operation status + :param kubectl_command: path to kubectl executable + :param helm_command: path to helm executable + :param log: logger + :param on_update_db: callback called when k8s connector updates database + """ + + # parent class + K8sHelmBaseConnector.__init__( + self, + db=db, + log=log, + fs=fs, + kubectl_command=kubectl_command, + helm_command=helm_command, + on_update_db=on_update_db, + ) + + self.log.info("Initializing K8S Helm2 connector") + + # initialize helm client-only + self.log.debug("Initializing helm client-only...") + command = "{} init --client-only {} ".format( + self._helm_command, + "--stable-repo-url {}".format(quote(self._stable_repo_url)) + if self._stable_repo_url + else "--skip-repos", + ) + try: + asyncio.create_task( + self._local_async_exec(command=command, raise_exception_on_error=False) + ) + except Exception as e: + self.warning( + msg="helm init failed (it was already initialized): {}".format(e) + ) + + self.log.info("K8S Helm2 connector initialized") + + async def install( + self, + cluster_uuid: str, + kdu_model: str, + kdu_instance: str, + atomic: bool = True, + timeout: float = 300, + params: dict = None, + db_dict: dict = None, + kdu_name: str = None, + namespace: str = None, + **kwargs, + ): + """ + Deploys of a new KDU instance. 
It would implicitly rely on the `install` call + to deploy the Chart/Bundle properly parametrized (in practice, this call would + happen before any _initial-config-primitive_of the VNF is called). + + :param cluster_uuid: UUID of a K8s cluster known by OSM + :param kdu_model: chart/reference (string), which can be either + of these options: + - a name of chart available via the repos known by OSM + (e.g. stable/openldap, stable/openldap:1.2.4) + - a path to a packaged chart (e.g. mychart.tgz) + - a path to an unpacked chart directory or a URL (e.g. mychart) + :param kdu_instance: Kdu instance name + :param atomic: If set, installation process purges chart/bundle on fail, also + will wait until all the K8s objects are active + :param timeout: Time in seconds to wait for the install of the chart/bundle + (defaults to Helm default timeout: 300s) + :param params: dictionary of key-value pairs for instantiation parameters + (overriding default values) + :param dict db_dict: where to write into database when the status changes. + It contains a dict with {collection: , filter: {}, + path: }, + e.g. {collection: "nsrs", filter: + {_id: , path: "_admin.deployed.K8S.3"} + :param kdu_name: Name of the KDU instance to be installed + :param namespace: K8s namespace to use for the KDU instance + :param kwargs: Additional parameters (None yet) + :return: True if successful + """ + self.log.debug("installing {} in cluster {}".format(kdu_model, cluster_uuid)) + + # sync local dir + self.fs.sync(from_path=cluster_uuid) + + # init env, paths + paths, env = self._init_paths_env( + cluster_name=cluster_uuid, create_if_not_exist=True + ) + + await self._install_impl( + cluster_uuid, + kdu_model, + paths, + env, + kdu_instance, + atomic=atomic, + timeout=timeout, + params=params, + db_dict=db_dict, + kdu_name=kdu_name, + namespace=namespace, + ) + + # sync fs + self.fs.reverse_sync(from_path=cluster_uuid) + + self.log.debug("Returning kdu_instance {}".format(kdu_instance)) + return True + + async def inspect_kdu(self, kdu_model: str, repo_url: str = None) -> str: + self.log.debug( + "inspect kdu_model {} from (optional) repo: {}".format(kdu_model, repo_url) + ) + + return await self._exec_inspect_command( + inspect_command="", kdu_model=kdu_model, repo_url=repo_url + ) + + """ + #################################################################################### + ################################### P R I V A T E ################################## + #################################################################################### + """ + + def _init_paths_env(self, cluster_name: str, create_if_not_exist: bool = True): + """ + Creates and returns base cluster and kube dirs and returns them. 
+ Also created helm3 dirs according to new directory specification, paths are + returned and also environment variables that must be provided to execute commands + + Helm 2 directory specification uses helm_home dir: + + The variables assigned for this paths are: + - Helm hone: $HELM_HOME + - helm kubeconfig: $KUBECONFIG + + :param cluster_name: cluster_name + :return: Dictionary with config_paths and dictionary with helm environment variables + """ + base = self.fs.path + if base.endswith("/") or base.endswith("\\"): + base = base[:-1] + + # base dir for cluster + cluster_dir = base + "/" + cluster_name + + # kube dir + kube_dir = cluster_dir + "/" + ".kube" + if create_if_not_exist and not os.path.exists(kube_dir): + self.log.debug("Creating dir {}".format(kube_dir)) + os.makedirs(kube_dir) + + # helm home dir + helm_dir = cluster_dir + "/" + ".helm" + if create_if_not_exist and not os.path.exists(helm_dir): + self.log.debug("Creating dir {}".format(helm_dir)) + os.makedirs(helm_dir) + + config_filename = kube_dir + "/config" + + # 2 - Prepare dictionary with paths + paths = { + "kube_dir": kube_dir, + "kube_config": config_filename, + "cluster_dir": cluster_dir, + "helm_dir": helm_dir, + } + + for file_name, file in paths.items(): + if "dir" in file_name and not os.path.exists(file): + err_msg = "{} dir does not exist".format(file) + self.log.error(err_msg) + raise K8sException(err_msg) + + # 3 - Prepare environment variables + env = {"HELM_HOME": helm_dir, "KUBECONFIG": config_filename} + + return paths, env + + async def _get_services(self, cluster_id, kdu_instance, namespace, kubeconfig): + # init config, env + paths, env = self._init_paths_env( + cluster_name=cluster_id, create_if_not_exist=True + ) + + command1 = "env KUBECONFIG={} {} get manifest {} ".format( + kubeconfig, self._helm_command, quote(kdu_instance) + ) + command2 = "{} get --namespace={} -f -".format( + self.kubectl_command, quote(namespace) + ) + output, _rc = await self._local_async_exec_pipe( + command1, command2, env=env, raise_exception_on_error=True + ) + services = self._parse_services(output) + + return services + + async def _cluster_init( + self, cluster_id: str, namespace: str, paths: dict, env: dict + ): + """ + Implements the helm version dependent cluster initialization: + For helm2 it initialized tiller environment if needed + """ + + # check if tiller pod is up in cluster + command = "{} --kubeconfig={} --namespace={} get deployments".format( + self.kubectl_command, paths["kube_config"], quote(namespace) + ) + output, _rc = await self._local_async_exec( + command=command, raise_exception_on_error=True, env=env + ) + + output_table = self._output_to_table(output=output) + + # find 'tiller' pod in all pods + already_initialized = False + try: + for row in output_table: + if row[0].startswith("tiller-deploy"): + already_initialized = True + break + except Exception: + pass + + # helm init + n2vc_installed_sw = False + if not already_initialized: + self.log.info( + "Initializing helm in client and server: {}".format(cluster_id) + ) + command = "{} --kubeconfig={} --namespace kube-system create serviceaccount {}".format( + self.kubectl_command, paths["kube_config"], quote(self.service_account) + ) + _, _rc = await self._local_async_exec( + command=command, raise_exception_on_error=False, env=env + ) + + command = ( + "{} --kubeconfig={} create clusterrolebinding osm-tiller-cluster-rule " + "--clusterrole=cluster-admin --serviceaccount=kube-system:{}" + ).format( + self.kubectl_command, 
paths["kube_config"], quote(self.service_account) + ) + _, _rc = await self._local_async_exec( + command=command, raise_exception_on_error=False, env=env + ) + + command = ( + "{} --kubeconfig={} --tiller-namespace={} --home={} --service-account {} " + " {} init" + ).format( + self._helm_command, + paths["kube_config"], + quote(namespace), + quote(paths["helm_dir"]), + quote(self.service_account), + "--stable-repo-url {}".format(quote(self._stable_repo_url)) + if self._stable_repo_url + else "--skip-repos", + ) + _, _rc = await self._local_async_exec( + command=command, raise_exception_on_error=True, env=env + ) + n2vc_installed_sw = True + else: + # check client helm installation + check_file = paths["helm_dir"] + "/repository/repositories.yaml" + if not self._check_file_exists( + filename=check_file, exception_if_not_exists=False + ): + self.log.info("Initializing helm in client: {}".format(cluster_id)) + command = ( + "{} --kubeconfig={} --tiller-namespace={} " + "--home={} init --client-only {} " + ).format( + self._helm_command, + paths["kube_config"], + quote(namespace), + quote(paths["helm_dir"]), + "--stable-repo-url {}".format(quote(self._stable_repo_url)) + if self._stable_repo_url + else "--skip-repos", + ) + output, _rc = await self._local_async_exec( + command=command, raise_exception_on_error=True, env=env + ) + else: + self.log.info("Helm client already initialized") + + repo_list = await self.repo_list(cluster_id) + for repo in repo_list: + if repo["name"] == "stable" and repo["url"] != self._stable_repo_url: + self.log.debug("Add new stable repo url: {}") + await self.repo_remove(cluster_id, "stable") + if self._stable_repo_url: + await self.repo_add(cluster_id, "stable", self._stable_repo_url) + break + + return n2vc_installed_sw + + async def _uninstall_sw(self, cluster_id: str, namespace: str): + # uninstall Tiller if necessary + + self.log.debug("Uninstalling tiller from cluster {}".format(cluster_id)) + + # init paths, env + paths, env = self._init_paths_env( + cluster_name=cluster_id, create_if_not_exist=True + ) + + if not namespace: + # find namespace for tiller pod + command = "{} --kubeconfig={} get deployments --all-namespaces".format( + self.kubectl_command, quote(paths["kube_config"]) + ) + output, _rc = await self._local_async_exec( + command=command, raise_exception_on_error=False, env=env + ) + output_table = self._output_to_table(output=output) + namespace = None + for r in output_table: + try: + if "tiller-deploy" in r[1]: + namespace = r[0] + break + except Exception: + pass + else: + msg = "Tiller deployment not found in cluster {}".format(cluster_id) + self.log.error(msg) + + self.log.debug("namespace for tiller: {}".format(namespace)) + + if namespace: + # uninstall tiller from cluster + self.log.debug("Uninstalling tiller from cluster {}".format(cluster_id)) + command = "{} --kubeconfig={} --home={} reset".format( + self._helm_command, + quote(paths["kube_config"]), + quote(paths["helm_dir"]), + ) + self.log.debug("resetting: {}".format(command)) + output, _rc = await self._local_async_exec( + command=command, raise_exception_on_error=True, env=env + ) + # Delete clusterrolebinding and serviceaccount. + # Ignore if errors for backward compatibility + command = ( + "{} --kubeconfig={} delete clusterrolebinding.rbac.authorization.k8s." 
+ "io/osm-tiller-cluster-rule" + ).format(self.kubectl_command, quote(paths["kube_config"])) + output, _rc = await self._local_async_exec( + command=command, raise_exception_on_error=False, env=env + ) + command = ( + "{} --kubeconfig={} --namespace {} delete serviceaccount/{}".format( + self.kubectl_command, + quote(paths["kube_config"]), + quote(namespace), + quote(self.service_account), + ) + ) + output, _rc = await self._local_async_exec( + command=command, raise_exception_on_error=False, env=env + ) + + else: + self.log.debug("namespace not found") + + async def _instances_list(self, cluster_id): + # init paths, env + paths, env = self._init_paths_env( + cluster_name=cluster_id, create_if_not_exist=True + ) + + command = "{} list --output yaml".format(self._helm_command) + + output, _rc = await self._local_async_exec( + command=command, raise_exception_on_error=True, env=env + ) + + if output and len(output) > 0: + # parse yaml and update keys to lower case to unify with helm3 + instances = yaml.load(output, Loader=yaml.SafeLoader).get("Releases") + new_instances = [] + for instance in instances: + new_instance = dict((k.lower(), v) for k, v in instance.items()) + new_instances.append(new_instance) + return new_instances + else: + return [] + + def _get_inspect_command( + self, show_command: str, kdu_model: str, repo_str: str, version: str + ): + inspect_command = "{} inspect {} {}{} {}".format( + self._helm_command, show_command, quote(kdu_model), repo_str, version + ) + return inspect_command + + def _get_get_command( + self, get_command: str, kdu_instance: str, namespace: str, kubeconfig: str + ): + get_command = "env KUBECONFIG={} {} get {} {} --output yaml".format( + kubeconfig, self._helm_command, get_command, quote(kdu_instance) + ) + return get_command + + async def _status_kdu( + self, + cluster_id: str, + kdu_instance: str, + namespace: str = None, + yaml_format: bool = False, + show_error_log: bool = False, + ) -> Union[str, dict]: + self.log.debug( + "status of kdu_instance: {}, namespace: {} ".format(kdu_instance, namespace) + ) + + # init config, env + paths, env = self._init_paths_env( + cluster_name=cluster_id, create_if_not_exist=True + ) + command = ("env KUBECONFIG={} {} status {} --output yaml").format( + paths["kube_config"], self._helm_command, quote(kdu_instance) + ) + output, rc = await self._local_async_exec( + command=command, + raise_exception_on_error=True, + show_error_log=show_error_log, + env=env, + ) + + if yaml_format: + return str(output) + + if rc != 0: + return None + + data = yaml.load(output, Loader=yaml.SafeLoader) + + # remove field 'notes' + try: + del data.get("info").get("status")["notes"] + except KeyError: + pass + + # parse the manifest to a list of dictionaries + if "manifest" in data: + manifest_str = data.get("manifest") + manifest_docs = yaml.load_all(manifest_str, Loader=yaml.SafeLoader) + + data["manifest"] = [] + for doc in manifest_docs: + data["manifest"].append(doc) + + # parse field 'resources' + try: + resources = str(data.get("info").get("status").get("resources")) + resource_table = self._output_to_table(resources) + data.get("info").get("status")["resources"] = resource_table + except Exception: + pass + + # set description to lowercase (unify with helm3) + try: + data.get("info")["description"] = data.get("info").pop("Description") + except KeyError: + pass + + return data + + def _get_helm_chart_repos_ids(self, cluster_uuid) -> list: + repo_ids = [] + cluster_filter = {"_admin.helm-chart.id": cluster_uuid} + cluster = 
self.db.get_one("k8sclusters", cluster_filter) + if cluster: + repo_ids = cluster.get("_admin").get("helm_chart_repos") or [] + return repo_ids + else: + raise K8sException( + "k8cluster with helm-id : {} not found".format(cluster_uuid) + ) + + async def _is_install_completed(self, cluster_id: str, kdu_instance: str) -> bool: + # init config, env + paths, env = self._init_paths_env( + cluster_name=cluster_id, create_if_not_exist=True + ) + + status = await self._status_kdu( + cluster_id=cluster_id, kdu_instance=kdu_instance, yaml_format=False + ) + + # extract info.status.resources-> str + # format: + # ==> v1/Deployment + # NAME READY UP-TO-DATE AVAILABLE AGE + # halting-horse-mongodb 0/1 1 0 0s + # halting-petit-mongodb 1/1 1 0 0s + # blank line + resources = K8sHelmBaseConnector._get_deep( + status, ("info", "status", "resources") + ) + + # convert to table + resources = K8sHelmBaseConnector._output_to_table(resources) + + num_lines = len(resources) + index = 0 + ready = True + while index < num_lines: + try: + line1 = resources[index] + index += 1 + # find '==>' in column 0 + if line1[0] == "==>": + line2 = resources[index] + index += 1 + # find READY in column 1 + if line2[1] == "READY": + # read next lines + line3 = resources[index] + index += 1 + while len(line3) > 1 and index < num_lines: + ready_value = line3[1] + parts = ready_value.split(sep="/") + current = int(parts[0]) + total = int(parts[1]) + if current < total: + self.log.debug("NOT READY:\n {}".format(line3)) + ready = False + line3 = resources[index] + index += 1 + + except Exception: + pass + + return ready + + def _get_install_command( + self, + kdu_model, + kdu_instance, + namespace, + params_str, + version, + atomic, + timeout, + kubeconfig, + ) -> str: + timeout_str = "" + if timeout: + timeout_str = "--timeout {}".format(timeout) + + # atomic + atomic_str = "" + if atomic: + atomic_str = "--atomic" + # namespace + namespace_str = "" + if namespace: + namespace_str = "--namespace {}".format(quote(namespace)) + + # version + version_str = "" + if version: + version_str = "--version {}".format(version) + + command = ( + "env KUBECONFIG={kubeconfig} {helm} install {atomic} --output yaml " + "{params} {timeout} --name={name} {ns} {model} {ver}".format( + kubeconfig=kubeconfig, + helm=self._helm_command, + atomic=atomic_str, + params=params_str, + timeout=timeout_str, + name=quote(kdu_instance), + ns=namespace_str, + model=quote(kdu_model), + ver=version_str, + ) + ) + return command + + def _get_upgrade_scale_command( + self, + kdu_model: str, + kdu_instance: str, + namespace: str, + scale: int, + version: str, + atomic: bool, + replica_str: str, + timeout: float, + resource_name: str, + kubeconfig: str, + ) -> str: + """Generates the command to scale a Helm Chart release + + Args: + kdu_model (str): Kdu model name, corresponding to the Helm local location or repository + kdu_instance (str): KDU instance, corresponding to the Helm Chart release in question + namespace (str): Namespace where this KDU instance is deployed + scale (int): Scale count + version (str): Constraint with specific version of the Chart to use + atomic (bool): If set, upgrade process rolls back changes made in case of failed upgrade. 
+ The --wait flag will be set automatically if --atomic is used + replica_str (str): The key under resource_name key where the scale count is stored + timeout (float): The time, in seconds, to wait + resource_name (str): The KDU's resource to scale + kubeconfig (str): Kubeconfig file path + + Returns: + str: command to scale a Helm Chart release + """ + + # scale + if resource_name: + scale_dict = {"{}.{}".format(resource_name, replica_str): scale} + else: + scale_dict = {replica_str: scale} + + scale_str = self._params_to_set_option(scale_dict) + + return self._get_upgrade_command( + kdu_model=kdu_model, + kdu_instance=kdu_instance, + namespace=namespace, + params_str=scale_str, + version=version, + atomic=atomic, + timeout=timeout, + kubeconfig=kubeconfig, + ) + + def _get_upgrade_command( + self, + kdu_model, + kdu_instance, + namespace, + params_str, + version, + atomic, + timeout, + kubeconfig, + force: bool = False, + ) -> str: + """Generates the command to upgrade a Helm Chart release + + Args: + kdu_model (str): Kdu model name, corresponding to the Helm local location or repository + kdu_instance (str): KDU instance, corresponding to the Helm Chart release in question + namespace (str): Namespace where this KDU instance is deployed + params_str (str): Params used to upgrade the Helm Chart release + version (str): Constraint with specific version of the Chart to use + atomic (bool): If set, upgrade process rolls back changes made in case of failed upgrade. + The --wait flag will be set automatically if --atomic is used + timeout (float): The time, in seconds, to wait + kubeconfig (str): Kubeconfig file path + force (bool): If set, helm forces resource updates through a replacement strategy. This may recreate pods. + Returns: + str: command to upgrade a Helm Chart release + """ + + timeout_str = "" + if timeout: + timeout_str = "--timeout {}".format(timeout) + + # atomic + atomic_str = "" + if atomic: + atomic_str = "--atomic" + + # force + force_str = "" + if force: + force_str = "--force " + + # version + version_str = "" + if version: + version_str = "--version {}".format(quote(version)) + + # namespace + namespace_str = "" + if namespace: + namespace_str = "--namespace {}".format(quote(namespace)) + + command = ( + "env KUBECONFIG={kubeconfig} {helm} upgrade {namespace} {atomic} --output yaml {params} {timeout} {force}" + "--reuse-values {name} {model} {ver}" + ).format( + kubeconfig=kubeconfig, + helm=self._helm_command, + namespace=namespace_str, + atomic=atomic_str, + force=force_str, + params=params_str, + timeout=timeout_str, + name=quote(kdu_instance), + model=quote(kdu_model), + ver=version_str, + ) + return command + + def _get_rollback_command( + self, kdu_instance, namespace, revision, kubeconfig + ) -> str: + return "env KUBECONFIG={} {} rollback {} {} --wait".format( + kubeconfig, self._helm_command, quote(kdu_instance), revision + ) + + def _get_uninstall_command( + self, kdu_instance: str, namespace: str, kubeconfig: str + ) -> str: + return "env KUBECONFIG={} {} delete --purge {}".format( + kubeconfig, self._helm_command, quote(kdu_instance) + ) diff --git a/n2vc/tests/unit/test_k8s_helm_conn.py b/n2vc/tests/unit/test_k8s_helm_conn.py new file mode 100644 index 0000000..161471a --- /dev/null +++ b/n2vc/tests/unit/test_k8s_helm_conn.py @@ -0,0 +1,740 @@ +## +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# For those usages not covered by the Apache License, Version 2.0 please +# contact: alfonso.tiernosepulveda@telefonica.com +## + +import asynctest +import logging + +from asynctest.mock import Mock +from osm_common.dbmemory import DbMemory +from osm_common.fslocal import FsLocal +from n2vc.k8s_helm_conn import K8sHelmConnector + +__author__ = "Isabel Lloret " + + +class TestK8sHelmConn(asynctest.TestCase): + logging.basicConfig(level=logging.DEBUG) + logger = logging.getLogger(__name__) + logger.setLevel(logging.DEBUG) + + async def setUp(self): + self.db = Mock(DbMemory()) + self.fs = asynctest.Mock(FsLocal()) + self.fs.path = "./tmp/" + self.namespace = "testk8s" + self.service_account = "osm" + self.cluster_id = "helm_cluster_id" + self.cluster_uuid = self.cluster_id + # pass fake kubectl and helm commands to make sure it does not call actual commands + K8sHelmConnector._check_file_exists = asynctest.Mock(return_value=True) + K8sHelmConnector._local_async_exec = asynctest.CoroutineMock( + return_value=(0, "") + ) + cluster_dir = self.fs.path + self.cluster_id + self.kube_config = self.fs.path + self.cluster_id + "/.kube/config" + self.helm_home = self.fs.path + self.cluster_id + "/.helm" + self.env = { + "HELM_HOME": "{}/.helm".format(cluster_dir), + "KUBECONFIG": "{}/.kube/config".format(cluster_dir), + } + self.helm_conn = K8sHelmConnector(self.fs, self.db, log=self.logger) + self.logger.debug("Set up executed") + + @asynctest.fail_on(active_handles=True) + async def test_init_env(self): + # TODO + pass + + @asynctest.fail_on(active_handles=True) + async def test_repo_add(self): + repo_name = "bitnami" + repo_url = "https://charts.bitnami.com/bitnami" + self.helm_conn._local_async_exec = asynctest.CoroutineMock(return_value=("", 0)) + + await self.helm_conn.repo_add(self.cluster_uuid, repo_name, repo_url) + + self.helm_conn.fs.sync.assert_called_once_with(from_path=self.cluster_id) + self.helm_conn.fs.reverse_sync.assert_called_once_with( + from_path=self.cluster_id + ) + self.assertEqual( + self.helm_conn._local_async_exec.call_count, + 2, + "local_async_exec expected 2 calls, called {}".format( + self.helm_conn._local_async_exec.call_count + ), + ) + + repo_update_command = ( + "env KUBECONFIG=./tmp/helm_cluster_id/.kube/config /usr/bin/helm repo update {}" + ).format(repo_name) + repo_add_command = ( + "env KUBECONFIG=./tmp/helm_cluster_id/.kube/config /usr/bin/helm repo add {} {}" + ).format(repo_name, repo_url) + calls = self.helm_conn._local_async_exec.call_args_list + call0_kargs = calls[0][1] + self.assertEqual( + call0_kargs.get("command"), + repo_add_command, + "Invalid repo add command: {}".format(call0_kargs.get("command")), + ) + self.assertEqual( + call0_kargs.get("env"), + self.env, + "Invalid env for add command: {}".format(call0_kargs.get("env")), + ) + call1_kargs = calls[1][1] + self.assertEqual( + call1_kargs.get("command"), + repo_update_command, + "Invalid repo update command: {}".format(call1_kargs.get("command")), + ) + self.assertEqual( + call1_kargs.get("env"), + self.env, + "Invalid env for update command: {}".format(call1_kargs.get("env")), + ) 
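# --- Illustrative sketch (annotation, not part of the patch) ---------------
# All command strings asserted in these tests follow the same pattern the
# helm v2 connector builds: an "env KUBECONFIG=<kube_config>" prefix, the
# helm binary, then the subcommand and its arguments. The helper below is
# hypothetical and only shows how one of the expected strings could be
# assembled from the setUp fixtures instead of being hard-coded.
from shlex import quote


def expected_helm_command(kubeconfig: str, helm: str, *args: str) -> str:
    """Compose a helm v2 command string the way the connector does."""
    return "env KUBECONFIG={} {} {}".format(kubeconfig, helm, " ".join(args))


repo_add_cmd = expected_helm_command(
    "./tmp/helm_cluster_id/.kube/config",
    "/usr/bin/helm",
    "repo",
    "add",
    "bitnami",
    quote("https://charts.bitnami.com/bitnami"),
)
# repo_add_cmd == "env KUBECONFIG=./tmp/helm_cluster_id/.kube/config "
#                 "/usr/bin/helm repo add bitnami https://charts.bitnami.com/bitnami"
# ----------------------------------------------------------------------------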
+ + @asynctest.fail_on(active_handles=True) + async def test_repo_list(self): + self.helm_conn._local_async_exec = asynctest.CoroutineMock(return_value=("", 0)) + + await self.helm_conn.repo_list(self.cluster_uuid) + + self.helm_conn.fs.sync.assert_called_once_with(from_path=self.cluster_id) + self.helm_conn.fs.reverse_sync.assert_called_once_with( + from_path=self.cluster_id + ) + command = "env KUBECONFIG=./tmp/helm_cluster_id/.kube/config /usr/bin/helm repo list --output yaml" + self.helm_conn._local_async_exec.assert_called_with( + command=command, env=self.env, raise_exception_on_error=False + ) + + @asynctest.fail_on(active_handles=True) + async def test_repo_remove(self): + self.helm_conn._local_async_exec = asynctest.CoroutineMock(return_value=("", 0)) + repo_name = "bitnami" + await self.helm_conn.repo_remove(self.cluster_uuid, repo_name) + + self.helm_conn.fs.sync.assert_called_once_with(from_path=self.cluster_id) + self.helm_conn.fs.reverse_sync.assert_called_once_with( + from_path=self.cluster_id + ) + command = "env KUBECONFIG=./tmp/helm_cluster_id/.kube/config /usr/bin/helm repo remove {}".format( + repo_name + ) + self.helm_conn._local_async_exec.assert_called_once_with( + command=command, env=self.env, raise_exception_on_error=True + ) + + @asynctest.fail_on(active_handles=True) + async def test_install(self): + kdu_model = "stable/openldap:1.2.2" + kdu_instance = "stable-openldap-0005399828" + db_dict = {} + self.helm_conn._local_async_exec = asynctest.CoroutineMock(return_value=("", 0)) + self.helm_conn._status_kdu = asynctest.CoroutineMock(return_value=None) + self.helm_conn._store_status = asynctest.CoroutineMock() + self.helm_conn.generate_kdu_instance_name = Mock(return_value=kdu_instance) + + await self.helm_conn.install( + self.cluster_uuid, + kdu_model, + kdu_instance, + atomic=True, + namespace=self.namespace, + db_dict=db_dict, + ) + + self.helm_conn.fs.sync.assert_has_calls( + [ + asynctest.call(from_path=self.cluster_id), + asynctest.call(from_path=self.cluster_id), + ] + ) + self.helm_conn.fs.reverse_sync.assert_has_calls( + [ + asynctest.call(from_path=self.cluster_id), + asynctest.call(from_path=self.cluster_id), + ] + ) + self.helm_conn._store_status.assert_called_with( + cluster_id=self.cluster_id, + kdu_instance=kdu_instance, + namespace=self.namespace, + db_dict=db_dict, + operation="install", + ) + command = ( + "env KUBECONFIG=./tmp/helm_cluster_id/.kube/config /usr/bin/helm install " + "--atomic --output yaml --timeout 300 " + "--name=stable-openldap-0005399828 --namespace testk8s stable/openldap " + "--version 1.2.2" + ) + self.helm_conn._local_async_exec.assert_called_with( + command=command, env=self.env, raise_exception_on_error=False + ) + + @asynctest.fail_on(active_handles=True) + async def test_upgrade_force_true(self): + kdu_model = "stable/openldap:1.2.3" + kdu_instance = "stable-openldap-0005399828" + db_dict = {} + instance_info = { + "chart": "openldap-1.2.2", + "name": kdu_instance, + "namespace": self.namespace, + "revision": 1, + "status": "DEPLOYED", + } + self.helm_conn._local_async_exec = asynctest.CoroutineMock(return_value=("", 0)) + self.helm_conn._store_status = asynctest.CoroutineMock() + self.helm_conn.get_instance_info = asynctest.CoroutineMock( + return_value=instance_info + ) + # TEST-1 (--force true) + await self.helm_conn.upgrade( + self.cluster_uuid, + kdu_instance, + kdu_model, + atomic=True, + db_dict=db_dict, + force=True, + ) + self.helm_conn.fs.sync.assert_called_with(from_path=self.cluster_id) + 
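# Note (annotation): with force=True the upgrade command asserted further
# below carries --force, which per the connector's _get_upgrade_command
# docstring forces resource updates through a replacement strategy and may
# recreate pods; TEST-2 then repeats the upgrade without it.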
self.helm_conn.fs.reverse_sync.assert_has_calls( + [ + asynctest.call(from_path=self.cluster_id), + asynctest.call(from_path=self.cluster_id), + ] + ) + self.helm_conn._store_status.assert_called_with( + cluster_id=self.cluster_id, + kdu_instance=kdu_instance, + namespace=self.namespace, + db_dict=db_dict, + operation="upgrade", + ) + command = ( + "env KUBECONFIG=./tmp/helm_cluster_id/.kube/config /usr/bin/helm upgrade --namespace testk8s " + "--atomic --output yaml --timeout 300 --force --reuse-values stable-openldap-0005399828 stable/openldap " + "--version 1.2.3" + ) + self.helm_conn._local_async_exec.assert_called_with( + command=command, env=self.env, raise_exception_on_error=False + ) + # TEST-2 (--force false) + await self.helm_conn.upgrade( + self.cluster_uuid, + kdu_instance, + kdu_model, + atomic=True, + db_dict=db_dict, + ) + self.helm_conn.fs.sync.assert_called_with(from_path=self.cluster_id) + self.helm_conn.fs.reverse_sync.assert_has_calls( + [ + asynctest.call(from_path=self.cluster_id), + asynctest.call(from_path=self.cluster_id), + ] + ) + self.helm_conn._store_status.assert_called_with( + cluster_id=self.cluster_id, + kdu_instance=kdu_instance, + namespace=self.namespace, + db_dict=db_dict, + operation="upgrade", + ) + command = ( + "env KUBECONFIG=./tmp/helm_cluster_id/.kube/config /usr/bin/helm upgrade --namespace testk8s " + "--atomic --output yaml --timeout 300 --reuse-values stable-openldap-0005399828 stable/openldap " + "--version 1.2.3" + ) + self.helm_conn._local_async_exec.assert_called_with( + command=command, env=self.env, raise_exception_on_error=False + ) + + @asynctest.fail_on(active_handles=True) + async def test_upgrade_namespace(self): + kdu_model = "stable/openldap:1.2.3" + kdu_instance = "stable-openldap-0005399828" + db_dict = {} + instance_info = { + "chart": "openldap-1.2.2", + "name": kdu_instance, + "namespace": self.namespace, + "revision": 1, + "status": "DEPLOYED", + } + self.helm_conn._local_async_exec = asynctest.CoroutineMock(return_value=("", 0)) + self.helm_conn._store_status = asynctest.CoroutineMock() + self.helm_conn.get_instance_info = asynctest.CoroutineMock( + return_value=instance_info + ) + + await self.helm_conn.upgrade( + self.cluster_uuid, + kdu_instance, + kdu_model, + atomic=True, + db_dict=db_dict, + namespace="default", + ) + self.helm_conn.fs.sync.assert_called_with(from_path=self.cluster_id) + self.helm_conn.fs.reverse_sync.assert_has_calls( + [ + asynctest.call(from_path=self.cluster_id), + asynctest.call(from_path=self.cluster_id), + ] + ) + self.helm_conn._store_status.assert_called_with( + cluster_id=self.cluster_id, + kdu_instance=kdu_instance, + namespace="default", + db_dict=db_dict, + operation="upgrade", + ) + command = ( + "env KUBECONFIG=./tmp/helm_cluster_id/.kube/config /usr/bin/helm upgrade --namespace default " + "--atomic --output yaml --timeout 300 --reuse-values stable-openldap-0005399828 stable/openldap " + "--version 1.2.3" + ) + self.helm_conn._local_async_exec.assert_called_with( + command=command, env=self.env, raise_exception_on_error=False + ) + + @asynctest.fail_on(active_handles=True) + async def test_scale(self): + kdu_model = "stable/openldap:1.2.3" + kdu_instance = "stable-openldap-0005399828" + db_dict = {} + instance_info = { + "chart": "openldap-1.2.3", + "name": kdu_instance, + "namespace": self.namespace, + "revision": 1, + "status": "DEPLOYED", + } + repo_list = [ + { + "name": "stable", + "url": "https://kubernetes-charts.storage.googleapis.com/", + } + ] + kdu_values = """ + # Default 
values for openldap. + # This is a YAML-formatted file. + # Declare variables to be passed into your templates. + + replicaCount: 1 + dummy-app: + replicas: 2 + """ + + self.helm_conn.repo_list = asynctest.CoroutineMock(return_value=repo_list) + self.helm_conn.values_kdu = asynctest.CoroutineMock(return_value=kdu_values) + self.helm_conn._local_async_exec = asynctest.CoroutineMock(return_value=("", 0)) + self.helm_conn._store_status = asynctest.CoroutineMock() + self.helm_conn.get_instance_info = asynctest.CoroutineMock( + return_value=instance_info + ) + + # TEST-1 + await self.helm_conn.scale( + kdu_instance, + 2, + "", + kdu_model=kdu_model, + cluster_uuid=self.cluster_uuid, + atomic=True, + db_dict=db_dict, + ) + command = ( + "env KUBECONFIG=./tmp/helm_cluster_id/.kube/config " + "/usr/bin/helm upgrade --namespace testk8s --atomic --output yaml --set replicaCount=2 " + "--timeout 1800 --reuse-values stable-openldap-0005399828 stable/openldap " + "--version 1.2.3" + ) + self.helm_conn._local_async_exec.assert_called_once_with( + command=command, env=self.env, raise_exception_on_error=False + ) + + # TEST-2 + await self.helm_conn.scale( + kdu_instance, + 3, + "dummy-app", + kdu_model=kdu_model, + cluster_uuid=self.cluster_uuid, + atomic=True, + db_dict=db_dict, + ) + command = ( + "env KUBECONFIG=./tmp/helm_cluster_id/.kube/config " + "/usr/bin/helm upgrade --namespace testk8s --atomic --output yaml --set dummy-app.replicas=3 " + "--timeout 1800 --reuse-values stable-openldap-0005399828 stable/openldap " + "--version 1.2.3" + ) + self.helm_conn._local_async_exec.assert_called_with( + command=command, env=self.env, raise_exception_on_error=False + ) + self.helm_conn.fs.reverse_sync.assert_called_with(from_path=self.cluster_id) + self.helm_conn._store_status.assert_called_with( + cluster_id=self.cluster_id, + kdu_instance=kdu_instance, + namespace=self.namespace, + db_dict=db_dict, + operation="scale", + ) + + @asynctest.fail_on(active_handles=True) + async def test_rollback(self): + kdu_instance = "stable-openldap-0005399828" + db_dict = {} + instance_info = { + "chart": "openldap-1.2.3", + "name": kdu_instance, + "namespace": self.namespace, + "revision": 2, + "status": "DEPLOYED", + } + self.helm_conn._local_async_exec = asynctest.CoroutineMock(return_value=("", 0)) + self.helm_conn._store_status = asynctest.CoroutineMock() + self.helm_conn.get_instance_info = asynctest.CoroutineMock( + return_value=instance_info + ) + + await self.helm_conn.rollback( + self.cluster_uuid, kdu_instance=kdu_instance, revision=1, db_dict=db_dict + ) + self.helm_conn.fs.sync.assert_called_with(from_path=self.cluster_id) + self.helm_conn.fs.reverse_sync.assert_called_once_with( + from_path=self.cluster_id + ) + self.helm_conn._store_status.assert_called_with( + cluster_id=self.cluster_id, + kdu_instance=kdu_instance, + namespace=self.namespace, + db_dict=db_dict, + operation="rollback", + ) + command = ( + "env KUBECONFIG=./tmp/helm_cluster_id/.kube/config " + "/usr/bin/helm rollback stable-openldap-0005399828 1 --wait" + ) + self.helm_conn._local_async_exec.assert_called_once_with( + command=command, env=self.env, raise_exception_on_error=False + ) + + @asynctest.fail_on(active_handles=True) + async def test_uninstall(self): + kdu_instance = "stable-openldap-0005399828" + instance_info = { + "chart": "openldap-1.2.2", + "name": kdu_instance, + "namespace": self.namespace, + "revision": 3, + "status": "DEPLOYED", + } + self.helm_conn._local_async_exec = asynctest.CoroutineMock(return_value=("", 0)) + 
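# Note (annotation): helm v2 removes a release with "delete --purge", which
# is the command asserted at the end of this test; helm v3 later replaced it
# with "uninstall", where release history is purged by default.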
self.helm_conn._store_status = asynctest.CoroutineMock() + self.helm_conn.get_instance_info = asynctest.CoroutineMock( + return_value=instance_info + ) + + await self.helm_conn.uninstall(self.cluster_uuid, kdu_instance) + self.helm_conn.fs.sync.assert_called_with(from_path=self.cluster_id) + self.helm_conn.fs.reverse_sync.assert_called_once_with( + from_path=self.cluster_id + ) + command = "env KUBECONFIG=./tmp/helm_cluster_id/.kube/config /usr/bin/helm delete --purge {}".format( + kdu_instance + ) + self.helm_conn._local_async_exec.assert_called_once_with( + command=command, env=self.env, raise_exception_on_error=True + ) + + @asynctest.fail_on(active_handles=True) + async def test_get_services(self): + kdu_instance = "test_services_1" + service = {"name": "testservice", "type": "LoadBalancer"} + self.helm_conn._local_async_exec_pipe = asynctest.CoroutineMock( + return_value=("", 0) + ) + self.helm_conn._parse_services = Mock(return_value=["testservice"]) + self.helm_conn._get_service = asynctest.CoroutineMock(return_value=service) + + services = await self.helm_conn.get_services( + self.cluster_uuid, kdu_instance, self.namespace + ) + self.helm_conn.fs.sync.assert_called_once_with(from_path=self.cluster_id) + self.helm_conn.fs.reverse_sync.assert_called_once_with( + from_path=self.cluster_id + ) + self.helm_conn._parse_services.assert_called_once() + command1 = "env KUBECONFIG=./tmp/helm_cluster_id/.kube/config /usr/bin/helm get manifest {} ".format( + kdu_instance + ) + command2 = "/usr/bin/kubectl get --namespace={} -f -".format(self.namespace) + self.helm_conn._local_async_exec_pipe.assert_called_once_with( + command1, command2, env=self.env, raise_exception_on_error=True + ) + self.assertEqual( + services, [service], "Invalid service returned from get_service" + ) + + @asynctest.fail_on(active_handles=True) + async def test_get_service(self): + service_name = "service1" + + self.helm_conn._local_async_exec = asynctest.CoroutineMock(return_value=("", 0)) + await self.helm_conn.get_service( + self.cluster_uuid, service_name, self.namespace + ) + + self.helm_conn.fs.sync.assert_called_once_with(from_path=self.cluster_id) + self.helm_conn.fs.reverse_sync.assert_called_once_with( + from_path=self.cluster_id + ) + command = ( + "/usr/bin/kubectl --kubeconfig=./tmp/helm_cluster_id/.kube/config " + "--namespace=testk8s get service service1 -o=yaml" + ) + self.helm_conn._local_async_exec.assert_called_once_with( + command=command, env=self.env, raise_exception_on_error=True + ) + + @asynctest.fail_on(active_handles=True) + async def test_inspect_kdu(self): + self.helm_conn._local_async_exec = asynctest.CoroutineMock(return_value=("", 0)) + + kdu_model = "stable/openldap:1.2.4" + repo_url = "https://kubernetes-charts.storage.googleapis.com/" + await self.helm_conn.inspect_kdu(kdu_model, repo_url) + + command = ( + "/usr/bin/helm inspect openldap --repo " + "https://kubernetes-charts.storage.googleapis.com/ " + "--version 1.2.4" + ) + self.helm_conn._local_async_exec.assert_called_with(command=command) + + @asynctest.fail_on(active_handles=True) + async def test_help_kdu(self): + self.helm_conn._local_async_exec = asynctest.CoroutineMock(return_value=("", 0)) + + kdu_model = "stable/openldap:1.2.4" + repo_url = "https://kubernetes-charts.storage.googleapis.com/" + await self.helm_conn.help_kdu(kdu_model, repo_url) + + command = ( + "/usr/bin/helm inspect readme openldap --repo " + "https://kubernetes-charts.storage.googleapis.com/ " + "--version 1.2.4" + ) + 
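# Note (annotation): the expected string above illustrates how the connector
# splits kdu_model "stable/openldap:1.2.4" when a repo_url is given -- the
# chart name after the repo prefix becomes "openldap", the supplied repo_url
# turns into --repo, and the ":1.2.4" suffix into --version.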
self.helm_conn._local_async_exec.assert_called_with(command=command) + + @asynctest.fail_on(active_handles=True) + async def test_values_kdu(self): + self.helm_conn._local_async_exec = asynctest.CoroutineMock(return_value=("", 0)) + + kdu_model = "stable/openldap:1.2.4" + repo_url = "https://kubernetes-charts.storage.googleapis.com/" + await self.helm_conn.values_kdu(kdu_model, repo_url) + + command = ( + "/usr/bin/helm inspect values openldap --repo " + "https://kubernetes-charts.storage.googleapis.com/ " + "--version 1.2.4" + ) + self.helm_conn._local_async_exec.assert_called_with(command=command) + + @asynctest.fail_on(active_handles=True) + async def test_get_values_kdu(self): + self.helm_conn._local_async_exec = asynctest.CoroutineMock(return_value=("", 0)) + + kdu_instance = "stable-openldap-0005399828" + await self.helm_conn.get_values_kdu( + kdu_instance, self.namespace, self.env["KUBECONFIG"] + ) + + command = ( + "env KUBECONFIG=./tmp/helm_cluster_id/.kube/config /usr/bin/helm get values " + "stable-openldap-0005399828 --output yaml" + ) + self.helm_conn._local_async_exec.assert_called_with(command=command) + + @asynctest.fail_on(active_handles=True) + async def test_instances_list(self): + self.helm_conn._local_async_exec = asynctest.CoroutineMock(return_value=("", 0)) + + await self.helm_conn.instances_list(self.cluster_uuid) + self.helm_conn.fs.sync.assert_called_once_with(from_path=self.cluster_id) + self.helm_conn.fs.reverse_sync.assert_called_once_with( + from_path=self.cluster_id + ) + command = "/usr/bin/helm list --output yaml" + self.helm_conn._local_async_exec.assert_called_once_with( + command=command, env=self.env, raise_exception_on_error=True + ) + + @asynctest.fail_on(active_handles=True) + async def test_status_kdu(self): + kdu_instance = "stable-openldap-0005399828" + self.helm_conn._local_async_exec = asynctest.CoroutineMock(return_value=("", 0)) + + await self.helm_conn._status_kdu( + self.cluster_id, kdu_instance, self.namespace, yaml_format=True + ) + command = ( + "env KUBECONFIG=./tmp/helm_cluster_id/.kube/config /usr/bin/helm status {} --output yaml" + ).format(kdu_instance) + self.helm_conn._local_async_exec.assert_called_once_with( + command=command, + env=self.env, + raise_exception_on_error=True, + show_error_log=False, + ) + + @asynctest.fail_on(active_handles=True) + async def test_store_status(self): + kdu_instance = "stable-openldap-0005399828" + db_dict = {} + status = { + "info": { + "description": "Install complete", + "status": { + "code": "1", + "notes": "The openldap helm chart has been installed", + }, + } + } + self.helm_conn._status_kdu = asynctest.CoroutineMock(return_value=status) + self.helm_conn.write_app_status_to_db = asynctest.CoroutineMock( + return_value=status + ) + + await self.helm_conn._store_status( + cluster_id=self.cluster_id, + kdu_instance=kdu_instance, + namespace=self.namespace, + db_dict=db_dict, + operation="install", + ) + self.helm_conn._status_kdu.assert_called_once_with( + cluster_id=self.cluster_id, + kdu_instance=kdu_instance, + namespace=self.namespace, + yaml_format=False, + ) + self.helm_conn.write_app_status_to_db.assert_called_once_with( + db_dict=db_dict, + status="Install complete", + detailed_status=str(status), + operation="install", + ) + + @asynctest.fail_on(active_handles=True) + async def test_reset_uninstall_false(self): + self.helm_conn._uninstall_sw = asynctest.CoroutineMock() + + await self.helm_conn.reset(self.cluster_uuid, force=False, uninstall_sw=False) + 
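# Note (annotation): with uninstall_sw=False, reset() only syncs the cluster
# directory and deletes it from the shared file system; the assertions below
# verify that _uninstall_sw is never called.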
self.helm_conn.fs.sync.assert_called_once_with(from_path=self.cluster_id) + self.helm_conn.fs.file_delete.assert_called_once_with( + self.cluster_id, ignore_non_exist=True + ) + self.helm_conn._uninstall_sw.assert_not_called() + + @asynctest.fail_on(active_handles=True) + async def test_reset_uninstall(self): + kdu_instance = "stable-openldap-0021099429" + instances = [ + { + "app_version": "2.4.48", + "chart": "openldap-1.2.3", + "name": kdu_instance, + "namespace": self.namespace, + "revision": "1", + "status": "deployed", + "updated": "2020-10-30 11:11:20.376744191 +0000 UTC", + } + ] + self.helm_conn._get_namespace = Mock(return_value=self.namespace) + self.helm_conn._uninstall_sw = asynctest.CoroutineMock() + self.helm_conn.instances_list = asynctest.CoroutineMock(return_value=instances) + self.helm_conn.uninstall = asynctest.CoroutineMock() + + await self.helm_conn.reset(self.cluster_uuid, force=True, uninstall_sw=True) + self.helm_conn.fs.sync.assert_called_once_with(from_path=self.cluster_id) + self.helm_conn.fs.file_delete.assert_called_once_with( + self.cluster_id, ignore_non_exist=True + ) + self.helm_conn._get_namespace.assert_called_once_with( + cluster_uuid=self.cluster_uuid + ) + self.helm_conn.instances_list.assert_called_once_with( + cluster_uuid=self.cluster_uuid + ) + self.helm_conn.uninstall.assert_called_once_with( + cluster_uuid=self.cluster_uuid, kdu_instance=kdu_instance + ) + self.helm_conn._uninstall_sw.assert_called_once_with( + cluster_id=self.cluster_id, namespace=self.namespace + ) + + @asynctest.fail_on(active_handles=True) + async def test_uninstall_sw_namespace(self): + self.helm_conn._local_async_exec = asynctest.CoroutineMock(return_value=("", 0)) + + await self.helm_conn._uninstall_sw(self.cluster_id, self.namespace) + calls = self.helm_conn._local_async_exec.call_args_list + self.assertEqual( + len(calls), 3, "To uninstall should have executed three commands" + ) + call0_kargs = calls[0][1] + command_0 = "/usr/bin/helm --kubeconfig={} --home={} reset".format( + self.kube_config, self.helm_home + ) + self.assertEqual( + call0_kargs, + {"command": command_0, "raise_exception_on_error": True, "env": self.env}, + "Invalid args for first call to local_exec", + ) + call1_kargs = calls[1][1] + command_1 = ( + "/usr/bin/kubectl --kubeconfig={} delete " + "clusterrolebinding.rbac.authorization.k8s.io/osm-tiller-cluster-rule".format( + self.kube_config + ) + ) + self.assertEqual( + call1_kargs, + {"command": command_1, "raise_exception_on_error": False, "env": self.env}, + "Invalid args for second call to local_exec", + ) + call2_kargs = calls[2][1] + command_2 = ( + "/usr/bin/kubectl --kubeconfig={} --namespace {} delete " + "serviceaccount/{}".format( + self.kube_config, self.namespace, self.service_account + ) + ) + self.assertEqual( + call2_kargs, + {"command": command_2, "raise_exception_on_error": False, "env": self.env}, + "Invalid args for third call to local_exec", + ) -- 2.17.1 From 3bc59c1786633d06fd9d8016e3ba36a611e635f4 Mon Sep 17 00:00:00 2001 From: almagia Date: Thu, 30 Nov 2023 19:50:02 +0200 Subject: [PATCH 14/16] Revert "Revert "Feature 11002: Deprecate helmv2"" This reverts commit dfb624e236597b96658da80fe7436b0f92416cc3. 
TSC request on 30.11 Change-Id: Ieb1e64d052c913b8aaedfe07620245054aa82a08 --- n2vc/k8s_helm_conn.py | 776 -------------------------- n2vc/tests/unit/test_k8s_helm_conn.py | 740 ------------------------ 2 files changed, 1516 deletions(-) delete mode 100644 n2vc/k8s_helm_conn.py delete mode 100644 n2vc/tests/unit/test_k8s_helm_conn.py diff --git a/n2vc/k8s_helm_conn.py b/n2vc/k8s_helm_conn.py deleted file mode 100644 index 17e960f..0000000 --- a/n2vc/k8s_helm_conn.py +++ /dev/null @@ -1,776 +0,0 @@ -## -# Copyright 2019 Telefonica Investigacion y Desarrollo, S.A.U. -# This file is part of OSM -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# For those usages not covered by the Apache License, Version 2.0 please -# contact with: nfvlabs@tid.es -## -import asyncio -from typing import Union -from shlex import quote -import os -import yaml - -from n2vc.k8s_helm_base_conn import K8sHelmBaseConnector -from n2vc.exceptions import K8sException - - -class K8sHelmConnector(K8sHelmBaseConnector): - - """ - #################################################################################### - ################################### P U B L I C #################################### - #################################################################################### - """ - - def __init__( - self, - fs: object, - db: object, - kubectl_command: str = "/usr/bin/kubectl", - helm_command: str = "/usr/bin/helm", - log: object = None, - on_update_db=None, - ): - """ - Initializes helm connector for helm v2 - - :param fs: file system for kubernetes and helm configuration - :param db: database object to write current operation status - :param kubectl_command: path to kubectl executable - :param helm_command: path to helm executable - :param log: logger - :param on_update_db: callback called when k8s connector updates database - """ - - # parent class - K8sHelmBaseConnector.__init__( - self, - db=db, - log=log, - fs=fs, - kubectl_command=kubectl_command, - helm_command=helm_command, - on_update_db=on_update_db, - ) - - self.log.info("Initializing K8S Helm2 connector") - - # initialize helm client-only - self.log.debug("Initializing helm client-only...") - command = "{} init --client-only {} ".format( - self._helm_command, - "--stable-repo-url {}".format(quote(self._stable_repo_url)) - if self._stable_repo_url - else "--skip-repos", - ) - try: - asyncio.create_task( - self._local_async_exec(command=command, raise_exception_on_error=False) - ) - except Exception as e: - self.warning( - msg="helm init failed (it was already initialized): {}".format(e) - ) - - self.log.info("K8S Helm2 connector initialized") - - async def install( - self, - cluster_uuid: str, - kdu_model: str, - kdu_instance: str, - atomic: bool = True, - timeout: float = 300, - params: dict = None, - db_dict: dict = None, - kdu_name: str = None, - namespace: str = None, - **kwargs, - ): - """ - Deploys of a new KDU instance. 
It would implicitly rely on the `install` call - to deploy the Chart/Bundle properly parametrized (in practice, this call would - happen before any _initial-config-primitive_of the VNF is called). - - :param cluster_uuid: UUID of a K8s cluster known by OSM - :param kdu_model: chart/reference (string), which can be either - of these options: - - a name of chart available via the repos known by OSM - (e.g. stable/openldap, stable/openldap:1.2.4) - - a path to a packaged chart (e.g. mychart.tgz) - - a path to an unpacked chart directory or a URL (e.g. mychart) - :param kdu_instance: Kdu instance name - :param atomic: If set, installation process purges chart/bundle on fail, also - will wait until all the K8s objects are active - :param timeout: Time in seconds to wait for the install of the chart/bundle - (defaults to Helm default timeout: 300s) - :param params: dictionary of key-value pairs for instantiation parameters - (overriding default values) - :param dict db_dict: where to write into database when the status changes. - It contains a dict with {collection: , filter: {}, - path: }, - e.g. {collection: "nsrs", filter: - {_id: , path: "_admin.deployed.K8S.3"} - :param kdu_name: Name of the KDU instance to be installed - :param namespace: K8s namespace to use for the KDU instance - :param kwargs: Additional parameters (None yet) - :return: True if successful - """ - self.log.debug("installing {} in cluster {}".format(kdu_model, cluster_uuid)) - - # sync local dir - self.fs.sync(from_path=cluster_uuid) - - # init env, paths - paths, env = self._init_paths_env( - cluster_name=cluster_uuid, create_if_not_exist=True - ) - - await self._install_impl( - cluster_uuid, - kdu_model, - paths, - env, - kdu_instance, - atomic=atomic, - timeout=timeout, - params=params, - db_dict=db_dict, - kdu_name=kdu_name, - namespace=namespace, - ) - - # sync fs - self.fs.reverse_sync(from_path=cluster_uuid) - - self.log.debug("Returning kdu_instance {}".format(kdu_instance)) - return True - - async def inspect_kdu(self, kdu_model: str, repo_url: str = None) -> str: - self.log.debug( - "inspect kdu_model {} from (optional) repo: {}".format(kdu_model, repo_url) - ) - - return await self._exec_inspect_command( - inspect_command="", kdu_model=kdu_model, repo_url=repo_url - ) - - """ - #################################################################################### - ################################### P R I V A T E ################################## - #################################################################################### - """ - - def _init_paths_env(self, cluster_name: str, create_if_not_exist: bool = True): - """ - Creates and returns base cluster and kube dirs and returns them. 
- Also created helm3 dirs according to new directory specification, paths are - returned and also environment variables that must be provided to execute commands - - Helm 2 directory specification uses helm_home dir: - - The variables assigned for this paths are: - - Helm hone: $HELM_HOME - - helm kubeconfig: $KUBECONFIG - - :param cluster_name: cluster_name - :return: Dictionary with config_paths and dictionary with helm environment variables - """ - base = self.fs.path - if base.endswith("/") or base.endswith("\\"): - base = base[:-1] - - # base dir for cluster - cluster_dir = base + "/" + cluster_name - - # kube dir - kube_dir = cluster_dir + "/" + ".kube" - if create_if_not_exist and not os.path.exists(kube_dir): - self.log.debug("Creating dir {}".format(kube_dir)) - os.makedirs(kube_dir) - - # helm home dir - helm_dir = cluster_dir + "/" + ".helm" - if create_if_not_exist and not os.path.exists(helm_dir): - self.log.debug("Creating dir {}".format(helm_dir)) - os.makedirs(helm_dir) - - config_filename = kube_dir + "/config" - - # 2 - Prepare dictionary with paths - paths = { - "kube_dir": kube_dir, - "kube_config": config_filename, - "cluster_dir": cluster_dir, - "helm_dir": helm_dir, - } - - for file_name, file in paths.items(): - if "dir" in file_name and not os.path.exists(file): - err_msg = "{} dir does not exist".format(file) - self.log.error(err_msg) - raise K8sException(err_msg) - - # 3 - Prepare environment variables - env = {"HELM_HOME": helm_dir, "KUBECONFIG": config_filename} - - return paths, env - - async def _get_services(self, cluster_id, kdu_instance, namespace, kubeconfig): - # init config, env - paths, env = self._init_paths_env( - cluster_name=cluster_id, create_if_not_exist=True - ) - - command1 = "env KUBECONFIG={} {} get manifest {} ".format( - kubeconfig, self._helm_command, quote(kdu_instance) - ) - command2 = "{} get --namespace={} -f -".format( - self.kubectl_command, quote(namespace) - ) - output, _rc = await self._local_async_exec_pipe( - command1, command2, env=env, raise_exception_on_error=True - ) - services = self._parse_services(output) - - return services - - async def _cluster_init( - self, cluster_id: str, namespace: str, paths: dict, env: dict - ): - """ - Implements the helm version dependent cluster initialization: - For helm2 it initialized tiller environment if needed - """ - - # check if tiller pod is up in cluster - command = "{} --kubeconfig={} --namespace={} get deployments".format( - self.kubectl_command, paths["kube_config"], quote(namespace) - ) - output, _rc = await self._local_async_exec( - command=command, raise_exception_on_error=True, env=env - ) - - output_table = self._output_to_table(output=output) - - # find 'tiller' pod in all pods - already_initialized = False - try: - for row in output_table: - if row[0].startswith("tiller-deploy"): - already_initialized = True - break - except Exception: - pass - - # helm init - n2vc_installed_sw = False - if not already_initialized: - self.log.info( - "Initializing helm in client and server: {}".format(cluster_id) - ) - command = "{} --kubeconfig={} --namespace kube-system create serviceaccount {}".format( - self.kubectl_command, paths["kube_config"], quote(self.service_account) - ) - _, _rc = await self._local_async_exec( - command=command, raise_exception_on_error=False, env=env - ) - - command = ( - "{} --kubeconfig={} create clusterrolebinding osm-tiller-cluster-rule " - "--clusterrole=cluster-admin --serviceaccount=kube-system:{}" - ).format( - self.kubectl_command, 
paths["kube_config"], quote(self.service_account) - ) - _, _rc = await self._local_async_exec( - command=command, raise_exception_on_error=False, env=env - ) - - command = ( - "{} --kubeconfig={} --tiller-namespace={} --home={} --service-account {} " - " {} init" - ).format( - self._helm_command, - paths["kube_config"], - quote(namespace), - quote(paths["helm_dir"]), - quote(self.service_account), - "--stable-repo-url {}".format(quote(self._stable_repo_url)) - if self._stable_repo_url - else "--skip-repos", - ) - _, _rc = await self._local_async_exec( - command=command, raise_exception_on_error=True, env=env - ) - n2vc_installed_sw = True - else: - # check client helm installation - check_file = paths["helm_dir"] + "/repository/repositories.yaml" - if not self._check_file_exists( - filename=check_file, exception_if_not_exists=False - ): - self.log.info("Initializing helm in client: {}".format(cluster_id)) - command = ( - "{} --kubeconfig={} --tiller-namespace={} " - "--home={} init --client-only {} " - ).format( - self._helm_command, - paths["kube_config"], - quote(namespace), - quote(paths["helm_dir"]), - "--stable-repo-url {}".format(quote(self._stable_repo_url)) - if self._stable_repo_url - else "--skip-repos", - ) - output, _rc = await self._local_async_exec( - command=command, raise_exception_on_error=True, env=env - ) - else: - self.log.info("Helm client already initialized") - - repo_list = await self.repo_list(cluster_id) - for repo in repo_list: - if repo["name"] == "stable" and repo["url"] != self._stable_repo_url: - self.log.debug("Add new stable repo url: {}") - await self.repo_remove(cluster_id, "stable") - if self._stable_repo_url: - await self.repo_add(cluster_id, "stable", self._stable_repo_url) - break - - return n2vc_installed_sw - - async def _uninstall_sw(self, cluster_id: str, namespace: str): - # uninstall Tiller if necessary - - self.log.debug("Uninstalling tiller from cluster {}".format(cluster_id)) - - # init paths, env - paths, env = self._init_paths_env( - cluster_name=cluster_id, create_if_not_exist=True - ) - - if not namespace: - # find namespace for tiller pod - command = "{} --kubeconfig={} get deployments --all-namespaces".format( - self.kubectl_command, quote(paths["kube_config"]) - ) - output, _rc = await self._local_async_exec( - command=command, raise_exception_on_error=False, env=env - ) - output_table = self._output_to_table(output=output) - namespace = None - for r in output_table: - try: - if "tiller-deploy" in r[1]: - namespace = r[0] - break - except Exception: - pass - else: - msg = "Tiller deployment not found in cluster {}".format(cluster_id) - self.log.error(msg) - - self.log.debug("namespace for tiller: {}".format(namespace)) - - if namespace: - # uninstall tiller from cluster - self.log.debug("Uninstalling tiller from cluster {}".format(cluster_id)) - command = "{} --kubeconfig={} --home={} reset".format( - self._helm_command, - quote(paths["kube_config"]), - quote(paths["helm_dir"]), - ) - self.log.debug("resetting: {}".format(command)) - output, _rc = await self._local_async_exec( - command=command, raise_exception_on_error=True, env=env - ) - # Delete clusterrolebinding and serviceaccount. - # Ignore if errors for backward compatibility - command = ( - "{} --kubeconfig={} delete clusterrolebinding.rbac.authorization.k8s." 
- "io/osm-tiller-cluster-rule" - ).format(self.kubectl_command, quote(paths["kube_config"])) - output, _rc = await self._local_async_exec( - command=command, raise_exception_on_error=False, env=env - ) - command = ( - "{} --kubeconfig={} --namespace {} delete serviceaccount/{}".format( - self.kubectl_command, - quote(paths["kube_config"]), - quote(namespace), - quote(self.service_account), - ) - ) - output, _rc = await self._local_async_exec( - command=command, raise_exception_on_error=False, env=env - ) - - else: - self.log.debug("namespace not found") - - async def _instances_list(self, cluster_id): - # init paths, env - paths, env = self._init_paths_env( - cluster_name=cluster_id, create_if_not_exist=True - ) - - command = "{} list --output yaml".format(self._helm_command) - - output, _rc = await self._local_async_exec( - command=command, raise_exception_on_error=True, env=env - ) - - if output and len(output) > 0: - # parse yaml and update keys to lower case to unify with helm3 - instances = yaml.load(output, Loader=yaml.SafeLoader).get("Releases") - new_instances = [] - for instance in instances: - new_instance = dict((k.lower(), v) for k, v in instance.items()) - new_instances.append(new_instance) - return new_instances - else: - return [] - - def _get_inspect_command( - self, show_command: str, kdu_model: str, repo_str: str, version: str - ): - inspect_command = "{} inspect {} {}{} {}".format( - self._helm_command, show_command, quote(kdu_model), repo_str, version - ) - return inspect_command - - def _get_get_command( - self, get_command: str, kdu_instance: str, namespace: str, kubeconfig: str - ): - get_command = "env KUBECONFIG={} {} get {} {} --output yaml".format( - kubeconfig, self._helm_command, get_command, quote(kdu_instance) - ) - return get_command - - async def _status_kdu( - self, - cluster_id: str, - kdu_instance: str, - namespace: str = None, - yaml_format: bool = False, - show_error_log: bool = False, - ) -> Union[str, dict]: - self.log.debug( - "status of kdu_instance: {}, namespace: {} ".format(kdu_instance, namespace) - ) - - # init config, env - paths, env = self._init_paths_env( - cluster_name=cluster_id, create_if_not_exist=True - ) - command = ("env KUBECONFIG={} {} status {} --output yaml").format( - paths["kube_config"], self._helm_command, quote(kdu_instance) - ) - output, rc = await self._local_async_exec( - command=command, - raise_exception_on_error=True, - show_error_log=show_error_log, - env=env, - ) - - if yaml_format: - return str(output) - - if rc != 0: - return None - - data = yaml.load(output, Loader=yaml.SafeLoader) - - # remove field 'notes' - try: - del data.get("info").get("status")["notes"] - except KeyError: - pass - - # parse the manifest to a list of dictionaries - if "manifest" in data: - manifest_str = data.get("manifest") - manifest_docs = yaml.load_all(manifest_str, Loader=yaml.SafeLoader) - - data["manifest"] = [] - for doc in manifest_docs: - data["manifest"].append(doc) - - # parse field 'resources' - try: - resources = str(data.get("info").get("status").get("resources")) - resource_table = self._output_to_table(resources) - data.get("info").get("status")["resources"] = resource_table - except Exception: - pass - - # set description to lowercase (unify with helm3) - try: - data.get("info")["description"] = data.get("info").pop("Description") - except KeyError: - pass - - return data - - def _get_helm_chart_repos_ids(self, cluster_uuid) -> list: - repo_ids = [] - cluster_filter = {"_admin.helm-chart.id": cluster_uuid} - cluster = 
self.db.get_one("k8sclusters", cluster_filter) - if cluster: - repo_ids = cluster.get("_admin").get("helm_chart_repos") or [] - return repo_ids - else: - raise K8sException( - "k8cluster with helm-id : {} not found".format(cluster_uuid) - ) - - async def _is_install_completed(self, cluster_id: str, kdu_instance: str) -> bool: - # init config, env - paths, env = self._init_paths_env( - cluster_name=cluster_id, create_if_not_exist=True - ) - - status = await self._status_kdu( - cluster_id=cluster_id, kdu_instance=kdu_instance, yaml_format=False - ) - - # extract info.status.resources-> str - # format: - # ==> v1/Deployment - # NAME READY UP-TO-DATE AVAILABLE AGE - # halting-horse-mongodb 0/1 1 0 0s - # halting-petit-mongodb 1/1 1 0 0s - # blank line - resources = K8sHelmBaseConnector._get_deep( - status, ("info", "status", "resources") - ) - - # convert to table - resources = K8sHelmBaseConnector._output_to_table(resources) - - num_lines = len(resources) - index = 0 - ready = True - while index < num_lines: - try: - line1 = resources[index] - index += 1 - # find '==>' in column 0 - if line1[0] == "==>": - line2 = resources[index] - index += 1 - # find READY in column 1 - if line2[1] == "READY": - # read next lines - line3 = resources[index] - index += 1 - while len(line3) > 1 and index < num_lines: - ready_value = line3[1] - parts = ready_value.split(sep="/") - current = int(parts[0]) - total = int(parts[1]) - if current < total: - self.log.debug("NOT READY:\n {}".format(line3)) - ready = False - line3 = resources[index] - index += 1 - - except Exception: - pass - - return ready - - def _get_install_command( - self, - kdu_model, - kdu_instance, - namespace, - params_str, - version, - atomic, - timeout, - kubeconfig, - ) -> str: - timeout_str = "" - if timeout: - timeout_str = "--timeout {}".format(timeout) - - # atomic - atomic_str = "" - if atomic: - atomic_str = "--atomic" - # namespace - namespace_str = "" - if namespace: - namespace_str = "--namespace {}".format(quote(namespace)) - - # version - version_str = "" - if version: - version_str = "--version {}".format(version) - - command = ( - "env KUBECONFIG={kubeconfig} {helm} install {atomic} --output yaml " - "{params} {timeout} --name={name} {ns} {model} {ver}".format( - kubeconfig=kubeconfig, - helm=self._helm_command, - atomic=atomic_str, - params=params_str, - timeout=timeout_str, - name=quote(kdu_instance), - ns=namespace_str, - model=quote(kdu_model), - ver=version_str, - ) - ) - return command - - def _get_upgrade_scale_command( - self, - kdu_model: str, - kdu_instance: str, - namespace: str, - scale: int, - version: str, - atomic: bool, - replica_str: str, - timeout: float, - resource_name: str, - kubeconfig: str, - ) -> str: - """Generates the command to scale a Helm Chart release - - Args: - kdu_model (str): Kdu model name, corresponding to the Helm local location or repository - kdu_instance (str): KDU instance, corresponding to the Helm Chart release in question - namespace (str): Namespace where this KDU instance is deployed - scale (int): Scale count - version (str): Constraint with specific version of the Chart to use - atomic (bool): If set, upgrade process rolls back changes made in case of failed upgrade. 
- The --wait flag will be set automatically if --atomic is used - replica_str (str): The key under resource_name key where the scale count is stored - timeout (float): The time, in seconds, to wait - resource_name (str): The KDU's resource to scale - kubeconfig (str): Kubeconfig file path - - Returns: - str: command to scale a Helm Chart release - """ - - # scale - if resource_name: - scale_dict = {"{}.{}".format(resource_name, replica_str): scale} - else: - scale_dict = {replica_str: scale} - - scale_str = self._params_to_set_option(scale_dict) - - return self._get_upgrade_command( - kdu_model=kdu_model, - kdu_instance=kdu_instance, - namespace=namespace, - params_str=scale_str, - version=version, - atomic=atomic, - timeout=timeout, - kubeconfig=kubeconfig, - ) - - def _get_upgrade_command( - self, - kdu_model, - kdu_instance, - namespace, - params_str, - version, - atomic, - timeout, - kubeconfig, - force: bool = False, - ) -> str: - """Generates the command to upgrade a Helm Chart release - - Args: - kdu_model (str): Kdu model name, corresponding to the Helm local location or repository - kdu_instance (str): KDU instance, corresponding to the Helm Chart release in question - namespace (str): Namespace where this KDU instance is deployed - params_str (str): Params used to upgrade the Helm Chart release - version (str): Constraint with specific version of the Chart to use - atomic (bool): If set, upgrade process rolls back changes made in case of failed upgrade. - The --wait flag will be set automatically if --atomic is used - timeout (float): The time, in seconds, to wait - kubeconfig (str): Kubeconfig file path - force (bool): If set, helm forces resource updates through a replacement strategy. This may recreate pods. - Returns: - str: command to upgrade a Helm Chart release - """ - - timeout_str = "" - if timeout: - timeout_str = "--timeout {}".format(timeout) - - # atomic - atomic_str = "" - if atomic: - atomic_str = "--atomic" - - # force - force_str = "" - if force: - force_str = "--force " - - # version - version_str = "" - if version: - version_str = "--version {}".format(quote(version)) - - # namespace - namespace_str = "" - if namespace: - namespace_str = "--namespace {}".format(quote(namespace)) - - command = ( - "env KUBECONFIG={kubeconfig} {helm} upgrade {namespace} {atomic} --output yaml {params} {timeout} {force}" - "--reuse-values {name} {model} {ver}" - ).format( - kubeconfig=kubeconfig, - helm=self._helm_command, - namespace=namespace_str, - atomic=atomic_str, - force=force_str, - params=params_str, - timeout=timeout_str, - name=quote(kdu_instance), - model=quote(kdu_model), - ver=version_str, - ) - return command - - def _get_rollback_command( - self, kdu_instance, namespace, revision, kubeconfig - ) -> str: - return "env KUBECONFIG={} {} rollback {} {} --wait".format( - kubeconfig, self._helm_command, quote(kdu_instance), revision - ) - - def _get_uninstall_command( - self, kdu_instance: str, namespace: str, kubeconfig: str - ) -> str: - return "env KUBECONFIG={} {} delete --purge {}".format( - kubeconfig, self._helm_command, quote(kdu_instance) - ) diff --git a/n2vc/tests/unit/test_k8s_helm_conn.py b/n2vc/tests/unit/test_k8s_helm_conn.py deleted file mode 100644 index 161471a..0000000 --- a/n2vc/tests/unit/test_k8s_helm_conn.py +++ /dev/null @@ -1,740 +0,0 @@ -## -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# For those usages not covered by the Apache License, Version 2.0 please -# contact: alfonso.tiernosepulveda@telefonica.com -## - -import asynctest -import logging - -from asynctest.mock import Mock -from osm_common.dbmemory import DbMemory -from osm_common.fslocal import FsLocal -from n2vc.k8s_helm_conn import K8sHelmConnector - -__author__ = "Isabel Lloret " - - -class TestK8sHelmConn(asynctest.TestCase): - logging.basicConfig(level=logging.DEBUG) - logger = logging.getLogger(__name__) - logger.setLevel(logging.DEBUG) - - async def setUp(self): - self.db = Mock(DbMemory()) - self.fs = asynctest.Mock(FsLocal()) - self.fs.path = "./tmp/" - self.namespace = "testk8s" - self.service_account = "osm" - self.cluster_id = "helm_cluster_id" - self.cluster_uuid = self.cluster_id - # pass fake kubectl and helm commands to make sure it does not call actual commands - K8sHelmConnector._check_file_exists = asynctest.Mock(return_value=True) - K8sHelmConnector._local_async_exec = asynctest.CoroutineMock( - return_value=(0, "") - ) - cluster_dir = self.fs.path + self.cluster_id - self.kube_config = self.fs.path + self.cluster_id + "/.kube/config" - self.helm_home = self.fs.path + self.cluster_id + "/.helm" - self.env = { - "HELM_HOME": "{}/.helm".format(cluster_dir), - "KUBECONFIG": "{}/.kube/config".format(cluster_dir), - } - self.helm_conn = K8sHelmConnector(self.fs, self.db, log=self.logger) - self.logger.debug("Set up executed") - - @asynctest.fail_on(active_handles=True) - async def test_init_env(self): - # TODO - pass - - @asynctest.fail_on(active_handles=True) - async def test_repo_add(self): - repo_name = "bitnami" - repo_url = "https://charts.bitnami.com/bitnami" - self.helm_conn._local_async_exec = asynctest.CoroutineMock(return_value=("", 0)) - - await self.helm_conn.repo_add(self.cluster_uuid, repo_name, repo_url) - - self.helm_conn.fs.sync.assert_called_once_with(from_path=self.cluster_id) - self.helm_conn.fs.reverse_sync.assert_called_once_with( - from_path=self.cluster_id - ) - self.assertEqual( - self.helm_conn._local_async_exec.call_count, - 2, - "local_async_exec expected 2 calls, called {}".format( - self.helm_conn._local_async_exec.call_count - ), - ) - - repo_update_command = ( - "env KUBECONFIG=./tmp/helm_cluster_id/.kube/config /usr/bin/helm repo update {}" - ).format(repo_name) - repo_add_command = ( - "env KUBECONFIG=./tmp/helm_cluster_id/.kube/config /usr/bin/helm repo add {} {}" - ).format(repo_name, repo_url) - calls = self.helm_conn._local_async_exec.call_args_list - call0_kargs = calls[0][1] - self.assertEqual( - call0_kargs.get("command"), - repo_add_command, - "Invalid repo add command: {}".format(call0_kargs.get("command")), - ) - self.assertEqual( - call0_kargs.get("env"), - self.env, - "Invalid env for add command: {}".format(call0_kargs.get("env")), - ) - call1_kargs = calls[1][1] - self.assertEqual( - call1_kargs.get("command"), - repo_update_command, - "Invalid repo update command: {}".format(call1_kargs.get("command")), - ) - self.assertEqual( - call1_kargs.get("env"), - self.env, - "Invalid env for update command: {}".format(call1_kargs.get("env")), - ) 
- - @asynctest.fail_on(active_handles=True) - async def test_repo_list(self): - self.helm_conn._local_async_exec = asynctest.CoroutineMock(return_value=("", 0)) - - await self.helm_conn.repo_list(self.cluster_uuid) - - self.helm_conn.fs.sync.assert_called_once_with(from_path=self.cluster_id) - self.helm_conn.fs.reverse_sync.assert_called_once_with( - from_path=self.cluster_id - ) - command = "env KUBECONFIG=./tmp/helm_cluster_id/.kube/config /usr/bin/helm repo list --output yaml" - self.helm_conn._local_async_exec.assert_called_with( - command=command, env=self.env, raise_exception_on_error=False - ) - - @asynctest.fail_on(active_handles=True) - async def test_repo_remove(self): - self.helm_conn._local_async_exec = asynctest.CoroutineMock(return_value=("", 0)) - repo_name = "bitnami" - await self.helm_conn.repo_remove(self.cluster_uuid, repo_name) - - self.helm_conn.fs.sync.assert_called_once_with(from_path=self.cluster_id) - self.helm_conn.fs.reverse_sync.assert_called_once_with( - from_path=self.cluster_id - ) - command = "env KUBECONFIG=./tmp/helm_cluster_id/.kube/config /usr/bin/helm repo remove {}".format( - repo_name - ) - self.helm_conn._local_async_exec.assert_called_once_with( - command=command, env=self.env, raise_exception_on_error=True - ) - - @asynctest.fail_on(active_handles=True) - async def test_install(self): - kdu_model = "stable/openldap:1.2.2" - kdu_instance = "stable-openldap-0005399828" - db_dict = {} - self.helm_conn._local_async_exec = asynctest.CoroutineMock(return_value=("", 0)) - self.helm_conn._status_kdu = asynctest.CoroutineMock(return_value=None) - self.helm_conn._store_status = asynctest.CoroutineMock() - self.helm_conn.generate_kdu_instance_name = Mock(return_value=kdu_instance) - - await self.helm_conn.install( - self.cluster_uuid, - kdu_model, - kdu_instance, - atomic=True, - namespace=self.namespace, - db_dict=db_dict, - ) - - self.helm_conn.fs.sync.assert_has_calls( - [ - asynctest.call(from_path=self.cluster_id), - asynctest.call(from_path=self.cluster_id), - ] - ) - self.helm_conn.fs.reverse_sync.assert_has_calls( - [ - asynctest.call(from_path=self.cluster_id), - asynctest.call(from_path=self.cluster_id), - ] - ) - self.helm_conn._store_status.assert_called_with( - cluster_id=self.cluster_id, - kdu_instance=kdu_instance, - namespace=self.namespace, - db_dict=db_dict, - operation="install", - ) - command = ( - "env KUBECONFIG=./tmp/helm_cluster_id/.kube/config /usr/bin/helm install " - "--atomic --output yaml --timeout 300 " - "--name=stable-openldap-0005399828 --namespace testk8s stable/openldap " - "--version 1.2.2" - ) - self.helm_conn._local_async_exec.assert_called_with( - command=command, env=self.env, raise_exception_on_error=False - ) - - @asynctest.fail_on(active_handles=True) - async def test_upgrade_force_true(self): - kdu_model = "stable/openldap:1.2.3" - kdu_instance = "stable-openldap-0005399828" - db_dict = {} - instance_info = { - "chart": "openldap-1.2.2", - "name": kdu_instance, - "namespace": self.namespace, - "revision": 1, - "status": "DEPLOYED", - } - self.helm_conn._local_async_exec = asynctest.CoroutineMock(return_value=("", 0)) - self.helm_conn._store_status = asynctest.CoroutineMock() - self.helm_conn.get_instance_info = asynctest.CoroutineMock( - return_value=instance_info - ) - # TEST-1 (--force true) - await self.helm_conn.upgrade( - self.cluster_uuid, - kdu_instance, - kdu_model, - atomic=True, - db_dict=db_dict, - force=True, - ) - self.helm_conn.fs.sync.assert_called_with(from_path=self.cluster_id) - 
self.helm_conn.fs.reverse_sync.assert_has_calls( - [ - asynctest.call(from_path=self.cluster_id), - asynctest.call(from_path=self.cluster_id), - ] - ) - self.helm_conn._store_status.assert_called_with( - cluster_id=self.cluster_id, - kdu_instance=kdu_instance, - namespace=self.namespace, - db_dict=db_dict, - operation="upgrade", - ) - command = ( - "env KUBECONFIG=./tmp/helm_cluster_id/.kube/config /usr/bin/helm upgrade --namespace testk8s " - "--atomic --output yaml --timeout 300 --force --reuse-values stable-openldap-0005399828 stable/openldap " - "--version 1.2.3" - ) - self.helm_conn._local_async_exec.assert_called_with( - command=command, env=self.env, raise_exception_on_error=False - ) - # TEST-2 (--force false) - await self.helm_conn.upgrade( - self.cluster_uuid, - kdu_instance, - kdu_model, - atomic=True, - db_dict=db_dict, - ) - self.helm_conn.fs.sync.assert_called_with(from_path=self.cluster_id) - self.helm_conn.fs.reverse_sync.assert_has_calls( - [ - asynctest.call(from_path=self.cluster_id), - asynctest.call(from_path=self.cluster_id), - ] - ) - self.helm_conn._store_status.assert_called_with( - cluster_id=self.cluster_id, - kdu_instance=kdu_instance, - namespace=self.namespace, - db_dict=db_dict, - operation="upgrade", - ) - command = ( - "env KUBECONFIG=./tmp/helm_cluster_id/.kube/config /usr/bin/helm upgrade --namespace testk8s " - "--atomic --output yaml --timeout 300 --reuse-values stable-openldap-0005399828 stable/openldap " - "--version 1.2.3" - ) - self.helm_conn._local_async_exec.assert_called_with( - command=command, env=self.env, raise_exception_on_error=False - ) - - @asynctest.fail_on(active_handles=True) - async def test_upgrade_namespace(self): - kdu_model = "stable/openldap:1.2.3" - kdu_instance = "stable-openldap-0005399828" - db_dict = {} - instance_info = { - "chart": "openldap-1.2.2", - "name": kdu_instance, - "namespace": self.namespace, - "revision": 1, - "status": "DEPLOYED", - } - self.helm_conn._local_async_exec = asynctest.CoroutineMock(return_value=("", 0)) - self.helm_conn._store_status = asynctest.CoroutineMock() - self.helm_conn.get_instance_info = asynctest.CoroutineMock( - return_value=instance_info - ) - - await self.helm_conn.upgrade( - self.cluster_uuid, - kdu_instance, - kdu_model, - atomic=True, - db_dict=db_dict, - namespace="default", - ) - self.helm_conn.fs.sync.assert_called_with(from_path=self.cluster_id) - self.helm_conn.fs.reverse_sync.assert_has_calls( - [ - asynctest.call(from_path=self.cluster_id), - asynctest.call(from_path=self.cluster_id), - ] - ) - self.helm_conn._store_status.assert_called_with( - cluster_id=self.cluster_id, - kdu_instance=kdu_instance, - namespace="default", - db_dict=db_dict, - operation="upgrade", - ) - command = ( - "env KUBECONFIG=./tmp/helm_cluster_id/.kube/config /usr/bin/helm upgrade --namespace default " - "--atomic --output yaml --timeout 300 --reuse-values stable-openldap-0005399828 stable/openldap " - "--version 1.2.3" - ) - self.helm_conn._local_async_exec.assert_called_with( - command=command, env=self.env, raise_exception_on_error=False - ) - - @asynctest.fail_on(active_handles=True) - async def test_scale(self): - kdu_model = "stable/openldap:1.2.3" - kdu_instance = "stable-openldap-0005399828" - db_dict = {} - instance_info = { - "chart": "openldap-1.2.3", - "name": kdu_instance, - "namespace": self.namespace, - "revision": 1, - "status": "DEPLOYED", - } - repo_list = [ - { - "name": "stable", - "url": "https://kubernetes-charts.storage.googleapis.com/", - } - ] - kdu_values = """ - # Default 
values for openldap. - # This is a YAML-formatted file. - # Declare variables to be passed into your templates. - - replicaCount: 1 - dummy-app: - replicas: 2 - """ - - self.helm_conn.repo_list = asynctest.CoroutineMock(return_value=repo_list) - self.helm_conn.values_kdu = asynctest.CoroutineMock(return_value=kdu_values) - self.helm_conn._local_async_exec = asynctest.CoroutineMock(return_value=("", 0)) - self.helm_conn._store_status = asynctest.CoroutineMock() - self.helm_conn.get_instance_info = asynctest.CoroutineMock( - return_value=instance_info - ) - - # TEST-1 - await self.helm_conn.scale( - kdu_instance, - 2, - "", - kdu_model=kdu_model, - cluster_uuid=self.cluster_uuid, - atomic=True, - db_dict=db_dict, - ) - command = ( - "env KUBECONFIG=./tmp/helm_cluster_id/.kube/config " - "/usr/bin/helm upgrade --namespace testk8s --atomic --output yaml --set replicaCount=2 " - "--timeout 1800 --reuse-values stable-openldap-0005399828 stable/openldap " - "--version 1.2.3" - ) - self.helm_conn._local_async_exec.assert_called_once_with( - command=command, env=self.env, raise_exception_on_error=False - ) - - # TEST-2 - await self.helm_conn.scale( - kdu_instance, - 3, - "dummy-app", - kdu_model=kdu_model, - cluster_uuid=self.cluster_uuid, - atomic=True, - db_dict=db_dict, - ) - command = ( - "env KUBECONFIG=./tmp/helm_cluster_id/.kube/config " - "/usr/bin/helm upgrade --namespace testk8s --atomic --output yaml --set dummy-app.replicas=3 " - "--timeout 1800 --reuse-values stable-openldap-0005399828 stable/openldap " - "--version 1.2.3" - ) - self.helm_conn._local_async_exec.assert_called_with( - command=command, env=self.env, raise_exception_on_error=False - ) - self.helm_conn.fs.reverse_sync.assert_called_with(from_path=self.cluster_id) - self.helm_conn._store_status.assert_called_with( - cluster_id=self.cluster_id, - kdu_instance=kdu_instance, - namespace=self.namespace, - db_dict=db_dict, - operation="scale", - ) - - @asynctest.fail_on(active_handles=True) - async def test_rollback(self): - kdu_instance = "stable-openldap-0005399828" - db_dict = {} - instance_info = { - "chart": "openldap-1.2.3", - "name": kdu_instance, - "namespace": self.namespace, - "revision": 2, - "status": "DEPLOYED", - } - self.helm_conn._local_async_exec = asynctest.CoroutineMock(return_value=("", 0)) - self.helm_conn._store_status = asynctest.CoroutineMock() - self.helm_conn.get_instance_info = asynctest.CoroutineMock( - return_value=instance_info - ) - - await self.helm_conn.rollback( - self.cluster_uuid, kdu_instance=kdu_instance, revision=1, db_dict=db_dict - ) - self.helm_conn.fs.sync.assert_called_with(from_path=self.cluster_id) - self.helm_conn.fs.reverse_sync.assert_called_once_with( - from_path=self.cluster_id - ) - self.helm_conn._store_status.assert_called_with( - cluster_id=self.cluster_id, - kdu_instance=kdu_instance, - namespace=self.namespace, - db_dict=db_dict, - operation="rollback", - ) - command = ( - "env KUBECONFIG=./tmp/helm_cluster_id/.kube/config " - "/usr/bin/helm rollback stable-openldap-0005399828 1 --wait" - ) - self.helm_conn._local_async_exec.assert_called_once_with( - command=command, env=self.env, raise_exception_on_error=False - ) - - @asynctest.fail_on(active_handles=True) - async def test_uninstall(self): - kdu_instance = "stable-openldap-0005399828" - instance_info = { - "chart": "openldap-1.2.2", - "name": kdu_instance, - "namespace": self.namespace, - "revision": 3, - "status": "DEPLOYED", - } - self.helm_conn._local_async_exec = asynctest.CoroutineMock(return_value=("", 0)) - 
self.helm_conn._store_status = asynctest.CoroutineMock() - self.helm_conn.get_instance_info = asynctest.CoroutineMock( - return_value=instance_info - ) - - await self.helm_conn.uninstall(self.cluster_uuid, kdu_instance) - self.helm_conn.fs.sync.assert_called_with(from_path=self.cluster_id) - self.helm_conn.fs.reverse_sync.assert_called_once_with( - from_path=self.cluster_id - ) - command = "env KUBECONFIG=./tmp/helm_cluster_id/.kube/config /usr/bin/helm delete --purge {}".format( - kdu_instance - ) - self.helm_conn._local_async_exec.assert_called_once_with( - command=command, env=self.env, raise_exception_on_error=True - ) - - @asynctest.fail_on(active_handles=True) - async def test_get_services(self): - kdu_instance = "test_services_1" - service = {"name": "testservice", "type": "LoadBalancer"} - self.helm_conn._local_async_exec_pipe = asynctest.CoroutineMock( - return_value=("", 0) - ) - self.helm_conn._parse_services = Mock(return_value=["testservice"]) - self.helm_conn._get_service = asynctest.CoroutineMock(return_value=service) - - services = await self.helm_conn.get_services( - self.cluster_uuid, kdu_instance, self.namespace - ) - self.helm_conn.fs.sync.assert_called_once_with(from_path=self.cluster_id) - self.helm_conn.fs.reverse_sync.assert_called_once_with( - from_path=self.cluster_id - ) - self.helm_conn._parse_services.assert_called_once() - command1 = "env KUBECONFIG=./tmp/helm_cluster_id/.kube/config /usr/bin/helm get manifest {} ".format( - kdu_instance - ) - command2 = "/usr/bin/kubectl get --namespace={} -f -".format(self.namespace) - self.helm_conn._local_async_exec_pipe.assert_called_once_with( - command1, command2, env=self.env, raise_exception_on_error=True - ) - self.assertEqual( - services, [service], "Invalid service returned from get_service" - ) - - @asynctest.fail_on(active_handles=True) - async def test_get_service(self): - service_name = "service1" - - self.helm_conn._local_async_exec = asynctest.CoroutineMock(return_value=("", 0)) - await self.helm_conn.get_service( - self.cluster_uuid, service_name, self.namespace - ) - - self.helm_conn.fs.sync.assert_called_once_with(from_path=self.cluster_id) - self.helm_conn.fs.reverse_sync.assert_called_once_with( - from_path=self.cluster_id - ) - command = ( - "/usr/bin/kubectl --kubeconfig=./tmp/helm_cluster_id/.kube/config " - "--namespace=testk8s get service service1 -o=yaml" - ) - self.helm_conn._local_async_exec.assert_called_once_with( - command=command, env=self.env, raise_exception_on_error=True - ) - - @asynctest.fail_on(active_handles=True) - async def test_inspect_kdu(self): - self.helm_conn._local_async_exec = asynctest.CoroutineMock(return_value=("", 0)) - - kdu_model = "stable/openldap:1.2.4" - repo_url = "https://kubernetes-charts.storage.googleapis.com/" - await self.helm_conn.inspect_kdu(kdu_model, repo_url) - - command = ( - "/usr/bin/helm inspect openldap --repo " - "https://kubernetes-charts.storage.googleapis.com/ " - "--version 1.2.4" - ) - self.helm_conn._local_async_exec.assert_called_with(command=command) - - @asynctest.fail_on(active_handles=True) - async def test_help_kdu(self): - self.helm_conn._local_async_exec = asynctest.CoroutineMock(return_value=("", 0)) - - kdu_model = "stable/openldap:1.2.4" - repo_url = "https://kubernetes-charts.storage.googleapis.com/" - await self.helm_conn.help_kdu(kdu_model, repo_url) - - command = ( - "/usr/bin/helm inspect readme openldap --repo " - "https://kubernetes-charts.storage.googleapis.com/ " - "--version 1.2.4" - ) - 
self.helm_conn._local_async_exec.assert_called_with(command=command) - - @asynctest.fail_on(active_handles=True) - async def test_values_kdu(self): - self.helm_conn._local_async_exec = asynctest.CoroutineMock(return_value=("", 0)) - - kdu_model = "stable/openldap:1.2.4" - repo_url = "https://kubernetes-charts.storage.googleapis.com/" - await self.helm_conn.values_kdu(kdu_model, repo_url) - - command = ( - "/usr/bin/helm inspect values openldap --repo " - "https://kubernetes-charts.storage.googleapis.com/ " - "--version 1.2.4" - ) - self.helm_conn._local_async_exec.assert_called_with(command=command) - - @asynctest.fail_on(active_handles=True) - async def test_get_values_kdu(self): - self.helm_conn._local_async_exec = asynctest.CoroutineMock(return_value=("", 0)) - - kdu_instance = "stable-openldap-0005399828" - await self.helm_conn.get_values_kdu( - kdu_instance, self.namespace, self.env["KUBECONFIG"] - ) - - command = ( - "env KUBECONFIG=./tmp/helm_cluster_id/.kube/config /usr/bin/helm get values " - "stable-openldap-0005399828 --output yaml" - ) - self.helm_conn._local_async_exec.assert_called_with(command=command) - - @asynctest.fail_on(active_handles=True) - async def test_instances_list(self): - self.helm_conn._local_async_exec = asynctest.CoroutineMock(return_value=("", 0)) - - await self.helm_conn.instances_list(self.cluster_uuid) - self.helm_conn.fs.sync.assert_called_once_with(from_path=self.cluster_id) - self.helm_conn.fs.reverse_sync.assert_called_once_with( - from_path=self.cluster_id - ) - command = "/usr/bin/helm list --output yaml" - self.helm_conn._local_async_exec.assert_called_once_with( - command=command, env=self.env, raise_exception_on_error=True - ) - - @asynctest.fail_on(active_handles=True) - async def test_status_kdu(self): - kdu_instance = "stable-openldap-0005399828" - self.helm_conn._local_async_exec = asynctest.CoroutineMock(return_value=("", 0)) - - await self.helm_conn._status_kdu( - self.cluster_id, kdu_instance, self.namespace, yaml_format=True - ) - command = ( - "env KUBECONFIG=./tmp/helm_cluster_id/.kube/config /usr/bin/helm status {} --output yaml" - ).format(kdu_instance) - self.helm_conn._local_async_exec.assert_called_once_with( - command=command, - env=self.env, - raise_exception_on_error=True, - show_error_log=False, - ) - - @asynctest.fail_on(active_handles=True) - async def test_store_status(self): - kdu_instance = "stable-openldap-0005399828" - db_dict = {} - status = { - "info": { - "description": "Install complete", - "status": { - "code": "1", - "notes": "The openldap helm chart has been installed", - }, - } - } - self.helm_conn._status_kdu = asynctest.CoroutineMock(return_value=status) - self.helm_conn.write_app_status_to_db = asynctest.CoroutineMock( - return_value=status - ) - - await self.helm_conn._store_status( - cluster_id=self.cluster_id, - kdu_instance=kdu_instance, - namespace=self.namespace, - db_dict=db_dict, - operation="install", - ) - self.helm_conn._status_kdu.assert_called_once_with( - cluster_id=self.cluster_id, - kdu_instance=kdu_instance, - namespace=self.namespace, - yaml_format=False, - ) - self.helm_conn.write_app_status_to_db.assert_called_once_with( - db_dict=db_dict, - status="Install complete", - detailed_status=str(status), - operation="install", - ) - - @asynctest.fail_on(active_handles=True) - async def test_reset_uninstall_false(self): - self.helm_conn._uninstall_sw = asynctest.CoroutineMock() - - await self.helm_conn.reset(self.cluster_uuid, force=False, uninstall_sw=False) - 
self.helm_conn.fs.sync.assert_called_once_with(from_path=self.cluster_id) - self.helm_conn.fs.file_delete.assert_called_once_with( - self.cluster_id, ignore_non_exist=True - ) - self.helm_conn._uninstall_sw.assert_not_called() - - @asynctest.fail_on(active_handles=True) - async def test_reset_uninstall(self): - kdu_instance = "stable-openldap-0021099429" - instances = [ - { - "app_version": "2.4.48", - "chart": "openldap-1.2.3", - "name": kdu_instance, - "namespace": self.namespace, - "revision": "1", - "status": "deployed", - "updated": "2020-10-30 11:11:20.376744191 +0000 UTC", - } - ] - self.helm_conn._get_namespace = Mock(return_value=self.namespace) - self.helm_conn._uninstall_sw = asynctest.CoroutineMock() - self.helm_conn.instances_list = asynctest.CoroutineMock(return_value=instances) - self.helm_conn.uninstall = asynctest.CoroutineMock() - - await self.helm_conn.reset(self.cluster_uuid, force=True, uninstall_sw=True) - self.helm_conn.fs.sync.assert_called_once_with(from_path=self.cluster_id) - self.helm_conn.fs.file_delete.assert_called_once_with( - self.cluster_id, ignore_non_exist=True - ) - self.helm_conn._get_namespace.assert_called_once_with( - cluster_uuid=self.cluster_uuid - ) - self.helm_conn.instances_list.assert_called_once_with( - cluster_uuid=self.cluster_uuid - ) - self.helm_conn.uninstall.assert_called_once_with( - cluster_uuid=self.cluster_uuid, kdu_instance=kdu_instance - ) - self.helm_conn._uninstall_sw.assert_called_once_with( - cluster_id=self.cluster_id, namespace=self.namespace - ) - - @asynctest.fail_on(active_handles=True) - async def test_uninstall_sw_namespace(self): - self.helm_conn._local_async_exec = asynctest.CoroutineMock(return_value=("", 0)) - - await self.helm_conn._uninstall_sw(self.cluster_id, self.namespace) - calls = self.helm_conn._local_async_exec.call_args_list - self.assertEqual( - len(calls), 3, "To uninstall should have executed three commands" - ) - call0_kargs = calls[0][1] - command_0 = "/usr/bin/helm --kubeconfig={} --home={} reset".format( - self.kube_config, self.helm_home - ) - self.assertEqual( - call0_kargs, - {"command": command_0, "raise_exception_on_error": True, "env": self.env}, - "Invalid args for first call to local_exec", - ) - call1_kargs = calls[1][1] - command_1 = ( - "/usr/bin/kubectl --kubeconfig={} delete " - "clusterrolebinding.rbac.authorization.k8s.io/osm-tiller-cluster-rule".format( - self.kube_config - ) - ) - self.assertEqual( - call1_kargs, - {"command": command_1, "raise_exception_on_error": False, "env": self.env}, - "Invalid args for second call to local_exec", - ) - call2_kargs = calls[2][1] - command_2 = ( - "/usr/bin/kubectl --kubeconfig={} --namespace {} delete " - "serviceaccount/{}".format( - self.kube_config, self.namespace, self.service_account - ) - ) - self.assertEqual( - call2_kargs, - {"command": command_2, "raise_exception_on_error": False, "env": self.env}, - "Invalid args for third call to local_exec", - ) -- 2.17.1 From 1c1a25631024278b7caeb6a1dde34d5de326df6c Mon Sep 17 00:00:00 2001 From: Gabriel Cuba Date: Mon, 20 Nov 2023 01:08:39 -0500 Subject: [PATCH 15/16] Feature 10997: Adds helm OCI registry login Change-Id: I1bc12bdf52f082900c3388d03c31e52841017b94 Signed-off-by: Gabriel Cuba --- n2vc/k8s_helm3_conn.py | 4 +- n2vc/k8s_helm_base_conn.py | 111 ++++++++++++++++--------- n2vc/tests/unit/test_k8s_helm3_conn.py | 14 +++- 3 files changed, 85 insertions(+), 44 deletions(-) diff --git a/n2vc/k8s_helm3_conn.py b/n2vc/k8s_helm3_conn.py index 675c851..14f7fe0 100644 --- 
a/n2vc/k8s_helm3_conn.py +++ b/n2vc/k8s_helm3_conn.py @@ -358,8 +358,8 @@ class K8sHelm3Connector(K8sHelmBaseConnector): Args: show_command: the second part of the command (`helm show `) - kdu_model: The name or path of an Helm Chart - repo_url: Helm Chart repository url + kdu_model: The name or path of a Helm Chart + repo_str: Helm Chart repository url version: constraint with specific version of the Chart to use Returns: diff --git a/n2vc/k8s_helm_base_conn.py b/n2vc/k8s_helm_base_conn.py index 383ce7d..5f004b3 100644 --- a/n2vc/k8s_helm_base_conn.py +++ b/n2vc/k8s_helm_base_conn.py @@ -31,6 +31,7 @@ import stat import os import yaml from uuid import uuid4 +from urllib.parse import urlparse from n2vc.config import EnvironConfig from n2vc.exceptions import K8sException @@ -165,6 +166,7 @@ class K8sHelmBaseConnector(K8sConnector): cert: str = None, user: str = None, password: str = None, + oci: bool = False, ): self.log.debug( "Cluster {}, adding {} repository {}. URL: {}".format( @@ -180,10 +182,23 @@ class K8sHelmBaseConnector(K8sConnector): # sync local dir self.fs.sync(from_path=cluster_uuid) - # helm repo add name url - command = ("env KUBECONFIG={} {} repo add {} {}").format( - paths["kube_config"], self._helm_command, quote(name), quote(url) - ) + if oci: + if user and password: + host_port = urlparse(url).netloc if url.startswith("oci://") else url + # helm registry login url + command = "env KUBECONFIG={} {} registry login {}".format( + paths["kube_config"], self._helm_command, quote(host_port) + ) + else: + self.log.debug( + "OCI registry login is not needed for repo: {}".format(name) + ) + return + else: + # helm repo add name url + command = "env KUBECONFIG={} {} repo add {} {}".format( + paths["kube_config"], self._helm_command, quote(name), quote(url) + ) if cert: temp_cert_file = os.path.join( @@ -205,14 +220,15 @@ class K8sHelmBaseConnector(K8sConnector): command=command, raise_exception_on_error=True, env=env ) - # helm repo update - command = "env KUBECONFIG={} {} repo update {}".format( - paths["kube_config"], self._helm_command, quote(name) - ) - self.log.debug("updating repo: {}".format(command)) - await self._local_async_exec( - command=command, raise_exception_on_error=False, env=env - ) + if not oci: + # helm repo update + command = "env KUBECONFIG={} {} repo update {}".format( + paths["kube_config"], self._helm_command, quote(name) + ) + self.log.debug("updating repo: {}".format(command)) + await self._local_async_exec( + command=command, raise_exception_on_error=False, env=env + ) # sync fs self.fs.reverse_sync(from_path=cluster_uuid) @@ -379,6 +395,11 @@ class K8sHelmBaseConnector(K8sConnector): def _is_helm_chart_a_file(self, chart_name: str): return chart_name.count("/") > 1 + @staticmethod + def _is_helm_chart_a_url(chart_name: str): + result = urlparse(chart_name) + return all([result.scheme, result.netloc]) + async def _install_impl( self, cluster_id: str, @@ -403,12 +424,7 @@ class K8sHelmBaseConnector(K8sConnector): cluster_id=cluster_id, params=params ) - # version - kdu_model, version = self._split_version(kdu_model) - - _, repo = self._split_repo(kdu_model) - if repo: - await self.repo_update(cluster_id, repo) + kdu_model, version = await self._prepare_helm_chart(kdu_model, cluster_id) command = self._get_install_command( kdu_model, @@ -512,12 +528,7 @@ class K8sHelmBaseConnector(K8sConnector): cluster_id=cluster_uuid, params=params ) - # version - kdu_model, version = self._split_version(kdu_model) - - _, repo = self._split_repo(kdu_model) - if repo: - 
await self.repo_update(cluster_uuid, repo) + kdu_model, version = await self._prepare_helm_chart(kdu_model, cluster_uuid) command = self._get_upgrade_command( kdu_model, @@ -645,7 +656,7 @@ class K8sHelmBaseConnector(K8sConnector): ) # version - kdu_model, version = self._split_version(kdu_model) + kdu_model, version = await self._prepare_helm_chart(kdu_model, cluster_uuid) repo_url = await self._find_repo(kdu_model, cluster_uuid) @@ -1198,19 +1209,15 @@ class K8sHelmBaseConnector(K8sConnector): # add repo self.log.debug("add repo {}".format(db_repo["name"])) - if "ca_cert" in db_repo: - await self.repo_add( - cluster_uuid, - db_repo["name"], - db_repo["url"], - cert=db_repo["ca_cert"], - ) - else: - await self.repo_add( - cluster_uuid, - db_repo["name"], - db_repo["url"], - ) + await self.repo_add( + cluster_uuid, + db_repo["name"], + db_repo["url"], + cert=db_repo.get("ca_cert"), + user=db_repo.get("user"), + password=db_repo.get("password"), + oci=db_repo.get("oci", False), + ) added_repo_dict[repo_id] = db_repo["name"] except Exception as e: raise K8sException( @@ -2037,7 +2044,13 @@ class K8sHelmBaseConnector(K8sConnector): def _split_version(self, kdu_model: str) -> tuple[str, str]: version = None - if not self._is_helm_chart_a_file(kdu_model) and ":" in kdu_model: + if ( + not ( + self._is_helm_chart_a_file(kdu_model) + or self._is_helm_chart_a_url(kdu_model) + ) + and ":" in kdu_model + ): parts = kdu_model.split(sep=":") if len(parts) == 2: version = str(parts[1]) @@ -2060,7 +2073,7 @@ class K8sHelmBaseConnector(K8sConnector): repo_name = None idx = kdu_model.find("/") - if idx >= 0: + if not self._is_helm_chart_a_url(kdu_model) and idx >= 0: chart_name = kdu_model[idx + 1 :] repo_name = kdu_model[:idx] @@ -2090,6 +2103,24 @@ class K8sHelmBaseConnector(K8sConnector): return repo_url + def _repo_to_oci_url(self, repo): + db_repo = self.db.get_one("k8srepos", {"name": repo}, fail_on_empty=False) + if db_repo and "oci" in db_repo: + return db_repo.get("url") + + async def _prepare_helm_chart(self, kdu_model, cluster_id): + # e.g.: "stable/openldap", "1.0" + kdu_model, version = self._split_version(kdu_model) + # e.g.: "openldap, stable" + chart_name, repo = self._split_repo(kdu_model) + if repo and chart_name: # repo/chart case + oci_url = self._repo_to_oci_url(repo) + if oci_url: # oci does not require helm repo update + kdu_model = f"{oci_url.rstrip('/')}/{chart_name.lstrip('/')}" # urljoin doesn't work for oci schema + else: + await self.repo_update(cluster_id, repo) + return kdu_model, version + async def create_certificate( self, cluster_uuid, namespace, dns_prefix, name, secret_name, usage ): diff --git a/n2vc/tests/unit/test_k8s_helm3_conn.py b/n2vc/tests/unit/test_k8s_helm3_conn.py index a2e75e1..bddfddd 100644 --- a/n2vc/tests/unit/test_k8s_helm3_conn.py +++ b/n2vc/tests/unit/test_k8s_helm3_conn.py @@ -172,6 +172,7 @@ class TestK8sHelm3Conn(asynctest.TestCase): self.helm_conn._local_async_exec = asynctest.CoroutineMock(return_value=("", 0)) self.helm_conn._status_kdu = asynctest.CoroutineMock(return_value=None) self.helm_conn._store_status = asynctest.CoroutineMock() + self.helm_conn._repo_to_oci_url = Mock(return_value=None) self.kdu_instance = "stable-openldap-0005399828" self.helm_conn.generate_kdu_instance_name = Mock(return_value=self.kdu_instance) self.helm_conn._get_namespaces = asynctest.CoroutineMock(return_value=[]) @@ -266,6 +267,7 @@ class TestK8sHelm3Conn(asynctest.TestCase): } self.helm_conn._local_async_exec = asynctest.CoroutineMock(return_value=("", 0)) 
self.helm_conn._store_status = asynctest.CoroutineMock() + self.helm_conn._repo_to_oci_url = Mock(return_value=None) self.helm_conn.get_instance_info = asynctest.CoroutineMock( return_value=instance_info ) @@ -348,6 +350,7 @@ class TestK8sHelm3Conn(asynctest.TestCase): } self.helm_conn._local_async_exec = asynctest.CoroutineMock(return_value=("", 0)) self.helm_conn._store_status = asynctest.CoroutineMock() + self.helm_conn._repo_to_oci_url = Mock(return_value=None) self.helm_conn.get_instance_info = asynctest.CoroutineMock( return_value=instance_info ) @@ -416,6 +419,7 @@ class TestK8sHelm3Conn(asynctest.TestCase): self.helm_conn.values_kdu = asynctest.CoroutineMock(return_value=kdu_values) self.helm_conn._local_async_exec = asynctest.CoroutineMock(return_value=("", 0)) self.helm_conn._store_status = asynctest.CoroutineMock() + self.helm_conn._repo_to_oci_url = Mock(return_value=None) self.helm_conn.get_instance_info = asynctest.CoroutineMock( return_value=instance_info ) @@ -436,7 +440,7 @@ class TestK8sHelm3Conn(asynctest.TestCase): "--namespace testk8s --atomic --output yaml --set replicaCount=2 --timeout 1800s " "--reuse-values --version 1.2.3" ) - self.helm_conn._local_async_exec.assert_called_once_with( + self.helm_conn._local_async_exec.assert_called_with( command=command, env=self.env, raise_exception_on_error=False ) # TEST-2 @@ -798,7 +802,13 @@ class TestK8sHelm3Conn(asynctest.TestCase): ) self.helm_conn.repo_remove.assert_not_called() self.helm_conn.repo_add.assert_called_once_with( - self.cluster_uuid, "bitnami", "https://charts.bitnami.com/bitnami" + self.cluster_uuid, + "bitnami", + "https://charts.bitnami.com/bitnami", + cert=None, + user=None, + password=None, + oci=False, ) self.assertEqual(deleted_repo_list, [], "Deleted repo list should be empty") self.assertEqual( -- 2.17.1 From f5b7f4147a9d0ebff0a35de08ae9a9c22ee87de8 Mon Sep 17 00:00:00 2001 From: garciadeblas Date: Mon, 29 Jan 2024 18:26:53 +0100 Subject: [PATCH 16/16] Pin black version in tox.ini to 23.12.1 Change-Id: Ica4b7a2ec310cffa116f319818f755c5062f0787 Signed-off-by: garciadeblas --- tox.ini | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tox.ini b/tox.ini index 704aa6c..63aaf7a 100644 --- a/tox.ini +++ b/tox.ini @@ -31,7 +31,7 @@ deps = -r{toxinidir}/requirements.txt ####################################################################################### [testenv:black] -deps = black +deps = black==23.12.1 skip_install = true commands = black --check --diff n2vc/ -- 2.17.1
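
Note on the OCI chart handling introduced in patch 15/16: the behaviour is easier to follow in isolation than inside the diff. The standalone sketch below mirrors the logic of the patched _split_version(), _is_helm_chart_a_url() and _prepare_helm_chart() helpers: the ":" suffix is split off as a version only when the model is neither a local chart path nor a URL, and when the repository resolves to an oci:// registry the final reference is built by plain string joining (urljoin() cannot handle the oci scheme) and no "helm repo update" is issued. This is a minimal illustration only; the helper names and the oci_registries dictionary are hypothetical stand-ins for the real _repo_to_oci_url() lookup against the "k8srepos" collection, not part of the n2vc API.

from urllib.parse import urlparse


def _is_chart_a_url(chart: str) -> bool:
    # Mirrors _is_helm_chart_a_url(): a reference such as
    # "oci://registry.example.com/charts/openldap" has both scheme and netloc.
    parts = urlparse(chart)
    return all([parts.scheme, parts.netloc])


def _split_version(chart: str):
    # The ":" suffix counts as a version only when the model is neither a
    # local chart path (more than one "/") nor a URL, as in the patched code.
    if chart.count("/") <= 1 and not _is_chart_a_url(chart) and ":" in chart:
        name, _, version = chart.partition(":")
        return name, version
    return chart, None


def prepare_chart(chart: str, oci_registries: dict):
    # Illustrative stand-in for _prepare_helm_chart(): "oci_registries" maps a
    # repo name to its oci:// URL in place of the "k8srepos" database lookup.
    chart, version = _split_version(chart)   # e.g. "repo/chart", "1.2.3"
    repo, _, name = chart.partition("/")      # e.g. "repo", "chart"
    if repo and name and not _is_chart_a_url(chart):
        oci_url = oci_registries.get(repo)
        if oci_url:
            # urljoin() does not understand the oci:// scheme, so the final
            # reference is composed by plain string joining; OCI registries
            # need no "helm repo update" (only a one-time registry login when
            # credentials are configured).
            chart = f"{oci_url.rstrip('/')}/{name.lstrip('/')}"
        # else: a classic repo would trigger "helm repo update <repo>" here.
    return chart, version


if __name__ == "__main__":
    registries = {"myrepo": "oci://registry.example.com/charts"}
    # Classic repo: the reference is left as-is, only the version is split off.
    print(prepare_chart("stable/openldap:1.2.3", registries))
    # -> ('stable/openldap', '1.2.3')
    # OCI repo: the model becomes a full oci:// reference.
    print(prepare_chart("myrepo/openldap:1.2.3", registries))
    # -> ('oci://registry.example.com/charts/openldap', '1.2.3')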