From: David Garcia
Date: Thu, 16 Jul 2020 15:53:20 +0000 (+0200)
Subject: Fix an issue with the service ports in Kubectl.py
X-Git-Tag: v8.0.0rc3~2
X-Git-Url: https://osm.etsi.org/gitweb/?a=commitdiff_plain;h=refs%2Fchanges%2F38%2F9438%2F7;p=osm%2FN2VC.git

Fix an issue with the service ports in Kubectl.py

The ports were retrieved as V1ServicePort classes, and LCM cannot store
that in the database

Change-Id: I451ab65478f36c5cb7f33594b4454bd315c22878
Signed-off-by: David Garcia
---

diff --git a/n2vc/k8s_juju_conn.py b/n2vc/k8s_juju_conn.py
index a0bf3cf..1de44cd 100644
--- a/n2vc/k8s_juju_conn.py
+++ b/n2vc/k8s_juju_conn.py
@@ -124,79 +124,66 @@ class K8sJujuConnector(K8sConnector):
         # reuse_cluster_uuid, e.g. to try to fix it.
         #
         ###################################################
-        if not reuse_cluster_uuid:
-            # This is a new cluster, so bootstrap it
+        # This is a new cluster, so bootstrap it

-            cluster_uuid = str(uuid.uuid4())
+        cluster_uuid = reuse_cluster_uuid or str(uuid.uuid4())

-            # Is a local k8s cluster?
-            localk8s = self.is_local_k8s(k8s_creds)
+        # Is a local k8s cluster?
+        localk8s = self.is_local_k8s(k8s_creds)

-            # If the k8s is external, the juju controller needs a loadbalancer
-            loadbalancer = False if localk8s else True
+        # If the k8s is external, the juju controller needs a loadbalancer
+        loadbalancer = False if localk8s else True

-            # Name the new k8s cloud
-            k8s_cloud = "k8s-{}".format(cluster_uuid)
+        # Name the new k8s cloud
+        k8s_cloud = "k8s-{}".format(cluster_uuid)

-            self.log.debug("Adding k8s cloud {}".format(k8s_cloud))
-            await self.add_k8s(k8s_cloud, k8s_creds)
+        self.log.debug("Adding k8s cloud {}".format(k8s_cloud))
+        await self.add_k8s(k8s_cloud, k8s_creds)

-            # Bootstrap Juju controller
-            self.log.debug("Bootstrapping...")
-            await self.bootstrap(k8s_cloud, cluster_uuid, loadbalancer)
-            self.log.debug("Bootstrap done.")
+        # Bootstrap Juju controller
+        self.log.debug("Bootstrapping...")
+        await self.bootstrap(k8s_cloud, cluster_uuid, loadbalancer)
+        self.log.debug("Bootstrap done.")

-            # Get the controller information
+        # Get the controller information

-            # Parse ~/.local/share/juju/controllers.yaml
-            # controllers.testing.api-endpoints|ca-cert|uuid
-            self.log.debug("Getting controller endpoints")
-            with open(os.path.expanduser("~/.local/share/juju/controllers.yaml")) as f:
-                controllers = yaml.load(f, Loader=yaml.Loader)
-                controller = controllers["controllers"][cluster_uuid]
-                endpoints = controller["api-endpoints"]
-                self.juju_endpoint = endpoints[0]
-                self.juju_ca_cert = controller["ca-cert"]
+        # Parse ~/.local/share/juju/controllers.yaml
+        # controllers.testing.api-endpoints|ca-cert|uuid
+        self.log.debug("Getting controller endpoints")
+        with open(os.path.expanduser("~/.local/share/juju/controllers.yaml")) as f:
+            controllers = yaml.load(f, Loader=yaml.Loader)
+            controller = controllers["controllers"][cluster_uuid]
+            endpoints = controller["api-endpoints"]
+            self.juju_endpoint = endpoints[0]
+            self.juju_ca_cert = controller["ca-cert"]

-            # Parse ~/.local/share/juju/accounts
-            # controllers.testing.user|password
-            self.log.debug("Getting accounts")
-            with open(os.path.expanduser("~/.local/share/juju/accounts.yaml")) as f:
-                controllers = yaml.load(f, Loader=yaml.Loader)
-                controller = controllers["controllers"][cluster_uuid]
+        # Parse ~/.local/share/juju/accounts
+        # controllers.testing.user|password
+        self.log.debug("Getting accounts")
+        with open(os.path.expanduser("~/.local/share/juju/accounts.yaml")) as f:
+            controllers = yaml.load(f, Loader=yaml.Loader)
+            controller = controllers["controllers"][cluster_uuid]

-                self.juju_user = controller["user"]
-                self.juju_secret = controller["password"]
+            self.juju_user = controller["user"]
+            self.juju_secret = controller["password"]

-            # raise Exception("EOL")
+        # raise Exception("EOL")

-            self.juju_public_key = None
-
-            config = {
-                "endpoint": self.juju_endpoint,
-                "username": self.juju_user,
-                "secret": self.juju_secret,
-                "cacert": self.juju_ca_cert,
-                "namespace": namespace,
-                "loadbalancer": loadbalancer,
-            }
-
-            # Store the cluster configuration so it
-            # can be used for subsequent calls
-            self.log.debug("Setting config")
-            await self.set_config(cluster_uuid, config)
-
-        else:
-            # This is an existing cluster, so get its config
-            cluster_uuid = reuse_cluster_uuid
-
-            config = self.get_config(cluster_uuid)
+        self.juju_public_key = None

-            self.juju_endpoint = config["endpoint"]
-            self.juju_user = config["username"]
-            self.juju_secret = config["secret"]
-            self.juju_ca_cert = config["cacert"]
-            self.juju_public_key = None
+        config = {
+            "endpoint": self.juju_endpoint,
+            "username": self.juju_user,
+            "secret": self.juju_secret,
+            "cacert": self.juju_ca_cert,
+            "namespace": namespace,
+            "loadbalancer": loadbalancer,
+        }
+
+        # Store the cluster configuration so it
+        # can be used for subsequent calls
+        self.log.debug("Setting config")
+        await self.set_config(cluster_uuid, config)

         # Login to the k8s cluster
         if not self.authenticated:
diff --git a/n2vc/kubectl.py b/n2vc/kubectl.py
index 5836756..9d4ce57 100644
--- a/n2vc/kubectl.py
+++ b/n2vc/kubectl.py
@@ -37,7 +37,16 @@ class Kubectl:
                 "name": i.metadata.name,
                 "cluster_ip": i.spec.cluster_ip,
                 "type": i.spec.type,
-                "ports": i.spec.ports,
+                "ports": [
+                    {
+                        "name": p.name,
+                        "node_port": p.node_port,
+                        "port": p.port,
+                        "protocol": p.protocol,
+                        "target_port": p.target_port,
+                    }
+                    for p in i.spec.ports
+                ],
                 "external_ip": [i.ip for i in i.status.load_balancer.ingress]
                 if i.status.load_balancer.ingress
                 else None,
diff --git a/n2vc/tests/unit/test_kubectl.py b/n2vc/tests/unit/test_kubectl.py
index 8d57975..dfb954a 100644
--- a/n2vc/tests/unit/test_kubectl.py
+++ b/n2vc/tests/unit/test_kubectl.py
@@ -34,13 +34,15 @@ fake_list_services = Dict(
                             "cluster_ip": "10.152.183.79",
                             "type": "LoadBalancer",
                             "ports": [
-                                {
-                                    "name": None,
-                                    "node_port": None,
-                                    "port": 30666,
-                                    "protocol": "TCP",
-                                    "target_port": 30666,
-                                }
+                                Dict(
+                                    {
+                                        "name": None,
+                                        "node_port": None,
+                                        "port": 30666,
+                                        "protocol": "TCP",
+                                        "target_port": 30666,
+                                    }
+                                )
                             ],
                         }
                     ),
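
Note added for clarity (not part of the original patch): the n2vc/kubectl.py hunk
above replaces the raw V1ServicePort objects returned by the Kubernetes client
with plain dicts of primitives, which is what lets LCM persist the service data.
Below is a minimal sketch of the underlying issue, assuming the `kubernetes`
Python client is installed; the port values simply mirror the test fixture above.

    import json

    from kubernetes.client import V1ServicePort

    # A port as the client library returns it: a generated model object,
    # not something a JSON (or database) serializer knows how to handle.
    port = V1ServicePort(
        name=None, node_port=None, port=30666, protocol="TCP", target_port=30666
    )

    try:
        json.dumps({"ports": [port]})
    except TypeError as err:
        print("cannot store as-is:", err)  # V1ServicePort is not JSON serializable

    # Flattening each port into a dict of primitives, as the kubectl.py change
    # does inside its service-listing comprehension, serializes cleanly.
    serializable_port = {
        "name": port.name,
        "node_port": port.node_port,
        "port": port.port,
        "protocol": port.protocol,
        "target_port": port.target_port,
    }
    print(json.dumps({"ports": [serializable_port]}))

The matching test change wraps the fake port in the same attribute-access Dict
helper already used for the rest of the fixture, so the new comprehension can
read p.port, p.protocol, etc. from it just as it would from a V1ServicePort.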