blob: 5b158d7b7e0869716f42a6f54078c05a936b7c3f [file] [log] [blame]
# Copyright 2019 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import os
import uuid
import yaml
import tempfile
import binascii
import base64
from n2vc.config import ModelConfig
from n2vc.exceptions import K8sException, N2VCBadArgumentsException
from n2vc.k8s_conn import K8sConnector
from n2vc.kubectl import Kubectl, CORE_CLIENT, RBAC_CLIENT
from .exceptions import MethodNotImplemented
from n2vc.utils import base64_to_cacert
from n2vc.libjuju import Libjuju
from kubernetes.client.models import (
V1ClusterRole,
V1ObjectMeta,
V1PolicyRule,
V1ServiceAccount,
V1ClusterRoleBinding,
V1RoleRef,
V1Subject,
)
from typing import Dict
# Key of the bearer token entry inside a ServiceAccount secret.
SERVICE_ACCOUNT_TOKEN_KEY = "token"
# Key of the cluster CA certificate inside a ServiceAccount secret.
SERVICE_ACCOUNT_ROOT_CA_KEY = "ca.crt"
# Credential-attribute/label name used to tag the RBAC resources created for
# a cluster, so they can be located and removed on reset.
RBAC_LABEL_KEY_NAME = "rbac-id"
# Namespace where administrative resources are created.
ADMIN_NAMESPACE = "kube-system"
# Name prefix of the cluster role / service account / cluster role binding
# stack created for each Juju credential.
RBAC_STACK_PREFIX = "juju-credential"
# from juju.bundle import BundleHandler
# import re
# import ssl
# from .vnf import N2VC
def generate_rbac_id():
    """Return a random 8-character lowercase hexadecimal identifier."""
    # 4 random bytes rendered as hex -> 8 hex characters.
    return os.urandom(4).hex()
class K8sJujuConnector(K8sConnector):
def __init__(
    self,
    fs: object,
    db: object,
    kubectl_command: str = "/usr/bin/kubectl",
    juju_command: str = "/usr/bin/juju",
    log: object = None,
    loop: object = None,
    on_update_db=None,
    vca_config: dict = None,
):
    """Initialize the juju-based K8s connector.

    :param fs: file system for kubernetes and helm configuration
    :param db: Database object
    :param kubectl_command: path to kubectl executable
    :param juju_command: path to juju executable
    :param log: logger
    :param loop: Asyncio loop
    :param on_update_db: callback invoked when the database is updated
    :param vca_config: VCA configuration; must contain the keys "host",
        "user", "secret" and "ca_cert" (optionally "port", default 17070)
    :raises N2VCBadArgumentsException: if a required vca_config key is missing
    """
    # parent class
    K8sConnector.__init__(
        self,
        db,
        log=log,
        on_update_db=on_update_db,
    )

    self.fs = fs
    self.loop = loop or asyncio.get_event_loop()
    self.log.debug("Initializing K8S Juju connector")

    required_vca_config = [
        "host",
        "user",
        "secret",
        "ca_cert",
    ]
    if not vca_config or not all(k in vca_config for k in required_vca_config):
        raise N2VCBadArgumentsException(
            message="Missing arguments in vca_config: {}".format(vca_config),
            bad_args=required_vca_config,
        )

    # Build the controller endpoint; 17070 is the default Juju API port.
    port = vca_config.get("port", 17070)
    url = "{}:{}".format(vca_config["host"], port)
    model_config = ModelConfig(vca_config)
    username = vca_config["user"]
    secret = vca_config["secret"]
    ca_cert = base64_to_cacert(vca_config["ca_cert"])

    self.libjuju = Libjuju(
        endpoint=url,
        api_proxy=None,  # Not needed for k8s charms
        model_config=model_config,
        username=username,
        password=secret,
        cacert=ca_cert,
        loop=self.loop,
        log=self.log,
        db=self.db,
    )
    self.log.debug("K8S Juju connector initialized")
"""Initialization"""
async def init_env(
self,
k8s_creds: str,
namespace: str = "kube-system",
reuse_cluster_uuid: str = None,
) -> (str, bool):
"""
It prepares a given K8s cluster environment to run Juju bundles.
:param k8s_creds: credentials to access a given K8s cluster, i.e. a valid
'.kube/config'
:param namespace: optional namespace to be used for juju. By default,
'kube-system' will be used
:param reuse_cluster_uuid: existing cluster uuid for reuse
:return: uuid of the K8s cluster and True if connector has installed some
software in the cluster
(on error, an exception will be raised)
"""
# """Bootstrapping
# Bootstrapping cannot be done, by design, through the API. We need to
# use the CLI tools.
# """
# """
# WIP: Workflow
# 1. Has the environment already been bootstrapped?
# - Check the database to see if we have a record for this env
# 2. If this is a new env, create it
# - Add the k8s cloud to Juju
# - Bootstrap
# - Record it in the database
# 3. Connect to the Juju controller for this cloud
# """
# cluster_uuid = reuse_cluster_uuid
# if not cluster_uuid:
# cluster_uuid = str(uuid4())
##################################################
# TODO: Pull info from db based on the namespace #
##################################################
###################################################
# TODO: Make it idempotent, calling add-k8s and #
# bootstrap whenever reuse_cluster_uuid is passed #
# as parameter #
# `init_env` is called to initialize the K8s #
# cluster for juju. If this initialization fails, #
# it can be called again by LCM with the param #
# reuse_cluster_uuid, e.g. to try to fix it. #
###################################################
# This is a new cluster, so bootstrap it
cluster_uuid = reuse_cluster_uuid or str(uuid.uuid4())
# Is a local k8s cluster?
# localk8s = self.is_local_k8s(k8s_creds)
# If the k8s is external, the juju controller needs a loadbalancer
# loadbalancer = False if localk8s else True
# Name the new k8s cloud
# k8s_cloud = "k8s-{}".format(cluster_uuid)
# self.log.debug("Adding k8s cloud {}".format(k8s_cloud))
# await self.add_k8s(k8s_cloud, k8s_creds)
# Bootstrap Juju controller
# self.log.debug("Bootstrapping...")
# await self.bootstrap(k8s_cloud, cluster_uuid, loadbalancer)
# self.log.debug("Bootstrap done.")
# Get the controller information
# Parse ~/.local/share/juju/controllers.yaml
# controllers.testing.api-endpoints|ca-cert|uuid
# self.log.debug("Getting controller endpoints")
# with open(os.path.expanduser("~/.local/share/juju/controllers.yaml")) as f:
# controllers = yaml.load(f, Loader=yaml.Loader)
# controller = controllers["controllers"][cluster_uuid]
# endpoints = controller["api-endpoints"]
# juju_endpoint = endpoints[0]
# juju_ca_cert = controller["ca-cert"]
# Parse ~/.local/share/juju/accounts
# controllers.testing.user|password
# self.log.debug("Getting accounts")
# with open(os.path.expanduser("~/.local/share/juju/accounts.yaml")) as f:
# controllers = yaml.load(f, Loader=yaml.Loader)
# controller = controllers["controllers"][cluster_uuid]
# juju_user = controller["user"]
# juju_secret = controller["password"]
# config = {
# "endpoint": juju_endpoint,
# "username": juju_user,
# "secret": juju_secret,
# "cacert": juju_ca_cert,
# "loadbalancer": loadbalancer,
# }
# Store the cluster configuration so it
# can be used for subsequent calls
kubecfg = tempfile.NamedTemporaryFile()
with open(kubecfg.name, "w") as kubecfg_file:
kubecfg_file.write(k8s_creds)
kubectl = Kubectl(config_file=kubecfg.name)
# CREATING RESOURCES IN K8S
rbac_id = generate_rbac_id()
metadata_name = "{}-{}".format(RBAC_STACK_PREFIX, rbac_id)
labels = {RBAC_STACK_PREFIX: rbac_id}
# Create cleanup dictionary to clean up created resources
# if it fails in the middle of the process
cleanup_data = []
try:
self._create_cluster_role(
kubectl,
name=metadata_name,
labels=labels,
)
cleanup_data.append(
{
"delete": self._delete_cluster_role,
"args": (kubectl, metadata_name),
}
)
self._create_service_account(
kubectl,
name=metadata_name,
labels=labels,
)
cleanup_data.append(
{
"delete": self._delete_service_account,
"args": (kubectl, metadata_name),
}
)
self._create_cluster_role_binding(
kubectl,
name=metadata_name,
labels=labels,
)
cleanup_data.append(
{
"delete": self._delete_service_account,
"args": (kubectl, metadata_name),
}
)
token, client_cert_data = await self._get_secret_data(
kubectl,
metadata_name,
)
default_storage_class = kubectl.get_default_storage_class()
await self.libjuju.add_k8s(
name=cluster_uuid,
rbac_id=rbac_id,
token=token,
client_cert_data=client_cert_data,
configuration=kubectl.configuration,
storage_class=default_storage_class,
credential_name=self._get_credential_name(cluster_uuid),
)
# self.log.debug("Setting config")
# await self.set_config(cluster_uuid, config)
# Test connection
# controller = await self.get_controller(cluster_uuid)
# await controller.disconnect()
# TODO: Remove these commented lines
# raise Exception("EOL")
# self.juju_public_key = None
# Login to the k8s cluster
# if not self.authenticated:
# await self.login(cluster_uuid)
# We're creating a new cluster
# print("Getting model {}".format(self.get_namespace(cluster_uuid),
# cluster_uuid=cluster_uuid))
# model = await self.get_model(
# self.get_namespace(cluster_uuid),
# cluster_uuid=cluster_uuid
# )
# Disconnect from the model
# if model and model.is_connected():
# await model.disconnect()
return cluster_uuid, True
except Exception as e:
self.log.error("Error initializing k8scluster: {}".format(e))
if len(cleanup_data) > 0:
self.log.debug("Cleaning up created resources in k8s cluster...")
for item in cleanup_data:
delete_function = item["delete"]
delete_args = item["args"]
delete_function(*delete_args)
self.log.debug("Cleanup finished")
raise e
"""Repo Management"""
async def repo_add(
self,
name: str,
url: str,
_type: str = "charm",
):
raise MethodNotImplemented()
async def repo_list(self):
    """Not supported by the Juju connector; always raises."""
    raise MethodNotImplemented()
async def repo_remove(self, name: str):
    """Not supported by the Juju connector; always raises."""
    raise MethodNotImplemented()
async def synchronize_repos(self, cluster_uuid: str, name: str):
    """No-op.

    Returns None, as repo_add is currently not implemented for this
    connector.
    """
    return None
"""Reset"""
async def reset(
self, cluster_uuid: str, force: bool = False, uninstall_sw: bool = False
) -> bool:
"""Reset a cluster
Resets the Kubernetes cluster by removing the model that represents it.
:param cluster_uuid str: The UUID of the cluster to reset
:return: Returns True if successful or raises an exception.
"""
try:
# Remove k8scluster from database
# self.log.debug("[reset] Removing k8scluster from juju database")
# juju_db = self.db.get_one("admin", {"_id": "juju"})
# for k in juju_db["k8sclusters"]:
# if k["_id"] == cluster_uuid:
# juju_db["k8sclusters"].remove(k)
# self.db.set_one(
# table="admin",
# q_filter={"_id": "juju"},
# update_dict={"k8sclusters": juju_db["k8sclusters"]},
# )
# break
# Destroy the controller (via CLI)
# self.log.debug("[reset] Destroying controller")
# await self.destroy_controller(cluster_uuid)
self.log.debug("[reset] Removing k8s cloud")
# k8s_cloud = "k8s-{}".format(cluster_uuid)
# await self.remove_cloud(k8s_cloud)
cloud_creds = await self.libjuju.get_cloud_credentials(
cluster_uuid,
self._get_credential_name(cluster_uuid),
)
await self.libjuju.remove_cloud(cluster_uuid)
kubecfg = self.get_credentials(cluster_uuid=cluster_uuid)
kubecfg_file = tempfile.NamedTemporaryFile()
with open(kubecfg_file.name, "w") as f:
f.write(kubecfg)
kubectl = Kubectl(config_file=kubecfg_file.name)
delete_functions = [
self._delete_cluster_role_binding,
self._delete_service_account,
self._delete_cluster_role,
]
credential_attrs = cloud_creds[0].result["attrs"]
if RBAC_LABEL_KEY_NAME in credential_attrs:
rbac_id = credential_attrs[RBAC_LABEL_KEY_NAME]
metadata_name = "{}-{}".format(RBAC_STACK_PREFIX, rbac_id)
delete_args = (kubectl, metadata_name)
for delete_func in delete_functions:
try:
delete_func(*delete_args)
except Exception as e:
self.log.warning("Cannot remove resource in K8s {}".format(e))
except Exception as e:
self.log.debug("Caught exception during reset: {}".format(e))
raise e
return True
# TODO: Remove these commented lines
# if not self.authenticated:
# await self.login(cluster_uuid)
# if self.controller.is_connected():
# # Destroy the model
# namespace = self.get_namespace(cluster_uuid)
# if await self.has_model(namespace):
# self.log.debug("[reset] Destroying model")
# await self.controller.destroy_model(namespace, destroy_storage=True)
# # Disconnect from the controller
# self.log.debug("[reset] Disconnecting controller")
# await self.logout()
"""Deployment"""
async def install(
self,
cluster_uuid: str,
kdu_model: str,
kdu_instance: str,
atomic: bool = True,
timeout: float = 1800,
params: dict = None,
db_dict: dict = None,
kdu_name: str = None,
namespace: str = None,
) -> bool:
"""Install a bundle
:param cluster_uuid str: The UUID of the cluster to install to
:param kdu_model str: The name or path of a bundle to install
:param kdu_instance: Kdu instance name
:param atomic bool: If set, waits until the model is active and resets
the cluster on failure.
:param timeout int: The time, in seconds, to wait for the install
to finish
:param params dict: Key-value pairs of instantiation parameters
:param kdu_name: Name of the KDU instance to be installed
:param namespace: K8s namespace to use for the KDU instance
:return: If successful, returns ?
"""
bundle = kdu_model
# controller = await self.get_controller(cluster_uuid)
##
# Get or create the model, based on the NS
# uuid.
if not db_dict:
raise K8sException("db_dict must be set")
if not bundle:
raise K8sException("bundle must be set")
if bundle.startswith("cs:"):
pass
elif bundle.startswith("http"):
# Download the file
pass
else:
new_workdir = kdu_model.strip(kdu_model.split("/")[-1])
os.chdir(new_workdir)
bundle = "local:{}".format(kdu_model)
self.log.debug("Checking for model named {}".format(kdu_instance))
# Create the new model
self.log.debug("Adding model: {}".format(kdu_instance))
await self.libjuju.add_model(
model_name=kdu_instance,
cloud_name=cluster_uuid,
credential_name=self._get_credential_name(cluster_uuid),
)
# if model:
# TODO: Instantiation parameters
"""
"Juju bundle that models the KDU, in any of the following ways:
- <juju-repo>/<juju-bundle>
- <juju-bundle folder under k8s_models folder in the package>
- <juju-bundle tgz file (w/ or w/o extension) under k8s_models folder
in the package>
- <URL_where_to_fetch_juju_bundle>
"""
try:
previous_workdir = os.getcwd()
except FileNotFoundError:
previous_workdir = "/app/storage"
self.log.debug("[install] deploying {}".format(bundle))
await self.libjuju.deploy(
bundle, model_name=kdu_instance, wait=atomic, timeout=timeout
)
# Get the application
# if atomic:
# # applications = model.applications
# self.log.debug("[install] Applications: {}".format(model.applications))
# for name in model.applications:
# self.log.debug("[install] Waiting for {} to settle".format(name))
# application = model.applications[name]
# try:
# # It's not enough to wait for all units to be active;
# # the application status needs to be active as well.
# self.log.debug("Waiting for all units to be active...")
# await model.block_until(
# lambda: all(
# unit.agent_status == "idle"
# and application.status in ["active", "unknown"]
# and unit.workload_status in ["active", "unknown"]
# for unit in application.units
# ),
# timeout=timeout,
# )
# self.log.debug("All units active.")
# # TODO use asyncio.TimeoutError
# except concurrent.futures._base.TimeoutError:
# os.chdir(previous_workdir)
# self.log.debug("[install] Timeout exceeded; resetting cluster")
# await self.reset(cluster_uuid)
# return False
# Wait for the application to be active
# if model.is_connected():
# self.log.debug("[install] Disconnecting model")
# await model.disconnect()
# await controller.disconnect()
os.chdir(previous_workdir)
return True
async def instances_list(self, cluster_uuid: str) -> list:
    """Return the deployed releases in a cluster.

    :param cluster_uuid: the cluster
    :return: always an empty list; release tracking is not implemented
    """
    return []
async def upgrade(
    self,
    cluster_uuid: str,
    kdu_instance: str,
    kdu_model: str = None,
    params: dict = None,
) -> str:
    """Upgrade a model

    :param cluster_uuid str: The UUID of the cluster to upgrade
    :param kdu_instance str: The unique name of the KDU instance
    :param kdu_model str: The name or path of the bundle to upgrade to
    :param params dict: Key-value pairs of instantiation parameters

    :return: If successful, reference to the new revision number of the
        KDU instance.
    """
    # Not supported: the Juju API has no concept of a bundle upgrade, since
    # many things could change (charm revision, disk, number of units...).
    # Scale changes should happen through OSM constructs, and storage
    # changes would require a redeployment of the service, so only charm
    # revision upgrades could ever be supported here.
    # TODO: loop through the bundle and upgrade each charm individually.
    raise MethodNotImplemented()
"""Rollback"""
async def rollback(
self,
cluster_uuid: str,
kdu_instance: str,
revision: int = 0,
) -> str:
"""Rollback a model
:param cluster_uuid str: The UUID of the cluster to rollback
:param kdu_instance str: The unique name of the KDU instance
:param revision int: The revision to revert to. If omitted, rolls back
the previous upgrade.
:return: If successful, returns the revision of active KDU instance,
or raises an exception
"""
raise MethodNotImplemented()
"""Deletion"""
async def uninstall(self, cluster_uuid: str, kdu_instance: str) -> bool:
"""Uninstall a KDU instance
:param cluster_uuid str: The UUID of the cluster
:param kdu_instance str: The unique name of the KDU instance
:return: Returns True if successful, or raises an exception
"""
# controller = await self.get_controller(cluster_uuid)
self.log.debug("[uninstall] Destroying model")
await self.libjuju.destroy_model(kdu_instance, total_timeout=3600)
# self.log.debug("[uninstall] Model destroyed and disconnecting")
# await controller.disconnect()
return True
# TODO: Remove these commented lines
# if not self.authenticated:
# self.log.debug("[uninstall] Connecting to controller")
# await self.login(cluster_uuid)
async def exec_primitive(
    self,
    cluster_uuid: str = None,
    kdu_instance: str = None,
    primitive_name: str = None,
    timeout: float = 300,
    params: dict = None,
    db_dict: dict = None,
) -> str:
    """Exec primitive (Juju action)

    :param cluster_uuid str: The UUID of the cluster
    :param kdu_instance str: The unique name of the KDU instance
    :param primitive_name: Name of action that will be executed
    :param timeout: Timeout for action execution
    :param params: Dictionary of all the parameters needed for the action;
        must include "application-name"
    :param db_dict: Dictionary for any additional data

    :return: Returns the output of the action
    :raises K8sException: if the action is missing, fails, or does not
        complete
    """
    if not params or "application-name" not in params:
        raise K8sException(
            "Missing application-name argument, \
            argument needed for K8s actions"
        )
    try:
        self.log.debug(
            "[exec_primitive] Getting model "
            "kdu_instance: {}".format(kdu_instance)
        )
        application_name = params["application-name"]

        # Verify the action exists on the application before running it.
        actions = await self.libjuju.get_actions(application_name, kdu_instance)
        if primitive_name not in actions:
            raise K8sException("Primitive {} not found".format(primitive_name))

        output, status = await self.libjuju.execute_action(
            application_name, kdu_instance, primitive_name, **params
        )

        if status != "completed":
            raise K8sException(
                "status is not completed: {} output: {}".format(status, output)
            )

        return output
    except Exception as e:
        error_msg = "Error executing primitive {}: {}".format(primitive_name, e)
        self.log.error(error_msg)
        # Chain the original exception so the root cause is preserved.
        raise K8sException(message=error_msg) from e
"""Introspection"""
async def inspect_kdu(
self,
kdu_model: str,
) -> dict:
"""Inspect a KDU
Inspects a bundle and returns a dictionary of config parameters and
their default values.
:param kdu_model str: The name or path of the bundle to inspect.
:return: If successful, returns a dictionary of available parameters
and their default values.
"""
kdu = {}
if not os.path.exists(kdu_model):
raise K8sException("file {} not found".format(kdu_model))
with open(kdu_model, "r") as f:
bundle = yaml.safe_load(f.read())
"""
{
'description': 'Test bundle',
'bundle': 'kubernetes',
'applications': {
'mariadb-k8s': {
'charm': 'cs:~charmed-osm/mariadb-k8s-20',
'scale': 1,
'options': {
'password': 'manopw',
'root_password': 'osm4u',
'user': 'mano'
},
'series': 'kubernetes'
}
}
}
"""
# TODO: This should be returned in an agreed-upon format
kdu = bundle["applications"]
return kdu
async def help_kdu(
    self,
    kdu_model: str,
) -> str:
    """View the README

    If available, returns the README of the bundle.

    :param kdu_model str: The name or path of a bundle
    :return: If found, returns the contents of the README; otherwise None.
    """
    readme = None
    files = ["README", "README.txt", "README.md"]
    path = os.path.dirname(kdu_model)
    for file in os.listdir(path):
        if file in files:
            # BUGFIX: open the README inside the bundle directory. The
            # previous open(file) resolved against the process CWD.
            with open(os.path.join(path, file), "r") as f:
                readme = f.read()
            break
    return readme
async def status_kdu(
    self,
    cluster_uuid: str,
    kdu_instance: str,
) -> dict:
    """Get the status of the KDU

    Get the current status of the KDU instance.

    :param cluster_uuid str: The UUID of the cluster
    :param kdu_instance str: The unique id of the KDU instance

    :return: Dictionary mapping each application name to its juju status.
    """
    # The model name is the KDU instance name.
    model_status = await self.libjuju.get_model_status(kdu_instance)

    result = {}
    for app_name in model_status.applications:
        app = model_status.applications[app_name]
        result[app_name] = {"status": app["status"]["status"]}
    return result
async def get_services(
    self, cluster_uuid: str, kdu_instance: str, namespace: str
) -> list:
    """Return a list of services of a kdu_instance"""
    credentials = self.get_credentials(cluster_uuid=cluster_uuid)

    # Materialize the kubeconfig into a temporary file for kubectl.
    tmp_config = tempfile.NamedTemporaryFile()
    with open(tmp_config.name, "w") as config_file:
        config_file.write(credentials)
    kubectl = Kubectl(config_file=tmp_config.name)

    # The juju model name doubles as the K8s namespace of the KDU.
    selector = "metadata.namespace={}".format(kdu_instance)
    return kubectl.get_services(field_selector=selector)
async def get_service(
    self, cluster_uuid: str, service_name: str, namespace: str
) -> object:
    """Return data for a specific service inside a namespace"""
    credentials = self.get_credentials(cluster_uuid=cluster_uuid)

    # Materialize the kubeconfig into a temporary file for kubectl.
    tmp_config = tempfile.NamedTemporaryFile()
    with open(tmp_config.name, "w") as config_file:
        config_file.write(credentials)
    kubectl = Kubectl(config_file=tmp_config.name)

    # Select by both name and namespace; exactly one match is expected.
    selector = "metadata.name={},metadata.namespace={}".format(
        service_name, namespace
    )
    return kubectl.get_services(field_selector=selector)[0]
# Private methods
# async def add_k8s(self, cloud_name: str, credentials: str,) -> bool:
# """Add a k8s cloud to Juju
# Adds a Kubernetes cloud to Juju, so it can be bootstrapped with a
# Juju Controller.
# :param cloud_name str: The name of the cloud to add.
# :param credentials dict: A dictionary representing the output of
# `kubectl config view --raw`.
# :returns: True if successful, otherwise raises an exception.
# """
# cmd = [self.juju_command, "add-k8s", "--local", cloud_name]
# self.log.debug(cmd)
# process = await asyncio.create_subprocess_exec(
# *cmd,
# stdout=asyncio.subprocess.PIPE,
# stderr=asyncio.subprocess.PIPE,
# stdin=asyncio.subprocess.PIPE,
# )
# # Feed the process the credentials
# process.stdin.write(credentials.encode("utf-8"))
# await process.stdin.drain()
# process.stdin.close()
# _stdout, stderr = await process.communicate()
# return_code = process.returncode
# self.log.debug("add-k8s return code: {}".format(return_code))
# if return_code > 0:
# raise Exception(stderr)
# return True
# async def add_model(
# self, model_name: str, cluster_uuid: str, controller: Controller
# ) -> Model:
# """Adds a model to the controller
# Adds a new model to the Juju controller
# :param model_name str: The name of the model to add.
# :param cluster_uuid str: ID of the cluster.
# :param controller: Controller object in which the model will be added
# :returns: The juju.model.Model object of the new model upon success or
# raises an exception.
# """
# self.log.debug(
# "Adding model '{}' to cluster_uuid '{}'".format(model_name, cluster_uuid)
# )
# model = None
# try:
# if self.juju_public_key is not None:
# model = await controller.add_model(
# model_name, config={"authorized-keys": self.juju_public_key}
# )
# else:
# model = await controller.add_model(model_name)
# except Exception as ex:
# self.log.debug(ex)
# self.log.debug("Caught exception: {}".format(ex))
# pass
# return model
# async def bootstrap(
# self, cloud_name: str, cluster_uuid: str, loadbalancer: bool
# ) -> bool:
# """Bootstrap a Kubernetes controller
# Bootstrap a Juju controller inside the Kubernetes cluster
# :param cloud_name str: The name of the cloud.
# :param cluster_uuid str: The UUID of the cluster to bootstrap.
# :param loadbalancer bool: If the controller should use loadbalancer or not.
# :returns: True upon success or raises an exception.
# """
# if not loadbalancer:
# cmd = [self.juju_command, "bootstrap", cloud_name, cluster_uuid]
# else:
# """
# For public clusters, specify that the controller service is using a
# LoadBalancer.
# """
# cmd = [
# self.juju_command,
# "bootstrap",
# cloud_name,
# cluster_uuid,
# "--config",
# "controller-service-type=loadbalancer",
# ]
# self.log.debug(
# "Bootstrapping controller {} in cloud {}".format(cluster_uuid, cloud_name)
# )
# process = await asyncio.create_subprocess_exec(
# *cmd, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE,
# )
# _stdout, stderr = await process.communicate()
# return_code = process.returncode
# if return_code > 0:
# #
# if b"already exists" not in stderr:
# raise Exception(stderr)
# return True
# async def destroy_controller(self, cluster_uuid: str) -> bool:
# """Destroy a Kubernetes controller
# Destroy an existing Kubernetes controller.
# :param cluster_uuid str: The UUID of the cluster to bootstrap.
# :returns: True upon success or raises an exception.
# """
# cmd = [
# self.juju_command,
# "destroy-controller",
# "--destroy-all-models",
# "--destroy-storage",
# "-y",
# cluster_uuid,
# ]
# process = await asyncio.create_subprocess_exec(
# *cmd, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE,
# )
# _stdout, stderr = await process.communicate()
# return_code = process.returncode
# if return_code > 0:
# #
# if "already exists" not in stderr:
# raise Exception(stderr)
def get_credentials(self, cluster_uuid: str) -> str:
    """
    Get Cluster Kubeconfig
    """
    cluster_record = self.db.get_one(
        "k8sclusters", q_filter={"_id": cluster_uuid}, fail_on_empty=False
    )

    # Credentials are stored encrypted; decrypt them in place before
    # dumping to YAML.
    self.db.encrypt_decrypt_fields(
        cluster_record.get("credentials"),
        "decrypt",
        ["password", "secret"],
        schema_version=cluster_record["schema_version"],
        salt=cluster_record["_id"],
    )

    return yaml.safe_dump(cluster_record.get("credentials"))
def _get_credential_name(self, cluster_uuid: str) -> str:
    """
    Get credential name for a k8s cloud

    We cannot use the cluster_uuid for the credential name directly,
    because it cannot start with a number, it must start with a letter.
    Therefore, the k8s cloud credential name will be "cred-" followed
    by the cluster uuid.

    :param: cluster_uuid: Cluster UUID of the kubernetes cloud (=cloud_name)

    :return: Name to use for the credential name.
    """
    return "cred-" + cluster_uuid
# def get_config(self, cluster_uuid: str,) -> dict:
# """Get the cluster configuration
# Gets the configuration of the cluster
# :param cluster_uuid str: The UUID of the cluster.
# :return: A dict upon success, or raises an exception.
# """
# juju_db = self.db.get_one("admin", {"_id": "juju"})
# config = None
# for k in juju_db["k8sclusters"]:
# if k["_id"] == cluster_uuid:
# config = k["config"]
# self.db.encrypt_decrypt_fields(
# config,
# "decrypt",
# ["secret", "cacert"],
# schema_version="1.1",
# salt=k["_id"],
# )
# break
# if not config:
# raise Exception(
# "Unable to locate configuration for cluster {}".format(cluster_uuid)
# )
# return config
# async def get_model(self, model_name: str, controller: Controller) -> Model:
# """Get a model from the Juju Controller.
# Note: Model objects returned must call disconnected() before it goes
# out of scope.
# :param model_name str: The name of the model to get
# :param controller Controller: Controller object
# :return The juju.model.Model object if found, or None.
# """
# models = await controller.list_models()
# if model_name not in models:
# raise N2VCNotFound("Model {} not found".format(model_name))
# self.log.debug("Found model: {}".format(model_name))
# return await controller.get_model(model_name)
def get_namespace(
    self,
    cluster_uuid: str,
) -> str:
    """Get the namespace UUID

    Gets the namespace's unique name.

    NOTE(review): currently a stub — no namespace lookup is implemented,
    so this always returns None despite the declared str return type.
    TODO: make the namespace unique to the cluster (e.g. pre/append the
    cluster id) in case the cluster is being reused.

    :param cluster_uuid str: The UUID of the cluster
    :returns: The namespace UUID, or raises an exception
    """
    pass
# TODO: Remove these lines of code
# async def has_model(self, model_name: str) -> bool:
# """Check if a model exists in the controller
# Checks to see if a model exists in the connected Juju controller.
# :param model_name str: The name of the model
# :return: A boolean indicating if the model exists
# """
# models = await self.controller.list_models()
# if model_name in models:
# return True
# return False
# def is_local_k8s(self, credentials: str,) -> bool:
# """Check if a cluster is local
# Checks if a cluster is running in the local host
# :param credentials dict: A dictionary containing the k8s credentials
# :returns: A boolean if the cluster is running locally
# """
# creds = yaml.safe_load(credentials)
# if creds and os.getenv("OSMLCM_VCA_APIPROXY"):
# for cluster in creds["clusters"]:
# if "server" in cluster["cluster"]:
# if os.getenv("OSMLCM_VCA_APIPROXY") in cluster["cluster"]["server"]:
# return True
# return False
# async def get_controller(self, cluster_uuid):
# """Login to the Juju controller."""
# config = self.get_config(cluster_uuid)
# juju_endpoint = config["endpoint"]
# juju_user = config["username"]
# juju_secret = config["secret"]
# juju_ca_cert = config["cacert"]
# controller = Controller()
# if juju_secret:
# self.log.debug(
# "Connecting to controller... ws://{} as {}".format(
# juju_endpoint, juju_user,
# )
# )
# try:
# await controller.connect(
# endpoint=juju_endpoint,
# username=juju_user,
# password=juju_secret,
# cacert=juju_ca_cert,
# )
# self.log.debug("JujuApi: Logged into controller")
# return controller
# except Exception as ex:
# self.log.debug(ex)
# self.log.debug("Caught exception: {}".format(ex))
# else:
# self.log.fatal("VCA credentials not configured.")
# TODO: Remove these commented lines
# self.authenticated = False
# if self.authenticated:
# return
# self.connecting = True
# juju_public_key = None
# self.authenticated = True
# Test: Make sure we have the credentials loaded
# async def logout(self):
# """Logout of the Juju controller."""
# self.log.debug("[logout]")
# if not self.authenticated:
# return False
# for model in self.models:
# self.log.debug("Logging out of model {}".format(model))
# await self.models[model].disconnect()
# if self.controller:
# self.log.debug("Disconnecting controller {}".format(self.controller))
# await self.controller.disconnect()
# self.controller = None
# self.authenticated = False
# async def remove_cloud(self, cloud_name: str,) -> bool:
# """Remove a k8s cloud from Juju
# Removes a Kubernetes cloud from Juju.
# :param cloud_name str: The name of the cloud to add.
# :returns: True if successful, otherwise raises an exception.
# """
# # Remove the bootstrapped controller
# cmd = [self.juju_command, "remove-k8s", "--client", cloud_name]
# process = await asyncio.create_subprocess_exec(
# *cmd, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE,
# )
# _stdout, stderr = await process.communicate()
# return_code = process.returncode
# if return_code > 0:
# raise Exception(stderr)
# # Remove the cloud from the local config
# cmd = [self.juju_command, "remove-cloud", "--client", cloud_name]
# process = await asyncio.create_subprocess_exec(
# *cmd, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE,
# )
# _stdout, stderr = await process.communicate()
# return_code = process.returncode
# if return_code > 0:
# raise Exception(stderr)
# return True
# async def set_config(self, cluster_uuid: str, config: dict,) -> bool:
# """Save the cluster configuration
# Saves the cluster information to the Mongo database
# :param cluster_uuid str: The UUID of the cluster
# :param config dict: A dictionary containing the cluster configuration
# """
# juju_db = self.db.get_one("admin", {"_id": "juju"})
# k8sclusters = juju_db["k8sclusters"] if "k8sclusters" in juju_db else []
# self.db.encrypt_decrypt_fields(
# config,
# "encrypt",
# ["secret", "cacert"],
# schema_version="1.1",
# salt=cluster_uuid,
# )
# k8sclusters.append({"_id": cluster_uuid, "config": config})
# self.db.set_one(
# table="admin",
# q_filter={"_id": "juju"},
# update_dict={"k8sclusters": k8sclusters},
# )
# Private methods to create/delete needed resources in the
# Kubernetes cluster to create the K8s cloud in Juju
def _create_cluster_role(
    self,
    kubectl: Kubectl,
    name: str,
    labels: Dict[str, str],
):
    """Create a full-permission cluster role.

    The role grants every verb on every resource of every api group,
    plus every non-resource URL.

    :param kubectl: Kubectl instance wired to the target cluster
    :param name: name for the new cluster role
    :param labels: labels to attach to the role's metadata
    :raises Exception: if a cluster role with that name already exists
    """
    rbac = kubectl.clients[RBAC_CLIENT]

    # Refuse to clobber an existing role with the same name.
    existing = rbac.list_cluster_role(
        field_selector="metadata.name={}".format(name)
    )
    if len(existing.items) > 0:
        raise Exception(
            "Cluster role with metadata.name={} already exists".format(name)
        )

    # `non_resource_ur_ls` is the kubernetes-client's generated kwarg
    # name for nonResourceURLs.
    rbac.create_cluster_role(
        V1ClusterRole(
            metadata=V1ObjectMeta(
                name=name, labels=labels, namespace=ADMIN_NAMESPACE
            ),
            rules=[
                V1PolicyRule(api_groups=["*"], resources=["*"], verbs=["*"]),
                V1PolicyRule(non_resource_ur_ls=["*"], verbs=["*"]),
            ],
        )
    )
def _delete_cluster_role(self, kubectl: Kubectl, name: str):
    """Delete the cluster role with the given name."""
    rbac = kubectl.clients[RBAC_CLIENT]
    rbac.delete_cluster_role(name)
def _create_service_account(
    self,
    kubectl: Kubectl,
    name: str,
    labels: Dict[str, str],
):
    """Create a service account in the admin namespace.

    :param kubectl: Kubectl instance wired to the target cluster
    :param name: name for the new service account
    :param labels: labels to attach to the account's metadata
    :raises Exception: if a service account with that name already exists
    """
    core = kubectl.clients[CORE_CLIENT]

    # Refuse to clobber an existing account with the same name.
    existing = core.list_namespaced_service_account(
        ADMIN_NAMESPACE, field_selector="metadata.name={}".format(name)
    )
    if len(existing.items) > 0:
        raise Exception(
            "Service account with metadata.name={} already exists".format(name)
        )

    core.create_namespaced_service_account(
        ADMIN_NAMESPACE,
        V1ServiceAccount(
            metadata=V1ObjectMeta(
                name=name, labels=labels, namespace=ADMIN_NAMESPACE
            )
        ),
    )
def _delete_service_account(self, kubectl: Kubectl, name: str):
    """Delete the named service account from the admin namespace."""
    core = kubectl.clients[CORE_CLIENT]
    core.delete_namespaced_service_account(name, ADMIN_NAMESPACE)
def _create_cluster_role_binding(
    self,
    kubectl: Kubectl,
    name: str,
    labels: Dict[str, str],
):
    """Bind the like-named cluster role to the like-named service account.

    Role, service account and binding all share the same generated
    name, so a single rbac id ties the whole credential stack together.

    :param kubectl: Kubectl instance wired to the target cluster
    :param name: shared name of the role/account/binding
    :param labels: labels to attach to the binding's metadata
    :raises Exception: if a binding with that name already exists
    """
    rbac = kubectl.clients[RBAC_CLIENT]

    existing = rbac.list_cluster_role_binding(
        field_selector="metadata.name={}".format(name)
    )
    if len(existing.items) > 0:
        raise Exception("Generated rbac id already exists")

    binding = V1ClusterRoleBinding(
        metadata=V1ObjectMeta(name=name, labels=labels),
        role_ref=V1RoleRef(kind="ClusterRole", name=name, api_group=""),
        subjects=[
            V1Subject(kind="ServiceAccount", name=name, namespace=ADMIN_NAMESPACE)
        ],
    )
    rbac.create_cluster_role_binding(binding)
def _delete_cluster_role_binding(self, kubectl: Kubectl, name: str):
    """Delete the cluster role binding with the given name."""
    rbac = kubectl.clients[RBAC_CLIENT]
    rbac.delete_cluster_role_binding(name)
async def _get_secret_data(self, kubectl: Kubectl, name: str) -> (str, str):
    """Get the token and root CA cert from a service account's secret.

    Kubernetes populates a service account's secret reference
    asynchronously after the account is created, so this polls the
    account (up to 10 attempts) until the secret reference appears.

    :param kubectl: Kubectl instance wired to the target cluster
    :param name: name of the service account (in ADMIN_NAMESPACE)
    :return: tuple (token, root CA certificate), both base64-decoded
        to utf-8 strings
    :raises Exception: if the service account does not exist, or its
        secret never appears within the retry budget
    """
    v1_core = kubectl.clients[CORE_CLIENT]
    retries_limit = 10
    secret_name = None
    while True:
        retries_limit -= 1
        service_accounts = v1_core.list_namespaced_service_account(
            ADMIN_NAMESPACE, field_selector="metadata.name={}".format(name)
        )
        if len(service_accounts.items) == 0:
            raise Exception(
                "Service account not found with metadata.name={}".format(name)
            )
        service_account = service_accounts.items[0]
        if service_account.secrets and len(service_account.secrets) > 0:
            secret_name = service_account.secrets[0].name
        if secret_name is not None or not retries_limit:
            break
        # Fix: yield to the event loop between polls. The original
        # busy-looped, so all 10 retries fired back-to-back and the
        # retry budget gave the API server no time to populate the
        # secret reference.
        await asyncio.sleep(1)
    if not secret_name:
        raise Exception(
            "Failed getting the secret from service account {}".format(name)
        )
    secret = v1_core.list_namespaced_secret(
        ADMIN_NAMESPACE,
        field_selector="metadata.name={}".format(secret_name),
    ).items[0]
    token = secret.data[SERVICE_ACCOUNT_TOKEN_KEY]
    client_certificate_data = secret.data[SERVICE_ACCOUNT_ROOT_CA_KEY]
    return (
        base64.b64decode(token).decode("utf-8"),
        base64.b64decode(client_certificate_data).decode("utf-8"),
    )
@staticmethod
def generate_kdu_instance_name(**kwargs):
db_dict = kwargs.get("db_dict")
kdu_name = kwargs.get("kdu_name", None)
if kdu_name:
kdu_instance = "{}-{}".format(kdu_name, db_dict["filter"]["_id"])
else:
kdu_instance = db_dict["filter"]["_id"]
return kdu_instance