import functools
import yaml
import asyncio
-import socket
import uuid
import os
+import ssl
from grpclib.client import Channel
from osm_lcm.lcm_utils import deep_get
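+# CA bundle used to validate the execution environment's gRPC server certificate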
+CA_STORE = "/etc/ssl/certs/osm-ca.crt"
+
def retryer(max_wait_time_var="_initial_retry_time", delay_time_var="_retry_delay"):
def wrapper(func):
return wrapper
+def create_secure_context(
+ trusted: str,
+) -> ssl.SSLContext:
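+    """Build a TLS client context that verifies the EE gRPC server against the CA bundle given in ``trusted``."""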
+ ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
+ ctx.verify_mode = ssl.CERT_REQUIRED
+ ctx.check_hostname = True
+ ctx.minimum_version = ssl.TLSVersion.TLSv1_2
+ # TODO: client TLS
+ # ctx.load_cert_chain(str(client_cert), str(client_key))
+ ctx.load_verify_locations(trusted)
+ ctx.set_ciphers("ECDHE+AESGCM:ECDHE+CHACHA20:DHE+AESGCM:DHE+CHACHA20")
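+    # gRPC runs over HTTP/2, so advertise "h2" via ALPN (and NPN where the ssl module still supports it)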
+ ctx.set_alpn_protocols(["h2"])
+ try:
+ ctx.set_npn_protocols(["h2"])
+ except NotImplementedError:
+ pass
+ return ctx
+
+
class LCMHelmConn(N2VCConnector, LcmBase):
_KUBECTL_OSM_NAMESPACE = "osm"
_KUBECTL_OSM_CLUSTER_NAME = "_system-osm-k8s"
"Applied default retry time: {}".format(self._max_retry_time)
)
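+        # eegrpc_tls_enforce: when enabled, never fall back to plaintext gRPC towards the execution environment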
+        tls_enforce = (self.vca_config or {}).get("eegrpc_tls_enforce", False)
+        self._tls_enforce = str(tls_enforce).lower() in ("true", "1", "yes")
+        self.log.debug("TLS enforce enabled: {}".format(self._tls_enforce))
+
# initialize helm connector for helmv2 and helmv3
self._k8sclusterhelm2 = K8sHelmConnector(
kubectl_command=self.vca_config.get("kubectlpath"),
self.log.error("Error upgrading chart ee: {}".format(e), exc_info=True)
raise N2VCException("Error upgrading chart ee: {}".format(e))
+ async def create_tls_certificate(
+ self,
+ nsr_id: str,
+ secret_name: str,
+ usage: str,
+ dns_prefix: str,
+ namespace: str = _KUBECTL_OSM_NAMESPACE,
+ ):
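+        # Create a certificate for the execution environments of this NS, stored in `secret_name` with the requested usage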
+ # Obtain system cluster id from database
+ system_cluster_uuid = await self._get_system_cluster_id()
+ # use helm-v3 as certificates don't depend on helm version
+ await self._k8sclusterhelm3.create_certificate(
+ cluster_uuid=system_cluster_uuid,
+ namespace=namespace,
+ dns_prefix=dns_prefix,
+ name=nsr_id,
+ secret_name=secret_name,
+ usage=usage,
+ )
+
+ async def delete_tls_certificate(
+ self,
+ certificate_name: str = None,
+ namespace: str = _KUBECTL_OSM_NAMESPACE,
+ ):
+ # Obtain system cluster id from database
+ system_cluster_uuid = await self._get_system_cluster_id()
+ await self._k8sclusterhelm3.delete_certificate(
+ cluster_uuid=system_cluster_uuid,
+ namespace=namespace,
+ certificate_name=certificate_name,
+ )
+
async def register_execution_environment(
self,
namespace: str,
try:
# Obtain ip_addr for the ee service, it is resolved by dns from the ee name by kubernetes
version, namespace, helm_id = get_ee_id_parts(ee_id)
- ip_addr = socket.gethostbyname(helm_id)
-
+ ip_addr = "{}.{}.svc".format(helm_id, namespace)
# Obtain ssh_key from the ee, this method will implement retries to allow the ee
# install libraries and start successfully
ssh_key = await self._get_ssh_key(ip_addr)
try:
version, namespace, helm_id = get_ee_id_parts(ee_id)
- ip_addr = socket.gethostbyname(helm_id)
+ ip_addr = "{}.{}.svc".format(helm_id, namespace)
except Exception as e:
self.log.error("Error getting ee ip ee: {}".format(e))
raise N2VCException("Error getting ee ip ee: {}".format(e))
@retryer(max_wait_time_var="_initial_retry_time", delay_time_var="_retry_delay")
async def _get_ssh_key(self, ip_addr):
- channel = Channel(ip_addr, self._ee_service_port)
- try:
- stub = FrontendExecutorStub(channel)
- self.log.debug("get ssh key, ip_addr: {}".format(ip_addr))
- reply: SshKeyReply = await stub.GetSshKey(SshKeyRequest())
- return reply.message
- finally:
- channel.close()
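+        # Reuse _execute_primitive_internal so the ssh-key request shares the TLS-aware channel handling below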
+ return await self._execute_primitive_internal(
+ ip_addr,
+ "_get_ssh_key",
+ None,
+ )
@retryer(max_wait_time_var="_initial_retry_time", delay_time_var="_retry_delay")
async def _execute_config_primitive(self, ip_addr, params, db_dict=None):
async def _execute_primitive_internal(
self, ip_addr, primitive_name, params, db_dict=None
):
-
- channel = Channel(ip_addr, self._ee_service_port)
- try:
+ async def execute():
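+            # Run the call on whichever channel is currently bound in the enclosing scope (TLS first, plaintext only after fallback)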
stub = FrontendExecutorStub(channel)
+ if primitive_name == "_get_ssh_key":
+ self.log.debug("get ssh key, ip_addr: {}".format(ip_addr))
+ reply: SshKeyReply = await stub.GetSshKey(SshKeyRequest())
+ return reply.message
+ # For any other primitives
async with stub.RunPrimitive.open() as stream:
primitive_id = str(uuid.uuid1())
result = None
return reply.status, reply.detailed_message
else:
return "ERROR", "No result received"
+
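+        # Try a TLS channel first, validating the EE server certificate against CA_STORE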
+ ssl_context = create_secure_context(CA_STORE)
+ channel = Channel(ip_addr, self._ee_service_port, ssl=ssl_context)
+ try:
+ return await execute()
+ except ssl.SSLError as ssl_error: # fallback to insecure gRPC
+ if ssl_error.reason == "WRONG_VERSION_NUMBER" and not self._tls_enforce:
+                self.log.debug(
+                    "Execution environment doesn't support TLS, falling back to insecure gRPC"
+                )
+                channel.close()  # drop the failed TLS channel before retrying in plaintext
+                channel = Channel(ip_addr, self._ee_service_port)
+                return await execute()
+ elif ssl_error.reason == "WRONG_VERSION_NUMBER":
+ raise N2VCException(
+ "Execution environment doesn't support TLS, primitives cannot be executed"
+ )
+ else:
+ raise
finally:
channel.close()
get_juju_ee_ref,
get_kdu_resource_profile,
find_software_version,
+ check_helm_ee_in_ns,
)
from osm_lcm.data_utils.list_utils import find_in_list
from osm_lcm.data_utils.vnfr import (
stage[1] = "Deploying Execution Environments."
self.logger.debug(logging_text + stage[1])
+ # create namespace and certificate if any helm based EE is present in the NS
+ if check_helm_ee_in_ns(db_vnfds):
+ # TODO: create EE namespace
+ # create TLS certificates
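+                # dns_prefix "*" requests a wildcard certificate; the resulting secret is named "ee-tls-<nsr_id>"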
+ await self.vca_map["helm-v3"].create_tls_certificate(
+ secret_name="ee-tls-{}".format(nsr_id),
+ dns_prefix="*",
+ nsr_id=nsr_id,
+ usage="server auth",
+ )
+
nsi_id = None # TODO put nsi_id when this nsr belongs to a NSI
for vnf_profile in get_vnf_profiles(nsd):
vnfd_id = vnf_profile["vnfd-id"]
# task_delete_ee = asyncio.ensure_future(self.n2vc.delete_namespace(namespace="." + nsr_id))
tasks_dict_info[task_delete_ee] = "Terminating all VCA"
+ # Delete Namespace and Certificates if necessary
+ if check_helm_ee_in_ns(list(db_vnfds_from_member_index.values())):
+ await self.vca_map["helm-v3"].delete_tls_certificate(
+ certificate_name=db_nslcmop["nsInstanceId"],
+ )
+ # TODO: Delete namespace
+
# Delete from k8scluster
stage[1] = "Deleting KDUs."
self.logger.debug(logging_text + stage[1])