def _parse_yaml(self, descriptor, response=False):
try:
- return yaml.load(descriptor)
+ return yaml.load(descriptor, Loader=yaml.Loader)
except yaml.YAMLError as exc:
error_pos = ""
if hasattr(exc, 'problem_mark'):
return tuple(filled)
+def deep_get(target_dict, key_list):
+ """
+    Get a value from target_dict by navigating the nested keys. If a key does not exist, it returns None
+    Example: target_dict={a: {b: 5}}; key_list=[a,b] returns 5; both key_list=[a,b,c] and key_list=[f,h] return None
+    :param target_dict: dictionary to be read
+    :param key_list: list of keys to read from target_dict
+    :return: The wanted value if it exists, None otherwise
+ """
+ for key in key_list:
+ if not isinstance(target_dict, dict) or key not in target_dict:
+ return None
+ target_dict = target_dict[key]
+ return target_dict
+
+
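A quick usage sketch for the new helper (illustrative only, not part of the patch; the dictionary shape is made up):

    from osm_lcm.lcm_utils import deep_get

    nsr = {"_admin": {"deployed": {"K8s": []}}}
    assert deep_get(nsr, ("_admin", "deployed", "K8s")) == []
    # a missing key anywhere along the path yields None instead of a KeyError
    assert deep_get(nsr, ("_admin", "deployed", "missing")) is None
    assert deep_get(nsr, ("no", "such", "path")) is None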
# LcmBase must be listed before TaskRegistry, as it is a dependency.
class LcmBase:
from jinja2 import Environment, Template, meta, TemplateError, TemplateNotFound, TemplateSyntaxError
from osm_lcm import ROclient
-from osm_lcm.lcm_utils import LcmException, LcmExceptionNoMgmtIP, LcmBase
+from osm_lcm.lcm_utils import LcmException, LcmExceptionNoMgmtIP, LcmBase, deep_get
from n2vc.k8s_helm_conn import K8sHelmConnector
from osm_common.dbbase import DbException
target_dict[key_list[-1]] = value
-def deep_get(target_dict, key_list):
- """
- Get a value from target_dict entering in the nested keys. If keys does not exist, it returns None
- Example target_dict={a: {b: 5}}; key_list=[a,b] returns 5; both key_list=[a,b,c] and key_list=[f,h] return None
- :param target_dict: dictionary to be read
- :param key_list: list of keys to read from target_dict
- :return: The wanted value if exist, None otherwise
- """
- for key in key_list:
- if not isinstance(target_dict, dict) or key not in target_dict:
- return None
- target_dict = target_dict[key]
- return target_dict
-
-
class NsLcm(LcmBase):
timeout_vca_on_error = 5 * 60 # Time for charm from first time at blocked,error status to mark as failed
total_deploy_timeout = 2 * 3600 # global timeout for deployment
db_nsr_update["_admin.nsState"] = "INSTANTIATED"
self.update_db_2("nsrs", nsr_id, db_nsr_update)
self.logger.debug(logging_text + "Before deploy_kdus")
- db_k8scluster_list = self.db.get_list("k8sclusters", {})
# Call to deploy_kdus in case exists the "vdu:kdu" param
task_kdu = asyncio.ensure_future(
self.deploy_kdus(
logging_text=logging_text,
nsr_id=nsr_id,
- nsd=nsd,
db_nsr=db_nsr,
- db_nslcmop=db_nslcmop,
db_vnfrs=db_vnfrs,
- db_vnfds_ref=db_vnfds_ref,
- db_k8scluster=db_k8scluster_list
)
)
self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "instantiate_KDUs", task_kdu)
# Get additional parameters
deploy_params = {}
if db_vnfr.get("additionalParamsForVnf"):
- deploy_params = db_vnfr["additionalParamsForVnf"].copy()
- for k, v in deploy_params.items():
- if isinstance(v, str) and v.startswith("!!yaml "):
- deploy_params[k] = yaml.safe_load(v[7:])
+ deploy_params = self._format_additional_params(db_vnfr["additionalParamsForVnf"].copy())
descriptor_config = vnfd.get("vnf-configuration")
if descriptor_config and descriptor_config.get("juju"):
for vdud in get_iterable(vnfd, 'vdu'):
vdu_id = vdud["id"]
descriptor_config = vdud.get('vdu-configuration')
+ vdur = next((x for x in db_vnfr["vdur"] if x["vdu-id-ref"] == vdu_id), None)
+                if vdur and vdur.get("additionalParams"):
+ deploy_params_vdu = self._format_additional_params(vdur["additionalParams"])
+ else:
+ deploy_params_vdu = deploy_params
if descriptor_config and descriptor_config.get("juju"):
# look for vdu index in the db_vnfr["vdu"] section
# for vdur_index, vdur in enumerate(db_vnfr["vdur"]):
member_vnf_index=member_vnf_index,
vdu_index=vdu_index,
vdu_name=vdu_name,
- deploy_params=deploy_params,
+ deploy_params=deploy_params_vdu,
descriptor_config=descriptor_config,
base_folder=base_folder,
task_instantiation_list=task_instantiation_list
# Get additional parameters
deploy_params = {}
if db_nsr.get("additionalParamsForNs"):
- deploy_params = db_nsr["additionalParamsForNs"].copy()
- for k, v in deploy_params.items():
- if isinstance(v, str) and v.startswith("!!yaml "):
- deploy_params[k] = yaml.safe_load(v[7:])
+ deploy_params = self._format_additional_params(db_nsr["additionalParamsForNs"].copy())
base_folder = nsd["_admin"]["storage"]
self._deploy_n2vc(
logging_text=logging_text,
self.logger.debug(logging_text + "Exit")
self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_instantiate")
- async def deploy_kdus(self, logging_text, nsr_id, nsd, db_nsr, db_nslcmop, db_vnfrs, db_vnfds_ref, db_k8scluster):
+ async def deploy_kdus(self, logging_text, nsr_id, db_nsr, db_vnfrs):
# Launch kdus if present in the descriptor
- logging_text = "Deploy kdus: "
- db_nsr_update = {}
- db_nsr_update["_admin.deployed.K8s"] = []
- try:
- # Look for all vnfds
- # db_nsr_update["_admin.deployed.K8s"] = []
- vnf_update = []
- task_list = []
- for c_vnf in nsd.get("constituent-vnfd", ()):
- vnfr = db_vnfrs[c_vnf["member-vnf-index"]]
- member_vnf_index = c_vnf["member-vnf-index"]
- vnfd = db_vnfds_ref[c_vnf['vnfd-id-ref']]
- vnfd_ref = vnfd["id"]
- desc_params = {}
- step = "Checking kdu from vnf: {} - member-vnf-index: {}".format(vnfd_ref, member_vnf_index)
- self.logger.debug(logging_text + step)
- if vnfd.get("kdu"):
- step = "vnf: {} has kdus".format(vnfd_ref)
- self.logger.debug(logging_text + step)
- for vnfr_name, vnfr_data in db_vnfrs.items():
- if vnfr_data["vnfd-ref"] == vnfd["id"]:
- if vnfr_data.get("additionalParamsForVnf"):
- desc_params = self._format_additional_params(vnfr_data["additionalParamsForVnf"])
- break
- else:
- raise LcmException("VNF descriptor not found with id: {}".format(vnfr_data["vnfd-ref"]))
- self.logger.debug(logging_text + step)
+        k8scluster_id_2_uuid = {"helm-chart": {}, "juju-bundle": {}}
- for kdur in vnfr.get("kdur"):
- index = 0
- for k8scluster in db_k8scluster:
- if kdur["k8s-cluster"]["id"] == k8scluster["_id"]:
- cluster_uuid = k8scluster["cluster-uuid"]
- break
- else:
- raise LcmException("K8scluster not found with id: {}".format(kdur["k8s-cluster"]["id"]))
- self.logger.debug(logging_text + step)
+ def _get_cluster_id(cluster_id, cluster_type):
+            nonlocal k8scluster_id_2_uuid
+            if cluster_id in k8scluster_id_2_uuid[cluster_type]:
+                return k8scluster_id_2_uuid[cluster_type][cluster_id]
- step = "Instantiate KDU {} in k8s cluster {}".format(kdur["kdu-name"], cluster_uuid)
- self.logger.debug(logging_text + step)
- for kdu in vnfd.get("kdu"):
- if kdu.get("name") == kdur["kdu-name"]:
- break
- else:
- raise LcmException("KDU not found with name: {} in VNFD {}".format(kdur["kdu-name"],
- vnfd["name"]))
- self.logger.debug(logging_text + step)
- kdumodel = None
- k8sclustertype = None
- if kdu.get("helm-chart"):
- kdumodel = kdu["helm-chart"]
- k8sclustertype = "chart"
- elif kdu.get("juju-bundle"):
- kdumodel = kdu["juju-bundle"]
- k8sclustertype = "juju"
- k8s_instace_info = {"kdu-instance": None, "k8scluster-uuid": cluster_uuid,
- "vnfr-id": vnfr["id"], "k8scluster-type": k8sclustertype,
- "kdu-name": kdur["kdu-name"], "kdu-model": kdumodel}
- db_nsr_update["_admin.deployed.K8s"].append(k8s_instace_info)
- db_dict = {"collection": "nsrs", "filter": {"_id": nsr_id}, "path": "_admin.deployed.K8s."
- "{}".format(index)}
- if k8sclustertype == "chart":
- task = self.k8sclusterhelm.install(cluster_uuid=cluster_uuid, kdu_model=kdumodel,
- atomic=True, params=desc_params,
- db_dict=db_dict, timeout=300)
- else:
- # TODO I need the juju connector in place
- pass
- task_list.append(task)
- index += 1
+ db_k8scluster = self.db.get_one("k8sclusters", {"_id": cluster_id}, fail_on_empty=False)
+ if not db_k8scluster:
+ raise LcmException("K8s cluster {} cannot be found".format(cluster_id))
+ k8s_id = deep_get(db_k8scluster, ("_admin", cluster_type, "id"))
+ if not k8s_id:
+                raise LcmException("K8s cluster '{}' has not been initialized for '{}'".format(cluster_id, cluster_type))
+            k8scluster_id_2_uuid[cluster_type][cluster_id] = k8s_id
+ return k8s_id
+
+ logging_text += "Deploy kdus: "
+        try:
+            step = "Initializing K8s deployment list in the ns record"
+            db_nsr_update = {"_admin.deployed.K8s": []}
self.update_db_2("nsrs", nsr_id, db_nsr_update)
- done = None
- pending = None
- if len(task_list) > 0:
- self.logger.debug('Waiting for terminate pending tasks...')
- done, pending = await asyncio.wait(task_list, timeout=3600)
- if not pending:
- for fut in done:
- k8s_instance = fut.result()
- k8s_instace_info = {"kdu-instance": k8s_instance, "k8scluster-uuid": cluster_uuid,
- "vnfr-id": vnfr["id"], "k8scluster-type": k8sclustertype,
- "kdu-name": kdur["kdu-name"], "kdu-model": kdumodel}
- vnf_update.append(k8s_instace_info)
- self.logger.debug('All tasks finished...')
- else:
- self.logger.info('There are pending tasks: {}'.format(pending))
- db_nsr_update["_admin.deployed.K8s"] = vnf_update
+            # Look for KDUs in every VNF record
+ pending_tasks = {}
+ index = 0
+ for vnfr_data in db_vnfrs.values():
+ for kdur in get_iterable(vnfr_data, "kdur"):
+ desc_params = self._format_additional_params(kdur.get("additionalParams"))
+ kdumodel = None
+ k8sclustertype = None
+ error_text = None
+ cluster_uuid = None
+ if kdur.get("helm-chart"):
+ kdumodel = kdur["helm-chart"]
+ k8sclustertype = "chart"
+ k8sclustertype_full = "helm-chart"
+ elif kdur.get("juju-bundle"):
+ kdumodel = kdur["juju-bundle"]
+ k8sclustertype = "juju"
+ k8sclustertype_full = "juju-bundle"
+ else:
+                        error_text = "kdu type is neither helm-chart nor juju-bundle. Maybe an old NBI version is" \
+                                     " running"
+                    try:
+                        if not error_text:
+                            cluster_uuid = _get_cluster_id(kdur["k8s-cluster"]["id"], k8sclustertype_full)
+                    except LcmException as e:
+                        error_text = str(e)
+                    step = "Instantiate KDU {} in k8s cluster {}".format(kdur["kdu-name"], cluster_uuid)
+
+                    k8s_instance_info = {"kdu-instance": None, "k8scluster-uuid": cluster_uuid,
+                                         "k8scluster-type": k8sclustertype,
+                                         "kdu-name": kdur["kdu-name"], "kdu-model": kdumodel}
+                    if error_text:
+                        k8s_instance_info["detailed-status"] = error_text
+                    db_nsr_update["_admin.deployed.K8s.{}".format(index)] = k8s_instance_info
+                    self.update_db_2("nsrs", nsr_id, db_nsr_update)
+                    if error_text:
+                        index += 1
+                        continue
+
+                    db_dict = {"collection": "nsrs", "filter": {"_id": nsr_id},
+                               "path": "_admin.deployed.K8s.{}".format(index)}
+                    if k8sclustertype == "chart":
+                        task = asyncio.ensure_future(
+                            self.k8sclusterhelm.install(cluster_uuid=cluster_uuid, kdu_model=kdumodel, atomic=True,
+                                                        params=desc_params, db_dict=db_dict, timeout=3600)
+                        )
+                    else:
+                        # TODO juju-bundle connector not yet in place; skip so a stale task is not registered
+                        index += 1
+                        continue
+                    pending_tasks[task] = "_admin.deployed.K8s.{}.".format(index)
+                    index += 1
+ if not pending_tasks:
+ return
+            self.logger.debug(logging_text + 'Waiting for pending KDU installation tasks to finish...')
+ pending_list = list(pending_tasks.keys())
+ while pending_list:
+ done_list, pending_list = await asyncio.wait(pending_list, timeout=30*60,
+ return_when=asyncio.FIRST_COMPLETED)
+ if not done_list: # timeout
+ for task in pending_list:
+                        db_nsr_update[pending_tasks[task] + "detailed-status"] = "Timeout"
+ break
+ for task in done_list:
+ exc = task.exception()
+ if exc:
+ db_nsr_update[pending_tasks[task] + "detailed-status"] = "{}".format(exc)
+ else:
+ db_nsr_update[pending_tasks[task] + "kdu-instance"] = task.result()
+
except Exception as e:
self.logger.critical(logging_text + "Exit Exception {} while '{}': {}".format(type(e).__name__, step, e))
raise LcmException("{} Exit Exception {} while '{}': {}".format(logging_text, type(e).__name__, step, e))
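For reviewers: deploy_kdus tracks each install task in a dict keyed by the future and valued with its "_admin.deployed.K8s.<index>." path prefix, so results and failures can be written back field by field. A minimal self-contained sketch of that wait-loop pattern, with stub coroutines standing in for the helm installs:

    import asyncio

    async def _demo():
        async def ok():
            return "stable-openldap-0092830263"

        async def boom():
            raise RuntimeError("helm install failed")

        pending_tasks = {asyncio.ensure_future(ok()): "_admin.deployed.K8s.0.",
                         asyncio.ensure_future(boom()): "_admin.deployed.K8s.1."}
        db_update = {}
        pending_list = list(pending_tasks.keys())
        while pending_list:
            done_list, pending_list = await asyncio.wait(pending_list, timeout=30 * 60,
                                                         return_when=asyncio.FIRST_COMPLETED)
            if not done_list:  # timeout: mark whatever is still pending and stop waiting
                for task in pending_list:
                    db_update[pending_tasks[task] + "detailed-status"] = "Timeout"
                break
            for task in done_list:
                exc = task.exception()
                if exc:
                    db_update[pending_tasks[task] + "detailed-status"] = str(exc)
                else:
                    db_update[pending_tasks[task] + "kdu-instance"] = task.result()
        return db_update

    print(asyncio.get_event_loop().run_until_complete(_demo()))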
return nslcmop
def _format_additional_params(self, params):
-
+ params = params or {}
for key, value in params.items():
if str(value).startswith("!!yaml "):
params[key] = yaml.safe_load(value[7:])
-
return params
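For reference, the "!!yaml " convention this helper expands, shown on made-up values:

    import yaml

    params = {"replicas": "3", "config": "!!yaml {retries: 5, hosts: [ldap, mongo]}"}
    for key, value in params.items():
        if str(value).startswith("!!yaml "):
            params[key] = yaml.safe_load(value[7:])  # drop the 7-character prefix, parse the remainder
    assert params["config"] == {"retries": 5, "hosts": ["ldap", "mongo"]}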
def _get_terminate_primitive_params(self, seq, vnf_index):
# Delete from k8scluster
step = "delete kdus"
self.logger.debug(logging_text + step)
- print(nsr_deployed)
+ # print(nsr_deployed)
if nsr_deployed:
- for kdu in nsr_deployed.get("K8s"):
+ for kdu in nsr_deployed.get("K8s", ()):
+ kdu_instance = kdu.get("kdu-instance")
+ if not kdu_instance:
+ continue
if kdu.get("k8scluster-type") == "chart":
- task_delete_kdu_instance = asyncio.ensure_future(self.k8sclusterhelm.uninstall(
- cluster_uuid=kdu.get("k8scluster-uuid"), kdu_instance=kdu.get("kdu-instance")))
+ task_delete_kdu_instance = asyncio.ensure_future(
+ self.k8sclusterhelm.uninstall(cluster_uuid=kdu.get("k8scluster-uuid"),
+ kdu_instance=kdu_instance))
elif kdu.get("k8scluster-type") == "juju":
# TODO Juju connector needed
- pass
+ continue
else:
- msg = "k8scluster-type not defined"
- raise LcmException(msg)
-
+                    self.logger.error(logging_text + "Unknown k8s deployment type {}".
+                                      format(kdu.get("k8scluster-type")))
+ continue
pending_tasks.append(task_delete_kdu_instance)
except LcmException as e:
msg = "Failed while deleting KDUs from NS: {}".format(e)
break
elif kdu_name:
self.logger.debug(logging_text + "Checking actions in KDUs")
- desc_params = {}
- if vnf_index:
- if db_vnfr.get("additionalParamsForVnf") and db_vnfr["additionalParamsForVnf"].\
- get("member-vnf-index") == vnf_index:
- desc_params = self._format_additional_params(db_vnfr["additionalParamsForVnf"].
- get("additionalParams"))
- if primitive_params:
- desc_params.update(primitive_params)
+            kdur = next((x for x in db_vnfr["kdur"] if x["kdu-name"] == kdu_name), None)
+            desc_params = self._format_additional_params(kdur.get("additionalParams")) if kdur else {}
+ if primitive_params:
+ desc_params.update(primitive_params)
# TODO Check if we will need something at vnf level
index = 0
for kdu in get_iterable(nsr_deployed, "K8s"):
desc_params = {}
if vnf_index:
if db_vnfr.get("additionalParamsForVnf"):
- desc_params.update(db_vnfr["additionalParamsForVnf"])
+ desc_params = self._format_additional_params(db_vnfr["additionalParamsForVnf"])
+ if vdu_id:
+ vdur = next((x for x in db_vnfr["vdur"] if x["vdu-id-ref"] == vdu_id), None)
+                    if vdur and vdur.get("additionalParams"):
+ desc_params = self._format_additional_params(vdur["additionalParams"])
else:
if db_nsr.get("additionalParamsForNs"):
- desc_params.update(db_nsr["additionalParamsForNs"])
+ desc_params.update(self._format_additional_params(db_nsr["additionalParamsForNs"]))
# TODO check if ns is in a proper status
output, detail = await self._ns_execute_primitive(
- member-vnf-index-ref: '2'
vnfd-connection-point-ref: vnf-data
vnfd-id-ref: hackfest3charmed-vnf
+- _admin:
+ created: 1575031728.9257665
+ modified: 1575031728.9257665
+ onboardingState: ONBOARDED
+ operationalState: ENABLED
+ projects_read:
+ - 25b5aebf-3da1-49ed-99de-1d2b4a86d6e4
+ projects_write:
+ - 25b5aebf-3da1-49ed-99de-1d2b4a86d6e4
+ storage:
+ descriptor: multikdu_ns/multikdu_nsd.yaml
+ folder: d0f63683-9032-4c6f-8928-ffd4674b9f69
+ fs: local
+ path: /app/storage/
+ pkg-dir: multikdu_ns
+ zipfile: multikdu_ns.tar.gz
+ usageState: NOT_IN_USE
+ userDefinedData: {}
+ _id: d0f63683-9032-4c6f-8928-ffd4674b9f69
+ constituent-vnfd:
+ - member-vnf-index: multikdu
+ vnfd-id-ref: multikdu_knf
+ description: NS consisting of a single KNF multikdu_knf connected to mgmt network
+ id: multikdu_ns
+ logo: osm.png
+ name: multikdu_ns
+ short-name: multikdu_ns
+ vendor: OSM
+ version: '1.0'
+ vld:
+ - id: mgmtnet
+ mgmt-network: true
+ name: mgmtnet
+ type: ELAN
+ vim-network-name: mgmt
+ vnfd-connection-point-ref:
+ - member-vnf-index-ref: multikdu
+ vnfd-connection-point-ref: mgmt
+ vnfd-id-ref: multikdu_knf
"""
+
db_nslcmops_text = """
---
- _admin:
operationState: FAILED
startTime: 1566823354.414689
statusEnteredTime: 1566824534.5112448
+- _admin:
+ created: 1575034637.044651
+ modified: 1575034637.044651
+ projects_read:
+ - 25b5aebf-3da1-49ed-99de-1d2b4a86d6e4
+ projects_write:
+ - 25b5aebf-3da1-49ed-99de-1d2b4a86d6e4
+ _id: cf3aa178-7640-4174-b921-2330e6f2aad6
+ detailed-status: done
+ id: cf3aa178-7640-4174-b921-2330e6f2aad6
+ isAutomaticInvocation: false
+ isCancelPending: false
+ lcmOperationType: instantiate
+ links:
+ nsInstance: /osm/nslcm/v1/ns_instances/0bcb701c-ee4d-41ab-8ee6-f4156f7f114d
+ self: /osm/nslcm/v1/ns_lcm_op_occs/cf3aa178-7640-4174-b921-2330e6f2aad6
+ nsInstanceId: 0bcb701c-ee4d-41ab-8ee6-f4156f7f114d
+ operationParams:
+ lcmOperationType: instantiate
+ nsDescription: default description
+ nsInstanceId: 0bcb701c-ee4d-41ab-8ee6-f4156f7f114d
+ nsName: multikdu
+ nsdId: d0f63683-9032-4c6f-8928-ffd4674b9f69
+ nsr_id: 0bcb701c-ee4d-41ab-8ee6-f4156f7f114d
+ vimAccountId: 74337dcb-ef54-41e7-bd2d-8c0d7fcd326f
+ vld:
+ - name: mgmtnet
+ vim-network-name: internal
+ operationState: COMPLETED
+ startTime: 1575034637.0445576
+ statusEnteredTime: 1575034663.8484545
"""
+
db_nsrs_text = """
---
- _admin:
vim-id: c31364ba-f573-4ab6-bf1a-fed30ede39a8
vnfd-id:
- 7637bcf8-cf14-42dc-ad70-c66fcf1e6e77
+- _admin:
+ created: 1575034637.011233
+ current-operation: null
+ deployed:
+ K8s:
+ - k8scluster-uuid: 73d96432-d692-40d2-8440-e0c73aee209c
+ kdu-instance: stable-mongodb-0086856106
+ kdu-model: stable/mongodb
+ kdu-name: mongo
+ vnfr-id: 5ac34899-a23a-4b3c-918a-cd77acadbea6
+ - k8scluster-uuid: 73d96432-d692-40d2-8440-e0c73aee209c
+ kdu-instance: stable-openldap-0092830263
+        kdu-model: stable/openldap:1.2.1
+        kdu-name: ldap
+ vnfr-id: 5ac34899-a23a-4b3c-918a-cd77acadbea6
+ RO:
+ detailed-status: Deployed at VIM
+ nsd_id: b03a8de8-1898-4142-bc6d-3b0787df567d
+ nsr_id: b5ce3e00-8647-415d-afaa-d5a612cf3074
+ nsr_status: ACTIVE
+ operational-status: running
+ vnfd:
+ - id: b9493dae-a4c9-4b96-8965-329581efb0a1
+ member-vnf-index: multikdu
+ VCA: []
+ modified: 1575034637.011233
+ nsState: INSTANTIATED
+ nslcmop: null
+ operation-type: null
+ projects_read:
+ - 25b5aebf-3da1-49ed-99de-1d2b4a86d6e4
+ projects_write:
+ - 25b5aebf-3da1-49ed-99de-1d2b4a86d6e4
+ _id: 0bcb701c-ee4d-41ab-8ee6-f4156f7f114d
+ additionalParamsForNs: null
+ admin-status: ENABLED
+ config-status: configured
+ constituent-vnfr-ref:
+ - 5ac34899-a23a-4b3c-918a-cd77acadbea6
+ create-time: 1575034636.9990137
+ datacenter: ea958ba5-4e58-4405-bf42-6e3be15d4c3a
+ description: default description
+ detailed-status: done
+ id: 0bcb701c-ee4d-41ab-8ee6-f4156f7f114d
+ instantiate_params:
+ nsDescription: default description
+ nsName: multikdu
+ nsdId: d0f63683-9032-4c6f-8928-ffd4674b9f69
+ vimAccountId: 74337dcb-ef54-41e7-bd2d-8c0d7fcd326f
+ name: multikdu
+ name-ref: multikdu
+ ns-instance-config-ref: 0bcb701c-ee4d-41ab-8ee6-f4156f7f114d
+ nsd-id: d0f63683-9032-4c6f-8928-ffd4674b9f69
+ nsd-name-ref: multikdu_ns
+ nsd-ref: multikdu_ns
+ operational-events: []
+ operational-status: init
+ orchestration-progress: {}
+ resource-orchestrator: osmopenmano
+ short-name: multikdu
+ ssh-authorized-key: null
+ vld:
+ - id: mgmtnet
+ name: null
+ status: ACTIVE
+ status-detailed: null
+ vim-id: 9b6a2ac4-767e-4ec9-9497-8ba63084c77f
+ vim-network-name: mgmt
+ vnfd-id:
+ - 7ab0d10d-8ce2-4c68-aef6-cc5a437a9c62
"""
+
db_ro_ns_text = """
datacenter_tenant_id: dc5c67fa-c7f2-11e9-b9c0-02420aff0004
description: null
vnf_name: hackfest3charmed-vnf.2
vnfd_osm_id: f48163a6-c807-47bc-9682-f72caef5af85.1.2
"""
+
db_vim_accounts_text = """
---
- _admin:
vim_url: http://10.95.87.162:5000/v2.0
vim_user: osm
"""
+
+db_k8sclusters_text = """
+- _admin:
+ created: 1575031378.9268339
+ current_operation: 0
+ modified: 1575031378.9268339
+ operationalState: ENABLED
+ operations:
+ - detailed-status: ''
+ lcmOperationType: create
+ operationParams: null
+ operationState: ''
+ startTime: 1575031378.926895
+ statusEnteredTime: 1575031378.926895
+ worker: 36681ccf7f32
+ projects_read:
+ - 25b5aebf-3da1-49ed-99de-1d2b4a86d6e4
+ projects_write:
+ - 25b5aebf-3da1-49ed-99de-1d2b4a86d6e4
+ helm-chart:
+ id: 73d96432-d692-40d2-8440-e0c73aee209c
+ created: True
+ _id: e7169dab-f71a-4f1f-b82b-432605e8c4b3
+ credentials:
+ apiVersion: v1
+ users:
+ - name: admin
+ user:
+ password: qhpdogJXhBLG+JiYyyE0LeNsJXHkCSMy+sGVzlnJqes=
+ username: admin
+ description: Cluster3
+ k8s_version: '1.15'
+ name: cluster3
+ namespace: kube-system
+ nets:
+ net1: None
+ schema_version: '1.11'
+ vim_account: ea958ba5-4e58-4405-bf42-6e3be15d4c3a
+"""
+
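The _admin.helm-chart.id field in this fixture is what deploy_kdus resolves through deep_get before installing a chart. A reduced sketch of that lookup against a record shaped like the one above (ids copied from the fixture):

    from osm_lcm.lcm_utils import deep_get

    db_k8scluster = {"_id": "e7169dab-f71a-4f1f-b82b-432605e8c4b3",
                     "_admin": {"helm-chart": {"id": "73d96432-d692-40d2-8440-e0c73aee209c", "created": True}}}

    assert deep_get(db_k8scluster, ("_admin", "helm-chart", "id")) == "73d96432-d692-40d2-8440-e0c73aee209c"
    # a cluster that was never initialized for juju-bundle resolves to None and is reported as an error
    assert deep_get(db_k8scluster, ("_admin", "juju-bundle", "id")) is None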
db_vnfds_text = """
---
- _admin:
seq: '2'
juju:
charm: simple
+- _admin:
+ created: 1575031727.5383403
+ modified: 1575031727.5383403
+ onboardingState: ONBOARDED
+ operationalState: ENABLED
+ projects_read:
+ - 25b5aebf-3da1-49ed-99de-1d2b4a86d6e4
+ projects_write:
+ - 25b5aebf-3da1-49ed-99de-1d2b4a86d6e4
+ storage:
+ descriptor: multikdu_knf/multikdu_vnfd.yaml
+ folder: 7ab0d10d-8ce2-4c68-aef6-cc5a437a9c62
+ fs: local
+ path: /app/storage/
+ pkg-dir: multikdu_knf
+ zipfile: multikdu_knf.tar.gz
+ usageState: NOT_IN_USE
+ userDefinedData: {}
+ _id: 7ab0d10d-8ce2-4c68-aef6-cc5a437a9c62
+ connection-point:
+ - name: mgmt
+ description: KNF with two KDU using helm-charts
+ id: multikdu_knf
+ k8s-cluster:
+ nets:
+ - external-connection-point-ref: mgmt
+ id: mgmtnet
+ kdu:
+ - helm-chart: stable/openldap:1.2.1
+ name: ldap
+ - helm-chart: stable/mongodb
+ name: mongo
+ mgmt-interface:
+ cp: mgmt
+ name: multikdu_knf
+ short-name: multikdu_knf
+ vendor: Telefonica
+ version: '1.0'
"""
+
db_vnfrs_text = """
---
- _admin:
vim-id: ff181e6d-2597-4244-b40b-bb0174bdfeb6
vnfd-id: 7637bcf8-cf14-42dc-ad70-c66fcf1e6e77
vnfd-ref: hackfest3charmed-vnf
+- _admin:
+ created: 1575034637.009597
+ modified: 1575034637.009597
+ nsState: NOT_INSTANTIATED
+ projects_read:
+ - 25b5aebf-3da1-49ed-99de-1d2b4a86d6e4
+ projects_write:
+ - 25b5aebf-3da1-49ed-99de-1d2b4a86d6e4
+ _id: 5ac34899-a23a-4b3c-918a-cd77acadbea6
+ additionalParamsForVnf: null
+ connection-point:
+ - connection-point-id: null
+ id: null
+ name: mgmt
+ created-time: 1575034636.9990137
+ id: 5ac34899-a23a-4b3c-918a-cd77acadbea6
+ ip-address: null
+ k8s-cluster:
+ nets:
+ - external-connection-point-ref: mgmt
+ id: mgmtnet
+ ns-vld-id: mgmtnet
+ vim_net: internal
+ kdur:
+ - ip-address: null
+ k8s-cluster:
+ id: e7169dab-f71a-4f1f-b82b-432605e8c4b3
+ kdu-name: ldap
+ helm-chart: stable/openldap:1.2.1
+ - ip-address: null
+ k8s-cluster:
+ id: e7169dab-f71a-4f1f-b82b-432605e8c4b3
+ kdu-name: mongo
+ helm-chart: stable/mongodb
+ member-vnf-index-ref: multikdu
+ nsr-id-ref: 0bcb701c-ee4d-41ab-8ee6-f4156f7f114d
+ vdur: []
+ vim-account-id: 74337dcb-ef54-41e7-bd2d-8c0d7fcd326f
+ vnfd-id: 7ab0d10d-8ce2-4c68-aef6-cc5a437a9c62
+ vnfd-ref: multikdu_knf
"""
+
db_nslcmops_scale_text = """
---
- _admin:
def _return_uuid(self, *args, **kwargs):
return str(uuid4())
+ @patch("osm_lcm.ns.N2VCJujuConnector")
@patch("osm_lcm.ns.K8sHelmConnector")
- async def setUp(self, k8s_mock):
+ async def setUp(self, k8s_mock, n2vc_mock):
# Mock DB
if not getenv("OSMLCMTEST_DB_NOMOCK"):
self.db = DbMemory()
self.db.create_list("nsds", yaml.load(descriptors.db_nsds_text, Loader=yaml.Loader))
self.db.create_list("nsrs", yaml.load(descriptors.db_nsrs_text, Loader=yaml.Loader))
self.db.create_list("vim_accounts", yaml.load(descriptors.db_vim_accounts_text, Loader=yaml.Loader))
+ self.db.create_list("k8sclusters", yaml.load(descriptors.db_k8sclusters_text, Loader=yaml.Loader))
self.db.create_list("nslcmops", yaml.load(descriptors.db_nslcmops_text, Loader=yaml.Loader))
self.db.create_list("vnfrs", yaml.load(descriptors.db_vnfrs_text, Loader=yaml.Loader))
- self.db.set_one = asynctest.Mock()
self.db_vim_accounts = yaml.load(descriptors.db_vim_accounts_text, Loader=yaml.Loader)
self.my_ns.RO.create_action = asynctest.CoroutineMock(self.my_ns.RO.create_action,
return_value={"vm-id": {"vim_result": 200,
"description": "done"}})
+ # self.my_ns.wait_vm_up_insert_key_ro = asynctest.CoroutineMock(return_value="ip-address")
@asynctest.fail_on(active_handles=True) # all async tasks must be completed
async def test_instantiate(self):
+ self.db.set_one = asynctest.Mock()
nsr_id = self.db.get_list("nsrs")[0]["_id"]
nslcmop_id = self.db.get_list("nslcmops")[0]["_id"]
- print("Test instantiate started")
+ # print("Test instantiate started")
# delete deployed information of database
if not getenv("OSMLCMTEST_DB_NOMOCK"):
await self.my_ns.instantiate(nsr_id, nslcmop_id)
- print("instantiate_result: {}".format(self.db.get_one("nslcmops", {"_id": nslcmop_id}).get("detailed-status")))
+ # print("instantiate_result: {}".format(self.db.get_one("nslcmops",
+ # {"_id": nslcmop_id}).get("detailed-status")))
self.msg.aiowrite.assert_called_once_with("ns", "instantiated",
{"nsr_id": nsr_id, "nslcmop_id": nslcmop_id,
# Test _update_suboperation_status()
def test_scale_update_suboperation_status(self):
+ self.db.set_one = asynctest.Mock()
db_nslcmop = self.db.get_list('nslcmops')[0]
op_index = 0
# Force the initial values to be distinct from the updated ones
db_nslcmop, vnf_index, None, None, 'SCALE-RO', RO_nsr_id, RO_scaling_info)
self.assertEqual(op_index_skip_RO, self.my_ns.SUBOPERATION_STATUS_SKIP)
+ async def test_deploy_kdus(self):
+ db_nsr = self.db.get_list("nsrs")[1]
+ db_vnfr = self.db.get_list("vnfrs")[2]
+ db_vnfrs = {"multikdu": db_vnfr}
+ nsr_id = db_nsr["_id"]
+ # nslcmop_id = self.db.get_list("nslcmops")[1]["_id"]
+ logging_text = "KDU"
+ self.my_ns.k8sclusterhelm.install = asynctest.CoroutineMock(return_value="k8s_id")
+ await self.my_ns.deploy_kdus(logging_text, nsr_id, db_nsr, db_vnfrs)
+ db_nsr = self.db.get_list("nsrs")[1]
+ self.assertIn("K8s", db_nsr["_admin"]["deployed"], "K8s entry not created at '_admin.deployed'")
+ self.assertIsInstance(db_nsr["_admin"]["deployed"]["K8s"], list, "K8s entry is not of type list")
+        self.assertEqual(len(db_nsr["_admin"]["deployed"]["K8s"]), 2, "K8s entry does not contain two KDUs")
+        k8s_instance_info = {"kdu-instance": "k8s_id", "k8scluster-uuid": "73d96432-d692-40d2-8440-e0c73aee209c",
+                             "k8scluster-type": "chart",
+                             "kdu-name": "ldap", "kdu-model": "stable/openldap:1.2.1"}
+
+        self.assertEqual(db_nsr["_admin"]["deployed"]["K8s"][0], k8s_instance_info)
+        k8s_instance_info["kdu-name"] = "mongo"
+        k8s_instance_info["kdu-model"] = "stable/mongodb"
+        self.assertEqual(db_nsr["_admin"]["deployed"]["K8s"][1], k8s_instance_info)
+
if __name__ == '__main__':
asynctest.main()
import logging
import logging.handlers
from osm_lcm import ROclient
-from osm_lcm.lcm_utils import LcmException, LcmBase
+from osm_lcm.lcm_utils import LcmException, LcmBase, deep_get
from n2vc.k8s_helm_conn import K8sHelmConnector
from osm_common.dbbase import DbException
from copy import deepcopy
db_k8scluster = self.db.get_one("k8sclusters", {"_id": k8scluster_id})
self.db.encrypt_decrypt_fields(db_k8scluster.get("credentials"), 'decrypt', ['password', 'secret'],
schema_version=db_k8scluster["schema_version"], salt=db_k8scluster["_id"])
- print(db_k8scluster.get("credentials"))
- print("\n\n\n FIN CREDENTIALS")
- print(yaml.safe_dump(db_k8scluster.get("credentials")))
- print("\n\n\n FIN OUTPUT")
- cluster_uuid, uninstall_sw = await self.k8scluster.init_env(yaml.safe_dump(db_k8scluster.
- get("credentials")))
- db_k8scluster_update["cluster-uuid"] = cluster_uuid
- if uninstall_sw:
- db_k8scluster_update["uninstall-sw"] = uninstall_sw
+ # print(db_k8scluster.get("credentials"))
+ # print("\n\n\n FIN CREDENTIALS")
+ # print(yaml.safe_dump(db_k8scluster.get("credentials")))
+ # print("\n\n\n FIN OUTPUT")
+ k8s_hc_id, uninstall_sw = await self.k8scluster.init_env(yaml.safe_dump(db_k8scluster.get("credentials")))
+ db_k8scluster_update["_admin.helm-chart.id"] = k8s_hc_id
+ db_k8scluster_update["_admin.helm-chart.created"] = uninstall_sw
step = "Getting the list of repos"
self.logger.debug(logging_text + step)
task_list = []
db_k8srepo_list = self.db.get_list("k8srepos", {})
for repo in db_k8srepo_list:
- step = "Adding repo {} to cluster: {}".format(repo["name"], cluster_uuid)
+ step = "Adding repo {} to cluster: {}".format(repo["name"], k8s_hc_id)
self.logger.debug(logging_text + step)
- task = asyncio.ensure_future(self.k8scluster.repo_add(cluster_uuid=cluster_uuid,
+ task = asyncio.ensure_future(self.k8scluster.repo_add(cluster_uuid=k8s_hc_id,
name=repo["name"], url=repo["url"],
repo_type="chart"))
task_list.append(task)
if not repo["_admin"].get("cluster-inserted"):
repo["_admin"]["cluster-inserted"] = []
- repo["_admin"]["cluster-inserted"].append(cluster_uuid)
+ repo["_admin"]["cluster-inserted"].append(k8s_hc_id)
self.update_db_2("k8srepos", repo["_id"], repo)
done = None
step = "Getting k8scluster='{}' from db".format(k8scluster_id)
self.logger.debug(logging_text + step)
db_k8scluster = self.db.get_one("k8sclusters", {"_id": k8scluster_id})
- uninstall_sw = db_k8scluster.get("uninstall-sw")
- if uninstall_sw is False or uninstall_sw is None:
- uninstall_sw = False
- cluster_removed = await self.k8scluster.reset(cluster_uuid=db_k8scluster.get("cluster-uuid"),
- uninstall_sw=uninstall_sw)
+ k8s_hc_id = deep_get(db_k8scluster, ("_admin", "helm-chart", "id"))
+ uninstall_sw = deep_get(db_k8scluster, ("_admin", "helm-chart", "created"))
+ cluster_removed = True
+ if k8s_hc_id:
+ uninstall_sw = uninstall_sw or False
+ cluster_removed = await self.k8scluster.reset(cluster_uuid=k8s_hc_id, uninstall_sw=uninstall_sw)
if cluster_removed:
step = "Removing k8scluster='{}' from db".format(k8scluster_id)
# under the License.
##
-pyyaml==3.*
+pyyaml
aiohttp==0.20.2
jinja2
git+https://osm.etsi.org/gerrit/osm/common.git#egg=osm-common
],
install_requires=[
# 'pymongo',
- 'PyYAML>=3.*',
+ 'PyYAML',
'aiohttp==0.20.2',
'osm-common',
'n2vc',