self.vca_config['public_key'] = self.vca_config['pubkey']
if 'cacert' in self.vca_config:
self.vca_config['ca_cert'] = self.vca_config['cacert']
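+ # expose 'apiproxy' under the underscored key as well, mirroring the 'pubkey' and 'cacert' handling above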
+ if 'apiproxy' in self.vca_config:
+ self.vca_config['api_proxy'] = self.vca_config['apiproxy']
# create N2VC connector
self.n2vc = N2VCJujuConnector(
url='{}:{}'.format(self.vca_config['host'], self.vca_config['port']),
username=self.vca_config.get('user', None),
vca_config=self.vca_config,
- on_update_db=self._on_update_n2vc_db
- # TODO
- # New N2VC argument
- # api_proxy=vca_config.get('apiproxy')
+ on_update_db=self._on_update_n2vc_db,
+ # ca_cert=self.vca_config.get('cacert'),
+ # api_proxy=self.vca_config.get('apiproxy'),
)
self.k8sclusterhelm = K8sHelmConnector(
if is_proxy_charm:
step = "create execution environment"
self.logger.debug(logging_text + step)
- ee_id, credentials = await self.n2vc.create_execution_environment(
- namespace=namespace,
- reuse_ee_id=ee_id,
- db_dict=db_dict
- )
-
+ ee_id, credentials = await self.n2vc.create_execution_environment(namespace=namespace,
+ reuse_ee_id=ee_id,
+ db_dict=db_dict)
else:
- step = "register execution environment"
- # TODO wait until deployed by RO, when IP address has been filled. By pooling????
- credentials = {} # TODO db_credentials["ip_address"]
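+ # native (non-proxy) charm: it runs inside the VNF VM, so wait for the VM and use its management IP as the execution environment host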
+ step = "Waiting to VM being up and getting IP address"
+ self.logger.debug(logging_text + step)
+ rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(logging_text, nsr_id, vnfr_id, vdu_id, vdu_index,
+ user=None, pub_key=None)
+ credentials = {"hostname": rw_mgmt_ip}
# get username
+ username = deep_get(config_descriptor, ("config-access", "ssh-access", "default-user"))
# TODO remove this when the IM changes regarding config-access:ssh-access:default-user are
# merged. Meanwhile, get the username from initial-config-primitive
- if config_descriptor.get("initial-config-primitive"):
- for param in config_descriptor["initial-config-primitive"][0].get("parameter", ()):
- if param["name"] == "ssh-username":
- credentials["username"] = param["value"]
- if config_descriptor.get("config-access") and config_descriptor["config-access"].get("ssh-access"):
- if config_descriptor["config-access"]["ssh-access"].get("required"):
- credentials["username"] = \
- config_descriptor["config-access"]["ssh-access"].get("default-user")
-
+ if not username and config_descriptor.get("initial-config-primitive"):
+ for config_primitive in config_descriptor["initial-config-primitive"]:
+ for param in config_primitive.get("parameter", ()):
+ if param["name"] == "ssh-username":
+ username = param["value"]
+ break
+ if not username:
+ raise LcmException("Cannot determine the username neither with 'initial-config-promitive' nor with "
+ "'config-access.ssh-access.default-user'")
+ credentials["username"] = username
# n2vc_redesign STEP 3.2
+
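+ # register the VM (hostname + username) as the execution environment for the native charm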
+ step = "register execution environment {}".format(credentials)
self.logger.debug(logging_text + step)
- ee_id = await self.n2vc.register_execution_environment(
- credentials=credentials,
- namespace=namespace,
- db_dict=db_dict
- )
+ ee_id = await self.n2vc.register_execution_environment(credentials=credentials, namespace=namespace,
+ db_dict=db_dict)
# for compatibility with MON/POL modules, they need the model and application name in the database
# TODO ask to N2VC instead of assuming the format "model_name.application_name"
db_update_entry + "ee_id": ee_id})
# n2vc_redesign STEP 3.3
- # TODO check if already done
+
step = "Install configuration Software"
+ # TODO check if already done
self.logger.debug(logging_text + step)
- await self.n2vc.install_configuration_sw(
- ee_id=ee_id,
- artifact_path=artifact_path,
- db_dict=db_dict
- )
+ await self.n2vc.install_configuration_sw(ee_id=ee_id, artifact_path=artifact_path, db_dict=db_dict)
# if SSH access is required, then get the execution environment SSH public key
- required = deep_get(config_descriptor, ("config-access", "ssh-access", "required"))
- pub_key = None
- user = None
- if is_proxy_charm and required:
- user = deep_get(config_descriptor, ("config-access", "ssh-access", "default-user"))
- step = "Install configuration Software, getting public ssh key"
- pub_key = await self.n2vc.get_ee_ssh_public__key(
- ee_id=ee_id,
- db_dict=db_dict
- )
+ if is_proxy_charm: # for a native charm we have already waited for the VM to be up
+ pub_key = None
+ user = None
+ if deep_get(config_descriptor, ("config-access", "ssh-access", "required")):
+ # Needed to inject an ssh key
+ user = deep_get(config_descriptor, ("config-access", "ssh-access", "default-user"))
+ step = "Install configuration Software, getting public ssh key"
+ pub_key = await self.n2vc.get_ee_ssh_public__key(ee_id=ee_id, db_dict=db_dict)
+
+ step = "Insert public key into VM"
+ else:
+ step = "Waiting to VM being up and getting IP address"
+ self.logger.debug(logging_text + step)
- step = "Insert public key into VM"
- else:
- step = "Waiting to VM being up and getting IP address"
- self.logger.debug(logging_text + step)
+ # n2vc_redesign STEP 5.1
+ # wait for RO to report the VM ip-address and insert pub_key into the VM
+ rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(logging_text, nsr_id, vnfr_id, vdu_id, vdu_index,
+ user=user, pub_key=pub_key)
- # n2vc_redesign STEP 5.1
- # wait for RO (ip-address) Insert pub_key into VM
- rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
- logging_text=logging_text,
- nsr_id=nsr_id,
- vnfr_id=vnfr_id,
- vdu_id=vdu_id,
- vdu_index=vdu_index,
- user=user,
- pub_key=pub_key
- )
- self.logger.debug(logging_text + ' VM_ip_address={}'.format(rw_mgmt_ip))
+ self.logger.debug(logging_text + ' VM_ip_address={}'.format(rw_mgmt_ip))
# store rw_mgmt_ip in deploy params for later replacement
deploy_params["rw_mgmt_ip"] = rw_mgmt_ip
deploy_params["ns_config_info"] = self._get_ns_config_info(vca_deployed_list)
# TODO check if already done
primitive_params_ = self._map_primitive_params(initial_config_primitive, {}, deploy_params)
+
step = "execute primitive '{}' params '{}'".format(initial_config_primitive["name"], primitive_params_)
self.logger.debug(logging_text + step)
await self.n2vc.exec_primitive(
task_instantiation_list.append(task_kdu)
# n2vc_redesign STEP 1 Get VCA public ssh-key
# feature 1429. Add n2vc public key to needed VMs
- n2vc_key = await self.n2vc.get_public_key()
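+ # get_public_key() is a plain (non-coroutine) method on the new connector, so it is called without await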
+ n2vc_key = self.n2vc.get_public_key()
n2vc_key_list = [n2vc_key]
if self.vca_config.get("public_key"):
n2vc_key_list.append(self.vca_config["public_key"])
# import logging
from os import getenv
from osm_lcm.ns import NsLcm
-from osm_common.dbmongo import DbMongo
+from osm_common.dbmemory import DbMemory
from osm_common.msgkafka import MsgKafka
from osm_common.fslocal import FsLocal
from osm_lcm.lcm_utils import TaskRegistry
from n2vc.vnf import N2VC
+# from n2vc.k8s_helm_conn import K8sHelmConnector
from uuid import uuid4
+from asynctest.mock import patch
from osm_lcm.tests import test_db_descriptors as descriptors
class TestMyNS(asynctest.TestCase):
- def _db_get_one(self, table, q_filter=None, fail_on_empty=True, fail_on_more=True):
- if table not in self.db_content:
- self.assertTrue(False, "db.get_one called with table={}".format(table))
- for db_item in self.db_content[table]:
- if db_item["_id"] == q_filter["_id"]:
- return db_item
- else:
- self.assertTrue(False, "db.get_one, table={}, not found _id={}".format(table, q_filter["_id"]))
-
- def _db_get_list(self, table, q_filter=None):
- if table not in self.db_content:
- self.assertTrue(False, "db.get_list called with table={} not found".format(table))
- return self.db_content[table]
-
- def _db_set_one(self, table, q_filter, update_dict, fail_on_empty=True, unset=None, pull=None, push=None):
- db_item = self._db_get_one(table, q_filter, fail_on_empty=fail_on_empty)
- for k, v in update_dict.items():
- db_nested = db_item
- k_list = k.split(".")
- for k_nested in k_list[0:-1]:
- if isinstance(db_nested, list):
- db_nested = db_nested[int(k_nested)]
- else:
- if k_nested not in db_nested:
- db_nested[k_nested] = {}
- db_nested = db_nested[k_nested]
- k_nested = k_list[-1]
- if isinstance(db_nested, list):
- if int(k_nested) < len(db_nested):
- db_nested[int(k_nested)] = v
- else:
- db_nested.insert(int(k_nested), v)
- else:
- db_nested[k_nested] = v
-
async def _n2vc_DeployCharms(self, model_name, application_name, vnfd, charm_path, params={}, machine_spec={},
callback=None, *callback_args):
if callback:
def _return_uuid(self, *args, **kwargs):
return str(uuid4())
- async def setUp(self):
+ @patch("osm_lcm.ns.K8sHelmConnector")
+ async def setUp(self, k8s_mock):
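+ # K8sHelmConnector is patched so NsLcm can be constructed in tests without a working helm/kubectl setup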
# Mock DB
if not getenv("OSMLCMTEST_DB_NOMOCK"):
- self.db = asynctest.Mock(DbMongo())
- self.db.get_one.side_effect = self._db_get_one
- self.db.get_list.side_effect = self._db_get_list
- self.db.set_one.side_effect = self._db_set_one
- self.db_content = {
- "nsrs": yaml.load(descriptors.db_nsrs_text, Loader=yaml.Loader),
- "nslcmops": yaml.load(descriptors.db_nslcmops_text, Loader=yaml.Loader),
- "vnfrs": yaml.load(descriptors.db_vnfrs_text, Loader=yaml.Loader),
- "vnfds": yaml.load(descriptors.db_vnfds_text, Loader=yaml.Loader),
- "vim_accounts": yaml.load(descriptors.db_vim_accounts_text, Loader=yaml.Loader),
- }
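+ # use DbMemory, the in-memory DB implementation from osm_common, instead of the hand-written get_one/get_list/set_one mocks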
+ self.db = DbMemory()
+ self.db.create_list("vnfds", yaml.load(descriptors.db_vnfds_text, Loader=yaml.Loader))
+ self.db.create_list("nsds", yaml.load(descriptors.db_nsds_text, Loader=yaml.Loader))
+ self.db.create_list("nsrs", yaml.load(descriptors.db_nsrs_text, Loader=yaml.Loader))
+ self.db.create_list("vim_accounts", yaml.load(descriptors.db_vim_accounts_text, Loader=yaml.Loader))
+ self.db.create_list("nslcmops", yaml.load(descriptors.db_nslcmops_text, Loader=yaml.Loader))
+ self.db.create_list("vnfrs", yaml.load(descriptors.db_vnfrs_text, Loader=yaml.Loader))
+ self.db.set_one = asynctest.Mock()
+
self.db_vim_accounts = yaml.load(descriptors.db_vim_accounts_text, Loader=yaml.Loader)
# Mock kafka
self.my_ns.n2vc.get_public_key = asynctest.CoroutineMock(
return_value=getenv("OSMLCM_VCA_PUBKEY", "public_key"))
+ # # Mock VCA - K8s
+ # if not getenv("OSMLCMTEST_VCA_K8s_NOMOCK"):
+ # pub_key = getenv("OSMLCMTEST_NS_PUBKEY", "ssh-rsa test-pub-key t@osm.com")
+ # self.my_ns.k8sclusterhelm = asynctest.Mock(K8sHelmConnector())
+
# Mock RO
if not getenv("OSMLCMTEST_RO_NOMOCK"):
# self.my_ns.RO = asynctest.Mock(ROclient.ROClient(self.loop, **ro_config))
@asynctest.fail_on(active_handles=True) # all async tasks must be completed
async def test_instantiate(self):
- nsr_id = self.db_content["nsrs"][0]["_id"]
- nslcmop_id = self.db_content["nslcmops"][0]["_id"]
+ nsr_id = self.db.get_list("nsrs")[0]["_id"]
+ nslcmop_id = self.db.get_list("nslcmops")[0]["_id"]
print("Test instantiate started")
# delete deployed information from the database
if not getenv("OSMLCMTEST_DB_NOMOCK"):
- if self.db_content["nsrs"][0]["_admin"].get("deployed"):
- del self.db_content["nsrs"][0]["_admin"]["deployed"]
- for db_vnfr in self.db_content["vnfrs"]:
+ if self.db.get_list("nsrs")[0]["_admin"].get("deployed"):
+ del self.db.get_list("nsrs")[0]["_admin"]["deployed"]
+ for db_vnfr in self.db.get_list("vnfrs"):
db_vnfr.pop("ip_address", None)
for db_vdur in db_vnfr["vdur"]:
db_vdur.pop("ip_address", None)
db_vdur.pop("mac_address", None)
if getenv("OSMLCMTEST_RO_VIMID"):
- self.db_content["vim_accounts"][0]["_admin"]["deployed"]["RO"] = getenv("OSMLCMTEST_RO_VIMID")
+ self.db.get_list("vim_accounts")[0]["_admin"]["deployed"]["RO"] = getenv("OSMLCMTEST_RO_VIMID")
if getenv("OSMLCMTEST_RO_VIMID"):
- self.db_content["nsrs"][0]["_admin"]["deployed"]["RO"] = getenv("OSMLCMTEST_RO_VIMID")
+ self.db.get_list("nsrs")[0]["_admin"]["deployed"]["RO"] = getenv("OSMLCMTEST_RO_VIMID")
await self.my_ns.instantiate(nsr_id, nslcmop_id)
- print("instantiate_result: {}".format(self._db_get_one("nslcmops", {"_id": nslcmop_id}).get("detailed-status")))
+ print("instantiate_result: {}".format(self.db.get_one("nslcmops", {"_id": nslcmop_id}).get("detailed-status")))
self.msg.aiowrite.assert_called_once_with("ns", "instantiated",
{"nsr_id": nsr_id, "nslcmop_id": nslcmop_id,
# TODO add a terminate
def test_ns_params_2_RO(self):
- vim = self._db_get_list("vim_accounts")[0]
+ vim = self.db.get_list("vim_accounts")[0]
vim_id = vim["_id"]
ro_vim_id = vim["_admin"]["deployed"]["RO"]
ns_params = {"vimAccountId": vim_id}
# scale-out/scale-in operations with success/error result
# Test scale() with missing 'scaleVnfData', should return operationState = 'FAILED'
- nsr_id = self.db_content["nsrs"][0]["_id"]
- nslcmop_id = self.db_content["nslcmops"][0]["_id"]
+ nsr_id = self.db.get_list("nsrs")[0]["_id"]
+ nslcmop_id = self.db.get_list("nslcmops")[0]["_id"]
await self.my_ns.scale(nsr_id, nslcmop_id)
expected_value = 'FAILED'
- return_value = self._db_get_one("nslcmops", {"_id": nslcmop_id}).get("operationState")
+ return_value = self.db.get_one("nslcmops", {"_id": nslcmop_id}).get("operationState")
self.assertEqual(return_value, expected_value)
- # print("scale_result: {}".format(self._db_get_one("nslcmops", {"_id": nslcmop_id}).get("detailed-status")))
+ # print("scale_result: {}".format(self.db.get_one("nslcmops", {"_id": nslcmop_id}).get("detailed-status")))
# Test _reintent_or_skip_suboperation()
# Expected result:
# - if marked as anything but 'COMPLETED', the suboperation index is expected
def test_scale_reintent_or_skip_suboperation(self):
- # Load an alternative 'nslcmops' YAML for this test
+ # Use the 'nslcmops' record loaded into the mock DB
- self.db_content['nslcmops'] = yaml.load(descriptors.db_nslcmops_scale_text, Loader=yaml.Loader)
- db_nslcmop = self.db_content['nslcmops'][0]
+ db_nslcmop = self.db.get_list('nslcmops')[0]
op_index = 2
# Test when 'operationState' is 'COMPLETED'
db_nslcmop['_admin']['operations'][op_index]['operationState'] = 'COMPLETED'
# Expected result: index of the found sub-operation, or SUBOPERATION_STATUS_NOT_FOUND if not found
def test_scale_find_suboperation(self):
- # Load an alternative 'nslcmops' YAML for this test
+ # Use the 'nslcmops' record loaded into the mock DB
- self.db_content['nslcmops'] = yaml.load(descriptors.db_nslcmops_scale_text, Loader=yaml.Loader)
- db_nslcmop = self.db_content['nslcmops'][0]
+ db_nslcmop = self.db.get_list('nslcmops')[0]
# Find this sub-operation
op_index = 2
vnf_index = db_nslcmop['_admin']['operations'][op_index]['member_vnf_index']
# Test _update_suboperation_status()
def test_scale_update_suboperation_status(self):
- db_nslcmop = self.db_content['nslcmops'][0]
+ db_nslcmop = self.db.get_list('nslcmops')[0]
op_index = 0
- # Force the initial values to be distinct from the updated ones
+ # Build the filter used to locate the suboperation's nslcmop record
- db_nslcmop['_admin']['operations'][op_index]['operationState'] = 'PROCESSING'
- db_nslcmop['_admin']['operations'][op_index]['detailed-status'] = 'In progress'
+ q_filter = {"_id": db_nslcmop["_id"]}
# Test to change 'operationState' and 'detailed-status'
operationState = 'COMPLETED'
detailed_status = 'Done'
- self.my_ns._update_suboperation_status(
- db_nslcmop, op_index, operationState, detailed_status)
- operationState_new = db_nslcmop['_admin']['operations'][op_index]['operationState']
- detailed_status_new = db_nslcmop['_admin']['operations'][op_index]['detailed-status']
- # print("DEBUG: operationState_new={}, detailed_status_new={}".format(operationState_new, detailed_status_new))
- self.assertEqual(operationState, operationState_new)
- self.assertEqual(detailed_status, detailed_status_new)
-
- # Test _add_suboperation()
+ expected_update_dict = {'_admin.operations.0.operationState': operationState,
+ '_admin.operations.0.detailed-status': detailed_status,
+ }
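+ # _update_suboperation_status persists via db.set_one; assert on the call arguments rather than re-reading the record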
+ self.my_ns._update_suboperation_status(db_nslcmop, op_index, operationState, detailed_status)
+ self.db.set_one.assert_called_once_with("nslcmops", q_filter=q_filter, update_dict=expected_update_dict,
+ fail_on_empty=False)
+
def test_scale_add_suboperation(self):
- db_nslcmop = self.db_content['nslcmops'][0]
+ db_nslcmop = self.db.get_list('nslcmops')[0]
vnf_index = '1'
num_ops_before = len(db_nslcmop.get('_admin', {}).get('operations', [])) - 1
vdu_id = None
# - op_index (non-negative number): This is an existing sub-operation, operationState != 'COMPLETED'
# - SUBOPERATION_STATUS_SKIP: This is an existing sub-operation, operationState == 'COMPLETED'
def test_scale_check_or_add_scale_suboperation(self):
- db_nslcmop = self.db_content['nslcmops'][0]
+ db_nslcmop = self.db.get_list('nslcmops')[0]
operationType = 'PRE-SCALE'
vnf_index = '1'
primitive = 'touch'