Fix vca_config parameter for N2VC.
author    tierno <alfonso.tiernosepulveda@telefonica.com>
Wed, 27 Nov 2019 15:46:57 +0000 (15:46 +0000)
committer tierno <alfonso.tiernosepulveda@telefonica.com>
Thu, 28 Nov 2019 10:44:51 +0000 (10:44 +0000)
Fix unit tests.
Provide hostname/username for native charms.

Change-Id: I19f740a4053b9f1aa738f373450a907e7e7d9078
Signed-off-by: tierno <alfonso.tiernosepulveda@telefonica.com>
devops-stages/stage-test.sh
osm_lcm/ns.py
osm_lcm/tests/test_ns.py
tox.ini

diff --git a/devops-stages/stage-test.sh b/devops-stages/stage-test.sh
index 56433dc..e637200 100755 (executable)
--- a/devops-stages/stage-test.sh
+++ b/devops-stages/stage-test.sh
@@ -14,5 +14,4 @@
 # under the License.
 ##
 
-tox -e flake8
-# tox -e unittest
+tox   # flake8 unittest
diff --git a/osm_lcm/ns.py b/osm_lcm/ns.py
index c448823..a5a6730 100644 (file)
--- a/osm_lcm/ns.py
+++ b/osm_lcm/ns.py
@@ -114,6 +114,8 @@ class NsLcm(LcmBase):
             self.vca_config['public_key'] = self.vca_config['pubkey']
         if 'cacert' in self.vca_config:
             self.vca_config['ca_cert'] = self.vca_config['cacert']
+        if 'apiproxy' in self.vca_config:
+            self.vca_config['api_proxy'] = self.vca_config['apiproxy']
 
         # create N2VC connector
         self.n2vc = N2VCJujuConnector(
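
The hunk above extends the aliasing of legacy vca_config keys ('pubkey', 'cacert', and now 'apiproxy') to the names N2VC expects. A minimal sketch of that normalization in isolation (the helper name normalize_vca_config is hypothetical, not part of osm_lcm):

    # Key pairs taken from the hunk above; the helper itself is illustrative only.
    LEGACY_VCA_KEYS = {"pubkey": "public_key", "cacert": "ca_cert", "apiproxy": "api_proxy"}

    def normalize_vca_config(vca_config: dict) -> dict:
        for old_key, new_key in LEGACY_VCA_KEYS.items():
            if old_key in vca_config:
                vca_config[new_key] = vca_config[old_key]
        return vca_config

    # normalize_vca_config({"apiproxy": "10.0.0.1"}) adds "api_proxy": "10.0.0.1"
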
@@ -124,10 +126,9 @@ class NsLcm(LcmBase):
             url='{}:{}'.format(self.vca_config['host'], self.vca_config['port']),
             username=self.vca_config.get('user', None),
             vca_config=self.vca_config,
-            on_update_db=self._on_update_n2vc_db
-            # TODO
-            # New N2VC argument
-            # api_proxy=vca_config.get('apiproxy')
+            on_update_db=self._on_update_n2vc_db,
+            # ca_cert=self.vca_config.get('cacert'),
+            # api_proxy=self.vca_config.get('apiproxy'),
         )
 
         self.k8sclusterhelm = K8sHelmConnector(
@@ -1001,35 +1002,35 @@ class NsLcm(LcmBase):
             if is_proxy_charm:
                 step = "create execution environment"
                 self.logger.debug(logging_text + step)
-                ee_id, credentials = await self.n2vc.create_execution_environment(
-                    namespace=namespace,
-                    reuse_ee_id=ee_id,
-                    db_dict=db_dict
-                )
-
+                ee_id, credentials = await self.n2vc.create_execution_environment(namespace=namespace,
+                                                                                  reuse_ee_id=ee_id,
+                                                                                  db_dict=db_dict)
             else:
-                step = "register execution environment"
-                # TODO wait until deployed by RO, when IP address has been filled. By pooling????
-                credentials = {}   # TODO db_credentials["ip_address"]
+                step = "Waiting to VM being up and getting IP address"
+                self.logger.debug(logging_text + step)
+                rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(logging_text, nsr_id, vnfr_id, vdu_id, vdu_index,
+                                                                 user=None, pub_key=None)
+                credentials = {"hostname": rw_mgmt_ip}
                 # get username
+                username = deep_get(config_descriptor, ("config-access", "ssh-access", "default-user"))
                 # TODO remove this when the IM changes regarding config-access:ssh-access:default-user are
                 #  merged. Meanwhile let's get the username from initial-config-primitive
-                if config_descriptor.get("initial-config-primitive"):
-                    for param in config_descriptor["initial-config-primitive"][0].get("parameter", ()):
-                        if param["name"] == "ssh-username":
-                            credentials["username"] = param["value"]
-                if config_descriptor.get("config-access") and config_descriptor["config-access"].get("ssh-access"):
-                    if config_descriptor["config-access"]["ssh-access"].get("required"):
-                        credentials["username"] = \
-                            config_descriptor["config-access"]["ssh-access"].get("default-user")
-
+                if not username and config_descriptor.get("initial-config-primitive"):
+                    for config_primitive in config_descriptor["initial-config-primitive"]:
+                        for param in config_primitive.get("parameter", ()):
+                            if param["name"] == "ssh-username":
+                                username = param["value"]
+                                break
+                if not username:
+                    raise LcmException("Cannot determine the username neither with 'initial-config-promitive' nor with "
+                                       "'config-access.ssh-access.default-user'")
+                credentials["username"] = username
                 # n2vc_redesign STEP 3.2
+
+                step = "register execution environment {}".format(credentials)
                 self.logger.debug(logging_text + step)
-                ee_id = await self.n2vc.register_execution_environment(
-                    credentials=credentials,
-                    namespace=namespace,
-                    db_dict=db_dict
-                )
+                ee_id = await self.n2vc.register_execution_environment(credentials=credentials, namespace=namespace,
+                                                                       db_dict=db_dict)
 
             # for compatibility with MON/POL modules, they need the model and application name in the database
             # TODO ask N2VC instead of assuming the format "model_name.application_name"
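
For native charms, credentials must now carry a hostname and username before register_execution_environment is called. A self-contained sketch of the lookup order implemented in the hunk above — config-access:ssh-access:default-user first, then the legacy 'ssh-username' parameter of initial-config-primitive (deep_get here is a minimal stand-in for the osm_lcm.lcm_utils helper):

    def deep_get(target, key_list):
        # minimal stand-in for osm_lcm.lcm_utils.deep_get
        for key in key_list:
            if not isinstance(target, dict) or key not in target:
                return None
            target = target[key]
        return target

    def resolve_ssh_username(config_descriptor):
        username = deep_get(config_descriptor, ("config-access", "ssh-access", "default-user"))
        if not username:
            for primitive in config_descriptor.get("initial-config-primitive", ()):
                for param in primitive.get("parameter", ()):
                    if param["name"] == "ssh-username":
                        return param["value"]
        if not username:
            raise ValueError("cannot determine ssh username from the descriptor")
        return username
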
@@ -1041,44 +1042,33 @@ class NsLcm(LcmBase):
                                               db_update_entry + "ee_id": ee_id})
 
             # n2vc_redesign STEP 3.3
-            # TODO check if already done
+
             step = "Install configuration Software"
+            # TODO check if already done
             self.logger.debug(logging_text + step)
-            await self.n2vc.install_configuration_sw(
-                ee_id=ee_id,
-                artifact_path=artifact_path,
-                db_dict=db_dict
-            )
+            await self.n2vc.install_configuration_sw(ee_id=ee_id, artifact_path=artifact_path, db_dict=db_dict)
 
             # if SSH access is required, then get execution environment SSH public
-            required = deep_get(config_descriptor, ("config-access", "ssh-access", "required"))
-            pub_key = None
-            user = None
-            if is_proxy_charm and required:
-                user = deep_get(config_descriptor, ("config-access", "ssh-access", "default-user"))
-                step = "Install configuration Software, getting public ssh key"
-                pub_key = await self.n2vc.get_ee_ssh_public__key(
-                    ee_id=ee_id,
-                    db_dict=db_dict
-                )
+            if is_proxy_charm:  # for native charms we have already waited for the VM to be up
+                pub_key = None
+                user = None
+                if deep_get(config_descriptor, ("config-access", "ssh-access", "required")):
+                    # Needed to inject an ssh key
+                    user = deep_get(config_descriptor, ("config-access", "ssh-access", "default-user"))
+                    step = "Install configuration Software, getting public ssh key"
+                    pub_key = await self.n2vc.get_ee_ssh_public__key(ee_id=ee_id, db_dict=db_dict)
+
+                    step = "Insert public key into VM"
+                else:
+                    step = "Waiting to VM being up and getting IP address"
+                self.logger.debug(logging_text + step)
 
-                step = "Insert public key into VM"
-            else:
-                step = "Waiting to VM being up and getting IP address"
-            self.logger.debug(logging_text + step)
+                # n2vc_redesign STEP 5.1
+                # wait for RO (ip-address) Insert pub_key into VM
+                rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(logging_text, nsr_id, vnfr_id, vdu_id, vdu_index,
+                                                                 user=user, pub_key=pub_key)
 
-            # n2vc_redesign STEP 5.1
-            # wait for RO (ip-address) Insert pub_key into VM
-            rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
-                logging_text=logging_text,
-                nsr_id=nsr_id,
-                vnfr_id=vnfr_id,
-                vdu_id=vdu_id,
-                vdu_index=vdu_index,
-                user=user,
-                pub_key=pub_key
-            )
-            self.logger.debug(logging_text + ' VM_ip_address={}'.format(rw_mgmt_ip))
+                self.logger.debug(logging_text + ' VM_ip_address={}'.format(rw_mgmt_ip))
 
             # store rw_mgmt_ip in deploy params for later replacement
             deploy_params["rw_mgmt_ip"] = rw_mgmt_ip
@@ -1103,6 +1093,7 @@ class NsLcm(LcmBase):
                     deploy_params["ns_config_info"] = self._get_ns_config_info(vca_deployed_list)
                 # TODO check if already done
                 primitive_params_ = self._map_primitive_params(initial_config_primitive, {}, deploy_params)
+
                 step = "execute primitive '{}' params '{}'".format(initial_config_primitive["name"], primitive_params_)
                 self.logger.debug(logging_text + step)
                 await self.n2vc.exec_primitive(
@@ -1248,7 +1239,7 @@ class NsLcm(LcmBase):
             task_instantiation_list.append(task_kdu)
             # n2vc_redesign STEP 1 Get VCA public ssh-key
             # feature 1429. Add n2vc public key to needed VMs
-            n2vc_key = await self.n2vc.get_public_key()
+            n2vc_key = self.n2vc.get_public_key()
             n2vc_key_list = [n2vc_key]
             if self.vca_config.get("public_key"):
                 n2vc_key_list.append(self.vca_config["public_key"])
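
The last ns.py hunk drops the await: in this N2VC version get_public_key is a plain synchronous method, so awaiting its (string) return value would fail at runtime. A toy illustration (FakeN2VC is invented for the example; connector internals may differ):

    class FakeN2VC:
        # stand-in for the now-synchronous accessor
        def get_public_key(self):
            return "ssh-rsa AAAA... n2vc"

    n2vc_key = FakeN2VC().get_public_key()   # correct: no await
    # await FakeN2VC().get_public_key()      # TypeError: str can't be awaited
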
diff --git a/osm_lcm/tests/test_ns.py b/osm_lcm/tests/test_ns.py
index 219724c..9ad899d 100644 (file)
--- a/osm_lcm/tests/test_ns.py
+++ b/osm_lcm/tests/test_ns.py
@@ -22,12 +22,14 @@ import yaml
 # import logging
 from os import getenv
 from osm_lcm.ns import NsLcm
-from osm_common.dbmongo import DbMongo
+from osm_common.dbmemory import DbMemory
 from osm_common.msgkafka import MsgKafka
 from osm_common.fslocal import FsLocal
 from osm_lcm.lcm_utils import TaskRegistry
 from n2vc.vnf import N2VC
+# from n2vc.k8s_helm_conn import K8sHelmConnector
 from uuid import uuid4
+from asynctest.mock import patch
 
 from osm_lcm.tests import test_db_descriptors as descriptors
 
@@ -69,41 +71,6 @@ ro_config = {
 
 class TestMyNS(asynctest.TestCase):
 
-    def _db_get_one(self, table, q_filter=None, fail_on_empty=True, fail_on_more=True):
-        if table not in self.db_content:
-            self.assertTrue(False, "db.get_one called with table={}".format(table))
-        for db_item in self.db_content[table]:
-            if db_item["_id"] == q_filter["_id"]:
-                return db_item
-        else:
-            self.assertTrue(False, "db.get_one, table={}, not found _id={}".format(table, q_filter["_id"]))
-
-    def _db_get_list(self, table, q_filter=None):
-        if table not in self.db_content:
-            self.assertTrue(False, "db.get_list called with table={} not found".format(table))
-        return self.db_content[table]
-
-    def _db_set_one(self, table, q_filter, update_dict, fail_on_empty=True, unset=None, pull=None, push=None):
-        db_item = self._db_get_one(table, q_filter, fail_on_empty=fail_on_empty)
-        for k, v in update_dict.items():
-            db_nested = db_item
-            k_list = k.split(".")
-            for k_nested in k_list[0:-1]:
-                if isinstance(db_nested, list):
-                    db_nested = db_nested[int(k_nested)]
-                else:
-                    if k_nested not in db_nested:
-                        db_nested[k_nested] = {}
-                    db_nested = db_nested[k_nested]
-            k_nested = k_list[-1]
-            if isinstance(db_nested, list):
-                if int(k_nested) < len(db_nested):
-                    db_nested[int(k_nested)] = v
-                else:
-                    db_nested.insert(int(k_nested), v)
-            else:
-                db_nested[k_nested] = v
-
     async def _n2vc_DeployCharms(self, model_name, application_name, vnfd, charm_path, params={}, machine_spec={},
                                  callback=None, *callback_args):
         if callback:
@@ -165,20 +132,19 @@ class TestMyNS(asynctest.TestCase):
     def _return_uuid(self, *args, **kwargs):
         return str(uuid4())
 
-    async def setUp(self):
+    @patch("osm_lcm.ns.K8sHelmConnector")
+    async def setUp(self, k8s_mock):
         # Mock DB
         if not getenv("OSMLCMTEST_DB_NOMOCK"):
-            self.db = asynctest.Mock(DbMongo())
-            self.db.get_one.side_effect = self._db_get_one
-            self.db.get_list.side_effect = self._db_get_list
-            self.db.set_one.side_effect = self._db_set_one
-            self.db_content = {
-                "nsrs": yaml.load(descriptors.db_nsrs_text, Loader=yaml.Loader),
-                "nslcmops": yaml.load(descriptors.db_nslcmops_text, Loader=yaml.Loader),
-                "vnfrs": yaml.load(descriptors.db_vnfrs_text, Loader=yaml.Loader),
-                "vnfds": yaml.load(descriptors.db_vnfds_text, Loader=yaml.Loader),
-                "vim_accounts": yaml.load(descriptors.db_vim_accounts_text, Loader=yaml.Loader),
-            }
+            self.db = DbMemory()
+            self.db.create_list("vnfds", yaml.load(descriptors.db_vnfds_text, Loader=yaml.Loader))
+            self.db.create_list("nsds", yaml.load(descriptors.db_nsds_text, Loader=yaml.Loader))
+            self.db.create_list("nsrs", yaml.load(descriptors.db_nsrs_text, Loader=yaml.Loader))
+            self.db.create_list("vim_accounts", yaml.load(descriptors.db_vim_accounts_text, Loader=yaml.Loader))
+            self.db.create_list("nslcmops", yaml.load(descriptors.db_nslcmops_text, Loader=yaml.Loader))
+            self.db.create_list("vnfrs", yaml.load(descriptors.db_vnfrs_text, Loader=yaml.Loader))
+            self.db.set_one = asynctest.Mock()
+
             self.db_vim_accounts = yaml.load(descriptors.db_vim_accounts_text, Loader=yaml.Loader)
 
         # Mock kafka
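
setUp now seeds a real in-memory database instead of hand-rolled get_one/get_list/set_one side effects, which is why the _db_get_one/_db_get_list/_db_set_one helpers were removed earlier in this file. A minimal sketch of the DbMemory pattern, limited to the API this hunk relies on (create_list/get_one; set_one is separately replaced by a Mock):

    from osm_common.dbmemory import DbMemory

    db = DbMemory()
    db.create_list("nslcmops", [{"_id": "op-1", "operationState": None}])
    assert db.get_one("nslcmops", {"_id": "op-1"})["_id"] == "op-1"
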
@@ -224,6 +190,11 @@ class TestMyNS(asynctest.TestCase):
             self.my_ns.n2vc.get_public_key = asynctest.CoroutineMock(
                 return_value=getenv("OSMLCM_VCA_PUBKEY", "public_key"))
 
+        # # Mock VCA - K8s
+        # if not getenv("OSMLCMTEST_VCA_K8s_NOMOCK"):
+        #     pub_key = getenv("OSMLCMTEST_NS_PUBKEY", "ssh-rsa test-pub-key t@osm.com")
+        #     self.my_ns.k8sclusterhelm = asynctest.Mock(K8sHelmConnector())
+
         # Mock RO
         if not getenv("OSMLCMTEST_RO_NOMOCK"):
             # self.my_ns.RO = asynctest.Mock(ROclient.ROClient(self.loop, **ro_config))
@@ -237,27 +208,27 @@ class TestMyNS(asynctest.TestCase):
 
     @asynctest.fail_on(active_handles=True)   # all async tasks must be completed
     async def test_instantiate(self):
-        nsr_id = self.db_content["nsrs"][0]["_id"]
-        nslcmop_id = self.db_content["nslcmops"][0]["_id"]
+        nsr_id = self.db.get_list("nsrs")[0]["_id"]
+        nslcmop_id = self.db.get_list("nslcmops")[0]["_id"]
         print("Test instantiate started")
 
         # delete deployed information of database
         if not getenv("OSMLCMTEST_DB_NOMOCK"):
-            if self.db_content["nsrs"][0]["_admin"].get("deployed"):
-                del self.db_content["nsrs"][0]["_admin"]["deployed"]
-            for db_vnfr in self.db_content["vnfrs"]:
+            if self.db.get_list("nsrs")[0]["_admin"].get("deployed"):
+                del self.db.get_list("nsrs")[0]["_admin"]["deployed"]
+            for db_vnfr in self.db.get_list("vnfrs"):
                 db_vnfr.pop("ip_address", None)
                 for db_vdur in db_vnfr["vdur"]:
                     db_vdur.pop("ip_address", None)
                     db_vdur.pop("mac_address", None)
             if getenv("OSMLCMTEST_RO_VIMID"):
-                self.db_content["vim_accounts"][0]["_admin"]["deployed"]["RO"] = getenv("OSMLCMTEST_RO_VIMID")
+                self.db.get_list("vim_accounts")[0]["_admin"]["deployed"]["RO"] = getenv("OSMLCMTEST_RO_VIMID")
             if getenv("OSMLCMTEST_RO_VIMID"):
-                self.db_content["nsrs"][0]["_admin"]["deployed"]["RO"] = getenv("OSMLCMTEST_RO_VIMID")
+                self.db.get_list("nsrs")[0]["_admin"]["deployed"]["RO"] = getenv("OSMLCMTEST_RO_VIMID")
 
         await self.my_ns.instantiate(nsr_id, nslcmop_id)
 
-        print("instantiate_result: {}".format(self._db_get_one("nslcmops", {"_id": nslcmop_id}).get("detailed-status")))
+        print("instantiate_result: {}".format(self.db.get_one("nslcmops", {"_id": nslcmop_id}).get("detailed-status")))
 
         self.msg.aiowrite.assert_called_once_with("ns", "instantiated",
                                                   {"nsr_id": nsr_id, "nslcmop_id": nslcmop_id,
@@ -276,7 +247,7 @@ class TestMyNS(asynctest.TestCase):
         # TODO add a terminate
 
     def test_ns_params_2_RO(self):
-        vim = self._db_get_list("vim_accounts")[0]
+        vim = self.db.get_list("vim_accounts")[0]
         vim_id = vim["_id"]
         ro_vim_id = vim["_admin"]["deployed"]["RO"]
         ns_params = {"vimAccountId": vim_id}
@@ -310,13 +281,13 @@ class TestMyNS(asynctest.TestCase):
         # scale-out/scale-in operations with success/error result
 
         # Test scale() with missing 'scaleVnfData', should return operationState = 'FAILED'
-        nsr_id = self.db_content["nsrs"][0]["_id"]
-        nslcmop_id = self.db_content["nslcmops"][0]["_id"]
+        nsr_id = self.db.get_list("nsrs")[0]["_id"]
+        nslcmop_id = self.db.get_list("nslcmops")[0]["_id"]
         await self.my_ns.scale(nsr_id, nslcmop_id)
         expected_value = 'FAILED'
-        return_value = self._db_get_one("nslcmops", {"_id": nslcmop_id}).get("operationState")
+        return_value = self.db.get_one("nslcmops", {"_id": nslcmop_id}).get("operationState")
         self.assertEqual(return_value, expected_value)
-        # print("scale_result: {}".format(self._db_get_one("nslcmops", {"_id": nslcmop_id}).get("detailed-status")))
+        # print("scale_result: {}".format(self.db.get_one("nslcmops", {"_id": nslcmop_id}).get("detailed-status")))
 
     # Test _reintent_or_skip_suboperation()
     # Expected result:
@@ -324,8 +295,7 @@ class TestMyNS(asynctest.TestCase):
     # - if marked as anything but 'COMPLETED', the suboperation index is expected
     def test_scale_reintent_or_skip_suboperation(self):
         # Load an alternative 'nslcmops' YAML for this test
-        self.db_content['nslcmops'] = yaml.load(descriptors.db_nslcmops_scale_text, Loader=yaml.Loader)
-        db_nslcmop = self.db_content['nslcmops'][0]
+        db_nslcmop = self.db.get_list('nslcmops')[0]
         op_index = 2
         # Test when 'operationState' is 'COMPLETED'
         db_nslcmop['_admin']['operations'][op_index]['operationState'] = 'COMPLETED'
@@ -342,8 +312,7 @@ class TestMyNS(asynctest.TestCase):
     # Expected result: index of the found sub-operation, or SUBOPERATION_STATUS_NOT_FOUND if not found
     def test_scale_find_suboperation(self):
         # Load an alternative 'nslcmops' YAML for this test
-        self.db_content['nslcmops'] = yaml.load(descriptors.db_nslcmops_scale_text, Loader=yaml.Loader)
-        db_nslcmop = self.db_content['nslcmops'][0]
+        db_nslcmop = self.db.get_list('nslcmops')[0]
         # Find this sub-operation
         op_index = 2
         vnf_index = db_nslcmop['_admin']['operations'][op_index]['member_vnf_index']
@@ -371,25 +340,22 @@ class TestMyNS(asynctest.TestCase):
 
     # Test _update_suboperation_status()
     def test_scale_update_suboperation_status(self):
-        db_nslcmop = self.db_content['nslcmops'][0]
+        db_nslcmop = self.db.get_list('nslcmops')[0]
         op_index = 0
         # Force the initial values to be distinct from the updated ones
-        db_nslcmop['_admin']['operations'][op_index]['operationState'] = 'PROCESSING'
-        db_nslcmop['_admin']['operations'][op_index]['detailed-status'] = 'In progress'
+        q_filter = {"_id": db_nslcmop["_id"]}
         # Test to change 'operationState' and 'detailed-status'
         operationState = 'COMPLETED'
         detailed_status = 'Done'
-        self.my_ns._update_suboperation_status(
-            db_nslcmop, op_index, operationState, detailed_status)
-        operationState_new = db_nslcmop['_admin']['operations'][op_index]['operationState']
-        detailed_status_new = db_nslcmop['_admin']['operations'][op_index]['detailed-status']
-        # print("DEBUG: operationState_new={}, detailed_status_new={}".format(operationState_new, detailed_status_new))
-        self.assertEqual(operationState, operationState_new)
-        self.assertEqual(detailed_status, detailed_status_new)
-
-    # Test _add_suboperation()
+        expected_update_dict = {'_admin.operations.0.operationState': operationState,
+                                '_admin.operations.0.detailed-status': detailed_status,
+                                }
+        self.my_ns._update_suboperation_status(db_nslcmop, op_index, operationState, detailed_status)
+        self.db.set_one.assert_called_once_with("nslcmops", q_filter=q_filter, update_dict=expected_update_dict,
+                                                fail_on_empty=False)
+
     def test_scale_add_suboperation(self):
-        db_nslcmop = self.db_content['nslcmops'][0]
+        db_nslcmop = self.db.get_list('nslcmops')[0]
         vnf_index = '1'
         num_ops_before = len(db_nslcmop.get('_admin', {}).get('operations', [])) - 1
         vdu_id = None
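
Because set_one is mocked in setUp, _update_suboperation_status is now verified through the MongoDB-style dot-notation update dict it sends, rather than by re-reading mutated state. A sketch of that dot-path convention (the helper below is hypothetical, mirroring expected_update_dict above):

    def to_update_dict(op_index, operation_state, detailed_status):
        # builds the dot-notation paths asserted against db.set_one above
        prefix = "_admin.operations.{}.".format(op_index)
        return {prefix + "operationState": operation_state,
                prefix + "detailed-status": detailed_status}

    # to_update_dict(0, "COMPLETED", "Done")
    # -> {"_admin.operations.0.operationState": "COMPLETED",
    #     "_admin.operations.0.detailed-status": "Done"}
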
@@ -440,7 +406,7 @@ class TestMyNS(asynctest.TestCase):
     # - op_index (non-negative number): This is an existing sub-operation, operationState != 'COMPLETED'
     # - SUBOPERATION_STATUS_SKIP: This is an existing sub-operation, operationState == 'COMPLETED'
     def test_scale_check_or_add_scale_suboperation(self):
-        db_nslcmop = self.db_content['nslcmops'][0]
+        db_nslcmop = self.db.get_list('nslcmops')[0]
         operationType = 'PRE-SCALE'
         vnf_index = '1'
         primitive = 'touch'
diff --git a/tox.ini b/tox.ini
index 5896bfc..de33e96 100644 (file)
--- a/tox.ini
+++ b/tox.ini
@@ -14,7 +14,7 @@
 # limitations under the License.
 
 [tox]
-envlist = py3
+envlist = flake8, unittest
 toxworkdir={homedir}/.tox
 
 [testenv]