Adding unit tests for Bug 2086 and enabling cover in tox.ini.
author aticig <gulsum.atici@canonical.com>
Mon, 11 Jul 2022 17:28:19 +0000 (20:28 +0300)
committer aticig <gulsum.atici@canonical.com>
Sun, 31 Jul 2022 21:48:34 +0000 (00:48 +0300)
Change-Id: I177da516e99c29433491358aaf129be059d18add
Signed-off-by: aticig <gulsum.atici@canonical.com>
NG-RO/osm_ng_ro/tests/test_ns_thread.py
releasenotes/notes/Add_unit_tests_for_bug2086-b5ab2a0d49494aef.yaml [new file with mode: 0644]
tox.ini

diff --git a/NG-RO/osm_ng_ro/tests/test_ns_thread.py b/NG-RO/osm_ng_ro/tests/test_ns_thread.py
index 9f163a8..fa0352e 100644
--- a/NG-RO/osm_ng_ro/tests/test_ns_thread.py
+++ b/NG-RO/osm_ng_ro/tests/test_ns_thread.py
@@ -19,7 +19,239 @@ import logging
 import unittest
 from unittest.mock import MagicMock, patch
 
-from osm_ng_ro.ns_thread import VimInteractionAffinityGroup
+from osm_common.dbmemory import DbMemory
+from osm_ng_ro.ns_thread import (
+    ConfigValidate,
+    NsWorker,
+    VimInteractionAffinityGroup,
+)
+
+
+class TestConfigValidate(unittest.TestCase):
+    def setUp(self):
+        self.config_dict = {
+            "period": {
+                "refresh_active": 65,
+                "refresh_build": 20,
+                "refresh_image": 3600,
+                "refresh_error": 300,
+                "queue_size": 50,
+            }
+        }
+
+    def test_get_configuration(self):
+        with self.subTest(i=1, t="Get config attributes with config input"):
+            configuration = ConfigValidate(self.config_dict)
+            self.assertEqual(configuration.active, 65)
+            self.assertEqual(configuration.build, 20)
+            self.assertEqual(configuration.image, 3600)
+            self.assertEqual(configuration.error, 300)
+            self.assertEqual(configuration.queue_size, 50)
+
+        with self.subTest(i=2, t="Disallowed refresh_active input"):
+            # Values below 60 (except -1) are not allowed, so the default value 60 is returned
+            self.config_dict["period"]["refresh_active"] = 20
+            configuration = ConfigValidate(self.config_dict)
+            self.assertEqual(configuration.active, 60)
+
+        with self.subTest(i=3, t="Config to disable VM status periodic checks"):
+            # Setting -1 is allowed and disables VM status updates
+            self.config_dict["period"]["refresh_active"] = -1
+            configuration = ConfigValidate(self.config_dict)
+            self.assertEqual(configuration.active, -1)
+
+
+class TestNsWorker(unittest.TestCase):
+    def setUp(self):
+        self.task_depends = None
+        self.plugins = {}
+        self.worker_index = "worker-3"
+        self.config = {
+            "period": {
+                "refresh_active": 60,
+                "refresh_build": 20,
+                "refresh_image": 3600,
+                "refresh_error": 600,
+                "queue_size": 100,
+            },
+            "process_id": "343435353",
+            "global": {"task_locked_time": 16373242100.994312},
+        }
+
+        self.ro_task = {
+            "_id": "122436:1",
+            "locked_by": None,
+            "locked_at": 0.0,
+            "target_id": "vim_openstack_1",
+            "vim_info": {
+                "created": False,
+                "created_items": None,
+                "vim_id": "test-vim-id",
+                "vim_name": "test-vim",
+                "vim_status": "DONE",
+                "vim_details": "",
+                "vim_message": None,
+                "refresh_at": None,
+            },
+            "modified_at": 1637324200.994312,
+            "created_at": 1637324200.994312,
+            "to_check_at": 16373242400.994312,
+            "tasks": [
+                {
+                    "target_id": 0,
+                    "action_id": "123456",
+                    "nsr_id": "654321",
+                    "task_id": "123456:1",
+                    "status": "DONE",
+                    "action": "CREATE",
+                    "item": "test_item",
+                    "target_record": "test_target_record",
+                    "target_record_id": "test_target_record_id",
+                },
+            ],
+        }
+
+    def get_disabled_tasks(self, db, status):
+        db_disabled_tasks = db.get_list(
+            "ro_tasks",
+            q_filter={
+                "tasks.status": status,
+                "to_check_at.lt": 0,
+            },
+        )
+        return db_disabled_tasks
+
+    def test_update_vm_refresh(self):
+        with self.subTest(
+            i=1,
+            t="1 disabled task with status BUILD in DB, refresh_active parameter is not equal to -1",
+        ):
+            # A disabled task with status BUILD will not be enabled again
+            db = DbMemory()
+            self.ro_task["tasks"][0]["status"] = "BUILD"
+            self.ro_task["to_check_at"] = -1
+            db.create("ro_tasks", self.ro_task)
+            disabled_tasks_count = len(self.get_disabled_tasks(db, "BUILD"))
+            instance = NsWorker(self.worker_index, self.config, self.plugins, db)
+            with patch.object(instance, "logger", logging):
+                instance.update_vm_refresh()
+                self.assertEqual(
+                    len(self.get_disabled_tasks(db, "BUILD")), disabled_tasks_count
+                )
+
+        with self.subTest(
+            i=2,
+            t="1 disabled task with status DONE in DB, refresh_active parameter is equal to -1",
+        ):
+            # As the refresh_active parameter is -1, the task will not be enabled again
+            db = DbMemory()
+            self.config["period"]["refresh_active"] = -1
+            self.ro_task["tasks"][0]["status"] = "DONE"
+            self.ro_task["to_check_at"] = -1
+            db.create("ro_tasks", self.ro_task)
+            disabled_tasks_count = len(self.get_disabled_tasks(db, "DONE"))
+            instance = NsWorker(self.worker_index, self.config, self.plugins, db)
+            with patch.object(instance, "logger", logging):
+                instance.update_vm_refresh()
+                self.assertEqual(
+                    len(self.get_disabled_tasks(db, "DONE")), disabled_tasks_count
+                )
+
+        with self.subTest(
+            i=3,
+            t="2 disabled task with status DONE in DB, refresh_active parameter is not equal to -1",
+        ):
+            # Disabled tasks should be enabled for processing again
+            db = DbMemory()
+            self.config["period"]["refresh_active"] = 66
+            self.ro_task["tasks"][0]["status"] = "DONE"
+            self.ro_task["to_check_at"] = -1
+            db.create("ro_tasks", self.ro_task)
+            self.ro_task2 = self.ro_task
+            self.ro_task2["_id"] = "122437:1"
+            db.create("ro_tasks", self.ro_task2)
+            disabled_tasks_count = len(self.get_disabled_tasks(db, "DONE"))
+            instance = NsWorker(self.worker_index, self.config, self.plugins, db)
+            with patch.object(instance, "logger", logging):
+                instance.update_vm_refresh()
+                self.assertEqual(
+                    len(self.get_disabled_tasks(db, "DONE")), disabled_tasks_count - 2
+                )
+
+        with self.subTest(
+            i=4,
+            t="No disabled task with status DONE in DB, refresh_active parameter is not equal to -1",
+        ):
+            # If there are no disabled tasks, the method will not change anything
+            db = DbMemory()
+            self.config["period"]["refresh_active"] = 66
+            self.ro_task["tasks"][0]["status"] = "DONE"
+            self.ro_task["to_check_at"] = 16373242400.994312
+            db.create("ro_tasks", self.ro_task)
+            self.ro_task2 = self.ro_task
+            self.ro_task2["_id"] = "122437:1"
+            db.create("ro_tasks", self.ro_task2)
+            disabled_tasks_count = len(self.get_disabled_tasks(db, "DONE"))
+            instance = NsWorker(self.worker_index, self.config, self.plugins, db)
+            with patch.object(instance, "logger", logging):
+                instance.update_vm_refresh()
+                self.assertEqual(
+                    len(self.get_disabled_tasks(db, "DONE")), disabled_tasks_count
+                )
+
+    def test_process_pending_tasks(self):
+        with self.subTest(
+            i=1,
+            t="refresh_active parameter is equal to -1, task status is DONE",
+        ):
+            # The task should be disabled so that it is not processed again
+            db = DbMemory()
+            self.config["period"]["refresh_active"] = -1
+            self.ro_task["tasks"][0]["status"] = "DONE"
+            self.ro_task["to_check_at"] = 16373242400.994312
+            db.create("ro_tasks", self.ro_task)
+            # Number of disabled tasks in DB
+            disabled_tasks_count = len(self.get_disabled_tasks(db, "DONE"))
+            instance = NsWorker(self.worker_index, self.config, self.plugins, db)
+            with patch.object(instance, "logger", logging):
+                instance._process_pending_tasks(self.ro_task)
+                self.assertEqual(
+                    len(self.get_disabled_tasks(db, "DONE")), disabled_tasks_count + 1
+                )
+
+        with self.subTest(
+            i=2, t="refresh_active parameter is equal to -1, task status is FAILED"
+        ):
+            # The task will not be disabled because its status is not DONE
+            db = DbMemory()
+            self.config["period"]["refresh_active"] = -1
+            self.ro_task["tasks"][0]["status"] = "FAILED"
+            self.ro_task["to_check_at"] = 16373242400.994312
+            db.create("ro_tasks", self.ro_task)
+            disabled_tasks_count = len(self.get_disabled_tasks(db, "FAILED"))
+            instance = NsWorker(self.worker_index, self.config, self.plugins, db)
+            with patch.object(instance, "logger", logging):
+                instance._process_pending_tasks(self.ro_task)
+                self.assertEqual(
+                    len(self.get_disabled_tasks(db, "FAILED")), disabled_tasks_count
+                )
+
+        with self.subTest(
+            i=3, t="refresh_active parameter is not equal to -1, task status is DONE"
+        ):
+            # The task will not be disabled because the refresh_active parameter is not -1
+            db = DbMemory()
+            self.config["period"]["refresh_active"] = 70
+            self.ro_task["tasks"][0]["status"] = "DONE"
+            self.ro_task["to_check_at"] = 16373242400.994312
+            db.create("ro_tasks", self.ro_task)
+            disabled_tasks_count = len(self.get_disabled_tasks(db, "DONE"))
+            instance = NsWorker(self.worker_index, self.config, self.plugins, db)
+            with patch.object(instance, "logger", logging):
+                instance._process_pending_tasks(self.ro_task)
+                self.assertEqual(
+                    len(self.get_disabled_tasks(db, "DONE")), disabled_tasks_count
+                )
 
 
 class TestVimInteractionAffinityGroup(unittest.TestCase):
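
The tests above encode two behaviours: ConfigValidate is expected to fall back to the default refresh period of 60 when refresh_active is set below 60 (with -1 as the explicit "disabled" value), and NsWorker is expected to disable or re-enable a task's periodic VM status refresh through its to_check_at field (-1 meaning "do not refresh"). The sketch below only illustrates that contract as inferred from the assertions; it is not the osm_ng_ro.ns_thread implementation, and the helper names and simplified rules are assumptions.

    # Minimal sketch (assumption, not osm_ng_ro code) of the refresh_active contract.
    DEFAULT_REFRESH_ACTIVE = 60

    def validate_refresh_active(value):
        # -1 disables periodic VM status checks; values below 60 fall back to the default.
        if value == -1:
            return -1
        return value if value >= DEFAULT_REFRESH_ACTIVE else DEFAULT_REFRESH_ACTIVE

    def toggle_refresh(ro_task, refresh_active):
        # Disabled tasks carry to_check_at == -1; only DONE tasks are toggled.
        status = ro_task["tasks"][0]["status"]
        if refresh_active == -1 and status == "DONE":
            ro_task["to_check_at"] = -1                                      # disable further refresh
        elif refresh_active != -1 and status == "DONE" and ro_task["to_check_at"] == -1:
            ro_task["to_check_at"] = ro_task["modified_at"] + refresh_active  # re-enable

    assert validate_refresh_active(65) == 65
    assert validate_refresh_active(20) == 60
    assert validate_refresh_active(-1) == -1

    task = {"tasks": [{"status": "DONE"}], "to_check_at": 100.0, "modified_at": 50.0}
    toggle_refresh(task, -1)
    assert task["to_check_at"] == -1
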
diff --git a/releasenotes/notes/Add_unit_tests_for_bug2086-b5ab2a0d49494aef.yaml b/releasenotes/notes/Add_unit_tests_for_bug2086-b5ab2a0d49494aef.yaml
new file mode 100644
index 0000000..89b48dd
--- /dev/null
+++ b/releasenotes/notes/Add_unit_tests_for_bug2086-b5ab2a0d49494aef.yaml
@@ -0,0 +1,21 @@
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+---
+fixes:
+  - |
+    Adding unit tests for Bug 2086 and enabling cover in tox.ini.
+
diff --git a/tox.ini b/tox.ini
index e7c9c77..bb217f7 100644
--- a/tox.ini
+++ b/tox.ini
@@ -16,7 +16,7 @@
 #######################################################################################
 
 [tox]
-envlist = black, flake8, safety
+envlist = black, flake8, safety, cover
 
 [tox:jenkins]
 toxworkdir = /tmp/.tox
@@ -78,52 +78,52 @@ commands =
         nose2 -C --coverage NG-RO/osm_ng_ro -s NG-RO/osm_ng_ro
         sh -c 'mv .coverage .coverage_ng_ro'
         # RO-plugin
-        nose2 -C --coverage RO-plugin/osm_ro_plugin -s RO-plugin/osm_ro_plugin
-        sh -c 'mv .coverage .coverage_ro_plugin'
+        nose2 -C --coverage RO-plugin/osm_ro_plugin -s RO-plugin/osm_ro_plugin
+        sh -c 'mv .coverage .coverage_ro_plugin'
         # RO-SDN-arista_cloudvision
-        nose2 -C --coverage RO-SDN-arista_cloudvision/osm_rosdn_arista_cloudvision -s RO-SDN-arista_cloudvision/osm_rosdn_arista_cloudvision
-        sh -c 'mv .coverage .coverage_rosdn_arista_cloudvision'
+        nose2 -C --coverage RO-SDN-arista_cloudvision/osm_rosdn_arista_cloudvision -s RO-SDN-arista_cloudvision/osm_rosdn_arista_cloudvision
+        sh -c 'mv .coverage .coverage_rosdn_arista_cloudvision'
         # RO-SDN-dpb
-        nose2 -C --coverage RO-SDN-dpb/osm_rosdn_dpb -s RO-SDN-dpb/osm_rosdn_dpb
-        sh -c 'mv .coverage .coverage_rosdn_dpb'
+        nose2 -C --coverage RO-SDN-dpb/osm_rosdn_dpb -s RO-SDN-dpb/osm_rosdn_dpb
+        sh -c 'mv .coverage .coverage_rosdn_dpb'
         # RO-SDN-dynpac
-        nose2 -C --coverage RO-SDN-dynpac/osm_rosdn_dynpac -s RO-SDN-dynpac/osm_rosdn_dynpac
-        sh -c 'mv .coverage .coverage_rosdn_dynpac'
+        nose2 -C --coverage RO-SDN-dynpac/osm_rosdn_dynpac -s RO-SDN-dynpac/osm_rosdn_dynpac
+        sh -c 'mv .coverage .coverage_rosdn_dynpac'
         # RO-SDN-floodlight_openflow
-        nose2 -C --coverage RO-SDN-floodlight_openflow/osm_rosdn_floodlightof -s RO-SDN-floodlight_openflow/osm_rosdn_floodlightof
-        sh -c 'mv .coverage .coverage_rosdn_floodlightof'
+        nose2 -C --coverage RO-SDN-floodlight_openflow/osm_rosdn_floodlightof -s RO-SDN-floodlight_openflow/osm_rosdn_floodlightof
+        sh -c 'mv .coverage .coverage_rosdn_floodlightof'
         # RO-SDN-ietfl2vpn
-        nose2 -C --coverage RO-SDN-ietfl2vpn/osm_rosdn_ietfl2vpn -s RO-SDN-ietfl2vpn/osm_rosdn_ietfl2vpn
-        sh -c 'mv .coverage .coverage_rosdn_ietfl2vpn'
+        nose2 -C --coverage RO-SDN-ietfl2vpn/osm_rosdn_ietfl2vpn -s RO-SDN-ietfl2vpn/osm_rosdn_ietfl2vpn
+        sh -c 'mv .coverage .coverage_rosdn_ietfl2vpn'
         # RO-SDN-juniper_contrail
-        nose2 -C --coverage RO-SDN-juniper_contrail/osm_rosdn_juniper_contrail -s RO-SDN-juniper_contrail/osm_rosdn_juniper_contrail
-        sh -c 'mv .coverage .coverage_rosdn_juniper_contrail'
+        nose2 -C --coverage RO-SDN-juniper_contrail/osm_rosdn_juniper_contrail -s RO-SDN-juniper_contrail/osm_rosdn_juniper_contrail
+        sh -c 'mv .coverage .coverage_rosdn_juniper_contrail'
         # RO-SDN-odl_openflow
-        nose2 -C --coverage RO-SDN-odl_openflow/osm_rosdn_odlof -s RO-SDN-odl_openflow/osm_rosdn_odlof
-        sh -c 'mv .coverage .coverage_rosdn_odlof'
+        nose2 -C --coverage RO-SDN-odl_openflow/osm_rosdn_odlof -s RO-SDN-odl_openflow/osm_rosdn_odlof
+        sh -c 'mv .coverage .coverage_rosdn_odlof'
         # RO-SDN-onos_openflow
-        nose2 -C --coverage RO-SDN-onos_openflow/osm_rosdn_onosof -s RO-SDN-onos_openflow/osm_rosdn_onosof
-        sh -c 'mv .coverage .coverage_rosdn_onosof'
+        nose2 -C --coverage RO-SDN-onos_openflow/osm_rosdn_onosof -s RO-SDN-onos_openflow/osm_rosdn_onosof
+        sh -c 'mv .coverage .coverage_rosdn_onosof'
         # RO-SDN-onos_vpls
-        nose2 -C --coverage RO-SDN-onos_vpls/osm_rosdn_onos_vpls -s RO-SDN-onos_vpls/osm_rosdn_onos_vpls
-        sh -c 'mv .coverage .coverage_rosdn_onos_vpls'
+        nose2 -C --coverage RO-SDN-onos_vpls/osm_rosdn_onos_vpls -s RO-SDN-onos_vpls/osm_rosdn_onos_vpls
+        sh -c 'mv .coverage .coverage_rosdn_onos_vpls'
         # RO-VIM-aws
-        nose2 -C --coverage RO-VIM-aws/osm_rovim_aws -s RO-VIM-aws/osm_rovim_aws
-        sh -c 'mv .coverage .coverage_rovim_aws'
+        nose2 -C --coverage RO-VIM-aws/osm_rovim_aws -s RO-VIM-aws/osm_rovim_aws
+        sh -c 'mv .coverage .coverage_rovim_aws'
         # RO-VIM-azure
-        nose2 -C --coverage RO-VIM-azure/osm_rovim_azure -s RO-VIM-azure/osm_rovim_azure
-        sh -c 'mv .coverage .coverage_rovim_azure'
+        nose2 -C --coverage RO-VIM-azure/osm_rovim_azure -s RO-VIM-azure/osm_rovim_azure
+        sh -c 'mv .coverage .coverage_rovim_azure'
         # RO-VIM-openstack
         # nose2 -C --coverage RO-VIM-openstack/osm_rovim_openstack -s RO-VIM-openstack/osm_rovim_openstack
         # sh -c 'mv .coverage .coverage_rovim_openstack'
         # RO-VIM-openvim
-        nose2 -C --coverage RO-VIM-openvim/osm_rovim_openvim -s RO-VIM-openvim/osm_rovim_openvim
-        sh -c 'mv .coverage .coverage_rovim_openvim'
+        nose2 -C --coverage RO-VIM-openvim/osm_rovim_openvim -s RO-VIM-openvim/osm_rovim_openvim
+        sh -c 'mv .coverage .coverage_rovim_openvim'
         # RO-VIM-vmware
         # nose2 -C --coverage RO-VIM-vmware/osm_rovim_vmware -s RO-VIM-vmware/osm_rovim_vmware
         # sh -c 'mv .coverage .coverage_rovim_vmware'
         # Combine results and generate reports
-        coverage combine .coverage_ng_ro .coverage_ro_plugin .coverage_rosdn_arista_cloudvision .coverage_rosdn_dpb .coverage_rosdn_dynpac .coverage_rosdn_floodlightof .coverage_rosdn_ietfl2vpn .coverage_rosdn_juniper_contrail .coverage_rosdn_odlof .coverage_rosdn_onos_vpls .coverage_rosdn_onosof .coverage_rovim_aws .coverage_rovim_azure .coverage_rovim_openvim # .coverage_rovim_openstack .coverage_rovim_vmware
+        coverage combine .coverage_ng_ro
         coverage report --omit='*tests*'
         coverage html -d ./cover --omit='*tests*'
         coverage xml -o coverage.xml --omit='*tests*'
@@ -427,7 +427,8 @@ ignore =
         E125,
         E203,
         E226,
-        E241
+        E241,
+        E501,
 exclude =
         .git,
         __pycache__,
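
Usage note on the tox.ini change: because cover is now in envlist, a plain tox run includes the coverage environment, and it can also be invoked on its own (assuming the commands shown above belong to a [testenv:cover] section, which the envlist addition suggests):

    tox -e cover    # run only the coverage environment

Note also that 'coverage combine' is narrowed to .coverage_ng_ro, so the combined report, HTML, and XML output now reflect only the NG-RO unit tests, even though the per-plugin nose2 runs still execute and still produce their own .coverage_* files.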