Merge remote-tracking branch 'origin/master' into paas

Change-Id: I2136af33df17a3ca1e7d8db426c1ec74feef07ff
Signed-off-by: Mark Beierl <mark.beierl@canonical.com>
diff --git a/NG-RO/osm_ng_ro/ns.py b/NG-RO/osm_ng_ro/ns.py
index 19ff791..4e88cc4 100644
--- a/NG-RO/osm_ng_ro/ns.py
+++ b/NG-RO/osm_ng_ro/ns.py
@@ -722,9 +722,12 @@
             guest_epa_quota.get("cpu-pinning-policy") == "DEDICATED"
             and not epa_vcpu_set
         ):
+            # Pinning policy "REQUIRE" uses threads as host should support SMT architecture
+            # Pinning policy "ISOLATE" uses cores as host should not support SMT architecture
+            # Pinning policy "PREFER" uses threads in case host supports SMT architecture
             numa[
                 "cores"
-                if guest_epa_quota.get("cpu-thread-pinning-policy") != "PREFER"
+                if guest_epa_quota.get("cpu-thread-pinning-policy") == "ISOLATE"
                 else "threads"
             ] = max(vcpu_count, 1)
             local_epa_vcpu_set = True
@@ -1009,12 +1012,10 @@
                     == "persistent-storage:persistent-storage"
                 ):
                     for vdu_volume in vdu_instantiation_volumes_list:
-
                         if (
                             vdu_volume["vim-volume-id"]
                             and root_disk["id"] == vdu_volume["name"]
                         ):
-
                             persistent_root_disk[vsd["id"]] = {
                                 "vim_volume_id": vdu_volume["vim-volume-id"],
                                 "image_id": vdu.get("sw-image-desc"),
@@ -1025,7 +1026,6 @@
                             return persistent_root_disk
 
                     else:
-
                         if root_disk.get("size-of-storage"):
                             persistent_root_disk[vsd["id"]] = {
                                 "image_id": vdu.get("sw-image-desc"),
@@ -1062,9 +1062,7 @@
                 and disk["id"] not in persistent_root_disk.keys()
             ):
                 for vdu_volume in vdu_instantiation_volumes_list:
-
                     if vdu_volume["vim-volume-id"] and disk["id"] == vdu_volume["name"]:
-
                         persistent_disk[disk["id"]] = {
                             "vim_volume_id": vdu_volume["vim-volume-id"],
                         }
@@ -1323,7 +1321,6 @@
             net_list    (list):                             Net list of VDU
         """
         for iface_index, interface in enumerate(target_vdu["interfaces"]):
-
             net_text = Ns._check_vld_information_of_interfaces(
                 interface, ns_preffix, vnf_preffix
             )
@@ -1542,13 +1539,11 @@
             True if i.get("position") is not None else False
             for i in target_vdu["interfaces"]
         ):
-
             Ns._sort_vdu_interfaces(target_vdu)
 
         # If the position info is provided for some interfaces but not all of them, the interfaces
         # which has specific position numbers will be placed and others' positions will not be taken care.
         else:
-
             Ns._partially_locate_vdu_interfaces(target_vdu)
 
         # If the position info is not provided for the interfaces, interfaces will be attached
@@ -1575,7 +1570,6 @@
             )
 
         if vdu_instantiation_volumes_list:
-
             # Find the root volumes and add to the disk_list
             persistent_root_disk = Ns.find_persistent_root_volumes(
                 vnfd, target_vdu, vdu_instantiation_volumes_list, disk_list
@@ -1701,7 +1695,6 @@
             vim_details = yaml.safe_load(f"{vim_details_text}")
 
         for iface_index, interface in enumerate(existing_vdu["interfaces"]):
-
             if "port-security-enabled" in interface:
                 interface["port_security"] = interface.pop("port-security-enabled")
 
@@ -3030,7 +3023,6 @@
                             task_index += 1
                             break
                 else:
-
                     for vdu_index, vdu in enumerate(db_vnfr["vdur"]):
                         extra_dict["params"] = {
                             "vim_vm_id": vdu["vim-id"],
diff --git a/NG-RO/osm_ng_ro/ns_thread.py b/NG-RO/osm_ng_ro/ns_thread.py
index 6e94956..23622f7 100644
--- a/NG-RO/osm_ng_ro/ns_thread.py
+++ b/NG-RO/osm_ng_ro/ns_thread.py
@@ -1040,7 +1040,6 @@
         return self.new(ro_task, task_create_index, None)
 
     def new(self, ro_task, task_index, task_depends):
-
         task = ro_task["tasks"][task_index]
         task_id = task["task_id"]
         target_vim = self.my_vims[ro_task["target_id"]]
@@ -1943,128 +1942,6 @@
 
         return None
 
-    def _get_db_all_tasks(self):
-        """
-        Read all content of table ro_tasks to log it
-        :return: None
-        """
-        try:
-            # Checking the content of the BD:
-
-            # read and return
-            ro_task = self.db.get_list("ro_tasks")
-            for rt in ro_task:
-                self._log_ro_task(rt, None, None, "TASK_WF", "GET_ALL_TASKS")
-            return ro_task
-
-        except DbException as e:
-            self.logger.error("Database exception at _get_db_all_tasks: {}".format(e))
-        except Exception as e:
-            self.logger.critical(
-                "Unexpected exception at _get_db_all_tasks: {}".format(e), exc_info=True
-            )
-
-        return None
-
-    def _log_ro_task(self, ro_task, db_ro_task_update, db_ro_task_delete, mark, event):
-        """
-        Generate a log with the following format:
-
-        Mark;Event;ro_task_id;locked_at;modified_at;created_at;to_check_at;locked_by;
-        target_id;vim_info.refresh_at;vim_info;no_of_tasks;task_status;action_id;
-        task_array_index;task_id;task_action;task_item;task_args
-
-        Example:
-
-        TASK_WF;GET_TASK;888f1864-749a-4fc2-bc1a-97c0fffd6a6f:2;1642158724.8210013;
-        1642158640.7986135;1642158640.7986135;1642158640.7986135;b134c9494e75:0a
-        ;vim:b7ff9e24-8868-4d68-8a57-a59dc11d0327;None;{'created': False,
-        'created_items': None, 'vim_id': None, 'vim_name': None, 'vim_status': None,
-        'vim_details': None, 'vim_message': None, 'refresh_at': None};1;SCHEDULED;
-        888f1864-749a-4fc2-bc1a-97c0fffd6a6f;0;888f1864-749a-4fc2-bc1a-97c0fffd6a6f:2;
-        CREATE;image;{'filter_dict': {'name': 'ubuntu-os-cloud:image-family:ubuntu-1804-lts'}}
-        """
-        try:
-            line = []
-            i = 0
-            if ro_task is not None and isinstance(ro_task, dict):
-                for t in ro_task["tasks"]:
-                    line.clear()
-                    line.append(mark)
-                    line.append(event)
-                    line.append(ro_task.get("_id", ""))
-                    line.append(str(ro_task.get("locked_at", "")))
-                    line.append(str(ro_task.get("modified_at", "")))
-                    line.append(str(ro_task.get("created_at", "")))
-                    line.append(str(ro_task.get("to_check_at", "")))
-                    line.append(str(ro_task.get("locked_by", "")))
-                    line.append(str(ro_task.get("target_id", "")))
-                    line.append(str(ro_task.get("vim_info", {}).get("refresh_at", "")))
-                    line.append(str(ro_task.get("vim_info", "")))
-                    line.append(str(ro_task.get("tasks", "")))
-                    if isinstance(t, dict):
-                        line.append(str(t.get("status", "")))
-                        line.append(str(t.get("action_id", "")))
-                        line.append(str(i))
-                        line.append(str(t.get("task_id", "")))
-                        line.append(str(t.get("action", "")))
-                        line.append(str(t.get("item", "")))
-                        line.append(str(t.get("find_params", "")))
-                        line.append(str(t.get("params", "")))
-                    else:
-                        line.extend([""] * 2)
-                        line.append(str(i))
-                        line.extend([""] * 5)
-
-                    i += 1
-                    self.logger.debug(";".join(line))
-            elif db_ro_task_update is not None and isinstance(db_ro_task_update, dict):
-                i = 0
-                while True:
-                    st = "tasks.{}.status".format(i)
-                    if st not in db_ro_task_update:
-                        break
-                    line.clear()
-                    line.append(mark)
-                    line.append(event)
-                    line.append(db_ro_task_update.get("_id", ""))
-                    line.append(str(db_ro_task_update.get("locked_at", "")))
-                    line.append(str(db_ro_task_update.get("modified_at", "")))
-                    line.append("")
-                    line.append(str(db_ro_task_update.get("to_check_at", "")))
-                    line.append(str(db_ro_task_update.get("locked_by", "")))
-                    line.append("")
-                    line.append(str(db_ro_task_update.get("vim_info.refresh_at", "")))
-                    line.append("")
-                    line.append(str(db_ro_task_update.get("vim_info", "")))
-                    line.append(str(str(db_ro_task_update).count(".status")))
-                    line.append(db_ro_task_update.get(st, ""))
-                    line.append("")
-                    line.append(str(i))
-                    line.extend([""] * 3)
-                    i += 1
-                    self.logger.debug(";".join(line))
-
-            elif db_ro_task_delete is not None and isinstance(db_ro_task_delete, dict):
-                line.clear()
-                line.append(mark)
-                line.append(event)
-                line.append(db_ro_task_delete.get("_id", ""))
-                line.append("")
-                line.append(db_ro_task_delete.get("modified_at", ""))
-                line.extend([""] * 13)
-                self.logger.debug(";".join(line))
-
-            else:
-                line.clear()
-                line.append(mark)
-                line.append(event)
-                line.extend([""] * 16)
-                self.logger.debug(";".join(line))
-
-        except Exception as e:
-            self.logger.error("Error logging ro_task: {}".format(e))
-
     def _delete_task(self, ro_task, task_index, task_depends, db_update):
         """
         Determine if this task need to be done or superseded
@@ -2415,7 +2292,10 @@
                             )
 
                         if task["action"] == "DELETE":
-                            (new_status, db_vim_info_update,) = self._delete_task(
+                            (
+                                new_status,
+                                db_vim_info_update,
+                            ) = self._delete_task(
                                 ro_task, task_index, task_depends, db_ro_task_update
                             )
                             new_status = (
@@ -2463,7 +2343,10 @@
                             else:
                                 refresh_at = ro_task["vim_info"]["refresh_at"]
                                 if refresh_at and refresh_at != -1 and now > refresh_at:
-                                    (new_status, db_vim_info_update,) = self.item2class[
+                                    (
+                                        new_status,
+                                        db_vim_info_update,
+                                    ) = self.item2class[
                                         task["item"]
                                     ].refresh(ro_task)
                                     _update_refresh(new_status)
diff --git a/NG-RO/osm_ng_ro/ro_main.py b/NG-RO/osm_ng_ro/ro_main.py
index 86e839c..47296dc 100644
--- a/NG-RO/osm_ng_ro/ro_main.py
+++ b/NG-RO/osm_ng_ro/ro_main.py
@@ -214,7 +214,6 @@
         }
 
     def _format_in(self, kwargs):
-
         error_text = ""
         try:
             indata = None
diff --git a/NG-RO/osm_ng_ro/tests/test_ns.py b/NG-RO/osm_ng_ro/tests/test_ns.py
index 97f072b..d2fdc4d 100644
--- a/NG-RO/osm_ng_ro/tests/test_ns.py
+++ b/NG-RO/osm_ng_ro/tests/test_ns.py
@@ -1494,7 +1494,7 @@
         self.assertDictEqual(expected_numa_result, numa_result)
         self.assertEqual(expected_epa_vcpu_set_result, epa_vcpu_set_result)
 
-    def test__process_guest_epa_cpu_pinning_params_with_threads(self):
+    def test__process_guest_epa_cpu_pinning_params_with_policy_prefer(self):
         expected_numa_result = {"threads": 3}
         expected_epa_vcpu_set_result = True
         guest_epa_quota = {
@@ -1513,11 +1513,49 @@
         self.assertDictEqual(expected_numa_result, numa_result)
         self.assertEqual(expected_epa_vcpu_set_result, epa_vcpu_set_result)
 
-    def test__process_guest_epa_cpu_pinning_params(self):
+    def test__process_guest_epa_cpu_pinning_params_with_policy_isolate(self):
         expected_numa_result = {"cores": 3}
         expected_epa_vcpu_set_result = True
         guest_epa_quota = {
             "cpu-pinning-policy": "DEDICATED",
+            "cpu-thread-pinning-policy": "ISOLATE",
+        }
+        vcpu_count = 3
+        epa_vcpu_set = False
+
+        numa_result, epa_vcpu_set_result = Ns._process_guest_epa_cpu_pinning_params(
+            guest_epa_quota=guest_epa_quota,
+            vcpu_count=vcpu_count,
+            epa_vcpu_set=epa_vcpu_set,
+        )
+
+        self.assertDictEqual(expected_numa_result, numa_result)
+        self.assertEqual(expected_epa_vcpu_set_result, epa_vcpu_set_result)
+
+    def test__process_guest_epa_cpu_pinning_params_with_policy_require(self):
+        expected_numa_result = {"threads": 3}
+        expected_epa_vcpu_set_result = True
+        guest_epa_quota = {
+            "cpu-pinning-policy": "DEDICATED",
+            "cpu-thread-pinning-policy": "REQUIRE",
+        }
+        vcpu_count = 3
+        epa_vcpu_set = False
+
+        numa_result, epa_vcpu_set_result = Ns._process_guest_epa_cpu_pinning_params(
+            guest_epa_quota=guest_epa_quota,
+            vcpu_count=vcpu_count,
+            epa_vcpu_set=epa_vcpu_set,
+        )
+
+        self.assertDictEqual(expected_numa_result, numa_result)
+        self.assertEqual(expected_epa_vcpu_set_result, epa_vcpu_set_result)
+
+    def test__process_guest_epa_cpu_pinning_params(self):
+        expected_numa_result = {"threads": 3}
+        expected_epa_vcpu_set_result = True
+        guest_epa_quota = {
+            "cpu-pinning-policy": "DEDICATED",
         }
         vcpu_count = 3
         epa_vcpu_set = False
@@ -1777,7 +1815,6 @@
         self,
         epa_params,
     ):
-
         target_flavor = {}
         indata = {
             "vnf": [
@@ -1804,7 +1841,6 @@
         self,
         epa_params,
     ):
-
         target_flavor = {
             "no-target-flavor": "here",
         }
@@ -1827,7 +1863,6 @@
         self,
         epa_params,
     ):
-
         expected_result = {
             "find_params": {
                 "flavor_data": {
@@ -1872,7 +1907,6 @@
         self,
         epa_params,
     ):
-
         expected_result = {
             "find_params": {
                 "flavor_data": {
@@ -2029,7 +2063,6 @@
         self,
         epa_params,
     ):
-
         expected_result = {
             "find_params": {
                 "flavor_data": {
@@ -2094,7 +2127,6 @@
         self,
         epa_params,
     ):
-
         kwargs = {
             "db": db,
         }
@@ -2205,7 +2237,6 @@
         self,
         epa_params,
     ):
-
         expected_result = {
             "find_params": {
                 "flavor_data": {
@@ -2270,7 +2301,6 @@
         self,
         epa_params,
     ):
-
         kwargs = {
             "db": db,
         }
@@ -3612,7 +3642,8 @@
         self, mock_volume_keeping_required
     ):
         """Find persistent ordinary volume, volume id is not persistent_root_disk dict,
-        vim-volume-id is given as instantiation parameter but disk id is not matching."""
+        vim-volume-id is given as instantiation parameter but disk id is not matching.
+        """
         mock_volume_keeping_required.return_value = True
         vdu_instantiation_volumes_list = [
             {
diff --git a/NG-RO/osm_ng_ro/vim_admin.py b/NG-RO/osm_ng_ro/vim_admin.py
index 3350d43..b6e34e1 100644
--- a/NG-RO/osm_ng_ro/vim_admin.py
+++ b/NG-RO/osm_ng_ro/vim_admin.py
@@ -39,7 +39,6 @@
 
 
 class LockRenew:
-
     renew_list = []
     # ^ static method, common for all RO. Time ordered list of dictionaries with information of locks that needs to
     # be renewed. The time order is achieved as it is appended at the end
diff --git a/RO-SDN-arista_cloudvision/osm_rosdn_arista_cloudvision/aristaTask.py b/RO-SDN-arista_cloudvision/osm_rosdn_arista_cloudvision/aristaTask.py
index 6af7c43..12f3920 100644
--- a/RO-SDN-arista_cloudvision/osm_rosdn_arista_cloudvision/aristaTask.py
+++ b/RO-SDN-arista_cloudvision/osm_rosdn_arista_cloudvision/aristaTask.py
@@ -72,34 +72,6 @@
 

         return new_data

 

-    def get_pending_tasks(self):

-        return self.cvpClientApi.get_tasks_by_status("Pending")

-

-    def get_pending_tasks_old(self):

-        taskList = []

-        tasksField = {

-            "workOrderId": "workOrderId",

-            "workOrderState": "workOrderState",

-            "currentTaskName": "currentTaskName",

-            "description": "description",

-            "workOrderUserDefinedStatus": "workOrderUserDefinedStatus",

-            "note": "note",

-            "taskStatus": "taskStatus",

-            "workOrderDetails": "workOrderDetails",

-        }

-        tasks = self.cvpClientApi.get_tasks_by_status("Pending")

-

-        # Reduce task data to required fields

-        for task in tasks:

-            taskFacts = {}

-            for field in task.keys():

-                if field in tasksField:

-                    taskFacts[tasksField[field]] = task[field]

-

-            taskList.append(taskFacts)

-

-        return taskList

-

     def task_action(self, tasks, wait, state):

         changed = False

         data = dict()

diff --git a/RO-SDN-juniper_contrail/osm_rosdn_juniper_contrail/rest_lib.py b/RO-SDN-juniper_contrail/osm_rosdn_juniper_contrail/rest_lib.py
index fff489d..c607fea 100644
--- a/RO-SDN-juniper_contrail/osm_rosdn_juniper_contrail/rest_lib.py
+++ b/RO-SDN-juniper_contrail/osm_rosdn_juniper_contrail/rest_lib.py
@@ -16,7 +16,6 @@
 
 import copy
 import json
-from time import time
 
 import requests
 from requests.exceptions import ConnectionError
@@ -64,30 +63,6 @@
 
         return resp.json()
 
-    def post_headers_cmd(self, url, headers, post_fields_dict=None):
-        self._logger.debug("")
-
-        # obfuscate password before logging dict
-        if (
-            post_fields_dict.get("auth", {})
-            .get("identity", {})
-            .get("password", {})
-            .get("user", {})
-            .get("password")
-        ):
-            post_fields_dict_copy = copy.deepcopy(post_fields_dict)
-            post_fields_dict["auth"]["identity"]["password"]["user"][
-                "password"
-            ] = "******"
-            json_data_log = post_fields_dict_copy
-        else:
-            json_data_log = post_fields_dict
-
-        self._logger.debug("Request POSTFIELDS: {}".format(json.dumps(json_data_log)))
-        resp = self._request("POST_HEADERS", url, headers, data=post_fields_dict)
-
-        return resp.text
-
     def post_cmd(self, url, headers, post_fields_dict=None):
         self._logger.debug("")
 
@@ -118,34 +93,6 @@
 
         return resp.text
 
-    def _get_token(self, headers):
-        if self.auth_url:
-            self._logger.debug("Current Token: {}".format(self.token))
-            auth_url = self.auth_url + "auth/tokens"
-
-            if self.token is None or self._token_expired():
-                if not self.auth_url:
-                    self.token = ""
-
-                resp = self._request_noauth(
-                    url=auth_url, op="POST", headers=headers, data=self.auth_dict
-                )
-                self.token = resp.headers.get("x-subject-token")
-                self.last_token_time = time.time()
-                self._logger.debug("Obtained token: {}".format(self.token))
-
-                return self.token
-
-    def _token_expired(self):
-        current_time = time.time()
-
-        if self.last_token_time and (
-            current_time - self.last_token_time < self.token_timeout
-        ):
-            return False
-        else:
-            return True
-
     def _request(self, op, url, http_headers, data=None, retry_auth_error=True):
         headers = http_headers.copy()
 
diff --git a/RO-SDN-juniper_contrail/osm_rosdn_juniper_contrail/sdn_api.py b/RO-SDN-juniper_contrail/osm_rosdn_juniper_contrail/sdn_api.py
index 0b4f9a8..7e344f4 100644
--- a/RO-SDN-juniper_contrail/osm_rosdn_juniper_contrail/sdn_api.py
+++ b/RO-SDN-juniper_contrail/osm_rosdn_juniper_contrail/sdn_api.py
@@ -233,9 +233,6 @@
             "virtual-port-group"
         )
 
-    def get_vpgs(self):
-        return self.get_all_by_type(self.controller_url, "virtual-port-groups")
-
     def get_vpg_by_name(self, vpg_name):
         fq_name = ["default-global-system-config", self.fabric, vpg_name]
 
diff --git a/RO-SDN-juniper_contrail/osm_rosdn_juniper_contrail/sdn_assist_juniper_contrail.py b/RO-SDN-juniper_contrail/osm_rosdn_juniper_contrail/sdn_assist_juniper_contrail.py
index 691397d..0894713 100644
--- a/RO-SDN-juniper_contrail/osm_rosdn_juniper_contrail/sdn_assist_juniper_contrail.py
+++ b/RO-SDN-juniper_contrail/osm_rosdn_juniper_contrail/sdn_assist_juniper_contrail.py
@@ -213,9 +213,6 @@
     def get_url(self):
         return self.url
 
-    def get_overlay_url(self):
-        return self.overlay_url
-
     def _create_port(self, switch_id, switch_port, network, vlan):
         """
         1 - Look for virtual port groups for provided switch_id, switch_port using name
diff --git a/RO-SDN-juniper_contrail/osm_rosdn_juniper_contrail/tests/test_sdn_asssist_juniper_contrail.py b/RO-SDN-juniper_contrail/osm_rosdn_juniper_contrail/tests/test_sdn_asssist_juniper_contrail.py
index 8ef04e1..144603a 100644
--- a/RO-SDN-juniper_contrail/osm_rosdn_juniper_contrail/tests/test_sdn_asssist_juniper_contrail.py
+++ b/RO-SDN-juniper_contrail/osm_rosdn_juniper_contrail/tests/test_sdn_asssist_juniper_contrail.py
@@ -35,7 +35,6 @@
 
     @patch("osm_rosdn_juniper_contrail.sdn_api.UnderlayApi")
     def test_juniper_contrail_sdn_with_ssl_cert(self, mock_underlay_api):
-
         config = {
             "ca_cert": "/path/to/certfile",
             "project": "test_project",
diff --git a/RO-VIM-aws/osm_rovim_aws/vimconn_aws.py b/RO-VIM-aws/osm_rovim_aws/vimconn_aws.py
index 63184a1..393acba 100644
--- a/RO-VIM-aws/osm_rovim_aws/vimconn_aws.py
+++ b/RO-VIM-aws/osm_rovim_aws/vimconn_aws.py
@@ -204,19 +204,6 @@
 
         raise vimconn.VimConnConnectionException(type(e).__name__ + ": " + str(e))
 
-    def get_availability_zones_list(self):
-        """Obtain AvailabilityZones from AWS"""
-        try:
-            self._reload_connection()
-            az_list = []
-
-            for az in self.conn.get_all_zones():
-                az_list.append(az.name)
-
-            return az_list
-        except Exception as e:
-            self.format_vimconn_exception(e)
-
     def get_tenant_list(self, filter_dict={}):
         """Obtain tenants of VIM
         filter_dict dictionary that can contain the following keys:
@@ -819,7 +806,6 @@
 
             else:
                 for index, subnet in enumerate(net_list):
-
                     net_intr = self.conn_vpc.create_network_interface(
                         subnet_id=subnet.get("net_id"),
                         groups=None,
diff --git a/RO-VIM-azure/osm_rovim_azure/vimconn_azure.py b/RO-VIM-azure/osm_rovim_azure/vimconn_azure.py
index 0b31d02..bf4a306 100755
--- a/RO-VIM-azure/osm_rovim_azure/vimconn_azure.py
+++ b/RO-VIM-azure/osm_rovim_azure/vimconn_azure.py
@@ -21,7 +21,6 @@
 from azure.core.exceptions import ResourceNotFoundError
 from azure.identity import ClientSecretCredential
 from azure.mgmt.compute import ComputeManagementClient
-from azure.mgmt.compute.models import DiskCreateOption
 from azure.mgmt.network import NetworkManagementClient
 from azure.mgmt.resource import ResourceManagementClient
 from azure.profiles import ProfileDefinition
@@ -57,7 +56,6 @@
 
 
 class vimconnector(vimconn.VimConnector):
-
     # Translate azure provisioning state to OSM provision state
     # The first three ones are the transitional status once a user initiated action has been requested
     # Once the operation is complete, it will transition into the states Succeeded or Failed
@@ -952,32 +950,8 @@
             virtual_machine = creation_result.result()
             self.logger.debug("created vm name: %s", vm_name)
 
-            """ Por ahora no hacer polling para ver si tarda menos
-            # Add disks if they are provided
-            if disk_list:
-                for disk_index, disk in enumerate(disk_list):
-                    self.logger.debug(
-                        "add disk size: %s, image: %s",
-                        disk.get("size"),
-                        disk.get("image"),
-                    )
-                    self._add_newvm_disk(
-                        virtual_machine, vm_name, disk_index, disk, created_items
-                    )
-
-            if start:
-                self.conn_compute.virtual_machines.start(self.resource_group, vm_name)
-            # start_result.wait()
-            """
-
             return virtual_machine.id, created_items
 
-            # run_command_parameters = {
-            #     "command_id": "RunShellScript", # For linux, don't change it
-            #     "script": [
-            #     "date > /tmp/test.txt"
-            #     ]
-            # }
         except Exception as e:
             # Rollback vm creacion
             vm_id = None
@@ -995,7 +969,6 @@
             self._format_vimconn_exception(e)
 
     def _build_os_profile(self, vm_name, cloud_config, image_id):
-
         # initial os_profile
         os_profile = {"computer_name": vm_name}
 
@@ -1112,92 +1085,6 @@
     def _get_azure_availability_zones(self):
         return self.AZURE_ZONES
 
-    def _add_newvm_disk(
-        self, virtual_machine, vm_name, disk_index, disk, created_items={}
-    ):
-        disk_name = None
-        data_disk = None
-
-        # Check if must create empty disk or from image
-        if disk.get("vim_id"):
-            # disk already exists, just get
-            parsed_id = azure_tools.parse_resource_id(disk.get("vim_id"))
-            disk_name = parsed_id.get("name")
-            data_disk = self.conn_compute.disks.get(self.resource_group, disk_name)
-        else:
-            disk_name = vm_name + "_DataDisk_" + str(disk_index)
-            if not disk.get("image_id"):
-                self.logger.debug("create new data disk name: %s", disk_name)
-                async_disk_creation = self.conn_compute.disks.begin_create_or_update(
-                    self.resource_group,
-                    disk_name,
-                    {
-                        "location": self.region,
-                        "disk_size_gb": disk.get("size"),
-                        "creation_data": {"create_option": DiskCreateOption.empty},
-                    },
-                )
-                data_disk = async_disk_creation.result()
-                created_items[data_disk.id] = True
-            else:
-                image_id = disk.get("image_id")
-
-                if azure_tools.is_valid_resource_id(image_id):
-                    parsed_id = azure_tools.parse_resource_id(image_id)
-
-                    # Check if image is snapshot or disk
-                    image_name = parsed_id.get("name")
-                    type = parsed_id.get("resource_type")
-
-                    if type == "snapshots" or type == "disks":
-                        self.logger.debug("create disk from copy name: %s", image_name)
-                        # ¿Should check that snapshot exists?
-                        async_disk_creation = (
-                            self.conn_compute.disks.begin_create_or_update(
-                                self.resource_group,
-                                disk_name,
-                                {
-                                    "location": self.region,
-                                    "creation_data": {
-                                        "create_option": "Copy",
-                                        "source_uri": image_id,
-                                    },
-                                },
-                            )
-                        )
-                        data_disk = async_disk_creation.result()
-                        created_items[data_disk.id] = True
-                    else:
-                        raise vimconn.VimConnNotFoundException(
-                            "Invalid image_id: %s ", image_id
-                        )
-                else:
-                    raise vimconn.VimConnNotFoundException(
-                        "Invalid image_id: %s ", image_id
-                    )
-
-        # Attach the disk created
-        virtual_machine.storage_profile.data_disks.append(
-            {
-                "lun": disk_index,
-                "name": disk_name,
-                "create_option": DiskCreateOption.attach,
-                "managed_disk": {"id": data_disk.id},
-                "disk_size_gb": disk.get("size"),
-            }
-        )
-        self.logger.debug("attach disk name: %s", disk_name)
-        self.conn_compute.virtual_machines.begin_create_or_update(
-            self.resource_group, virtual_machine.name, virtual_machine
-        )
-
-    # It is necesary extract from image_id data to create the VM with this format
-    #        "image_reference": {
-    #           "publisher": vm_reference["publisher"],
-    #           "offer": vm_reference["offer"],
-    #           "sku": vm_reference["sku"],
-    #           "version": vm_reference["version"]
-    #        },
     def _get_image_reference(self, image_id):
         try:
             # The data input format example:
@@ -1426,7 +1313,6 @@
             self._format_vimconn_exception(e)
 
     def delete_inuse_nic(self, nic_name):
-
         # Obtain nic data
         nic_data = self.conn_vnet.network_interfaces.get(self.resource_group, nic_name)
 
@@ -1449,7 +1335,6 @@
 
             # TODO - check if there is a public ip to delete and delete it
             if network_interfaces:
-
                 # Deallocate the vm
                 async_vm_deallocate = (
                     self.conn_compute.virtual_machines.begin_deallocate(
@@ -2074,128 +1959,3 @@
         log_level=None,
         config=config,
     )
-
-    """
-    logger.debug("List images")
-    image = azure.get_image_list({"name": "Canonical:UbuntuServer:18.04-LTS:18.04.201809110"})
-    logger.debug("image: {}".format(image))
-
-    logger.debug("List networks")
-    network_list = azure.get_network_list({"name": "internal"})
-    logger.debug("Network_list: {}".format(network_list))
-
-    logger.debug("List flavors")
-    flavors = azure.get_flavor_id_from_data({"vcpus": 2})
-    logger.debug("flavors: {}".format(flavors))
-    """
-
-    """
-    # Create network and test machine
-    #new_network_id, _ = azure.new_network("testnet1", "data")
-    new_network_id = ("/subscriptions/5c1a2458-dfde-4adf-a4e3-08fa0e21d171/resourceGroups/{}/providers")
-                      "/Microsoft.Network/virtualNetworks/osm_vnet/subnets/testnet1"
-                      ).format(test_params["resource_group"])
-    logger.debug("new_network_id: {}".format(new_network_id))
-
-    logger.debug("Delete network")
-    new_network_id = azure.delete_network(new_network_id)
-    logger.debug("deleted network_id: {}".format(new_network_id))
-    """
-
-    """
-    logger.debug("List networks")
-    network_list = azure.get_network_list({"name": "internal"})
-    logger.debug("Network_list: {}".format(network_list))
-
-    logger.debug("Show machine isabelvm")
-    vmachine = azure.get_vminstance( ("/subscriptions/5c1a2458-dfde-4adf-a4e3-08fa0e21d171/resourceGroups/{}"
-                                      "/providers/Microsoft.Compute/virtualMachines/isabelVM"
-                                      ).format(test_params["resource_group"])
-                                    )
-    logger.debug("Vmachine: {}".format(vmachine))
-    """
-
-    """
-    logger.debug("List images")
-    image = azure.get_image_list({"name": "Canonical:UbuntuServer:16.04"})
-    # image = azure.get_image_list({"name": "Canonical:UbuntuServer:18.04-LTS"})
-    logger.debug("image: {}".format(image))
-    """
-
-    """
-    # Create network and test machine
-    new_network_id, _ = azure.new_network("testnet1", "data")
-    image_id = ("/Subscriptions/5c1a2458-dfde-4adf-a4e3-08fa0e21d171/Providers/Microsoft.Compute"
-                "/Locations/northeurope/Publishers/Canonical/ArtifactTypes/VMImage/Offers/UbuntuServer"
-                "/Skus/18.04-LTS/Versions/18.04.201809110")
-    """
-    """
-
-    network_id = ("subscriptions/5c1a2458-dfde-4adf-a4e3-08fa0e21d171/resourceGroups/{}
-                  "/providers/Microsoft.Network/virtualNetworks/osm_vnet/subnets/internal"
-                 ).format(test_params["resource_group"])
-    """
-
-    """
-    logger.debug("Create machine")
-    image_id = ("/Subscriptions/5c1a2458-dfde-4adf-a4e3-08fa0e21d171/Providers/Microsoft.Compute/Locations"
-                "/northeurope/Publishers/Canonical/ArtifactTypes/VMImage/Offers/UbuntuServer/Skus/18.04-LTS"
-                "/Versions/18.04.202103151")
-    cloud_config = {"user-data": (
-                    "#cloud-config\n"
-                    "password: osm4u\n"
-                    "chpasswd: { expire: False }\n"
-                    "ssh_pwauth: True\n\n"
-                    "write_files:\n"
-                    "-   content: |\n"
-                    "        # My new helloworld file\n\n"
-                    "    owner: root:root\n"
-                    "    permissions: '0644'\n"
-                    "    path: /root/helloworld.txt",
-                    "key-pairs": [
-                        ("ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC/p7fuw/W0+6uhx9XNPY4dN/K2cXZweDfjJN8W/sQ1AhKvn"
-                         "j0MF+dbBdsd2tfq6XUhx5LiKoGTunRpRonOw249ivH7pSyNN7FYpdLaij7Krn3K+QRNEOahMI4eoqdglVftA3"
-                         "vlw4Oe/aZOU9BXPdRLxfr9hRKzg5zkK91/LBkEViAijpCwK6ODPZLDDUwY4iihYK9R5eZ3fmM4+3k3Jd0hPRk"
-                         "B5YbtDQOu8ASWRZ9iTAWqr1OwQmvNc6ohSVg1tbq3wSxj/5bbz0J24A7TTpY0giWctne8Qkl/F2e0ZSErvbBB"
-                         "GXKxfnq7sc23OK1hPxMAuS+ufzyXsnL1+fB4t2iF azureuser@osm-test-client\n"
-                        )]
-    }
-    network_id = ("subscriptions/5c1a2458-dfde-4adf-a4e3-08fa0e21d171/resourceGroups/{}/providers"
-                  "/Microsoft.Network/virtualNetworks/osm_vnet/subnets/internal"
-                 ).format(test_params["resource_group"])
-    vm = azure.new_vminstance(name="isabelvm",
-                            description="testvm",
-                            start=True,
-                            image_id=image_id,
-                            flavor_id="Standard_B1ls",
-                            net_list = [{"net_id": network_id, "name": "internal", "use": "mgmt", "floating_ip":True}],
-                            cloud_config = cloud_config)
-    logger.debug("vm: {}".format(vm))
-    """
-
-    """
-    # Delete nonexistent vm
-    try:
-        logger.debug("Delete machine")
-        vm_id = ("/subscriptions/5c1a2458-dfde-4adf-a4e3-08fa0e21d171/resourceGroups/{}/providers/Microsoft.Compute/"
-                "virtualMachines/isabelvm"
-                ).format(test_params["resource_group"])
-        created_items = {
-            ("/subscriptions/5c1a2458-dfde-4adf-a4e3-08fa0e21d171/resourceGroups/{}/providers/Microsoft.Network"
-             "/networkInterfaces/isabelvm-nic-0"
-            ).format(test_params["resource_group"]): True,
-            ("/subscriptions/5c1a2458-dfde-4adf-a4e3-08fa0e21d171/resourceGroups/{}/providers/Microsoft.Network"
-             "/publicIPAddresses/isabelvm-nic-0-public-ip"
-            ).format(test_params["resource_group"]): True
-        }
-        azure.delete_vminstance(vm_id, created_items)
-    except vimconn.VimConnNotFoundException as e:
-        print("Ok: excepcion no encontrada")
-    """
-
-    """
-    network_id = ("/subscriptions/5c1a2458-dfde-4adf-a4e3-08fa0e21d171/resourceGroups/{}/providers/Microsoft.Network"
-                  "/virtualNetworks/osm_vnet/subnets/hfcloudinit-internal-1"
-                 ).format(test_params["resource_group"])
-    azure.delete_network(network_id)
-    """
diff --git a/RO-VIM-gcp/osm_rovim_gcp/vimconn_gcp.py b/RO-VIM-gcp/osm_rovim_gcp/vimconn_gcp.py
index e8ecbb3..7e7f606 100644
--- a/RO-VIM-gcp/osm_rovim_gcp/vimconn_gcp.py
+++ b/RO-VIM-gcp/osm_rovim_gcp/vimconn_gcp.py
@@ -19,9 +19,6 @@
 from random import choice as random_choice
 import time
 
-from cryptography.hazmat.backends import default_backend as crypto_default_backend
-from cryptography.hazmat.primitives import serialization as crypto_serialization
-from cryptography.hazmat.primitives.asymmetric import rsa
 from google.oauth2 import service_account
 import googleapiclient.discovery
 from osm_ro_plugin import vimconn
@@ -40,7 +37,6 @@
 
 
 class vimconnector(vimconn.VimConnector):
-
     # Translate Google Cloud provisioning state to OSM provision state
     # The first three ones are the transitional status once a user initiated action has been requested
     # Once the operation is complete, it will transition into the states Succeeded or Failed
@@ -306,7 +302,6 @@
         self.logger.debug("create network name %s, ip_profile %s", net_name, ip_profile)
 
         try:
-
             self.logger.debug("creating network_name: {}".format(net_name))
 
             network = "projects/{}/global/networks/default".format(self.project)
@@ -375,7 +370,6 @@
         )
 
         try:
-
             self.logger.debug("creating subnet_name: {}".format(subnet_name))
 
             subnetwork_body = {
@@ -424,7 +418,6 @@
         )
 
         try:
-
             if self.reload_client:
                 self._reload_connection()
 
@@ -504,7 +497,6 @@
         self.logger.debug("Deleting network: {}".format(str(net_id)))
 
         try:
-
             net_name = self._get_resource_name_from_resource_id(net_id)
 
             # Check associated VMs
@@ -814,9 +806,6 @@
         except Exception as e:
             self._format_vimconn_exception(e)
 
-    def delete_inuse_nic(self, nic_name):
-        raise vimconn.VimConnNotImplemented("Not necessary")
-
     def delete_image(self, image_id):
         raise vimconn.VimConnNotImplemented("Not implemented")
 
@@ -1019,85 +1008,10 @@
 
         # either password of ssh-keys are required
         # we will always use ssh-keys, in case it is not available we will generate it
-        """
-        if cloud_config and cloud_config.get("key-pairs"):
-            key_data = ""
-            key_pairs = {}
-            if cloud_config.get("key-pairs"):
-                if isinstance(cloud_config["key-pairs"], list):
-                    # Transform the format "<key> <user@host>" into "<user>:<key>"
-                    key_data = ""
-                    for key in cloud_config.get("key-pairs"):
-                        key_data = key_data + key + "\n"
-                    key_pairs = {
-                        "key": "ssh-keys",
-                        "value": key_data
-                    }
-        else:
-            # If there is no ssh key in cloud config, a new key is generated:
-            _, key_data = self._generate_keys()
-            key_pairs = {
-                "key": "ssh-keys",
-                "value": "" + key_data
-            }
-            self.logger.debug("generated keys: %s", key_data)
-
-        metadata["items"].append(key_pairs)
-        """
         self.logger.debug("metadata: %s", metadata)
 
         return metadata
 
-    def _generate_keys(self):
-        """Method used to generate a pair of private/public keys.
-        This method is used because to create a vm in Azure we always need a key or a password
-        In some cases we may have a password in a cloud-init file but it may not be available
-        """
-        key = rsa.generate_private_key(
-            backend=crypto_default_backend(), public_exponent=65537, key_size=2048
-        )
-        private_key = key.private_bytes(
-            crypto_serialization.Encoding.PEM,
-            crypto_serialization.PrivateFormat.PKCS8,
-            crypto_serialization.NoEncryption(),
-        )
-        public_key = key.public_key().public_bytes(
-            crypto_serialization.Encoding.OpenSSH,
-            crypto_serialization.PublicFormat.OpenSSH,
-        )
-        private_key = private_key.decode("utf8")
-        # Change first line because Paramiko needs a explicit start with 'BEGIN RSA PRIVATE KEY'
-        i = private_key.find("\n")
-        private_key = "-----BEGIN RSA PRIVATE KEY-----" + private_key[i:]
-        public_key = public_key.decode("utf8")
-
-        return private_key, public_key
-
-    def _get_unused_vm_name(self, vm_name):
-        """
-        Checks the vm name and in case it is used adds a suffix to the name to allow creation
-        :return:
-        """
-        all_vms = (
-            self.conn_compute.instances()
-            .list(project=self.project, zone=self.zone)
-            .execute()
-        )
-        # Filter to vms starting with the indicated name
-        vms = list(filter(lambda vm: (vm.name.startswith(vm_name)), all_vms))
-        vm_names = [str(vm.name) for vm in vms]
-
-        # get the name with the first not used suffix
-        name_suffix = 0
-        # name = subnet_name + "-" + str(name_suffix)
-        name = vm_name  # first subnet created will have no prefix
-
-        while name in vm_names:
-            name_suffix += 1
-            name = vm_name + "-" + str(name_suffix)
-
-        return name
-
     def get_vminstance(self, vm_id):
         """
         Obtaing the vm instance data from v_id
@@ -1158,18 +1072,6 @@
             else:
                 self._format_vimconn_exception(e)
 
-    def _get_net_name_from_resource_id(self, resource_id):
-        try:
-            net_name = str(resource_id.split("/")[-1])
-
-            return net_name
-        except Exception:
-            raise vimconn.VimConnException(
-                "Unable to get google cloud net_name from invalid resource_id format '{}'".format(
-                    resource_id
-                )
-            )
-
     def _get_resource_name_from_resource_id(self, resource_id):
         """
         Obtains resource_name from the google cloud complete identifier: resource_name will always be last item
@@ -1347,12 +1249,6 @@
             )
             self._format_vimconn_exception(e)
 
-    def _get_default_admin_user(self, image_id):
-        if "ubuntu" in image_id.lower():
-            return "ubuntu"
-        else:
-            return self._default_admin_user
-
     def _create_firewall_rules(self, network):
         """
         Creates the necessary firewall rules to allow the traffic in the network
diff --git a/RO-VIM-openstack/osm_rovim_openstack/tests/test_vimconn_openstack.py b/RO-VIM-openstack/osm_rovim_openstack/tests/test_vimconn_openstack.py
index acf6be4..1de45fd 100644
--- a/RO-VIM-openstack/osm_rovim_openstack/tests/test_vimconn_openstack.py
+++ b/RO-VIM-openstack/osm_rovim_openstack/tests/test_vimconn_openstack.py
@@ -23,17 +23,13 @@
 This module contains unit tests for the OpenStack VIM connector
 Run this directly with python2 or python3.
 """
-import copy
 from copy import deepcopy
 import logging
 import unittest
 
-import mock
 from mock import MagicMock, patch
-from neutronclient.v2_0.client import Client
 from novaclient import exceptions as nvExceptions
 from novaclient.exceptions import ClientException, Conflict
-from osm_ro_plugin import vimconn
 from osm_ro_plugin.vimconn import (
     VimConnConnectionException,
     VimConnException,
@@ -81,7 +77,6 @@
 
 
 # Variables used in TestNewFlavor Class
-flavor_id = "075d2482-5edb-43e3-91b3-234e65b6268a"
 name1 = "sample-flavor"
 extended = (
     {
@@ -108,1026 +103,6 @@
 }
 
 
-class TestSfcOperations(unittest.TestCase):
-    @mock.patch("logging.getLogger", autospec=True)
-    def setUp(self, mock_logger):
-        # Instantiate dummy VIM connector so we can test it
-        # It throws exception because of dummy parameters,
-        # We are disabling the logging of exception not to print them to console.
-        mock_logger = logging.getLogger()
-        mock_logger.disabled = True
-        self.vimconn = vimconnector(
-            "123",
-            "openstackvim",
-            "456",
-            "789",
-            "http://dummy.url",
-            None,
-            "user",
-            "pass",
-        )
-
-    def _test_new_sfi(
-        self,
-        create_sfc_port_pair,
-        sfc_encap,
-        ingress_ports=["5311c75d-d718-4369-bbda-cdcc6da60fcc"],
-        egress_ports=["230cdf1b-de37-4891-bc07-f9010cf1f967"],
-    ):
-        # input to VIM connector
-        name = "osm_sfi"
-        # + ingress_ports
-        # + egress_ports
-        # TODO(igordc): must be changed to NSH in Queens (MPLS is a workaround)
-        correlation = "nsh"
-        if sfc_encap is not None:
-            if not sfc_encap:
-                correlation = None
-
-        # what OpenStack is assumed to respond (patch OpenStack"s return value)
-        dict_from_neutron = {
-            "port_pair": {
-                "id": "3d7ddc13-923c-4332-971e-708ed82902ce",
-                "name": name,
-                "description": "",
-                "tenant_id": "130b1e97-b0f1-40a8-8804-b6ad9b8c3e0c",
-                "project_id": "130b1e97-b0f1-40a8-8804-b6ad9b8c3e0c",
-                "ingress": ingress_ports[0] if len(ingress_ports) else None,
-                "egress": egress_ports[0] if len(egress_ports) else None,
-                "service_function_parameters": {"correlation": correlation},
-            }
-        }
-        create_sfc_port_pair.return_value = dict_from_neutron
-
-        # what the VIM connector is expected to
-        # send to OpenStack based on the input
-        dict_to_neutron = {
-            "port_pair": {
-                "name": name,
-                "ingress": "5311c75d-d718-4369-bbda-cdcc6da60fcc",
-                "egress": "230cdf1b-de37-4891-bc07-f9010cf1f967",
-                "service_function_parameters": {"correlation": correlation},
-            }
-        }
-
-        # call the VIM connector
-        if sfc_encap is None:
-            result = self.vimconn.new_sfi(name, ingress_ports, egress_ports)
-        else:
-            result = self.vimconn.new_sfi(name, ingress_ports, egress_ports, sfc_encap)
-
-        # assert that the VIM connector made the expected call to OpenStack
-        create_sfc_port_pair.assert_called_with(dict_to_neutron)
-        # assert that the VIM connector had the expected result / return value
-        self.assertEqual(result, dict_from_neutron["port_pair"]["id"])
-
-    def _test_new_sf(self, create_sfc_port_pair_group):
-        # input to VIM connector
-        name = "osm_sf"
-        instances = [
-            "bbd01220-cf72-41f2-9e70-0669c2e5c4cd",
-            "12ba215e-3987-4892-bd3a-d0fd91eecf98",
-            "e25a7c79-14c8-469a-9ae1-f601c9371ffd",
-        ]
-
-        # what OpenStack is assumed to respond (patch OpenStack"s return value)
-        dict_from_neutron = {
-            "port_pair_group": {
-                "id": "3d7ddc13-923c-4332-971e-708ed82902ce",
-                "name": name,
-                "description": "",
-                "tenant_id": "130b1e97-b0f1-40a8-8804-b6ad9b8c3e0c",
-                "project_id": "130b1e97-b0f1-40a8-8804-b6ad9b8c3e0c",
-                "port_pairs": instances,
-                "group_id": 1,
-                "port_pair_group_parameters": {
-                    "lb_fields": [],
-                    "ppg_n_tuple_mapping": {
-                        "ingress_n_tuple": {},
-                        "egress_n_tuple": {},
-                    },
-                },
-            }
-        }
-        create_sfc_port_pair_group.return_value = dict_from_neutron
-
-        # what the VIM connector is expected to
-        # send to OpenStack based on the input
-        dict_to_neutron = {
-            "port_pair_group": {
-                "name": name,
-                "port_pairs": [
-                    "bbd01220-cf72-41f2-9e70-0669c2e5c4cd",
-                    "12ba215e-3987-4892-bd3a-d0fd91eecf98",
-                    "e25a7c79-14c8-469a-9ae1-f601c9371ffd",
-                ],
-            }
-        }
-
-        # call the VIM connector
-        result = self.vimconn.new_sf(name, instances)
-
-        # assert that the VIM connector made the expected call to OpenStack
-        create_sfc_port_pair_group.assert_called_with(dict_to_neutron)
-        # assert that the VIM connector had the expected result / return value
-        self.assertEqual(result, dict_from_neutron["port_pair_group"]["id"])
-
-    def _test_new_sfp(self, create_sfc_port_chain, sfc_encap, spi):
-        # input to VIM connector
-        name = "osm_sfp"
-        classifications = [
-            "2bd2a2e5-c5fd-4eac-a297-d5e255c35c19",
-            "00f23389-bdfa-43c2-8b16-5815f2582fa8",
-        ]
-        sfs = [
-            "2314daec-c262-414a-86e3-69bb6fa5bc16",
-            "d8bfdb5d-195e-4f34-81aa-6135705317df",
-        ]
-
-        # TODO(igordc): must be changed to NSH in Queens (MPLS is a workaround)
-        correlation = "nsh"
-        chain_id = 33
-        if spi:
-            chain_id = spi
-
-        # what OpenStack is assumed to respond (patch OpenStack"s return value)
-        dict_from_neutron = {
-            "port_chain": {
-                "id": "5bc05721-079b-4b6e-a235-47cac331cbb6",
-                "name": name,
-                "description": "",
-                "tenant_id": "130b1e97-b0f1-40a8-8804-b6ad9b8c3e0c",
-                "project_id": "130b1e97-b0f1-40a8-8804-b6ad9b8c3e0c",
-                "chain_id": chain_id,
-                "flow_classifiers": classifications,
-                "port_pair_groups": sfs,
-                "chain_parameters": {"correlation": correlation},
-            }
-        }
-        create_sfc_port_chain.return_value = dict_from_neutron
-
-        # what the VIM connector is expected to
-        # send to OpenStack based on the input
-        dict_to_neutron = {
-            "port_chain": {
-                "name": name,
-                "flow_classifiers": [
-                    "2bd2a2e5-c5fd-4eac-a297-d5e255c35c19",
-                    "00f23389-bdfa-43c2-8b16-5815f2582fa8",
-                ],
-                "port_pair_groups": [
-                    "2314daec-c262-414a-86e3-69bb6fa5bc16",
-                    "d8bfdb5d-195e-4f34-81aa-6135705317df",
-                ],
-                "chain_parameters": {"correlation": correlation},
-            }
-        }
-        if spi:
-            dict_to_neutron["port_chain"]["chain_id"] = spi
-
-        # call the VIM connector
-        if sfc_encap is None:
-            dict_to_neutron["port_chain"]["chain_parameters"] = {"correlation": "mpls"}
-            if spi is None:
-                result = self.vimconn.new_sfp(
-                    name, classifications, sfs, sfc_encap=False
-                )
-            else:
-                result = self.vimconn.new_sfp(
-                    name, classifications, sfs, sfc_encap=False, spi=spi
-                )
-        else:
-            if spi is None:
-                result = self.vimconn.new_sfp(name, classifications, sfs, sfc_encap)
-            else:
-                result = self.vimconn.new_sfp(
-                    name, classifications, sfs, sfc_encap, spi
-                )
-
-        # assert that the VIM connector made the expected call to OpenStack
-        create_sfc_port_chain.assert_called_with(dict_to_neutron)
-        # assert that the VIM connector had the expected result / return value
-        self.assertEqual(result, dict_from_neutron["port_chain"]["id"])
-
-    def _test_new_classification(self, create_sfc_flow_classifier, ctype):
-        # input to VIM connector
-        name = "osm_classification"
-        definition = {
-            "ethertype": "IPv4",
-            "logical_source_port": "aaab0ab0-1452-4636-bb3b-11dca833fa2b",
-            "protocol": "tcp",
-            "source_ip_prefix": "192.168.2.0/24",
-            "source_port_range_max": 99,
-            "source_port_range_min": 50,
-        }
-
-        # what OpenStack is assumed to respond (patch OpenStack"s return value)
-        dict_from_neutron = {"flow_classifier": copy.copy(definition)}
-        dict_from_neutron["flow_classifier"][
-            "id"
-        ] = "7735ec2c-fddf-4130-9712-32ed2ab6a372"
-        dict_from_neutron["flow_classifier"]["name"] = name
-        dict_from_neutron["flow_classifier"]["description"] = ""
-        dict_from_neutron["flow_classifier"][
-            "tenant_id"
-        ] = "130b1e97-b0f1-40a8-8804-b6ad9b8c3e0c"
-        dict_from_neutron["flow_classifier"][
-            "project_id"
-        ] = "130b1e97-b0f1-40a8-8804-b6ad9b8c3e0c"
-        create_sfc_flow_classifier.return_value = dict_from_neutron
-
-        # what the VIM connector is expected to
-        # send to OpenStack based on the input
-        dict_to_neutron = {"flow_classifier": copy.copy(definition)}
-        dict_to_neutron["flow_classifier"]["name"] = "osm_classification"
-
-        # call the VIM connector
-        result = self.vimconn.new_classification(name, ctype, definition)
-
-        # assert that the VIM connector made the expected call to OpenStack
-        create_sfc_flow_classifier.assert_called_with(dict_to_neutron)
-        # assert that the VIM connector had the expected result / return value
-        self.assertEqual(result, dict_from_neutron["flow_classifier"]["id"])
-
-    @mock.patch.object(Client, "create_sfc_flow_classifier")
-    def test_new_classification(self, create_sfc_flow_classifier):
-        self._test_new_classification(
-            create_sfc_flow_classifier, "legacy_flow_classifier"
-        )
-
-    @mock.patch.object(Client, "create_sfc_flow_classifier")
-    def test_new_classification_unsupported_type(self, create_sfc_flow_classifier):
-        self.assertRaises(
-            vimconn.VimConnNotSupportedException,
-            self._test_new_classification,
-            create_sfc_flow_classifier,
-            "h265",
-        )
-
-    @mock.patch.object(Client, "create_sfc_port_pair")
-    def test_new_sfi_with_sfc_encap(self, create_sfc_port_pair):
-        self._test_new_sfi(create_sfc_port_pair, True)
-
-    @mock.patch.object(Client, "create_sfc_port_pair")
-    def test_new_sfi_without_sfc_encap(self, create_sfc_port_pair):
-        self._test_new_sfi(create_sfc_port_pair, False)
-
-    @mock.patch.object(Client, "create_sfc_port_pair")
-    def test_new_sfi_default_sfc_encap(self, create_sfc_port_pair):
-        self._test_new_sfi(create_sfc_port_pair, None)
-
-    @mock.patch.object(Client, "create_sfc_port_pair")
-    def test_new_sfi_bad_ingress_ports(self, create_sfc_port_pair):
-        ingress_ports = [
-            "5311c75d-d718-4369-bbda-cdcc6da60fcc",
-            "a0273f64-82c9-11e7-b08f-6328e53f0fa7",
-        ]
-        self.assertRaises(
-            vimconn.VimConnNotSupportedException,
-            self._test_new_sfi,
-            create_sfc_port_pair,
-            True,
-            ingress_ports=ingress_ports,
-        )
-        ingress_ports = []
-        self.assertRaises(
-            vimconn.VimConnNotSupportedException,
-            self._test_new_sfi,
-            create_sfc_port_pair,
-            True,
-            ingress_ports=ingress_ports,
-        )
-
-    @mock.patch.object(Client, "create_sfc_port_pair")
-    def test_new_sfi_bad_egress_ports(self, create_sfc_port_pair):
-        egress_ports = [
-            "230cdf1b-de37-4891-bc07-f9010cf1f967",
-            "b41228fe-82c9-11e7-9b44-17504174320b",
-        ]
-        self.assertRaises(
-            vimconn.VimConnNotSupportedException,
-            self._test_new_sfi,
-            create_sfc_port_pair,
-            True,
-            egress_ports=egress_ports,
-        )
-        egress_ports = []
-        self.assertRaises(
-            vimconn.VimConnNotSupportedException,
-            self._test_new_sfi,
-            create_sfc_port_pair,
-            True,
-            egress_ports=egress_ports,
-        )
-
-    @mock.patch.object(vimconnector, "get_sfi")
-    @mock.patch.object(Client, "create_sfc_port_pair_group")
-    def test_new_sf(self, create_sfc_port_pair_group, get_sfi):
-        get_sfi.return_value = {"sfc_encap": True}
-        self._test_new_sf(create_sfc_port_pair_group)
-
-    @mock.patch.object(vimconnector, "get_sfi")
-    @mock.patch.object(Client, "create_sfc_port_pair_group")
-    def test_new_sf_inconsistent_sfc_encap(self, create_sfc_port_pair_group, get_sfi):
-        get_sfi.return_value = {"sfc_encap": "nsh"}
-        self.assertRaises(
-            vimconn.VimConnNotSupportedException,
-            self._test_new_sf,
-            create_sfc_port_pair_group,
-        )
-
-    @mock.patch.object(Client, "create_sfc_port_chain")
-    def test_new_sfp_with_sfc_encap(self, create_sfc_port_chain):
-        self._test_new_sfp(create_sfc_port_chain, True, None)
-
-    @mock.patch.object(Client, "create_sfc_port_chain")
-    def test_new_sfp_without_sfc_encap(self, create_sfc_port_chain):
-        self._test_new_sfp(create_sfc_port_chain, None, None)
-        self._test_new_sfp(create_sfc_port_chain, None, 25)
-
-    @mock.patch.object(Client, "create_sfc_port_chain")
-    def test_new_sfp_default_sfc_encap(self, create_sfc_port_chain):
-        self._test_new_sfp(create_sfc_port_chain, None, None)
-
-    @mock.patch.object(Client, "create_sfc_port_chain")
-    def test_new_sfp_with_sfc_encap_spi(self, create_sfc_port_chain):
-        self._test_new_sfp(create_sfc_port_chain, True, 25)
-
-    @mock.patch.object(Client, "create_sfc_port_chain")
-    def test_new_sfp_default_sfc_encap_spi(self, create_sfc_port_chain):
-        self._test_new_sfp(create_sfc_port_chain, None, 25)
-
-    @mock.patch.object(Client, "list_sfc_flow_classifiers")
-    def test_get_classification_list(self, list_sfc_flow_classifiers):
-        # what OpenStack is assumed to return to the VIM connector
-        list_sfc_flow_classifiers.return_value = {
-            "flow_classifiers": [
-                {
-                    "source_port_range_min": 2000,
-                    "destination_ip_prefix": "192.168.3.0/24",
-                    "protocol": "udp",
-                    "description": "",
-                    "ethertype": "IPv4",
-                    "l7_parameters": {},
-                    "source_port_range_max": 2000,
-                    "destination_port_range_min": 3000,
-                    "source_ip_prefix": "192.168.2.0/24",
-                    "logical_destination_port": None,
-                    "tenant_id": "8f3019ef06374fa880a0144ad4bc1d7b",
-                    "destination_port_range_max": None,
-                    "project_id": "8f3019ef06374fa880a0144ad4bc1d7b",
-                    "logical_source_port": "aaab0ab0-1452-4636-bb3b-11dca833fa2b",
-                    "id": "22198366-d4e8-4d6b-b4d2-637d5d6cbb7d",
-                    "name": "fc1",
-                }
-            ]
-        }
-
-        # call the VIM connector
-        filter_dict = {"protocol": "tcp", "ethertype": "IPv4"}
-        result = self.vimconn.get_classification_list(filter_dict.copy())
-
-        # assert that VIM connector called OpenStack with the expected filter
-        list_sfc_flow_classifiers.assert_called_with(**filter_dict)
-        # assert that the VIM connector successfully
-        # translated and returned the OpenStack result
-        self.assertEqual(
-            result,
-            [
-                {
-                    "id": "22198366-d4e8-4d6b-b4d2-637d5d6cbb7d",
-                    "name": "fc1",
-                    "description": "",
-                    "project_id": "8f3019ef06374fa880a0144ad4bc1d7b",
-                    "tenant_id": "8f3019ef06374fa880a0144ad4bc1d7b",
-                    "ctype": "legacy_flow_classifier",
-                    "definition": {
-                        "source_port_range_min": 2000,
-                        "destination_ip_prefix": "192.168.3.0/24",
-                        "protocol": "udp",
-                        "ethertype": "IPv4",
-                        "l7_parameters": {},
-                        "source_port_range_max": 2000,
-                        "destination_port_range_min": 3000,
-                        "source_ip_prefix": "192.168.2.0/24",
-                        "logical_destination_port": None,
-                        "destination_port_range_max": None,
-                        "logical_source_port": "aaab0ab0-1452-4636-bb3b-11dca833fa2b",
-                    },
-                }
-            ],
-        )
-
-    def _test_get_sfi_list(self, list_port_pair, correlation, sfc_encap):
-        # what OpenStack is assumed to return to the VIM connector
-        list_port_pair.return_value = {
-            "port_pairs": [
-                {
-                    "ingress": "5311c75d-d718-4369-bbda-cdcc6da60fcc",
-                    "description": "",
-                    "tenant_id": "8f3019ef06374fa880a0144ad4bc1d7b",
-                    "egress": "5311c75d-d718-4369-bbda-cdcc6da60fcc",
-                    "service_function_parameters": {"correlation": correlation},
-                    "project_id": "8f3019ef06374fa880a0144ad4bc1d7b",
-                    "id": "c121ebdd-7f2d-4213-b933-3325298a6966",
-                    "name": "osm_sfi",
-                }
-            ]
-        }
-
-        # call the VIM connector
-        filter_dict = {"name": "osm_sfi", "description": ""}
-        result = self.vimconn.get_sfi_list(filter_dict.copy())
-
-        # assert that VIM connector called OpenStack with the expected filter
-        list_port_pair.assert_called_with(**filter_dict)
-        # assert that the VIM connector successfully
-        # translated and returned the OpenStack result
-        self.assertEqual(
-            result,
-            [
-                {
-                    "ingress_ports": ["5311c75d-d718-4369-bbda-cdcc6da60fcc"],
-                    "description": "",
-                    "tenant_id": "8f3019ef06374fa880a0144ad4bc1d7b",
-                    "egress_ports": ["5311c75d-d718-4369-bbda-cdcc6da60fcc"],
-                    "sfc_encap": sfc_encap,
-                    "project_id": "8f3019ef06374fa880a0144ad4bc1d7b",
-                    "id": "c121ebdd-7f2d-4213-b933-3325298a6966",
-                    "name": "osm_sfi",
-                }
-            ],
-        )
-
-    @mock.patch.object(Client, "list_sfc_port_pairs")
-    def test_get_sfi_list_with_sfc_encap(self, list_sfc_port_pairs):
-        self._test_get_sfi_list(list_sfc_port_pairs, "nsh", True)
-
-    @mock.patch.object(Client, "list_sfc_port_pairs")
-    def test_get_sfi_list_without_sfc_encap(self, list_sfc_port_pairs):
-        self._test_get_sfi_list(list_sfc_port_pairs, None, False)
-
-    @mock.patch.object(Client, "list_sfc_port_pair_groups")
-    def test_get_sf_list(self, list_sfc_port_pair_groups):
-        # what OpenStack is assumed to return to the VIM connector
-        list_sfc_port_pair_groups.return_value = {
-            "port_pair_groups": [
-                {
-                    "port_pairs": [
-                        "08fbdbb0-82d6-11e7-ad95-9bb52fbec2f2",
-                        "0d63799c-82d6-11e7-8deb-a746bb3ae9f5",
-                    ],
-                    "description": "",
-                    "tenant_id": "8f3019ef06374fa880a0144ad4bc1d7b",
-                    "port_pair_group_parameters": {},
-                    "project_id": "8f3019ef06374fa880a0144ad4bc1d7b",
-                    "id": "f4a0bde8-82d5-11e7-90e1-a72b762fa27f",
-                    "name": "osm_sf",
-                }
-            ]
-        }
-
-        # call the VIM connector
-        filter_dict = {"name": "osm_sf", "description": ""}
-        result = self.vimconn.get_sf_list(filter_dict.copy())
-
-        # assert that VIM connector called OpenStack with the expected filter
-        list_sfc_port_pair_groups.assert_called_with(**filter_dict)
-        # assert that the VIM connector successfully
-        # translated and returned the OpenStack result
-        self.assertEqual(
-            result,
-            [
-                {
-                    "sfis": [
-                        "08fbdbb0-82d6-11e7-ad95-9bb52fbec2f2",
-                        "0d63799c-82d6-11e7-8deb-a746bb3ae9f5",
-                    ],
-                    "description": "",
-                    "tenant_id": "8f3019ef06374fa880a0144ad4bc1d7b",
-                    "project_id": "8f3019ef06374fa880a0144ad4bc1d7b",
-                    "id": "f4a0bde8-82d5-11e7-90e1-a72b762fa27f",
-                    "name": "osm_sf",
-                }
-            ],
-        )
-
-    def _test_get_sfp_list(self, list_sfc_port_chains, correlation, sfc_encap):
-        # what OpenStack is assumed to return to the VIM connector
-        list_sfc_port_chains.return_value = {
-            "port_chains": [
-                {
-                    "port_pair_groups": [
-                        "7d8e3bf8-82d6-11e7-a032-8ff028839d25",
-                        "7dc9013e-82d6-11e7-a5a6-a3a8d78a5518",
-                    ],
-                    "flow_classifiers": [
-                        "1333c2f4-82d7-11e7-a5df-9327f33d104e",
-                        "1387ab44-82d7-11e7-9bb0-476337183905",
-                    ],
-                    "description": "",
-                    "tenant_id": "8f3019ef06374fa880a0144ad4bc1d7b",
-                    "chain_parameters": {"correlation": correlation},
-                    "chain_id": 40,
-                    "project_id": "8f3019ef06374fa880a0144ad4bc1d7b",
-                    "id": "821bc9be-82d7-11e7-8ce3-23a08a27ab47",
-                    "name": "osm_sfp",
-                }
-            ]
-        }
-
-        # call the VIM connector
-        filter_dict = {"name": "osm_sfp", "description": ""}
-        result = self.vimconn.get_sfp_list(filter_dict.copy())
-
-        # assert that VIM connector called OpenStack with the expected filter
-        list_sfc_port_chains.assert_called_with(**filter_dict)
-        # assert that the VIM connector successfully
-        # translated and returned the OpenStack result
-        self.assertEqual(
-            result,
-            [
-                {
-                    "service_functions": [
-                        "7d8e3bf8-82d6-11e7-a032-8ff028839d25",
-                        "7dc9013e-82d6-11e7-a5a6-a3a8d78a5518",
-                    ],
-                    "classifications": [
-                        "1333c2f4-82d7-11e7-a5df-9327f33d104e",
-                        "1387ab44-82d7-11e7-9bb0-476337183905",
-                    ],
-                    "description": "",
-                    "tenant_id": "8f3019ef06374fa880a0144ad4bc1d7b",
-                    "project_id": "8f3019ef06374fa880a0144ad4bc1d7b",
-                    "sfc_encap": sfc_encap,
-                    "spi": 40,
-                    "id": "821bc9be-82d7-11e7-8ce3-23a08a27ab47",
-                    "name": "osm_sfp",
-                }
-            ],
-        )
-
-    @mock.patch.object(Client, "list_sfc_port_chains")
-    def test_get_sfp_list_with_sfc_encap(self, list_sfc_port_chains):
-        self._test_get_sfp_list(list_sfc_port_chains, "nsh", True)
-
-    @mock.patch.object(Client, "list_sfc_port_chains")
-    def test_get_sfp_list_without_sfc_encap(self, list_sfc_port_chains):
-        self._test_get_sfp_list(list_sfc_port_chains, None, False)
-
-    @mock.patch.object(Client, "list_sfc_flow_classifiers")
-    def test_get_classification(self, list_sfc_flow_classifiers):
-        # what OpenStack is assumed to return to the VIM connector
-        list_sfc_flow_classifiers.return_value = {
-            "flow_classifiers": [
-                {
-                    "source_port_range_min": 2000,
-                    "destination_ip_prefix": "192.168.3.0/24",
-                    "protocol": "udp",
-                    "description": "",
-                    "ethertype": "IPv4",
-                    "l7_parameters": {},
-                    "source_port_range_max": 2000,
-                    "destination_port_range_min": 3000,
-                    "source_ip_prefix": "192.168.2.0/24",
-                    "logical_destination_port": None,
-                    "tenant_id": "8f3019ef06374fa880a0144ad4bc1d7b",
-                    "destination_port_range_max": None,
-                    "project_id": "8f3019ef06374fa880a0144ad4bc1d7b",
-                    "logical_source_port": "aaab0ab0-1452-4636-bb3b-11dca833fa2b",
-                    "id": "22198366-d4e8-4d6b-b4d2-637d5d6cbb7d",
-                    "name": "fc1",
-                }
-            ]
-        }
-
-        # call the VIM connector
-        result = self.vimconn.get_classification("22198366-d4e8-4d6b-b4d2-637d5d6cbb7d")
-
-        # assert that VIM connector called OpenStack with the expected filter
-        list_sfc_flow_classifiers.assert_called_with(
-            id="22198366-d4e8-4d6b-b4d2-637d5d6cbb7d"
-        )
-        # assert that VIM connector successfully returned the OpenStack result
-        self.assertEqual(
-            result,
-            {
-                "id": "22198366-d4e8-4d6b-b4d2-637d5d6cbb7d",
-                "name": "fc1",
-                "description": "",
-                "project_id": "8f3019ef06374fa880a0144ad4bc1d7b",
-                "tenant_id": "8f3019ef06374fa880a0144ad4bc1d7b",
-                "ctype": "legacy_flow_classifier",
-                "definition": {
-                    "source_port_range_min": 2000,
-                    "destination_ip_prefix": "192.168.3.0/24",
-                    "protocol": "udp",
-                    "ethertype": "IPv4",
-                    "l7_parameters": {},
-                    "source_port_range_max": 2000,
-                    "destination_port_range_min": 3000,
-                    "source_ip_prefix": "192.168.2.0/24",
-                    "logical_destination_port": None,
-                    "destination_port_range_max": None,
-                    "logical_source_port": "aaab0ab0-1452-4636-bb3b-11dca833fa2b",
-                },
-            },
-        )
-
-    @mock.patch.object(Client, "list_sfc_flow_classifiers")
-    def test_get_classification_many_results(self, list_sfc_flow_classifiers):
-        # what OpenStack is assumed to return to the VIM connector
-        list_sfc_flow_classifiers.return_value = {
-            "flow_classifiers": [
-                {
-                    "source_port_range_min": 2000,
-                    "destination_ip_prefix": "192.168.3.0/24",
-                    "protocol": "udp",
-                    "description": "",
-                    "ethertype": "IPv4",
-                    "l7_parameters": {},
-                    "source_port_range_max": 2000,
-                    "destination_port_range_min": 3000,
-                    "source_ip_prefix": "192.168.2.0/24",
-                    "logical_destination_port": None,
-                    "tenant_id": "8f3019ef06374fa880a0144ad4bc1d7b",
-                    "destination_port_range_max": None,
-                    "project_id": "8f3019ef06374fa880a0144ad4bc1d7b",
-                    "logical_source_port": "aaab0ab0-1452-4636-bb3b-11dca833fa2b",
-                    "id": "22198366-d4e8-4d6b-b4d2-637d5d6cbb7d",
-                    "name": "fc1",
-                },
-                {
-                    "source_port_range_min": 1000,
-                    "destination_ip_prefix": "192.168.3.0/24",
-                    "protocol": "udp",
-                    "description": "",
-                    "ethertype": "IPv4",
-                    "l7_parameters": {},
-                    "source_port_range_max": 1000,
-                    "destination_port_range_min": 3000,
-                    "source_ip_prefix": "192.168.2.0/24",
-                    "logical_destination_port": None,
-                    "tenant_id": "8f3019ef06374fa880a0144ad4bc1d7b",
-                    "destination_port_range_max": None,
-                    "project_id": "8f3019ef06374fa880a0144ad4bc1d7b",
-                    "logical_source_port": "aaab0ab0-1452-4636-bb3b-11dca833fa2b",
-                    "id": "3196bafc-82dd-11e7-a205-9bf6c14b0721",
-                    "name": "fc2",
-                },
-            ]
-        }
-
-        # call the VIM connector
-        self.assertRaises(
-            vimconn.VimConnConflictException,
-            self.vimconn.get_classification,
-            "3196bafc-82dd-11e7-a205-9bf6c14b0721",
-        )
-
-        # assert the VIM connector called OpenStack with the expected filter
-        list_sfc_flow_classifiers.assert_called_with(
-            id="3196bafc-82dd-11e7-a205-9bf6c14b0721"
-        )
-
-    @mock.patch.object(Client, "list_sfc_flow_classifiers")
-    def test_get_classification_no_results(self, list_sfc_flow_classifiers):
-        # what OpenStack is assumed to return to the VIM connector
-        list_sfc_flow_classifiers.return_value = {"flow_classifiers": []}
-
-        # call the VIM connector
-        self.assertRaises(
-            vimconn.VimConnNotFoundException,
-            self.vimconn.get_classification,
-            "3196bafc-82dd-11e7-a205-9bf6c14b0721",
-        )
-
-        # assert the VIM connector called OpenStack with the expected filter
-        list_sfc_flow_classifiers.assert_called_with(
-            id="3196bafc-82dd-11e7-a205-9bf6c14b0721"
-        )
-
-    @mock.patch.object(Client, "list_sfc_port_pairs")
-    def test_get_sfi(self, list_sfc_port_pairs):
-        # what OpenStack is assumed to return to the VIM connector
-        list_sfc_port_pairs.return_value = {
-            "port_pairs": [
-                {
-                    "ingress": "5311c75d-d718-4369-bbda-cdcc6da60fcc",
-                    "description": "",
-                    "tenant_id": "8f3019ef06374fa880a0144ad4bc1d7b",
-                    "egress": "5311c75d-d718-4369-bbda-cdcc6da60fcc",
-                    "service_function_parameters": {"correlation": "nsh"},
-                    "project_id": "8f3019ef06374fa880a0144ad4bc1d7b",
-                    "id": "c121ebdd-7f2d-4213-b933-3325298a6966",
-                    "name": "osm_sfi1",
-                },
-            ]
-        }
-
-        # call the VIM connector
-        result = self.vimconn.get_sfi("c121ebdd-7f2d-4213-b933-3325298a6966")
-
-        # assert the VIM connector called OpenStack with the expected filter
-        list_sfc_port_pairs.assert_called_with(
-            id="c121ebdd-7f2d-4213-b933-3325298a6966"
-        )
-        # assert the VIM connector successfully returned the OpenStack result
-        self.assertEqual(
-            result,
-            {
-                "ingress_ports": ["5311c75d-d718-4369-bbda-cdcc6da60fcc"],
-                "egress_ports": ["5311c75d-d718-4369-bbda-cdcc6da60fcc"],
-                "sfc_encap": True,
-                "description": "",
-                "tenant_id": "8f3019ef06374fa880a0144ad4bc1d7b",
-                "project_id": "8f3019ef06374fa880a0144ad4bc1d7b",
-                "id": "c121ebdd-7f2d-4213-b933-3325298a6966",
-                "name": "osm_sfi1",
-            },
-        )
-
-    @mock.patch.object(Client, "list_sfc_port_pairs")
-    def test_get_sfi_many_results(self, list_sfc_port_pairs):
-        # what OpenStack is assumed to return to the VIM connector
-        list_sfc_port_pairs.return_value = {
-            "port_pairs": [
-                {
-                    "ingress": "5311c75d-d718-4369-bbda-cdcc6da60fcc",
-                    "description": "",
-                    "tenant_id": "8f3019ef06374fa880a0144ad4bc1d7b",
-                    "egress": "5311c75d-d718-4369-bbda-cdcc6da60fcc",
-                    "service_function_parameters": {"correlation": "nsh"},
-                    "project_id": "8f3019ef06374fa880a0144ad4bc1d7b",
-                    "id": "c121ebdd-7f2d-4213-b933-3325298a6966",
-                    "name": "osm_sfi1",
-                },
-                {
-                    "ingress": "5311c75d-d718-4369-bbda-cdcc6da60fcc",
-                    "description": "",
-                    "tenant_id": "8f3019ef06374fa880a0144ad4bc1d7b",
-                    "egress": "5311c75d-d718-4369-bbda-cdcc6da60fcc",
-                    "service_function_parameters": {"correlation": "nsh"},
-                    "project_id": "8f3019ef06374fa880a0144ad4bc1d7b",
-                    "id": "c0436d92-82db-11e7-8f9c-5fa535f1261f",
-                    "name": "osm_sfi2",
-                },
-            ]
-        }
-
-        # call the VIM connector
-        self.assertRaises(
-            vimconn.VimConnConflictException,
-            self.vimconn.get_sfi,
-            "c0436d92-82db-11e7-8f9c-5fa535f1261f",
-        )
-
-        # assert that VIM connector called OpenStack with the expected filter
-        list_sfc_port_pairs.assert_called_with(
-            id="c0436d92-82db-11e7-8f9c-5fa535f1261f"
-        )
-
-    @mock.patch.object(Client, "list_sfc_port_pairs")
-    def test_get_sfi_no_results(self, list_sfc_port_pairs):
-        # what OpenStack is assumed to return to the VIM connector
-        list_sfc_port_pairs.return_value = {"port_pairs": []}
-
-        # call the VIM connector
-        self.assertRaises(
-            vimconn.VimConnNotFoundException,
-            self.vimconn.get_sfi,
-            "b22892fc-82d9-11e7-ae85-0fea6a3b3757",
-        )
-
-        # assert that VIM connector called OpenStack with the expected filter
-        list_sfc_port_pairs.assert_called_with(
-            id="b22892fc-82d9-11e7-ae85-0fea6a3b3757"
-        )
-
-    @mock.patch.object(Client, "list_sfc_port_pair_groups")
-    def test_get_sf(self, list_sfc_port_pair_groups):
-        # what OpenStack is assumed to return to the VIM connector
-        list_sfc_port_pair_groups.return_value = {
-            "port_pair_groups": [
-                {
-                    "port_pairs": ["08fbdbb0-82d6-11e7-ad95-9bb52fbec2f2"],
-                    "description": "",
-                    "tenant_id": "8f3019ef06374fa880a0144ad4bc1d7b",
-                    "port_pair_group_parameters": {},
-                    "project_id": "8f3019ef06374fa880a0144ad4bc1d7b",
-                    "id": "aabba8a6-82d9-11e7-a18a-d3c7719b742d",
-                    "name": "osm_sf1",
-                }
-            ]
-        }
-
-        # call the VIM connector
-        result = self.vimconn.get_sf("b22892fc-82d9-11e7-ae85-0fea6a3b3757")
-
-        # assert that VIM connector called OpenStack with the expected filter
-        list_sfc_port_pair_groups.assert_called_with(
-            id="b22892fc-82d9-11e7-ae85-0fea6a3b3757"
-        )
-        # assert that VIM connector successfully returned the OpenStack result
-        self.assertEqual(
-            result,
-            {
-                "description": "",
-                "tenant_id": "8f3019ef06374fa880a0144ad4bc1d7b",
-                "project_id": "8f3019ef06374fa880a0144ad4bc1d7b",
-                "sfis": ["08fbdbb0-82d6-11e7-ad95-9bb52fbec2f2"],
-                "id": "aabba8a6-82d9-11e7-a18a-d3c7719b742d",
-                "name": "osm_sf1",
-            },
-        )
-
-    @mock.patch.object(Client, "list_sfc_port_pair_groups")
-    def test_get_sf_many_results(self, list_sfc_port_pair_groups):
-        # what OpenStack is assumed to return to the VIM connector
-        list_sfc_port_pair_groups.return_value = {
-            "port_pair_groups": [
-                {
-                    "port_pairs": ["08fbdbb0-82d6-11e7-ad95-9bb52fbec2f2"],
-                    "description": "",
-                    "tenant_id": "8f3019ef06374fa880a0144ad4bc1d7b",
-                    "port_pair_group_parameters": {},
-                    "project_id": "8f3019ef06374fa880a0144ad4bc1d7b",
-                    "id": "aabba8a6-82d9-11e7-a18a-d3c7719b742d",
-                    "name": "osm_sf1",
-                },
-                {
-                    "port_pairs": ["0d63799c-82d6-11e7-8deb-a746bb3ae9f5"],
-                    "description": "",
-                    "tenant_id": "8f3019ef06374fa880a0144ad4bc1d7b",
-                    "port_pair_group_parameters": {},
-                    "project_id": "8f3019ef06374fa880a0144ad4bc1d7b",
-                    "id": "b22892fc-82d9-11e7-ae85-0fea6a3b3757",
-                    "name": "osm_sf2",
-                },
-            ]
-        }
-
-        # call the VIM connector
-        self.assertRaises(
-            vimconn.VimConnConflictException,
-            self.vimconn.get_sf,
-            "b22892fc-82d9-11e7-ae85-0fea6a3b3757",
-        )
-
-        # assert that VIM connector called OpenStack with the expected filter
-        list_sfc_port_pair_groups.assert_called_with(
-            id="b22892fc-82d9-11e7-ae85-0fea6a3b3757"
-        )
-
-    @mock.patch.object(Client, "list_sfc_port_pair_groups")
-    def test_get_sf_no_results(self, list_sfc_port_pair_groups):
-        # what OpenStack is assumed to return to the VIM connector
-        list_sfc_port_pair_groups.return_value = {"port_pair_groups": []}
-
-        # call the VIM connector
-        self.assertRaises(
-            vimconn.VimConnNotFoundException,
-            self.vimconn.get_sf,
-            "b22892fc-82d9-11e7-ae85-0fea6a3b3757",
-        )
-
-        # assert that VIM connector called OpenStack with the expected filter
-        list_sfc_port_pair_groups.assert_called_with(
-            id="b22892fc-82d9-11e7-ae85-0fea6a3b3757"
-        )
-
-    @mock.patch.object(Client, "list_sfc_port_chains")
-    def test_get_sfp(self, list_sfc_port_chains):
-        # what OpenStack is assumed to return to the VIM connector
-        list_sfc_port_chains.return_value = {
-            "port_chains": [
-                {
-                    "port_pair_groups": ["7d8e3bf8-82d6-11e7-a032-8ff028839d25"],
-                    "flow_classifiers": ["1333c2f4-82d7-11e7-a5df-9327f33d104e"],
-                    "description": "",
-                    "tenant_id": "8f3019ef06374fa880a0144ad4bc1d7b",
-                    "chain_parameters": {"correlation": "nsh"},
-                    "chain_id": 40,
-                    "project_id": "8f3019ef06374fa880a0144ad4bc1d7b",
-                    "id": "821bc9be-82d7-11e7-8ce3-23a08a27ab47",
-                    "name": "osm_sfp1",
-                }
-            ]
-        }
-
-        # call the VIM connector
-        result = self.vimconn.get_sfp("821bc9be-82d7-11e7-8ce3-23a08a27ab47")
-
-        # assert that VIM connector called OpenStack with the expected filter
-        list_sfc_port_chains.assert_called_with(
-            id="821bc9be-82d7-11e7-8ce3-23a08a27ab47"
-        )
-        # assert that VIM connector successfully returned the OpenStack result
-        self.assertEqual(
-            result,
-            {
-                "service_functions": ["7d8e3bf8-82d6-11e7-a032-8ff028839d25"],
-                "classifications": ["1333c2f4-82d7-11e7-a5df-9327f33d104e"],
-                "description": "",
-                "tenant_id": "8f3019ef06374fa880a0144ad4bc1d7b",
-                "project_id": "8f3019ef06374fa880a0144ad4bc1d7b",
-                "sfc_encap": True,
-                "spi": 40,
-                "id": "821bc9be-82d7-11e7-8ce3-23a08a27ab47",
-                "name": "osm_sfp1",
-            },
-        )
-
-    @mock.patch.object(Client, "list_sfc_port_chains")
-    def test_get_sfp_many_results(self, list_sfc_port_chains):
-        # what OpenStack is assumed to return to the VIM connector
-        list_sfc_port_chains.return_value = {
-            "port_chains": [
-                {
-                    "port_pair_groups": ["7d8e3bf8-82d6-11e7-a032-8ff028839d25"],
-                    "flow_classifiers": ["1333c2f4-82d7-11e7-a5df-9327f33d104e"],
-                    "description": "",
-                    "tenant_id": "8f3019ef06374fa880a0144ad4bc1d7b",
-                    "chain_parameters": {"correlation": "nsh"},
-                    "chain_id": 40,
-                    "project_id": "8f3019ef06374fa880a0144ad4bc1d7b",
-                    "id": "821bc9be-82d7-11e7-8ce3-23a08a27ab47",
-                    "name": "osm_sfp1",
-                },
-                {
-                    "port_pair_groups": ["7d8e3bf8-82d6-11e7-a032-8ff028839d25"],
-                    "flow_classifiers": ["1333c2f4-82d7-11e7-a5df-9327f33d104e"],
-                    "description": "",
-                    "tenant_id": "8f3019ef06374fa880a0144ad4bc1d7b",
-                    "chain_parameters": {"correlation": "nsh"},
-                    "chain_id": 50,
-                    "project_id": "8f3019ef06374fa880a0144ad4bc1d7b",
-                    "id": "5d002f38-82de-11e7-a770-f303f11ce66a",
-                    "name": "osm_sfp2",
-                },
-            ]
-        }
-
-        # call the VIM connector
-        self.assertRaises(
-            vimconn.VimConnConflictException,
-            self.vimconn.get_sfp,
-            "5d002f38-82de-11e7-a770-f303f11ce66a",
-        )
-
-        # assert that VIM connector called OpenStack with the expected filter
-        list_sfc_port_chains.assert_called_with(
-            id="5d002f38-82de-11e7-a770-f303f11ce66a"
-        )
-
-    @mock.patch.object(Client, "list_sfc_port_chains")
-    def test_get_sfp_no_results(self, list_sfc_port_chains):
-        # what OpenStack is assumed to return to the VIM connector
-        list_sfc_port_chains.return_value = {"port_chains": []}
-
-        # call the VIM connector
-        self.assertRaises(
-            vimconn.VimConnNotFoundException,
-            self.vimconn.get_sfp,
-            "5d002f38-82de-11e7-a770-f303f11ce66a",
-        )
-
-        # assert that VIM connector called OpenStack with the expected filter
-        list_sfc_port_chains.assert_called_with(
-            id="5d002f38-82de-11e7-a770-f303f11ce66a"
-        )
-
-    @mock.patch.object(Client, "delete_sfc_flow_classifier")
-    def test_delete_classification(self, delete_sfc_flow_classifier):
-        result = self.vimconn.delete_classification(
-            "638f957c-82df-11e7-b7c8-132706021464"
-        )
-        delete_sfc_flow_classifier.assert_called_with(
-            "638f957c-82df-11e7-b7c8-132706021464"
-        )
-        self.assertEqual(result, "638f957c-82df-11e7-b7c8-132706021464")
-
-    @mock.patch.object(Client, "delete_sfc_port_pair")
-    def test_delete_sfi(self, delete_sfc_port_pair):
-        result = self.vimconn.delete_sfi("638f957c-82df-11e7-b7c8-132706021464")
-        delete_sfc_port_pair.assert_called_with("638f957c-82df-11e7-b7c8-132706021464")
-        self.assertEqual(result, "638f957c-82df-11e7-b7c8-132706021464")
-
-    @mock.patch.object(Client, "delete_sfc_port_pair_group")
-    def test_delete_sf(self, delete_sfc_port_pair_group):
-        result = self.vimconn.delete_sf("638f957c-82df-11e7-b7c8-132706021464")
-        delete_sfc_port_pair_group.assert_called_with(
-            "638f957c-82df-11e7-b7c8-132706021464"
-        )
-        self.assertEqual(result, "638f957c-82df-11e7-b7c8-132706021464")
-
-    @mock.patch.object(Client, "delete_sfc_port_chain")
-    def test_delete_sfp(self, delete_sfc_port_chain):
-        result = self.vimconn.delete_sfp("638f957c-82df-11e7-b7c8-132706021464")
-        delete_sfc_port_chain.assert_called_with("638f957c-82df-11e7-b7c8-132706021464")
-        self.assertEqual(result, "638f957c-82df-11e7-b7c8-132706021464")
-
-
 class Status:
     def __init__(self, s):
         self.status = s
@@ -5946,6 +4921,7 @@
         for mocking in mocks:
             mocking.assert_not_called()
 
+    @patch.object(vimconnector, "process_vio_numa_nodes", new_callable=CopyingMock())
     @patch.object(vimconnector, "process_numa_memory", new_callable=CopyingMock())
     @patch.object(vimconnector, "process_numa_vcpu", new_callable=CopyingMock())
     @patch.object(
@@ -5962,6 +4938,7 @@
         mock_process_numa_paired_threads,
         mock_process_numa_vcpu,
         mock_process_numa_memory,
+        mock_process_vio_numa_nodes,
     ):
         """Process numa parameters, id, memory, vcpu exist, vim type is VIO,
         paired-threads, cores, threads do not exist in numa.
@@ -5970,21 +4947,17 @@
             {"id": 0, "memory": 1, "vcpu": [1, 3]},
             {"id": 1, "memory": 2, "vcpu": [2]},
         ]
-        vcpus = 3
         extra_specs = {}
         expected_extra_specs = {
             "hw:numa_nodes": "2",
-            "vmware:extra_config": '{"numa.nodeAffinity":"0"}',
-            "vmware:latency_sensitivity_level": "high",
             "hw:cpu_sockets": "2",
         }
         self.vimconn.vim_type = "VIO"
-        result = self.vimconn._process_numa_parameters_of_flavor(
-            numas, extra_specs, vcpus
-        )
-        self.assertEqual(result, vcpus)
+        self.vimconn._process_numa_parameters_of_flavor(numas, extra_specs)
+
         self.assertEqual(mock_process_numa_memory.call_count, 2)
         self.assertEqual(mock_process_numa_vcpu.call_count, 2)
+        mock_process_vio_numa_nodes.assert_called_once_with(2, {"hw:numa_nodes": "2"})
         _call_mock_process_numa_memory = mock_process_numa_memory.call_args_list
         self.assertEqual(
             _call_mock_process_numa_memory[0].args,
@@ -5993,8 +4966,6 @@
                 0,
                 {
                     "hw:numa_nodes": "2",
-                    "vmware:extra_config": '{"numa.nodeAffinity":"0"}',
-                    "vmware:latency_sensitivity_level": "high",
                 },
             ),
         )
@@ -6006,8 +4977,6 @@
                 {
                     "hw:cpu_sockets": "2",
                     "hw:numa_nodes": "2",
-                    "vmware:extra_config": '{"numa.nodeAffinity":"0"}',
-                    "vmware:latency_sensitivity_level": "high",
                 },
             ),
         )
@@ -6019,8 +4988,6 @@
                 0,
                 {
                     "hw:numa_nodes": "2",
-                    "vmware:extra_config": '{"numa.nodeAffinity":"0"}',
-                    "vmware:latency_sensitivity_level": "high",
                 },
             ),
         )
@@ -6032,8 +4999,6 @@
                 {
                     "hw:cpu_sockets": "2",
                     "hw:numa_nodes": "2",
-                    "vmware:extra_config": '{"numa.nodeAffinity":"0"}',
-                    "vmware:latency_sensitivity_level": "high",
                 },
             ),
         )
@@ -6046,6 +5011,7 @@
             ]
         )
 
+    @patch.object(vimconnector, "process_vio_numa_nodes", new_callable=CopyingMock())
     @patch.object(vimconnector, "process_numa_memory", new_callable=CopyingMock())
     @patch.object(vimconnector, "process_numa_vcpu", new_callable=CopyingMock())
     @patch.object(
@@ -6062,6 +5028,7 @@
         mock_process_numa_paired_threads,
         mock_process_numa_vcpu,
         mock_process_numa_memory,
+        mock_process_vio_numa_nodes,
     ):
         """Process numa parameters, id, memory, vcpu exist, vim type is openstack,
         paired-threads, cores, threads do not exist in numa.
@@ -6070,17 +5037,14 @@
             {"id": 0, "memory": 1, "vcpu": [1, 3]},
             {"id": 1, "memory": 2, "vcpu": [2]},
         ]
-        vcpus = 3
         extra_specs = {}
         expected_extra_specs = {
             "hw:numa_nodes": "2",
             "hw:cpu_sockets": "2",
         }
         self.vimconn.vim_type = "openstack"
-        result = self.vimconn._process_numa_parameters_of_flavor(
-            numas, extra_specs, vcpus
-        )
-        self.assertEqual(result, vcpus)
+        self.vimconn._process_numa_parameters_of_flavor(numas, extra_specs)
+
         self.assertEqual(mock_process_numa_memory.call_count, 2)
         self.assertEqual(mock_process_numa_vcpu.call_count, 2)
         _call_mock_process_numa_memory = mock_process_numa_memory.call_args_list
@@ -6126,6 +5090,7 @@
             ]
         )
 
+    @patch.object(vimconnector, "process_vio_numa_nodes", new_callable=CopyingMock())
     @patch.object(vimconnector, "process_numa_memory", new_callable=CopyingMock())
     @patch.object(vimconnector, "process_numa_vcpu", new_callable=CopyingMock())
     @patch.object(
@@ -6142,6 +5107,7 @@
         mock_process_numa_paired_threads,
         mock_process_numa_vcpu,
         mock_process_numa_memory,
+        mock_process_vio_numa_nodes,
     ):
         """Process numa parameters, id, paired-threads exist, vim type is openstack.
         vcpus calculation according to paired-threads in numa, there is extra_spec.
@@ -6149,17 +5115,15 @@
         numas = [{"id": 0, "paired-threads": 3}, {"id": 1, "paired-threads": 3}]
         extra_specs = {"some-key": "some-value"}
         expected_extra_specs = {
-            "hw:numa_nodes": "2",
             "hw:cpu_sockets": "2",
+            "hw:cpu_threads": "12",
+            "hw:numa_nodes": "2",
             "some-key": "some-value",
         }
         self.vimconn.vim_type = "openstack"
-        vcpus = 6
         mock_process_numa_paired_threads.side_effect = [6, 6]
-        result = self.vimconn._process_numa_parameters_of_flavor(
-            numas, extra_specs, vcpus
-        )
-        self.assertEqual(result, vcpus)
+        self.vimconn._process_numa_parameters_of_flavor(numas, extra_specs)
+
         self.check_if_assert_not_called(
             [mock_process_numa_threads, mock_process_numa_cores]
         )
@@ -6185,6 +5149,7 @@
         )
         self.assertDictEqual(extra_specs, expected_extra_specs)
 
+    @patch.object(vimconnector, "process_vio_numa_nodes", new_callable=CopyingMock())
     @patch.object(vimconnector, "process_numa_memory", new_callable=CopyingMock())
     @patch.object(vimconnector, "process_numa_vcpu", new_callable=CopyingMock())
     @patch.object(
@@ -6201,26 +5166,22 @@
         mock_process_numa_paired_threads,
         mock_process_numa_vcpu,
         mock_process_numa_memory,
+        mock_process_vio_numa_nodes,
     ):
         """Process numa parameters, id, paired-threads exist, vim type is VIO.
         vcpus calculation according to paired-threads in numa, there is extra_spec.
         """
-        numas = [{"id": 0, "paired-threads": 3}, {"id": 1, "paired-threads": 3}]
+        numas = [{"id": 0, "paired-threads": 2}, {"id": 1, "paired-threads": 2}]
         extra_specs = {"some-key": "some-value"}
         expected_extra_specs = {
             "hw:numa_nodes": "2",
-            "vmware:extra_config": '{"numa.nodeAffinity":"0"}',
-            "vmware:latency_sensitivity_level": "high",
             "hw:cpu_sockets": "2",
+            "hw:cpu_threads": "8",
             "some-key": "some-value",
         }
         self.vimconn.vim_type = "VIO"
-        vcpus = 6
-        mock_process_numa_paired_threads.side_effect = [6, 6]
-        result = self.vimconn._process_numa_parameters_of_flavor(
-            numas, extra_specs, vcpus
-        )
-        self.assertEqual(result, vcpus)
+        mock_process_numa_paired_threads.side_effect = [4, 4]
+        self.vimconn._process_numa_parameters_of_flavor(numas, extra_specs)
         self.check_if_assert_not_called(
             [mock_process_numa_threads, mock_process_numa_cores]
         )
@@ -6230,34 +5191,34 @@
         _call_mock_process_numa_paired_threads = (
             mock_process_numa_paired_threads.call_args_list
         )
+        mock_process_vio_numa_nodes.assert_called_once_with(
+            2, {"some-key": "some-value", "hw:numa_nodes": "2"}
+        )
         self.assertEqual(
             _call_mock_process_numa_paired_threads[0].args,
             (
-                {"id": 0, "paired-threads": 3},
+                {"id": 0, "paired-threads": 2},
                 {
                     "hw:cpu_sockets": "2",
                     "hw:numa_nodes": "2",
                     "some-key": "some-value",
-                    "vmware:extra_config": '{"numa.nodeAffinity":"0"}',
-                    "vmware:latency_sensitivity_level": "high",
                 },
             ),
         )
         self.assertEqual(
             _call_mock_process_numa_paired_threads[1].args,
             (
-                {"id": 1, "paired-threads": 3},
+                {"id": 1, "paired-threads": 2},
                 {
                     "hw:cpu_sockets": "2",
                     "hw:numa_nodes": "2",
                     "some-key": "some-value",
-                    "vmware:extra_config": '{"numa.nodeAffinity":"0"}',
-                    "vmware:latency_sensitivity_level": "high",
                 },
             ),
         )
         self.assertDictEqual(extra_specs, expected_extra_specs)
 
+    @patch.object(vimconnector, "process_vio_numa_nodes", new_callable=CopyingMock())
     @patch.object(vimconnector, "process_numa_memory", new_callable=CopyingMock())
     @patch.object(vimconnector, "process_numa_vcpu", new_callable=CopyingMock())
     @patch.object(
@@ -6274,20 +5235,23 @@
         mock_process_numa_paired_threads,
         mock_process_numa_vcpu,
         mock_process_numa_memory,
+        mock_process_vio_numa_nodes,
     ):
         """Process numa parameters, id, cores exist, vim type is openstack.
         vcpus calculation according to cores in numa.
         """
         numas = [{"id": 0, "cores": 1}, {"id": 1, "cores": 2}]
         extra_specs = {}
-        expected_extra_specs = {"hw:numa_nodes": "2", "hw:cpu_sockets": "2"}
+        updated_extra_specs = {"hw:numa_nodes": "2", "hw:cpu_sockets": "2"}
+        expected_extra_specs = {
+            "hw:numa_nodes": "2",
+            "hw:cpu_sockets": "2",
+            "hw:cpu_cores": "3",
+        }
         self.vimconn.vim_type = "openstack"
-        vcpus = 2
         mock_process_numa_cores.side_effect = [1, 2]
-        result = self.vimconn._process_numa_parameters_of_flavor(
-            numas, extra_specs, vcpus
-        )
-        self.assertEqual(result, vcpus)
+        self.vimconn._process_numa_parameters_of_flavor(numas, extra_specs)
+
         self.check_if_assert_not_called(
             [mock_process_numa_threads, mock_process_numa_paired_threads]
         )
@@ -6297,14 +5261,15 @@
         _call_mock_process_numa_cores = mock_process_numa_cores.call_args_list
         self.assertEqual(
             _call_mock_process_numa_cores[0].args,
-            ({"id": 0, "cores": 1}, {"hw:cpu_sockets": "2", "hw:numa_nodes": "2"}),
+            ({"id": 0, "cores": 1}, updated_extra_specs),
         )
         self.assertEqual(
             _call_mock_process_numa_cores[1].args,
-            ({"id": 1, "cores": 2}, {"hw:cpu_sockets": "2", "hw:numa_nodes": "2"}),
+            ({"id": 1, "cores": 2}, updated_extra_specs),
         )
         self.assertDictEqual(extra_specs, expected_extra_specs)
 
+    @patch.object(vimconnector, "process_vio_numa_nodes", new_callable=CopyingMock())
     @patch.object(vimconnector, "process_numa_memory", new_callable=CopyingMock())
     @patch.object(vimconnector, "process_numa_vcpu", new_callable=CopyingMock())
     @patch.object(
@@ -6321,6 +5286,7 @@
         mock_process_numa_paired_threads,
         mock_process_numa_vcpu,
         mock_process_numa_memory,
+        mock_process_vio_numa_nodes,
     ):
         """Process numa parameters, id, cores exist, vim type is VIO.
         vcpus calculation according to cores in numa.
@@ -6328,35 +5294,44 @@
         numas = [{"id": 0, "cores": 1}, {"id": 1, "cores": 2}]
         extra_specs = {}
         expected_extra_specs = {
-            "hw:numa_nodes": "2",
+            "hw:cpu_cores": "3",
             "hw:cpu_sockets": "2",
-            "vmware:extra_config": '{"numa.nodeAffinity":"0"}',
-            "vmware:latency_sensitivity_level": "high",
+            "hw:numa_nodes": "2",
         }
         self.vimconn.vim_type = "VIO"
-        vcpus = 2
         mock_process_numa_cores.side_effect = [1, 2]
-        result = self.vimconn._process_numa_parameters_of_flavor(
-            numas, extra_specs, vcpus
-        )
-        self.assertEqual(result, vcpus)
+        self.vimconn._process_numa_parameters_of_flavor(numas, extra_specs)
         self.check_if_assert_not_called(
             [mock_process_numa_threads, mock_process_numa_paired_threads]
         )
         self.assertEqual(mock_process_numa_memory.call_count, 2)
         self.assertEqual(mock_process_numa_vcpu.call_count, 2)
         self.assertEqual(mock_process_numa_cores.call_count, 2)
+        mock_process_vio_numa_nodes.assert_called_once_with(2, {"hw:numa_nodes": "2"})
         _call_mock_process_numa_cores = mock_process_numa_cores.call_args_list
         self.assertEqual(
             _call_mock_process_numa_cores[0].args,
-            ({"id": 0, "cores": 1}, expected_extra_specs),
+            (
+                {"id": 0, "cores": 1},
+                {
+                    "hw:cpu_sockets": "2",
+                    "hw:numa_nodes": "2",
+                },
+            ),
         )
         self.assertEqual(
             _call_mock_process_numa_cores[1].args,
-            ({"id": 1, "cores": 2}, expected_extra_specs),
+            (
+                {"id": 1, "cores": 2},
+                {
+                    "hw:cpu_sockets": "2",
+                    "hw:numa_nodes": "2",
+                },
+            ),
         )
         self.assertDictEqual(extra_specs, expected_extra_specs)
 
+    @patch.object(vimconnector, "process_vio_numa_nodes", new_callable=CopyingMock())
     @patch.object(vimconnector, "process_numa_memory", new_callable=CopyingMock())
     @patch.object(vimconnector, "process_numa_vcpu", new_callable=CopyingMock())
     @patch.object(
@@ -6373,6 +5348,7 @@
         mock_process_numa_paired_threads,
         mock_process_numa_vcpu,
         mock_process_numa_memory,
+        mock_process_vio_numa_nodes,
     ):
         """Process numa parameters, memory, vcpu, thread exist, vim type is VIO,
         vcpus calculation according threads in numa, there are not numa ids.
@@ -6384,17 +5360,12 @@
         extra_specs = {}
         expected_extra_specs = {
             "hw:numa_nodes": "2",
-            "vmware:extra_config": '{"numa.nodeAffinity":"0"}',
-            "vmware:latency_sensitivity_level": "high",
             "hw:cpu_sockets": "2",
+            "hw:cpu_threads": "3",
         }
         self.vimconn.vim_type = "VIO"
-        vcpus = 3
-        mock_process_numa_threads.return_value = vcpus
-        result = self.vimconn._process_numa_parameters_of_flavor(
-            numas, extra_specs, vcpus
-        )
-        self.assertEqual(result, vcpus)
+        mock_process_numa_threads.return_value = 3
+        self.vimconn._process_numa_parameters_of_flavor(numas, extra_specs)
         self.check_if_assert_not_called(
             [
                 mock_process_numa_memory,
@@ -6403,17 +5374,22 @@
                 mock_process_numa_paired_threads,
             ]
         )
+        mock_process_vio_numa_nodes.assert_called_once_with(2, {"hw:numa_nodes": "2"})
         self.assertEqual(mock_process_numa_threads.call_count, 1)
         _call_mock_process_numa_threads = mock_process_numa_threads.call_args_list
         self.assertEqual(
             _call_mock_process_numa_threads[0].args,
             (
                 {"memory": 1, "vcpu": [1, 3], "threads": 3},
-                expected_extra_specs,
+                {
+                    "hw:cpu_sockets": "2",
+                    "hw:numa_nodes": "2",
+                },
             ),
         )
         self.assertDictEqual(extra_specs, expected_extra_specs)
 
+    @patch.object(vimconnector, "process_vio_numa_nodes", new_callable=CopyingMock())
     @patch.object(vimconnector, "process_numa_memory", new_callable=CopyingMock())
     @patch.object(vimconnector, "process_numa_vcpu", new_callable=CopyingMock())
     @patch.object(
@@ -6430,6 +5406,7 @@
         mock_process_numa_paired_threads,
         mock_process_numa_vcpu,
         mock_process_numa_memory,
+        mock_process_vio_numa_nodes,
     ):
         """Process numa parameters, memory, vcpu, thread exist, vim type is openstack,
         vcpus calculation according threads in numa, there are not numa ids.
@@ -6442,20 +5419,19 @@
         expected_extra_specs = {
             "hw:numa_nodes": "2",
             "hw:cpu_sockets": "2",
+            "hw:cpu_threads": "3",
         }
         self.vimconn.vim_type = "openstack"
-        vcpus = 3
-        mock_process_numa_threads.return_value = vcpus
-        result = self.vimconn._process_numa_parameters_of_flavor(
-            numas, extra_specs, vcpus
-        )
-        self.assertEqual(result, vcpus)
+        mock_process_numa_threads.return_value = 3
+        self.vimconn._process_numa_parameters_of_flavor(numas, extra_specs)
+
         self.check_if_assert_not_called(
             [
                 mock_process_numa_memory,
                 mock_process_numa_vcpu,
                 mock_process_numa_cores,
                 mock_process_numa_paired_threads,
+                mock_process_vio_numa_nodes,
             ]
         )
         self.assertEqual(mock_process_numa_threads.call_count, 1)
@@ -6464,11 +5440,12 @@
             _call_mock_process_numa_threads[0].args,
             (
                 {"memory": 1, "vcpu": [1, 3], "threads": 3},
-                expected_extra_specs,
+                {"hw:cpu_sockets": "2", "hw:numa_nodes": "2"},
             ),
         )
         self.assertDictEqual(extra_specs, expected_extra_specs)
 
+    @patch.object(vimconnector, "process_vio_numa_nodes", new_callable=CopyingMock())
     @patch.object(vimconnector, "process_numa_memory", new_callable=CopyingMock())
     @patch.object(vimconnector, "process_numa_vcpu", new_callable=CopyingMock())
     @patch.object(
@@ -6485,22 +5462,14 @@
         mock_process_numa_paired_threads,
         mock_process_numa_vcpu,
         mock_process_numa_memory,
+        mock_process_vio_numa_nodes,
     ):
         """Numa list is empty, vim type is VIO."""
         numas = []
         extra_specs = {}
-        expected_extra_specs = {
-            "hw:numa_nodes": "0",
-            "vmware:extra_config": '{"numa.nodeAffinity":"0"}',
-            "vmware:latency_sensitivity_level": "high",
-        }
+        expected_extra_specs = {"hw:numa_nodes": "0"}
         self.vimconn.vim_type = "VIO"
-        vcpus = 4
-        mock_process_numa_threads.return_value = None
-        result = self.vimconn._process_numa_parameters_of_flavor(
-            numas, extra_specs, vcpus
-        )
-        self.assertEqual(result, vcpus)
+        self.vimconn._process_numa_parameters_of_flavor(numas, extra_specs)
         self.check_if_assert_not_called(
             [
                 mock_process_numa_memory,
@@ -6510,8 +5479,10 @@
                 mock_process_numa_threads,
             ]
         )
+        mock_process_vio_numa_nodes.assert_called_once_with(0, {"hw:numa_nodes": "0"})
         self.assertDictEqual(extra_specs, expected_extra_specs)
 
+    @patch.object(vimconnector, "process_vio_numa_nodes", new_callable=CopyingMock())
     @patch.object(vimconnector, "process_numa_memory", new_callable=CopyingMock())
     @patch.object(vimconnector, "process_numa_vcpu", new_callable=CopyingMock())
     @patch.object(
@@ -6528,18 +5499,16 @@
         mock_process_numa_paired_threads,
         mock_process_numa_vcpu,
         mock_process_numa_memory,
+        mock_process_vio_numa_nodes,
     ):
         """Numa list is empty, vim type is openstack."""
         numas = []
         extra_specs = {}
         expected_extra_specs = {"hw:numa_nodes": "0"}
         self.vimconn.vim_type = "openstack"
-        vcpus = 5
         mock_process_numa_threads.return_value = None
-        result = self.vimconn._process_numa_parameters_of_flavor(
-            numas, extra_specs, vcpus
-        )
-        self.assertEqual(result, vcpus)
+        self.vimconn._process_numa_parameters_of_flavor(numas, extra_specs)
+
         self.check_if_assert_not_called(
             [
                 mock_process_numa_memory,
@@ -6547,6 +5516,7 @@
                 mock_process_numa_cores,
                 mock_process_numa_paired_threads,
                 mock_process_numa_threads,
+                mock_process_vio_numa_nodes,
             ]
         )
         self.assertDictEqual(extra_specs, expected_extra_specs)
@@ -6574,14 +5544,6 @@
         self.vimconn.process_numa_memory(numa, node_id, extra_specs)
         self.assertDictEqual(extra_specs, expected_extra_spec)
 
-    def test_process_numa_memory_node_id_is_int(self):
-        numa = {"memory": 2, "vcpu": [2]}
-        node_id = 0
-        extra_specs = {}
-        expected_extra_spec = {"hw:numa_mem.0": 2048}
-        self.vimconn.process_numa_memory(numa, node_id, extra_specs)
-        self.assertDictEqual(extra_specs, expected_extra_spec)
-
     def test_process_numa_vcpu_empty_extra_spec(self):
         numa = {"vcpu": [2]}
         node_id = 0
@@ -6887,7 +5849,6 @@
             {"memory": 1, "vcpu": [1, 3], "threads": 3},
             {"memory": 2, "vcpu": [2]},
         ]
-        vcpus = 3
         extended = {
             "numas": numas,
             "cpu-quota": {"limit": 3},
@@ -6900,13 +5861,10 @@
         expected_extra_specs = {
             "hw:mem_page_size": "large",
         }
-        mock_process_numa_parameters_of_flavor.return_value = vcpus
-        result = self.vimconn._process_extended_config_of_flavor(
-            extended, extra_specs, vcpus
-        )
-        self.assertEqual(result, vcpus)
+        self.vimconn._process_extended_config_of_flavor(extended, extra_specs)
+
         self.assertEqual(mock_process_resource_quota.call_count, 4)
-        mock_process_numa_parameters_of_flavor.assert_called_once_with(numas, {}, vcpus)
+        mock_process_numa_parameters_of_flavor.assert_called_once_with(numas, {})
         self.assertEqual(extra_specs, expected_extra_specs)
 
     @patch.object(
@@ -6923,7 +5881,6 @@
             {"memory": 1, "threads": 3},
             {"memory": 2, "vcpu": [2]},
         ]
-        vcpus = 3
         extended = {
             "numas": numas,
             "disk-quota": {"limit": 50},
@@ -6933,13 +5890,9 @@
         expected_extra_specs = {
             "hw:mem_page_size": "any",
         }
-        mock_process_numa_parameters_of_flavor.return_value = vcpus
-        result = self.vimconn._process_extended_config_of_flavor(
-            extended, extra_specs, vcpus
-        )
-        self.assertEqual(result, vcpus)
+        self.vimconn._process_extended_config_of_flavor(extended, extra_specs)
         mock_process_resource_quota.assert_not_called()
-        mock_process_numa_parameters_of_flavor.assert_called_once_with(numas, {}, vcpus)
+        mock_process_numa_parameters_of_flavor.assert_called_once_with(numas, {})
         self.assertEqual(extra_specs, expected_extra_specs)
 
     @patch.object(
@@ -6952,7 +5905,6 @@
         self, mock_process_resource_quota, mock_process_numa_parameters_of_flavor
     ):
         """Process extended config, extended has cpu, mem, vif and disk-io quota but not numas."""
-        vcpus = 3
         extended = {
             "cpu-quota": {"limit": 3},
             "mem-quota": {"limit": 1},
@@ -6964,11 +5916,7 @@
         expected_extra_specs = {
             "hw:mem_page_size": "small",
         }
-        mock_process_numa_parameters_of_flavor.return_value = vcpus
-        result = self.vimconn._process_extended_config_of_flavor(
-            extended, extra_specs, vcpus
-        )
-        self.assertEqual(result, vcpus)
+        self.vimconn._process_extended_config_of_flavor(extended, extra_specs)
         self.assertEqual(mock_process_resource_quota.call_count, 4)
         mock_process_numa_parameters_of_flavor.assert_not_called()
         self.assertEqual(extra_specs, expected_extra_specs)
@@ -6996,19 +5944,14 @@
             "mem-policy": "STRICT",
         }
         extra_specs = {}
-        vcpus = 3
         expected_extra_specs = {
             "hw:mem_page_size": "large",
             "hw:cpu_policy": "dedicated",
             "hw:numa_mempolicy": "strict",
         }
-        mock_process_numa_parameters_of_flavor.return_value = 4
-        result = self.vimconn._process_extended_config_of_flavor(
-            extended, extra_specs, vcpus
-        )
-        self.assertEqual(result, 4)
+        self.vimconn._process_extended_config_of_flavor(extended, extra_specs)
         self.assertEqual(mock_process_resource_quota.call_count, 2)
-        mock_process_numa_parameters_of_flavor.assert_called_once_with(numas, {}, vcpus)
+        mock_process_numa_parameters_of_flavor.assert_called_once_with(numas, {})
         self.assertEqual(extra_specs, expected_extra_specs)
 
     @patch.object(
@@ -7029,16 +5972,12 @@
             "mem-policy": "STRICT",
         }
         extra_specs = {}
-        vcpus = 3
         expected_extra_specs = {
             "hw:mem_page_size": "large",
             "hw:cpu_policy": "dedicated",
             "hw:numa_mempolicy": "strict",
         }
-        result = self.vimconn._process_extended_config_of_flavor(
-            extended, extra_specs, vcpus
-        )
-        self.assertEqual(result, 3)
+        self.vimconn._process_extended_config_of_flavor(extended, extra_specs)
         self.assertEqual(mock_process_resource_quota.call_count, 2)
         mock_process_numa_parameters_of_flavor.assert_not_called()
         self.assertEqual(extra_specs, expected_extra_specs)
@@ -7061,15 +6000,12 @@
             "mem-policy": "STRICT",
         }
         extra_specs = {}
-        vcpus = 6
+
         expected_extra_specs = {
             "hw:cpu_policy": "dedicated",
             "hw:numa_mempolicy": "strict",
         }
-        result = self.vimconn._process_extended_config_of_flavor(
-            extended, extra_specs, vcpus
-        )
-        self.assertEqual(result, 6)
+        self.vimconn._process_extended_config_of_flavor(extended, extra_specs)
         self.assertEqual(mock_process_resource_quota.call_count, 2)
         mock_process_numa_parameters_of_flavor.assert_not_called()
         self.assertEqual(extra_specs, expected_extra_specs)
@@ -7097,18 +6033,13 @@
             "mem-policy": "STRICT",
         }
         extra_specs = {}
-        mock_process_numa_parameters_of_flavor.return_value = 4
-        vcpus = 1
         expected_extra_specs = {
             "hw:cpu_policy": "dedicated",
             "hw:numa_mempolicy": "strict",
         }
-        result = self.vimconn._process_extended_config_of_flavor(
-            extended, extra_specs, vcpus
-        )
-        self.assertEqual(result, 4)
+        self.vimconn._process_extended_config_of_flavor(extended, extra_specs)
         self.assertEqual(mock_process_resource_quota.call_count, 2)
-        mock_process_numa_parameters_of_flavor.assert_called_once_with(numas, {}, vcpus)
+        mock_process_numa_parameters_of_flavor.assert_called_once_with(numas, {})
         self.assertEqual(extra_specs, expected_extra_specs)
 
     @patch.object(
@@ -7133,19 +6064,14 @@
             "cpu-pinning-policy": "DEDICATED",
             "mem-policy": "STRICT",
         }
-        mock_process_numa_parameters_of_flavor.return_value = 1
         extra_specs = {}
-        vcpus = None
         expected_extra_specs = {
             "hw:cpu_policy": "dedicated",
             "hw:numa_mempolicy": "strict",
         }
-        result = self.vimconn._process_extended_config_of_flavor(
-            extended, extra_specs, vcpus
-        )
-        self.assertEqual(result, 1)
+        self.vimconn._process_extended_config_of_flavor(extended, extra_specs)
         self.assertEqual(mock_process_resource_quota.call_count, 2)
-        mock_process_numa_parameters_of_flavor.assert_called_once_with(numas, {}, vcpus)
+        mock_process_numa_parameters_of_flavor.assert_called_once_with(numas, {})
         self.assertEqual(extra_specs, expected_extra_specs)
 
     @patch.object(
@@ -7166,16 +6092,12 @@
             "mem-policy": "STRICT",
         }
         extra_specs = {"some-key": "some-val"}
-        vcpus = None
         expected_extra_specs = {
             "hw:cpu_policy": "dedicated",
             "hw:numa_mempolicy": "strict",
             "some-key": "some-val",
         }
-        result = self.vimconn._process_extended_config_of_flavor(
-            extended, extra_specs, vcpus
-        )
-        self.assertEqual(result, None)
+        self.vimconn._process_extended_config_of_flavor(extended, extra_specs)
         self.assertEqual(mock_process_resource_quota.call_count, 2)
         mock_process_numa_parameters_of_flavor.assert_not_called()
         self.assertEqual(extra_specs, expected_extra_specs)
@@ -7202,17 +6124,12 @@
             "cpu-pinning-pol": "DEDICATED",
             "mem-pol": "STRICT",
         }
-        mock_process_numa_parameters_of_flavor.return_value = 1
         extra_specs = {}
-        vcpus = ""
         expected_extra_specs = {}
-        result = self.vimconn._process_extended_config_of_flavor(
-            extended, extra_specs, vcpus
-        )
-        self.assertEqual(result, 1)
+        self.vimconn._process_extended_config_of_flavor(extended, extra_specs)
         self.assertEqual(mock_process_resource_quota.call_count, 2)
         mock_process_numa_parameters_of_flavor.assert_called_once_with(
-            numas, extra_specs, vcpus
+            numas, extra_specs
         )
         self.assertEqual(extra_specs, expected_extra_specs)
 
@@ -7228,11 +6145,7 @@
         """Process extended config, extended is empty."""
         extended = {}
         extra_specs = {}
-        vcpus = 2
-        result = self.vimconn._process_extended_config_of_flavor(
-            extended, extra_specs, vcpus
-        )
-        self.assertEqual(result, 2)
+        self.vimconn._process_extended_config_of_flavor(extended, extra_specs)
         self.check_if_assert_not_called(
             [mock_process_resource_quota, mock_process_numa_parameters_of_flavor]
         )
@@ -7292,7 +6205,6 @@
         name_suffix = 0
         vcpus = 8
         mock_change_flavor_name.return_value = name1
-        mock_extended_config_of_flavor.return_value = vcpus
         mock_get_flavor_details.return_value = (
             3,
             vcpus,
@@ -7307,7 +6219,7 @@
         mock_get_flavor_details.assert_called_once_with(flavor_data)
         mock_change_flavor_name.assert_called_once_with(name1, name_suffix, flavor_data)
         mock_extended_config_of_flavor.assert_called_once_with(
-            extended, {"some-key": "some-value"}, vcpus
+            extended, {"some-key": "some-value"}
         )
         self.vimconn.nova.flavors.create.assert_called_once_with(
             name=name1, ram=3, vcpus=8, disk=50, ephemeral=0, swap=0, is_public=True
@@ -7335,16 +6247,14 @@
         name_suffix = 0
         vcpus = 8
         mock_change_flavor_name.return_value = name1
-        mock_extended_config_of_flavor.return_value = vcpus
         mock_get_flavor_details.return_value = (3, vcpus, {}, extended)
         expected_result = self.new_flavor.id
         result = self.vimconn.new_flavor(flavor_data)
         self.assertEqual(result, expected_result)
         mock_reload_connection.assert_called_once()
-
         mock_get_flavor_details.assert_called_once_with(flavor_data)
         mock_change_flavor_name.assert_called_once_with(name1, name_suffix, flavor_data)
-        mock_extended_config_of_flavor.assert_called_once_with(extended, {}, vcpus)
+        mock_extended_config_of_flavor.assert_called_once_with(extended, {})
         self.vimconn.nova.flavors.create.assert_called_once_with(
             name=name1, ram=3, vcpus=vcpus, disk=50, ephemeral=0, swap=0, is_public=True
         )
@@ -7372,15 +6282,14 @@
         """Create new flavor, change_name_if_used_false, there is extended."""
         vcpus = 8
         mock_get_flavor_details.return_value = (3, vcpus, {}, extended)
-        mock_extended_config_of_flavor.return_value = 16
         expected_result = self.new_flavor.id
         result = self.vimconn.new_flavor(flavor_data, False)
         self.assertEqual(result, expected_result)
         mock_reload_connection.assert_called_once()
         self.assertEqual(mock_get_flavor_details.call_count, 1)
-        mock_extended_config_of_flavor.assert_called_once_with(extended, {}, vcpus)
+        mock_extended_config_of_flavor.assert_called_once_with(extended, {})
         self.vimconn.nova.flavors.create.assert_called_once_with(
-            name=name1, ram=3, vcpus=16, disk=50, ephemeral=0, swap=0, is_public=True
+            name=name1, ram=3, vcpus=8, disk=50, ephemeral=0, swap=0, is_public=True
         )
         self.check_if_assert_not_called(
             [mock_change_flavor_name, mock_format_exception, self.new_flavor.set_keys]
@@ -7410,13 +6319,11 @@
         mock_get_flavor_details.return_value = (3, 8, {}, None)
         result = self.vimconn.new_flavor(flavor_data2)
         self.assertEqual(result, expected_result)
-
         mock_reload_connection.assert_called_once()
         mock_change_flavor_name.assert_called_once_with(
             name1, name_suffix, flavor_data2
         )
         self.assertEqual(mock_get_flavor_details.call_count, 1)
-
         self.vimconn.nova.flavors.create.assert_called_once_with(
             name=name1, ram=3, vcpus=8, disk=50, ephemeral=0, swap=0, is_public=True
         )
@@ -7531,11 +6438,9 @@
         mock_change_flavor_name.side_effect = [error2, "sample-flavor-3"]
         expected_result = self.new_flavor.id
         mock_get_flavor_details.return_value = (3, 8, {}, extended)
-        mock_extended_config_of_flavor.return_value = 10
         result = self.vimconn.new_flavor(flavor_data2)
         self.assertEqual(result, expected_result)
         self.assertEqual(mock_reload_connection.call_count, 2)
-
         mock_change_flavor_name.assert_called_with(name1, name_suffix, flavor_data2)
         self.assertEqual(mock_change_flavor_name.call_count, 2)
         self.assertEqual(mock_get_flavor_details.call_count, 1)
@@ -7543,7 +6448,7 @@
         self.vimconn.nova.flavors.create.assert_called_once_with(
             name="sample-flavor-3",
             ram=3,
-            vcpus=10,
+            vcpus=8,
             disk=50,
             ephemeral=0,
             swap=0,
@@ -7579,13 +6484,11 @@
         expected_result = self.new_flavor.id
         mock_get_flavor_details.return_value = (3, 8, {}, None)
         result = self.vimconn.new_flavor(flavor_data2)
-
         self.assertEqual(result, expected_result)
         self.assertEqual(mock_reload_connection.call_count, 2)
         mock_change_flavor_name.assert_called_with(name1, name_suffix, flavor_data2)
         self.assertEqual(mock_change_flavor_name.call_count, 2)
         self.assertEqual(mock_get_flavor_details.call_count, 1)
-
         self.vimconn.nova.flavors.create.assert_called_once_with(
             name="sample-flavor-3",
             ram=3,
@@ -7798,7 +6701,6 @@
                 }
             ),
         )
-
         self.assertEqual(mock_reload_connection.call_count, 3)
         _call_mock_change_flavor = mock_change_flavor_name.call_args_list
         self.assertEqual(
@@ -7833,6 +6735,53 @@
         )
         self.assertEqual(mock_format_exception.call_count, 1)
 
+    def test_process_process_vio_numa_nodes_without_numa_with_extra_spec(self):
+        numa_nodes = 0
+        extra_specs = {"hw:numa_nodes": "0"}
+        expected_extra_spec = {
+            "vmware:extra_config": '{"numa.nodeAffinity":"0"}',
+            "vmware:latency_sensitivity_level": "high",
+            "hw:numa_nodes": "0",
+        }
+        self.vimconn.process_vio_numa_nodes(numa_nodes, extra_specs)
+        self.assertDictEqual(extra_specs, expected_extra_spec)
+
+    def test_process_process_vio_numa_nodes_list_type_numa_nodes_empty_extra_spec(self):
+        numa_nodes = [7, 9, 4]
+        extra_specs = {}
+        expected_extra_spec = {
+            "vmware:latency_sensitivity_level": "high",
+        }
+        self.vimconn.process_vio_numa_nodes(numa_nodes, extra_specs)
+        self.assertDictEqual(extra_specs, expected_extra_spec)
+
+    def test_process_process_vio_numa_nodes_with_numa_with_extra_spec(self):
+        numa_nodes = 5
+        extra_specs = {"hw:numa_nodes": "5"}
+        expected_extra_spec = {
+            "vmware:latency_sensitivity_level": "high",
+            "hw:numa_nodes": "5",
+        }
+        self.vimconn.process_vio_numa_nodes(numa_nodes, extra_specs)
+        self.assertDictEqual(extra_specs, expected_extra_spec)
+
+    def test_process_process_vio_numa_nodes_none_numa_nodes(self):
+        numa_nodes = None
+        extra_specs = {"hw:numa_nodes": "None"}
+        expected_extra_spec = {
+            "vmware:latency_sensitivity_level": "high",
+            "hw:numa_nodes": "None",
+            "vmware:extra_config": '{"numa.nodeAffinity":"0"}',
+        }
+        self.vimconn.process_vio_numa_nodes(numa_nodes, extra_specs)
+        self.assertDictEqual(extra_specs, expected_extra_spec)
+
+    def test_process_process_vio_numa_nodes_invalid_type_extra_specs(self):
+        numa_nodes = 5
+        extra_specs = []
+        with self.assertRaises(TypeError):
+            self.vimconn.process_vio_numa_nodes(numa_nodes, extra_specs)
+
 
 if __name__ == "__main__":
     unittest.main()
diff --git a/RO-VIM-openstack/osm_rovim_openstack/vimconn_openstack.py b/RO-VIM-openstack/osm_rovim_openstack/vimconn_openstack.py
index 7c57817..9faf98d 100644
--- a/RO-VIM-openstack/osm_rovim_openstack/vimconn_openstack.py
+++ b/RO-VIM-openstack/osm_rovim_openstack/vimconn_openstack.py
@@ -1287,16 +1287,17 @@
             extra_specs (dict):         To be filled.
 
         Returns:
-            vcpus       (int)           Number of virtual cpus
+            threads       (int)           Number of virtual cpus
 
         """
         if not numa.get("paired-threads"):
             return
+
+        # cpu_thread_policy "require" implies that compute node must have an SMT architecture
-        vcpus = numa["paired-threads"] * 2
+        threads = numa["paired-threads"] * 2
         extra_specs["hw:cpu_thread_policy"] = "require"
         extra_specs["hw:cpu_policy"] = "dedicated"
-        return vcpus
+        return threads
 
     @staticmethod
     def process_numa_cores(numa: dict, extra_specs: dict) -> Optional[int]:
@@ -1306,17 +1307,17 @@
             extra_specs (dict):         To be filled.
 
         Returns:
-            vcpus       (int)           Number of virtual cpus
+            cores       (int)           Number of virtual cpus
 
         """
         # cpu_thread_policy "isolate" implies that the host must not have an SMT
         # architecture, or a non-SMT architecture will be emulated
         if not numa.get("cores"):
             return
-        vcpus = numa["cores"]
+        cores = numa["cores"]
         extra_specs["hw:cpu_thread_policy"] = "isolate"
         extra_specs["hw:cpu_policy"] = "dedicated"
-        return vcpus
+        return cores
 
     @staticmethod
     def process_numa_threads(numa: dict, extra_specs: dict) -> Optional[int]:
@@ -1326,37 +1327,33 @@
             extra_specs (dict):         To be filled.
 
         Returns:
-            vcpus       (int)           Number of virtual cpus
+            threads       (int)           Number of virtual cpus
 
         """
         # cpu_thread_policy "prefer" implies that the host may or may not have an SMT architecture
         if not numa.get("threads"):
             return
-        vcpus = numa["threads"]
+        threads = numa["threads"]
         extra_specs["hw:cpu_thread_policy"] = "prefer"
         extra_specs["hw:cpu_policy"] = "dedicated"
-        return vcpus
+        return threads
 
     def _process_numa_parameters_of_flavor(
-        self, numas: List, extra_specs: Dict, vcpus: Optional[int]
-    ) -> int:
+        self, numas: List, extra_specs: Dict
+    ) -> None:
         """Process numa parameters and fill up extra_specs.
 
         Args:
             numas   (list):             List of dictionary which includes numa information
             extra_specs (dict):         To be filled.
-            vcpus       (int)      Number of virtual cpus
-
-        Returns:
-            vcpus       (int)           Number of virtual cpus
 
         """
         numa_nodes = len(numas)
         extra_specs["hw:numa_nodes"] = str(numa_nodes)
+        cpu_cores, cpu_threads = 0, 0
 
         if self.vim_type == "VIO":
-            extra_specs["vmware:extra_config"] = '{"numa.nodeAffinity":"0"}'
-            extra_specs["vmware:latency_sensitivity_level"] = "high"
+            self.process_vio_numa_nodes(numa_nodes, extra_specs)
 
         for numa in numas:
             if "id" in numa:
@@ -1370,15 +1367,38 @@
             extra_specs["hw:cpu_sockets"] = str(numa_nodes)
 
             if "paired-threads" in numa:
-                vcpus = self.process_numa_paired_threads(numa, extra_specs)
+                threads = self.process_numa_paired_threads(numa, extra_specs)
+                cpu_threads += threads
 
             elif "cores" in numa:
-                vcpus = self.process_numa_cores(numa, extra_specs)
+                cores = self.process_numa_cores(numa, extra_specs)
+                cpu_cores += cores
 
             elif "threads" in numa:
-                vcpus = self.process_numa_threads(numa, extra_specs)
+                threads = self.process_numa_threads(numa, extra_specs)
+                cpu_threads += threads
 
-        return vcpus
+        if cpu_cores:
+            extra_specs["hw:cpu_cores"] = str(cpu_cores)
+        if cpu_threads:
+            extra_specs["hw:cpu_threads"] = str(cpu_threads)
+
+    @staticmethod
+    def process_vio_numa_nodes(numa_nodes: int, extra_specs: Dict) -> None:
+        """According to number of numa nodes, updates the extra_specs for VIO.
+
+        Args:
+
+            numa_nodes      (int):         Number of numa nodes
+            extra_specs     (dict):        Extra specs dict to be updated
+
+        """
+        # If there is no numa node, numa_nodes equals 0.
+        if not numa_nodes:
+            extra_specs["vmware:extra_config"] = '{"numa.nodeAffinity":"0"}'
+
+        # If there are several numas, we do not define specific affinity.
+        extra_specs["vmware:latency_sensitivity_level"] = "high"
 
     def _change_flavor_name(
         self, name: str, name_suffix: int, flavor_data: dict
@@ -1405,17 +1425,13 @@
         return name
 
     def _process_extended_config_of_flavor(
-        self, extended: dict, extra_specs: dict, vcpus: Optional[int]
-    ) -> int:
+        self, extended: dict, extra_specs: dict
+    ) -> None:
         """Process the extended dict to fill up extra_specs.
         Args:
 
-            extended    (dict):         Keeping the extra specification of flavor
-            extra_specs (dict)          Dict to be filled to be used during flavor creation
-            vcpus       (int)           Number of virtual cpus
-
-        Returns:
-            vcpus       (int)           Number of virtual cpus
+            extended                    (dict):         Keeping the extra specification of flavor
+            extra_specs                 (dict)          Dict to be filled to be used during flavor creation
 
         """
         quotas = {
@@ -1441,7 +1457,7 @@
 
         numas = extended.get("numas")
         if numas:
-            vcpus = self._process_numa_parameters_of_flavor(numas, extra_specs, vcpus)
+            self._process_numa_parameters_of_flavor(numas, extra_specs)
 
         for quota, item in quotas.items():
             if quota in extended.keys():
@@ -1462,8 +1478,6 @@
             if extended.get(policy):
                 extra_specs[hw_policy] = extended[policy].lower()
 
-        return vcpus
-
     @staticmethod
     def _get_flavor_details(flavor_data: dict) -> Tuple:
         """Returns the details of flavor
@@ -1513,9 +1527,7 @@
                         flavor_data
                     )
                     if extended:
-                        vcpus = self._process_extended_config_of_flavor(
-                            extended, extra_specs, vcpus
-                        )
+                        self._process_extended_config_of_flavor(extended, extra_specs)
 
                     # Create flavor
 
@@ -1536,7 +1548,6 @@
                     return new_flavor.id
 
                 except nvExceptions.Conflict as e:
-
                     if change_name_if_used and retry < max_retries:
                         continue
 
@@ -1880,7 +1891,6 @@
 
         # For VF
         elif net["type"] == "VF" or net["type"] == "SR-IOV":
-
             port_dict["binding:vnic_type"] = "direct"
 
             # VIO specific Changes
@@ -2068,7 +2078,6 @@
         key_id = "vim_volume_id" if "vim_volume_id" in disk.keys() else "vim_id"
 
         if disk.get(key_id):
-
             block_device_mapping["vd" + chr(base_disk_index)] = disk[key_id]
             existing_vim_volumes.append({"id": disk[key_id]})
 
@@ -2148,7 +2157,6 @@
         key_id = "vim_volume_id" if "vim_volume_id" in disk.keys() else "vim_id"
 
         if disk.get(key_id):
-
             # Use existing persistent volume
             block_device_mapping["vd" + chr(base_disk_index)] = disk[key_id]
             existing_vim_volumes.append({"id": disk[key_id]})
@@ -2479,7 +2487,6 @@
                 # In case of RO in HA there can be conflicts, two RO trying to assign same floating IP, so retry
                 # several times
                 while not assigned:
-
                     free_floating_ip = self._get_free_floating_ip(
                         server, floating_network
                     )
@@ -2576,7 +2583,6 @@
                 self.neutron.update_port(port[0], port_update)
 
             except Exception:
-
                 raise vimconn.VimConnException(
                     "It was not possible to disable port security for port {}".format(
                         port[0]
@@ -2883,7 +2889,6 @@
             k_id    (str):      Port id in the VIM
         """
         try:
-
             port_dict = self.neutron.list_ports()
             existing_ports = [port["id"] for port in port_dict["ports"] if port_dict]
 
@@ -2891,7 +2896,6 @@
                 self.neutron.delete_port(k_id)
 
         except Exception as e:
-
             self.logger.error("Error deleting port: {}: {}".format(type(e).__name__, e))
 
     def _delete_volumes_by_id_wth_cinder(
@@ -2973,7 +2977,6 @@
                 k_item, k_id = self._get_item_name_id(k)
 
                 if k_item == "volume":
-
                     unavailable_vol = self._delete_volumes_by_id_wth_cinder(
                         k, k_id, volumes_to_hold, created_items
                     )
@@ -2982,7 +2985,6 @@
                         keep_waiting = True
 
                 elif k_item == "floating_ip":
-
                     self._delete_floating_ip_by_id(k, k_id, created_items)
 
             except Exception as e:
@@ -3204,7 +3206,8 @@
 
     def action_vminstance(self, vm_id, action_dict, created_items={}):
         """Send and action over a VM instance from VIM
-        Returns None or the console dict if the action was successfully sent to the VIM"""
+        Returns None or the console dict if the action was successfully sent to the VIM
+        """
         self.logger.debug("Action over VM '%s': %s", vm_id, str(action_dict))
 
         try:
@@ -3448,45 +3451,6 @@
                     )
                 )
 
-    def delete_user(self, user_id):
-        """Delete a user from openstack VIM
-        Returns the user identifier"""
-        if self.debug:
-            print("osconnector: Deleting  a  user from VIM")
-
-        try:
-            self._reload_connection()
-            self.keystone.users.delete(user_id)
-
-            return 1, user_id
-        except ksExceptions.ConnectionError as e:
-            error_value = -vimconn.HTTP_Bad_Request
-            error_text = (
-                type(e).__name__
-                + ": "
-                + (str(e) if len(e.args) == 0 else str(e.args[0]))
-            )
-        except ksExceptions.NotFound as e:
-            error_value = -vimconn.HTTP_Not_Found
-            error_text = (
-                type(e).__name__
-                + ": "
-                + (str(e) if len(e.args) == 0 else str(e.args[0]))
-            )
-        except ksExceptions.ClientException as e:  # TODO remove
-            error_value = -vimconn.HTTP_Bad_Request
-            error_text = (
-                type(e).__name__
-                + ": "
-                + (str(e) if len(e.args) == 0 else str(e.args[0]))
-            )
-
-        # TODO insert exception vimconn.HTTP_Unauthorized
-        # if reaching here is because an exception
-        self.logger.debug("delete_tenant " + error_text)
-
-        return error_value, error_text
-
     def get_hosts_info(self):
         """Get the information of deployed hosts
         Returns the hosts content"""
@@ -3560,619 +3524,6 @@
 
         return error_value, error_text
 
-    def new_classification(self, name, ctype, definition):
-        self.logger.debug(
-            "Adding a new (Traffic) Classification to VIM, named %s", name
-        )
-
-        try:
-            new_class = None
-            self._reload_connection()
-
-            if ctype not in supportedClassificationTypes:
-                raise vimconn.VimConnNotSupportedException(
-                    "OpenStack VIM connector does not support provided "
-                    "Classification Type {}, supported ones are: {}".format(
-                        ctype, supportedClassificationTypes
-                    )
-                )
-
-            if not self._validate_classification(ctype, definition):
-                raise vimconn.VimConnException(
-                    "Incorrect Classification definition for the type specified."
-                )
-
-            classification_dict = definition
-            classification_dict["name"] = name
-            new_class = self.neutron.create_sfc_flow_classifier(
-                {"flow_classifier": classification_dict}
-            )
-
-            return new_class["flow_classifier"]["id"]
-        except (
-            neExceptions.ConnectionFailed,
-            ksExceptions.ClientException,
-            neExceptions.NeutronException,
-            ConnectionError,
-        ) as e:
-            self.logger.error("Creation of Classification failed.")
-            self._format_exception(e)
-
-    def get_classification(self, class_id):
-        self.logger.debug(" Getting Classification %s from VIM", class_id)
-        filter_dict = {"id": class_id}
-        class_list = self.get_classification_list(filter_dict)
-
-        if len(class_list) == 0:
-            raise vimconn.VimConnNotFoundException(
-                "Classification '{}' not found".format(class_id)
-            )
-        elif len(class_list) > 1:
-            raise vimconn.VimConnConflictException(
-                "Found more than one Classification with this criteria"
-            )
-
-        classification = class_list[0]
-
-        return classification
-
-    def get_classification_list(self, filter_dict={}):
-        self.logger.debug(
-            "Getting Classifications from VIM filter: '%s'", str(filter_dict)
-        )
-
-        try:
-            filter_dict_os = filter_dict.copy()
-            self._reload_connection()
-
-            if self.api_version3 and "tenant_id" in filter_dict_os:
-                filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")
-
-            classification_dict = self.neutron.list_sfc_flow_classifiers(
-                **filter_dict_os
-            )
-            classification_list = classification_dict["flow_classifiers"]
-            self.__classification_os2mano(classification_list)
-
-            return classification_list
-        except (
-            neExceptions.ConnectionFailed,
-            ksExceptions.ClientException,
-            neExceptions.NeutronException,
-            ConnectionError,
-        ) as e:
-            self._format_exception(e)
-
-    def delete_classification(self, class_id):
-        self.logger.debug("Deleting Classification '%s' from VIM", class_id)
-
-        try:
-            self._reload_connection()
-            self.neutron.delete_sfc_flow_classifier(class_id)
-
-            return class_id
-        except (
-            neExceptions.ConnectionFailed,
-            neExceptions.NeutronException,
-            ksExceptions.ClientException,
-            neExceptions.NeutronException,
-            ConnectionError,
-        ) as e:
-            self._format_exception(e)
-
-    def new_sfi(self, name, ingress_ports, egress_ports, sfc_encap=True):
-        self.logger.debug(
-            "Adding a new Service Function Instance to VIM, named '%s'", name
-        )
-
-        try:
-            new_sfi = None
-            self._reload_connection()
-            correlation = None
-
-            if sfc_encap:
-                correlation = "nsh"
-
-            if len(ingress_ports) != 1:
-                raise vimconn.VimConnNotSupportedException(
-                    "OpenStack VIM connector can only have 1 ingress port per SFI"
-                )
-
-            if len(egress_ports) != 1:
-                raise vimconn.VimConnNotSupportedException(
-                    "OpenStack VIM connector can only have 1 egress port per SFI"
-                )
-
-            sfi_dict = {
-                "name": name,
-                "ingress": ingress_ports[0],
-                "egress": egress_ports[0],
-                "service_function_parameters": {"correlation": correlation},
-            }
-            new_sfi = self.neutron.create_sfc_port_pair({"port_pair": sfi_dict})
-
-            return new_sfi["port_pair"]["id"]
-        except (
-            neExceptions.ConnectionFailed,
-            ksExceptions.ClientException,
-            neExceptions.NeutronException,
-            ConnectionError,
-        ) as e:
-            if new_sfi:
-                try:
-                    self.neutron.delete_sfc_port_pair(new_sfi["port_pair"]["id"])
-                except Exception:
-                    self.logger.error(
-                        "Creation of Service Function Instance failed, with "
-                        "subsequent deletion failure as well."
-                    )
-
-            self._format_exception(e)
-
-    def get_sfi(self, sfi_id):
-        self.logger.debug("Getting Service Function Instance %s from VIM", sfi_id)
-        filter_dict = {"id": sfi_id}
-        sfi_list = self.get_sfi_list(filter_dict)
-
-        if len(sfi_list) == 0:
-            raise vimconn.VimConnNotFoundException(
-                "Service Function Instance '{}' not found".format(sfi_id)
-            )
-        elif len(sfi_list) > 1:
-            raise vimconn.VimConnConflictException(
-                "Found more than one Service Function Instance with this criteria"
-            )
-
-        sfi = sfi_list[0]
-
-        return sfi
-
-    def get_sfi_list(self, filter_dict={}):
-        self.logger.debug(
-            "Getting Service Function Instances from VIM filter: '%s'", str(filter_dict)
-        )
-
-        try:
-            self._reload_connection()
-            filter_dict_os = filter_dict.copy()
-
-            if self.api_version3 and "tenant_id" in filter_dict_os:
-                filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")
-
-            sfi_dict = self.neutron.list_sfc_port_pairs(**filter_dict_os)
-            sfi_list = sfi_dict["port_pairs"]
-            self.__sfi_os2mano(sfi_list)
-
-            return sfi_list
-        except (
-            neExceptions.ConnectionFailed,
-            ksExceptions.ClientException,
-            neExceptions.NeutronException,
-            ConnectionError,
-        ) as e:
-            self._format_exception(e)
-
-    def delete_sfi(self, sfi_id):
-        self.logger.debug("Deleting Service Function Instance '%s' from VIM", sfi_id)
-
-        try:
-            self._reload_connection()
-            self.neutron.delete_sfc_port_pair(sfi_id)
-
-            return sfi_id
-        except (
-            neExceptions.ConnectionFailed,
-            neExceptions.NeutronException,
-            ksExceptions.ClientException,
-            neExceptions.NeutronException,
-            ConnectionError,
-        ) as e:
-            self._format_exception(e)
-
-    def new_sf(self, name, sfis, sfc_encap=True):
-        self.logger.debug("Adding a new Service Function to VIM, named '%s'", name)
-
-        try:
-            new_sf = None
-            self._reload_connection()
-            # correlation = None
-            # if sfc_encap:
-            #     correlation = "nsh"
-
-            for instance in sfis:
-                sfi = self.get_sfi(instance)
-
-                if sfi.get("sfc_encap") != sfc_encap:
-                    raise vimconn.VimConnNotSupportedException(
-                        "OpenStack VIM connector requires all SFIs of the "
-                        "same SF to share the same SFC Encapsulation"
-                    )
-
-            sf_dict = {"name": name, "port_pairs": sfis}
-            new_sf = self.neutron.create_sfc_port_pair_group(
-                {"port_pair_group": sf_dict}
-            )
-
-            return new_sf["port_pair_group"]["id"]
-        except (
-            neExceptions.ConnectionFailed,
-            ksExceptions.ClientException,
-            neExceptions.NeutronException,
-            ConnectionError,
-        ) as e:
-            if new_sf:
-                try:
-                    self.neutron.delete_sfc_port_pair_group(
-                        new_sf["port_pair_group"]["id"]
-                    )
-                except Exception:
-                    self.logger.error(
-                        "Creation of Service Function failed, with "
-                        "subsequent deletion failure as well."
-                    )
-
-            self._format_exception(e)
-
-    def get_sf(self, sf_id):
-        self.logger.debug("Getting Service Function %s from VIM", sf_id)
-        filter_dict = {"id": sf_id}
-        sf_list = self.get_sf_list(filter_dict)
-
-        if len(sf_list) == 0:
-            raise vimconn.VimConnNotFoundException(
-                "Service Function '{}' not found".format(sf_id)
-            )
-        elif len(sf_list) > 1:
-            raise vimconn.VimConnConflictException(
-                "Found more than one Service Function with this criteria"
-            )
-
-        sf = sf_list[0]
-
-        return sf
-
-    def get_sf_list(self, filter_dict={}):
-        self.logger.debug(
-            "Getting Service Function from VIM filter: '%s'", str(filter_dict)
-        )
-
-        try:
-            self._reload_connection()
-            filter_dict_os = filter_dict.copy()
-
-            if self.api_version3 and "tenant_id" in filter_dict_os:
-                filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")
-
-            sf_dict = self.neutron.list_sfc_port_pair_groups(**filter_dict_os)
-            sf_list = sf_dict["port_pair_groups"]
-            self.__sf_os2mano(sf_list)
-
-            return sf_list
-        except (
-            neExceptions.ConnectionFailed,
-            ksExceptions.ClientException,
-            neExceptions.NeutronException,
-            ConnectionError,
-        ) as e:
-            self._format_exception(e)
-
-    def delete_sf(self, sf_id):
-        self.logger.debug("Deleting Service Function '%s' from VIM", sf_id)
-
-        try:
-            self._reload_connection()
-            self.neutron.delete_sfc_port_pair_group(sf_id)
-
-            return sf_id
-        except (
-            neExceptions.ConnectionFailed,
-            neExceptions.NeutronException,
-            ksExceptions.ClientException,
-            neExceptions.NeutronException,
-            ConnectionError,
-        ) as e:
-            self._format_exception(e)
-
-    def new_sfp(self, name, classifications, sfs, sfc_encap=True, spi=None):
-        self.logger.debug("Adding a new Service Function Path to VIM, named '%s'", name)
-
-        try:
-            new_sfp = None
-            self._reload_connection()
-            # In networking-sfc the MPLS encapsulation is legacy
-            # should be used when no full SFC Encapsulation is intended
-            correlation = "mpls"
-
-            if sfc_encap:
-                correlation = "nsh"
-
-            sfp_dict = {
-                "name": name,
-                "flow_classifiers": classifications,
-                "port_pair_groups": sfs,
-                "chain_parameters": {"correlation": correlation},
-            }
-
-            if spi:
-                sfp_dict["chain_id"] = spi
-
-            new_sfp = self.neutron.create_sfc_port_chain({"port_chain": sfp_dict})
-
-            return new_sfp["port_chain"]["id"]
-        except (
-            neExceptions.ConnectionFailed,
-            ksExceptions.ClientException,
-            neExceptions.NeutronException,
-            ConnectionError,
-        ) as e:
-            if new_sfp:
-                try:
-                    self.neutron.delete_sfc_port_chain(new_sfp["port_chain"]["id"])
-                except Exception:
-                    self.logger.error(
-                        "Creation of Service Function Path failed, with "
-                        "subsequent deletion failure as well."
-                    )
-
-            self._format_exception(e)
-
-    def get_sfp(self, sfp_id):
-        self.logger.debug(" Getting Service Function Path %s from VIM", sfp_id)
-
-        filter_dict = {"id": sfp_id}
-        sfp_list = self.get_sfp_list(filter_dict)
-
-        if len(sfp_list) == 0:
-            raise vimconn.VimConnNotFoundException(
-                "Service Function Path '{}' not found".format(sfp_id)
-            )
-        elif len(sfp_list) > 1:
-            raise vimconn.VimConnConflictException(
-                "Found more than one Service Function Path with this criteria"
-            )
-
-        sfp = sfp_list[0]
-
-        return sfp
-
-    def get_sfp_list(self, filter_dict={}):
-        self.logger.debug(
-            "Getting Service Function Paths from VIM filter: '%s'", str(filter_dict)
-        )
-
-        try:
-            self._reload_connection()
-            filter_dict_os = filter_dict.copy()
-
-            if self.api_version3 and "tenant_id" in filter_dict_os:
-                filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")
-
-            sfp_dict = self.neutron.list_sfc_port_chains(**filter_dict_os)
-            sfp_list = sfp_dict["port_chains"]
-            self.__sfp_os2mano(sfp_list)
-
-            return sfp_list
-        except (
-            neExceptions.ConnectionFailed,
-            ksExceptions.ClientException,
-            neExceptions.NeutronException,
-            ConnectionError,
-        ) as e:
-            self._format_exception(e)
-
-    def delete_sfp(self, sfp_id):
-        self.logger.debug("Deleting Service Function Path '%s' from VIM", sfp_id)
-
-        try:
-            self._reload_connection()
-            self.neutron.delete_sfc_port_chain(sfp_id)
-
-            return sfp_id
-        except (
-            neExceptions.ConnectionFailed,
-            neExceptions.NeutronException,
-            ksExceptions.ClientException,
-            neExceptions.NeutronException,
-            ConnectionError,
-        ) as e:
-            self._format_exception(e)
-
-    def refresh_sfps_status(self, sfp_list):
-        """Get the status of the service function path
-        Params: the list of sfp identifiers
-        Returns a dictionary with:
-            vm_id:          #VIM id of this service function path
-                status:     #Mandatory. Text with one of:
-                            #  DELETED (not found at vim)
-                            #  VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
-                            #  OTHER (Vim reported other status not understood)
-                            #  ERROR (VIM indicates an ERROR status)
-                            #  ACTIVE,
-                            #  CREATING (on building process)
-                error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR
-                vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)F
-        """
-        sfp_dict = {}
-        self.logger.debug(
-            "refresh_sfps status: Getting tenant SFP information from VIM"
-        )
-
-        for sfp_id in sfp_list:
-            sfp = {}
-
-            try:
-                sfp_vim = self.get_sfp(sfp_id)
-
-                if sfp_vim["spi"]:
-                    sfp["status"] = vmStatus2manoFormat["ACTIVE"]
-                else:
-                    sfp["status"] = "OTHER"
-                    sfp["error_msg"] = "VIM status reported " + sfp["status"]
-
-                sfp["vim_info"] = self.serialize(sfp_vim)
-
-                if sfp_vim.get("fault"):
-                    sfp["error_msg"] = str(sfp_vim["fault"])
-            except vimconn.VimConnNotFoundException as e:
-                self.logger.error("Exception getting sfp status: %s", str(e))
-                sfp["status"] = "DELETED"
-                sfp["error_msg"] = str(e)
-            except vimconn.VimConnException as e:
-                self.logger.error("Exception getting sfp status: %s", str(e))
-                sfp["status"] = "VIM_ERROR"
-                sfp["error_msg"] = str(e)
-
-            sfp_dict[sfp_id] = sfp
-
-        return sfp_dict
-
-    def refresh_sfis_status(self, sfi_list):
-        """Get the status of the service function instances
-        Params: the list of sfi identifiers
-        Returns a dictionary with:
-            vm_id:          #VIM id of this service function instance
-                status:     #Mandatory. Text with one of:
-                            #  DELETED (not found at vim)
-                            #  VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
-                            #  OTHER (Vim reported other status not understood)
-                            #  ERROR (VIM indicates an ERROR status)
-                            #  ACTIVE,
-                            #  CREATING (on building process)
-                error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR
-                vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)
-        """
-        sfi_dict = {}
-        self.logger.debug(
-            "refresh_sfis status: Getting tenant sfi information from VIM"
-        )
-
-        for sfi_id in sfi_list:
-            sfi = {}
-
-            try:
-                sfi_vim = self.get_sfi(sfi_id)
-
-                if sfi_vim:
-                    sfi["status"] = vmStatus2manoFormat["ACTIVE"]
-                else:
-                    sfi["status"] = "OTHER"
-                    sfi["error_msg"] = "VIM status reported " + sfi["status"]
-
-                sfi["vim_info"] = self.serialize(sfi_vim)
-
-                if sfi_vim.get("fault"):
-                    sfi["error_msg"] = str(sfi_vim["fault"])
-            except vimconn.VimConnNotFoundException as e:
-                self.logger.error("Exception getting sfi status: %s", str(e))
-                sfi["status"] = "DELETED"
-                sfi["error_msg"] = str(e)
-            except vimconn.VimConnException as e:
-                self.logger.error("Exception getting sfi status: %s", str(e))
-                sfi["status"] = "VIM_ERROR"
-                sfi["error_msg"] = str(e)
-
-            sfi_dict[sfi_id] = sfi
-
-        return sfi_dict
-
-    def refresh_sfs_status(self, sf_list):
-        """Get the status of the service functions
-        Params: the list of sf identifiers
-        Returns a dictionary with:
-            vm_id:          #VIM id of this service function
-                status:     #Mandatory. Text with one of:
-                            #  DELETED (not found at vim)
-                            #  VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
-                            #  OTHER (Vim reported other status not understood)
-                            #  ERROR (VIM indicates an ERROR status)
-                            #  ACTIVE,
-                            #  CREATING (on building process)
-                error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR
-                vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)
-        """
-        sf_dict = {}
-        self.logger.debug("refresh_sfs status: Getting tenant sf information from VIM")
-
-        for sf_id in sf_list:
-            sf = {}
-
-            try:
-                sf_vim = self.get_sf(sf_id)
-
-                if sf_vim:
-                    sf["status"] = vmStatus2manoFormat["ACTIVE"]
-                else:
-                    sf["status"] = "OTHER"
-                    sf["error_msg"] = "VIM status reported " + sf_vim["status"]
-
-                sf["vim_info"] = self.serialize(sf_vim)
-
-                if sf_vim.get("fault"):
-                    sf["error_msg"] = str(sf_vim["fault"])
-            except vimconn.VimConnNotFoundException as e:
-                self.logger.error("Exception getting sf status: %s", str(e))
-                sf["status"] = "DELETED"
-                sf["error_msg"] = str(e)
-            except vimconn.VimConnException as e:
-                self.logger.error("Exception getting sf status: %s", str(e))
-                sf["status"] = "VIM_ERROR"
-                sf["error_msg"] = str(e)
-
-            sf_dict[sf_id] = sf
-
-        return sf_dict
-
-    def refresh_classifications_status(self, classification_list):
-        """Get the status of the classifications
-        Params: the list of classification identifiers
-        Returns a dictionary with:
-            vm_id:          #VIM id of this classifier
-                status:     #Mandatory. Text with one of:
-                            #  DELETED (not found at vim)
-                            #  VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
-                            #  OTHER (Vim reported other status not understood)
-                            #  ERROR (VIM indicates an ERROR status)
-                            #  ACTIVE,
-                            #  CREATING (on building process)
-                error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR
-                vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)
-        """
-        classification_dict = {}
-        self.logger.debug(
-            "refresh_classifications status: Getting tenant classification information from VIM"
-        )
-
-        for classification_id in classification_list:
-            classification = {}
-
-            try:
-                classification_vim = self.get_classification(classification_id)
-
-                if classification_vim:
-                    classification["status"] = vmStatus2manoFormat["ACTIVE"]
-                else:
-                    classification["status"] = "OTHER"
-                    classification["error_msg"] = (
-                        "VIM status reported " + classification["status"]
-                    )
-
-                classification["vim_info"] = self.serialize(classification_vim)
-
-                if classification_vim.get("fault"):
-                    classification["error_msg"] = str(classification_vim["fault"])
-            except vimconn.VimConnNotFoundException as e:
-                self.logger.error("Exception getting classification status: %s", str(e))
-                classification["status"] = "DELETED"
-                classification["error_msg"] = str(e)
-            except vimconn.VimConnException as e:
-                self.logger.error("Exception getting classification status: %s", str(e))
-                classification["status"] = "VIM_ERROR"
-                classification["error_msg"] = str(e)
-
-            classification_dict[classification_id] = classification
-
-        return classification_dict
-
     def new_affinity_group(self, affinity_group_data):
         """Adds a server group to VIM
             affinity_group_data contains a dictionary with information, keys:
diff --git a/RO-VIM-openvim/osm_rovim_openvim/vimconn_openvim.py b/RO-VIM-openvim/osm_rovim_openvim/vimconn_openvim.py
index 4f93336..3869e53 100644
--- a/RO-VIM-openvim/osm_rovim_openvim/vimconn_openvim.py
+++ b/RO-VIM-openvim/osm_rovim_openvim/vimconn_openvim.py
@@ -902,50 +902,6 @@
         except (requests.exceptions.RequestException, js_e.ValidationError) as e:
             self._format_request_exception(e)
 
-    def new_vminstancefromJSON(self, vm_data):
-        """Adds a VM instance to VIM"""
-        """Returns the instance identifier"""
-        try:
-            self._get_my_tenant()
-        except Exception as e:
-            return -vimconn.HTTP_Not_Found, str(e)
-        print("VIMConnector: Adding a new VM instance from JSON to VIM")
-        payload_req = vm_data
-        try:
-            vim_response = requests.post(
-                self.url + "/" + self.tenant + "/servers",
-                headers=self.headers_req,
-                data=payload_req,
-            )
-        except requests.exceptions.RequestException as e:
-            print("new_vminstancefromJSON Exception: ", e.args)
-            return -vimconn.HTTP_Not_Found, str(e.args[0])
-        # print vim_response
-        # print vim_response.status_code
-        if vim_response.status_code == 200:
-            # print vim_response.json()
-            # print json.dumps(vim_response.json(), indent=4)
-            res, http_content = self._format_in(vim_response, new_image_response_schema)
-            # print http_content
-            if res:
-                r = self._remove_extra_items(http_content, new_image_response_schema)
-                if r is not None:
-                    print("Warning: remove extra items ", r)
-                # print http_content
-                vminstance_id = http_content["server"]["id"]
-                print("Tenant image id: ", vminstance_id)
-                return vim_response.status_code, vminstance_id
-            else:
-                return -vimconn.HTTP_Bad_Request, http_content
-        else:
-            # print vim_response.text
-            jsonerror = self._format_jsonerror(vim_response)
-            text = 'Error in VIM "{}": not possible to add new vm instance. HTTP Response: {}. Error: {}'.format(
-                self.url, vim_response.status_code, jsonerror
-            )
-            # print text
-            return -vim_response.status_code, text
-
     def new_vminstance(
         self,
         name,
@@ -1263,378 +1219,6 @@
         except (requests.exceptions.RequestException, js_e.ValidationError) as e:
             self._format_request_exception(e)
 
-    # NOT USED METHODS in current version
-
-    def host_vim2gui(self, host, server_dict):
-        """Transform host dictionary from VIM format to GUI format,
-        and append to the server_dict
-        """
-        if type(server_dict) is not dict:
-            print(
-                "vimconnector.host_vim2gui() ERROR, param server_dict must be a dictionary"
-            )
-            return
-        RAD = {}
-        occupation = {}
-        for numa in host["host"]["numas"]:
-            RAD_item = {}
-            occupation_item = {}
-            # memory
-            RAD_item["memory"] = {
-                "size": str(numa["memory"]) + "GB",
-                "eligible": str(numa["hugepages"]) + "GB",
-            }
-            occupation_item["memory"] = str(numa["hugepages_consumed"]) + "GB"
-            # cpus
-            RAD_item["cpus"] = {}
-            RAD_item["cpus"]["cores"] = []
-            RAD_item["cpus"]["eligible_cores"] = []
-            occupation_item["cores"] = []
-            for _ in range(0, len(numa["cores"]) // 2):
-                RAD_item["cpus"]["cores"].append([])
-            for core in numa["cores"]:
-                RAD_item["cpus"]["cores"][core["core_id"]].append(core["thread_id"])
-                if "status" not in core:
-                    RAD_item["cpus"]["eligible_cores"].append(core["thread_id"])
-                if "instance_id" in core:
-                    occupation_item["cores"].append(core["thread_id"])
-            # ports
-            RAD_item["ports"] = {}
-            occupation_item["ports"] = {}
-            for iface in numa["interfaces"]:
-                RAD_item["ports"][iface["pci"]] = "speed:" + str(iface["Mbps"]) + "M"
-                occupation_item["ports"][iface["pci"]] = {
-                    "occupied": str(100 * iface["Mbps_consumed"] // iface["Mbps"]) + "%"
-                }
-
-            RAD[numa["numa_socket"]] = RAD_item
-            occupation[numa["numa_socket"]] = occupation_item
-        server_dict[host["host"]["name"]] = {"RAD": RAD, "occupation": occupation}
-
-    def get_hosts_info(self):
-        """Get the information of deployed hosts
-        Returns the hosts content"""
-        # obtain hosts list
-        url = self.url + "/hosts"
-        try:
-            vim_response = requests.get(url)
-        except requests.exceptions.RequestException as e:
-            print("get_hosts_info Exception: ", e.args)
-            return -vimconn.HTTP_Not_Found, str(e.args[0])
-        print(
-            "vim get", url, "response:", vim_response.status_code, vim_response.json()
-        )
-        # print vim_response.status_code
-        # print json.dumps(vim_response.json(), indent=4)
-        if vim_response.status_code != 200:
-            # TODO: get error
-            print(
-                "vimconnector.get_hosts_info error getting host list {} {}".format(
-                    vim_response.status_code, vim_response.json()
-                )
-            )
-            return -vim_response.status_code, "Error getting host list"
-
-        res, hosts = self._format_in(vim_response, get_hosts_response_schema)
-
-        if not res:
-            print(
-                "vimconnector.get_hosts_info error parsing GET HOSTS vim response",
-                hosts,
-            )
-            return vimconn.HTTP_Internal_Server_Error, hosts
-        # obtain hosts details
-        hosts_dict = {}
-        for host in hosts["hosts"]:
-            url = self.url + "/hosts/" + host["id"]
-            try:
-                vim_response = requests.get(url)
-            except requests.exceptions.RequestException as e:
-                print("get_hosts_info Exception: ", e.args)
-                return -vimconn.HTTP_Not_Found, str(e.args[0])
-            print(
-                "vim get",
-                url,
-                "response:",
-                vim_response.status_code,
-                vim_response.json(),
-            )
-            if vim_response.status_code != 200:
-                print(
-                    "vimconnector.get_hosts_info error getting detailed host {} {}".format(
-                        vim_response.status_code, vim_response.json()
-                    )
-                )
-                continue
-            res, host_detail = self._format_in(
-                vim_response, get_host_detail_response_schema
-            )
-            if not res:
-                print(
-                    "vimconnector.get_hosts_info error parsing GET HOSTS/{} vim response {}".format(
-                        host["id"], host_detail
-                    ),
-                )
-                continue
-            # print 'host id '+host['id'], json.dumps(host_detail, indent=4)
-            self.host_vim2gui(host_detail, hosts_dict)
-        return 200, hosts_dict
-
-    def get_hosts(self, vim_tenant):
-        """Get the hosts and deployed instances
-        Returns the hosts content"""
-        # obtain hosts list
-        url = self.url + "/hosts"
-        try:
-            vim_response = requests.get(url)
-        except requests.exceptions.RequestException as e:
-            print("get_hosts Exception: ", e.args)
-            return -vimconn.HTTP_Not_Found, str(e.args[0])
-        print(
-            "vim get", url, "response:", vim_response.status_code, vim_response.json()
-        )
-        # print vim_response.status_code
-        # print json.dumps(vim_response.json(), indent=4)
-        if vim_response.status_code != 200:
-            # TODO: get error
-            print(
-                "vimconnector.get_hosts error getting host list {} {}".format(
-                    vim_response.status_code, vim_response.json()
-                )
-            )
-            return -vim_response.status_code, "Error getting host list"
-
-        res, hosts = self._format_in(vim_response, get_hosts_response_schema)
-
-        if not res:
-            print("vimconnector.get_host error parsing GET HOSTS vim response", hosts)
-            return vimconn.HTTP_Internal_Server_Error, hosts
-        # obtain instances from hosts
-        for host in hosts["hosts"]:
-            url = self.url + "/" + vim_tenant + "/servers?hostId=" + host["id"]
-            try:
-                vim_response = requests.get(url)
-            except requests.exceptions.RequestException as e:
-                print("get_hosts Exception: ", e.args)
-                return -vimconn.HTTP_Not_Found, str(e.args[0])
-            print(
-                "vim get",
-                url,
-                "response:",
-                vim_response.status_code,
-                vim_response.json(),
-            )
-            if vim_response.status_code != 200:
-                print(
-                    "vimconnector.get_hosts error getting instances at host {} {}".format(
-                        vim_response.status_code, vim_response.json()
-                    )
-                )
-                continue
-            res, servers = self._format_in(vim_response, get_server_response_schema)
-            if not res:
-                print(
-                    "vimconnector.get_host error parsing GET SERVERS/{} vim response {}".format(
-                        host["id"], servers
-                    ),
-                )
-                continue
-            # print 'host id '+host['id'], json.dumps(host_detail, indent=4)
-            host["instances"] = servers["servers"]
-        return 200, hosts["hosts"]
-
-    def get_processor_rankings(self):
-        """Get the processor rankings in the VIM database"""
-        url = self.url + "/processor_ranking"
-        try:
-            vim_response = requests.get(url)
-        except requests.exceptions.RequestException as e:
-            print("get_processor_rankings Exception: ", e.args)
-            return -vimconn.HTTP_Not_Found, str(e.args[0])
-        print(
-            "vim get", url, "response:", vim_response.status_code, vim_response.json()
-        )
-        # print vim_response.status_code
-        # print json.dumps(vim_response.json(), indent=4)
-        if vim_response.status_code != 200:
-            # TODO: get error
-            print(
-                "vimconnector.get_processor_rankings error getting processor rankings {} {}".format(
-                    vim_response.status_code, vim_response.json()
-                )
-            )
-            return -vim_response.status_code, "Error getting processor rankings"
-
-        res, rankings = self._format_in(
-            vim_response, get_processor_rankings_response_schema
-        )
-        return res, rankings["rankings"]
-
-    def new_host(self, host_data):
-        """Adds a new host to VIM"""
-        """Returns status code of the VIM response"""
-        payload_req = host_data
-        try:
-            url = self.url_admin + "/hosts"
-            self.logger.info("Adding a new host POST %s", url)
-            vim_response = requests.post(
-                url, headers=self.headers_req, data=payload_req
-            )
-            self._check_http_request_response(vim_response)
-            self.logger.debug(vim_response.text)
-            # print json.dumps(vim_response.json(), indent=4)
-            response = vim_response.json()
-            js_v(response, new_host_response_schema)
-            r = self._remove_extra_items(response, new_host_response_schema)
-            if r is not None:
-                self.logger.warn("Warning: remove extra items %s", str(r))
-            host_id = response["host"]["id"]
-            return host_id
-        except (requests.exceptions.RequestException, js_e.ValidationError) as e:
-            self._format_request_exception(e)
-
-    def new_external_port(self, port_data):
-        """Adds a external port to VIM"""
-        """Returns the port identifier"""
-        # TODO change to logging exception code policies
-        print("VIMConnector: Adding a new external port")
-        payload_req = port_data
-        try:
-            vim_response = requests.post(
-                self.url_admin + "/ports", headers=self.headers_req, data=payload_req
-            )
-        except requests.exceptions.RequestException as e:
-            self.logger.error("new_external_port Exception: ", str(e))
-            return -vimconn.HTTP_Not_Found, str(e.args[0])
-        print(vim_response)
-        # print vim_response.status_code
-        if vim_response.status_code == 200:
-            # print vim_response.json()
-            # print json.dumps(vim_response.json(), indent=4)
-            res, http_content = self._format_in(vim_response, new_port_response_schema)
-            # print http_content
-            if res:
-                r = self._remove_extra_items(http_content, new_port_response_schema)
-                if r is not None:
-                    print("Warning: remove extra items ", r)
-                # print http_content
-                port_id = http_content["port"]["id"]
-                print("Port id: ", port_id)
-                return vim_response.status_code, port_id
-            else:
-                return -vimconn.HTTP_Bad_Request, http_content
-        else:
-            # print vim_response.text
-            jsonerror = self._format_jsonerror(vim_response)
-            text = 'Error in VIM "{}": not possible to add new external port. HTTP Response: {}. Error: {}'.format(
-                self.url_admin, vim_response.status_code, jsonerror
-            )
-            # print text
-            return -vim_response.status_code, text
-
-    def new_external_network(self, net_name, net_type):
-        """Adds a external network to VIM (shared)"""
-        """Returns the network identifier"""
-        # TODO change to logging exception code policies
-        print(
-            "VIMConnector: Adding external shared network to VIM (type "
-            + net_type
-            + "): "
-            + net_name
-        )
-
-        payload_req = (
-            '{"network":{"name": "'
-            + net_name
-            + '","shared":true,"type": "'
-            + net_type
-            + '"}}'
-        )
-        try:
-            vim_response = requests.post(
-                self.url + "/networks", headers=self.headers_req, data=payload_req
-            )
-        except requests.exceptions.RequestException as e:
-            self.logger.error("new_external_network Exception: ", e.args)
-            return -vimconn.HTTP_Not_Found, str(e.args[0])
-        print(vim_response)
-        # print vim_response.status_code
-        if vim_response.status_code == 200:
-            # print vim_response.json()
-            # print json.dumps(vim_response.json(), indent=4)
-            res, http_content = self._format_in(
-                vim_response, new_network_response_schema
-            )
-            # print http_content
-            if res:
-                r = self._remove_extra_items(http_content, new_network_response_schema)
-                if r is not None:
-                    print("Warning: remove extra items ", r)
-                # print http_content
-                network_id = http_content["network"]["id"]
-                print("Network id: ", network_id)
-                return vim_response.status_code, network_id
-            else:
-                return -vimconn.HTTP_Bad_Request, http_content
-        else:
-            # print vim_response.text
-            jsonerror = self._format_jsonerror(vim_response)
-            text = 'Error in VIM "{}": not possible to add new external network. HTTP Response: {}. Error: {}'.format(
-                self.url, vim_response.status_code, jsonerror
-            )
-            # print text
-            return -vim_response.status_code, text
-
-    def connect_port_network(self, port_id, network_id, admin=False):
-        """Connects a external port to a network"""
-        """Returns status code of the VIM response"""
-        # TODO change to logging exception code policies
-        print("VIMConnector: Connecting external port to network")
-
-        payload_req = '{"port":{"network_id":"' + network_id + '"}}'
-        if admin:
-            if self.url_admin is None:
-                return (
-                    -vimconn.HTTP_Unauthorized,
-                    "datacenter cannot contain  admin URL",
-                )
-            url = self.url_admin
-        else:
-            url = self.url
-        try:
-            vim_response = requests.put(
-                url + "/ports/" + port_id, headers=self.headers_req, data=payload_req
-            )
-        except requests.exceptions.RequestException as e:
-            print("connect_port_network Exception: ", e.args)
-            return -vimconn.HTTP_Not_Found, str(e.args[0])
-        print(vim_response)
-        # print vim_response.status_code
-        if vim_response.status_code == 200:
-            # print vim_response.json()
-            # print json.dumps(vim_response.json(), indent=4)
-            res, http_content = self._format_in(vim_response, new_port_response_schema)
-            # print http_content
-            if res:
-                r = self._remove_extra_items(http_content, new_port_response_schema)
-                if r is not None:
-                    print("Warning: remove extra items ", r)
-                # print http_content
-                port_id = http_content["port"]["id"]
-                print("Port id: ", port_id)
-                return vim_response.status_code, port_id
-            else:
-                return -vimconn.HTTP_Bad_Request, http_content
-        else:
-            print(vim_response.text)
-            jsonerror = self._format_jsonerror(vim_response)
-            text = (
-                'Error in VIM "{}": not possible to connect external port to network. HTTP Response: {}.'
-                " Error: {}".format(self.url_admin, vim_response.status_code, jsonerror)
-            )
-            print(text)
-            return -vim_response.status_code, text
-
     def migrate_instance(self, vm_id, compute_host=None):
         """
         Migrate a vdu
diff --git a/RO-VIM-vmware/osm_rovim_vmware/vimconn_vmware.py b/RO-VIM-vmware/osm_rovim_vmware/vimconn_vmware.py
index e06b2f9..456db81 100644
--- a/RO-VIM-vmware/osm_rovim_vmware/vimconn_vmware.py
+++ b/RO-VIM-vmware/osm_rovim_vmware/vimconn_vmware.py
@@ -693,113 +693,6 @@
                 "Failed create a new network {}".format(net_name)
             )
 
-    def get_vcd_network_list(self):
-        """Method available organization for a logged in tenant
-
-        Returns:
-            The return vca object that letter can be used to connect to vcloud direct as admin
-        """
-
-        self.logger.debug(
-            "get_vcd_network_list(): retrieving network list for vcd {}".format(
-                self.tenant_name
-            )
-        )
-
-        if not self.tenant_name:
-            raise vimconn.VimConnConnectionException("Tenant name is empty.")
-
-        _, vdc = self.get_vdc_details()
-        if vdc is None:
-            raise vimconn.VimConnConnectionException(
-                "Can't retrieve information for a VDC {}".format(self.tenant_name)
-            )
-
-        vdc_uuid = vdc.get("id").split(":")[3]
-        if self.client._session:
-            headers = {
-                "Accept": "application/*+xml;version=" + API_VERSION,
-                "x-vcloud-authorization": self.client._session.headers[
-                    "x-vcloud-authorization"
-                ],
-            }
-            response = self.perform_request(
-                req_type="GET", url=vdc.get("href"), headers=headers
-            )
-
-        if response.status_code != 200:
-            self.logger.error("Failed to get vdc content")
-            raise vimconn.VimConnNotFoundException("Failed to get vdc content")
-        else:
-            content = XmlElementTree.fromstring(response.text)
-
-        network_list = []
-        try:
-            for item in content:
-                if item.tag.split("}")[-1] == "AvailableNetworks":
-                    for net in item:
-                        response = self.perform_request(
-                            req_type="GET", url=net.get("href"), headers=headers
-                        )
-
-                        if response.status_code != 200:
-                            self.logger.error("Failed to get network content")
-                            raise vimconn.VimConnNotFoundException(
-                                "Failed to get network content"
-                            )
-                        else:
-                            net_details = XmlElementTree.fromstring(response.text)
-
-                            filter_dict = {}
-                            net_uuid = net_details.get("id").split(":")
-
-                            if len(net_uuid) != 4:
-                                continue
-                            else:
-                                net_uuid = net_uuid[3]
-                                # create dict entry
-                                self.logger.debug(
-                                    "get_vcd_network_list(): Adding network {} "
-                                    "to a list vcd id {} network {}".format(
-                                        net_uuid, vdc_uuid, net_details.get("name")
-                                    )
-                                )
-                                filter_dict["name"] = net_details.get("name")
-                                filter_dict["id"] = net_uuid
-
-                                if [
-                                    i.text
-                                    for i in net_details
-                                    if i.tag.split("}")[-1] == "IsShared"
-                                ][0] == "true":
-                                    shared = True
-                                else:
-                                    shared = False
-
-                                filter_dict["shared"] = shared
-                                filter_dict["tenant_id"] = vdc_uuid
-
-                                if int(net_details.get("status")) == 1:
-                                    filter_dict["admin_state_up"] = True
-                                else:
-                                    filter_dict["admin_state_up"] = False
-
-                                filter_dict["status"] = "ACTIVE"
-                                filter_dict["type"] = "bridge"
-                                network_list.append(filter_dict)
-                                self.logger.debug(
-                                    "get_vcd_network_list adding entry {}".format(
-                                        filter_dict
-                                    )
-                                )
-        except Exception:
-            self.logger.debug("Error in get_vcd_network_list", exc_info=True)
-            pass
-
-        self.logger.debug("get_vcd_network_list returning {}".format(network_list))
-
-        return network_list
-
     def get_network_list(self, filter_dict={}):
         """Obtain tenant networks of VIM
         Filter_dict can be:
@@ -936,7 +829,8 @@
 
     def get_network(self, net_id):
         """Method obtains network details of net_id VIM network
-        Return a dict with  the fields at filter_dict (see get_network_list) plus some VIM specific>}, ...]"""
+        Return a dict with  the fields at filter_dict (see get_network_list) plus some VIM specific>}, ...]
+        """
         try:
             _, vdc = self.get_vdc_details()
             vdc_id = vdc.get("id").split(":")[3]
@@ -1026,19 +920,6 @@
         :param created_items: dictionary with extra items to be deleted. provided by method new_network
         Returns the network identifier or raises an exception upon error or when network is not found
         """
-
-        # ############# Stub code for SRIOV #################
-        #         dvport_group = self.get_dvport_group(net_id)
-        #         if dvport_group:
-        #             #delete portgroup
-        #             status = self.destroy_dvport_group(net_id)
-        #             if status:
-        #                 # Remove vlanID from persistent info
-        #                 if net_id in self.persistent_info["used_vlanIDs"]:
-        #                     del self.persistent_info["used_vlanIDs"][net_id]
-        #
-        #                 return net_id
-
         vcd_network = self.get_vcd_network(network_uuid=net_id)
         if vcd_network is not None and vcd_network:
             if self.delete_network_action(network_uuid=net_id):
@@ -1800,69 +1681,6 @@
                 "Exception occured while retriving catalog items {}".format(exp)
             )
 
-    def get_vappid(self, vdc=None, vapp_name=None):
-        """Method takes vdc object and vApp name and returns vapp uuid or None
-
-        Args:
-            vdc: The VDC object.
-            vapp_name: is application vappp name identifier
-
-        Returns:
-                The return vApp name otherwise None
-        """
-        if vdc is None or vapp_name is None:
-            return None
-
-        # UUID has following format https://host/api/vApp/vapp-30da58a3-e7c7-4d09-8f68-d4c8201169cf
-        try:
-            refs = [
-                ref
-                for ref in vdc.ResourceEntities.ResourceEntity
-                if ref.name == vapp_name
-                and ref.type_ == "application/vnd.vmware.vcloud.vApp+xml"
-            ]
-
-            if len(refs) == 1:
-                return refs[0].href.split("vapp")[1][1:]
-        except Exception as e:
-            self.logger.exception(e)
-            return False
-
-        return None
-
-    def check_vapp(self, vdc=None, vapp_uuid=None):
-        """Method Method returns True or False if vapp deployed in vCloud director
-
-        Args:
-            vca: Connector to VCA
-            vdc: The VDC object.
-            vappid: vappid is application identifier
-
-        Returns:
-            The return True if vApp deployed
-            :param vdc:
-            :param vapp_uuid:
-        """
-        try:
-            refs = [
-                ref
-                for ref in vdc.ResourceEntities.ResourceEntity
-                if ref.type_ == "application/vnd.vmware.vcloud.vApp+xml"
-            ]
-
-            for ref in refs:
-                vappid = ref.href.split("vapp")[1][1:]
-                # find vapp with respected vapp uuid
-
-                if vappid == vapp_uuid:
-                    return True
-        except Exception as e:
-            self.logger.exception(e)
-
-            return False
-
-        return False
-
     def get_namebyvappid(self, vapp_uuid=None):
         """Method returns vApp name from vCD and lookup done by vapp_id.
 
@@ -2841,59 +2659,6 @@
                 "The upload iso task failed with status {}".format(result.get("status"))
             )
 
-    def get_vcd_availibility_zones(self, respool_href, headers):
-        """Method to find presence of av zone is VIM resource pool
-
-        Args:
-            respool_href - resource pool href
-            headers - header information
-
-        Returns:
-           vcd_az - list of azone present in vCD
-        """
-        vcd_az = []
-        url = respool_href
-        resp = self.perform_request(req_type="GET", url=respool_href, headers=headers)
-
-        if resp.status_code != requests.codes.ok:
-            self.logger.debug(
-                "REST API call {} failed. Return status code {}".format(
-                    url, resp.status_code
-                )
-            )
-        else:
-            # Get the href to hostGroups and find provided hostGroup is present in it
-            resp_xml = XmlElementTree.fromstring(resp.content)
-            for child in resp_xml:
-                if "VMWProviderVdcResourcePool" in child.tag:
-                    for schild in child:
-                        if "Link" in schild.tag:
-                            if (
-                                schild.attrib.get("type")
-                                == "application/vnd.vmware.admin.vmwHostGroupsType+xml"
-                            ):
-                                hostGroup = schild.attrib.get("href")
-                                hg_resp = self.perform_request(
-                                    req_type="GET", url=hostGroup, headers=headers
-                                )
-
-                                if hg_resp.status_code != requests.codes.ok:
-                                    self.logger.debug(
-                                        "REST API call {} failed. Return status code {}".format(
-                                            hostGroup, hg_resp.status_code
-                                        )
-                                    )
-                                else:
-                                    hg_resp_xml = XmlElementTree.fromstring(
-                                        hg_resp.content
-                                    )
-                                    for hostGroup in hg_resp_xml:
-                                        if "HostGroup" in hostGroup.tag:
-                                            # append host group name to the list
-                                            vcd_az.append(hostGroup.attrib.get("name"))
-
-        return vcd_az
-
     def set_availability_zones(self):
         """
         Set vim availability zone
@@ -3420,21 +3185,6 @@
 
         raise vimconn.VimConnException(msg)
 
-    # #
-    # #
-    # #  based on current discussion
-    # #
-    # #
-    # #  server:
-    #   created: '2016-09-08T11:51:58'
-    #   description: simple-instance.linux1.1
-    #   flavor: ddc6776e-75a9-11e6-ad5f-0800273e724c
-    #   hostId: e836c036-74e7-11e6-b249-0800273e724c
-    #   image: dde30fe6-75a9-11e6-ad5f-0800273e724c
-    #   status: ACTIVE
-    #   error_msg:
-    #   interfaces: …
-    #
     def get_vminstance(self, vim_vm_uuid=None):
         """Returns the VM instance information from VIM"""
         self.logger.debug("Client requesting vm instance {} ".format(vim_vm_uuid))
@@ -4195,14 +3945,6 @@
 
         return console_dict
 
-    # NOT USED METHODS in current version
-
-    def host_vim2gui(self, host, server_dict):
-        """Transform host dictionary from VIM format to GUI format,
-        and append to the server_dict
-        """
-        raise vimconn.VimConnNotImplemented("Should have implemented this")
-
     def get_hosts_info(self):
         """Get the information of deployed hosts
         Returns the hosts content"""
@@ -4213,35 +3955,6 @@
         Returns the hosts content"""
         raise vimconn.VimConnNotImplemented("Should have implemented this")
 
-    def get_processor_rankings(self):
-        """Get the processor rankings in the VIM database"""
-        raise vimconn.VimConnNotImplemented("Should have implemented this")
-
-    def new_host(self, host_data):
-        """Adds a new host to VIM"""
-        """Returns status code of the VIM response"""
-        raise vimconn.VimConnNotImplemented("Should have implemented this")
-
-    def new_external_port(self, port_data):
-        """Adds a external port to VIM"""
-        """Returns the port identifier"""
-        raise vimconn.VimConnNotImplemented("Should have implemented this")
-
-    def new_external_network(self, net_name, net_type):
-        """Adds a external network to VIM (shared)"""
-        """Returns the network identifier"""
-        raise vimconn.VimConnNotImplemented("Should have implemented this")
-
-    def connect_port_network(self, port_id, network_id, admin=False):
-        """Connects a external port to a network"""
-        """Returns status code of the VIM response"""
-        raise vimconn.VimConnNotImplemented("Should have implemented this")
-
-    def new_vminstancefromJSON(self, vm_data):
-        """Adds a VM instance to VIM"""
-        """Returns the instance identifier"""
-        raise vimconn.VimConnNotImplemented("Should have implemented this")
-
     def get_network_name_by_id(self, network_uuid=None):
         """Method gets vcloud director network named based on supplied uuid.
 
@@ -4586,76 +4299,6 @@
 
         return None
 
-    def get_vapp_list(self, vdc_name=None):
-        """
-        Method retrieves vApp list deployed vCloud director and returns a dictionary
-        contains a list of all vapp deployed for queried VDC.
-        The key for a dictionary is vApp UUID
-
-
-        Args:
-            vca - is active VCA connection.
-            vdc_name - is a vdc name that will be used to query vms action
-
-            Returns:
-                The return dictionary and key for each entry vapp UUID
-        """
-        vapp_dict = {}
-
-        if vdc_name is None:
-            return vapp_dict
-
-        content = self.vms_view_action(vdc_name=vdc_name)
-        try:
-            vm_list_xmlroot = XmlElementTree.fromstring(content)
-            for vm_xml in vm_list_xmlroot:
-                if vm_xml.tag.split("}")[1] == "VMRecord":
-                    if vm_xml.attrib["isVAppTemplate"] == "true":
-                        rawuuid = vm_xml.attrib["container"].split("/")[-1:]
-                        if "vappTemplate-" in rawuuid[0]:
-                            # vm in format vappTemplate-e63d40e7-4ff5-4c6d-851f-96c1e4da86a5 we remove
-                            # vm and use raw UUID as key
-                            vapp_dict[rawuuid[0][13:]] = vm_xml.attrib
-        except Exception:
-            pass
-
-        return vapp_dict
-
-    def get_vm_list(self, vdc_name=None):
-        """
-        Method retrieves VM's list deployed vCloud director. It returns a dictionary
-        contains a list of all VM's deployed for queried VDC.
-        The key for a dictionary is VM UUID
-
-
-        Args:
-            vca - is active VCA connection.
-            vdc_name - is a vdc name that will be used to query vms action
-
-            Returns:
-                The return dictionary and key for each entry vapp UUID
-        """
-        vm_dict = {}
-
-        if vdc_name is None:
-            return vm_dict
-
-        content = self.vms_view_action(vdc_name=vdc_name)
-        try:
-            vm_list_xmlroot = XmlElementTree.fromstring(content)
-            for vm_xml in vm_list_xmlroot:
-                if vm_xml.tag.split("}")[1] == "VMRecord":
-                    if vm_xml.attrib["isVAppTemplate"] == "false":
-                        rawuuid = vm_xml.attrib["href"].split("/")[-1:]
-                        if "vm-" in rawuuid[0]:
-                            # vm in format vm-e63d40e7-4ff5-4c6d-851f-96c1e4da86a5 we remove
-                            #  vm and use raw UUID as key
-                            vm_dict[rawuuid[0][3:]] = vm_xml.attrib
-        except Exception:
-            pass
-
-        return vm_dict
-
     def get_vapp(self, vdc_name=None, vapp_name=None, isuuid=False):
         """
         Method retrieves VM deployed vCloud director. It returns VM attribute as dictionary
@@ -5367,117 +5010,6 @@
 
         return None
 
-    def create_vdc_rest(self, vdc_name=None):
-        """
-        Method create network in vCloud director
-
-        Args:
-            vdc_name - vdc name to be created
-            Returns:
-                The return response
-        """
-        self.logger.info("Creating new vdc {}".format(vdc_name))
-        vca = self.connect_as_admin()
-
-        if not vca:
-            raise vimconn.VimConnConnectionException("Failed to connect vCD")
-
-        if vdc_name is None:
-            return None
-
-        url_list = [self.url, "/api/admin/org/", self.org_uuid]
-        vm_list_rest_call = "".join(url_list)
-
-        if vca._session:
-            headers = {
-                "Accept": "application/*+xml;version=" + API_VERSION,
-                "x-vcloud-authorization": self.client._session.headers[
-                    "x-vcloud-authorization"
-                ],
-            }
-            response = self.perform_request(
-                req_type="GET", url=vm_list_rest_call, headers=headers
-            )
-            provider_vdc_ref = None
-            add_vdc_rest_url = None
-            # available_networks = None
-
-            if response.status_code != requests.codes.ok:
-                self.logger.debug(
-                    "REST API call {} failed. Return status code {}".format(
-                        vm_list_rest_call, response.status_code
-                    )
-                )
-
-                return None
-            else:
-                try:
-                    vm_list_xmlroot = XmlElementTree.fromstring(response.text)
-                    for child in vm_list_xmlroot:
-                        # application/vnd.vmware.admin.providervdc+xml
-                        if child.tag.split("}")[1] == "Link":
-                            if (
-                                child.attrib.get("type")
-                                == "application/vnd.vmware.admin.createVdcParams+xml"
-                                and child.attrib.get("rel") == "add"
-                            ):
-                                add_vdc_rest_url = child.attrib.get("href")
-                except Exception:
-                    self.logger.debug(
-                        "Failed parse respond for rest api call {}".format(
-                            vm_list_rest_call
-                        )
-                    )
-                    self.logger.debug("Respond body {}".format(response.text))
-
-                    return None
-
-                response = self.get_provider_rest(vca=vca)
-                try:
-                    vm_list_xmlroot = XmlElementTree.fromstring(response)
-                    for child in vm_list_xmlroot:
-                        if child.tag.split("}")[1] == "ProviderVdcReferences":
-                            for sub_child in child:
-                                provider_vdc_ref = sub_child.attrib.get("href")
-                except Exception:
-                    self.logger.debug(
-                        "Failed parse respond for rest api call {}".format(
-                            vm_list_rest_call
-                        )
-                    )
-                    self.logger.debug("Respond body {}".format(response))
-
-                    return None
-
-                if add_vdc_rest_url is not None and provider_vdc_ref is not None:
-                    data = """ <CreateVdcParams name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5"><Description>{1:s}</Description>
-                            <AllocationModel>ReservationPool</AllocationModel>
-                            <ComputeCapacity><Cpu><Units>MHz</Units><Allocated>2048</Allocated><Limit>2048</Limit></Cpu>
-                            <Memory><Units>MB</Units><Allocated>2048</Allocated><Limit>2048</Limit></Memory>
-                            </ComputeCapacity><NicQuota>0</NicQuota><NetworkQuota>100</NetworkQuota>
-                            <VdcStorageProfile><Enabled>true</Enabled><Units>MB</Units><Limit>20480</Limit><Default>true</Default></VdcStorageProfile>
-                            <ProviderVdcReference
-                            name="Main Provider"
-                            href="{2:s}" />
-                    <UsesFastProvisioning>true</UsesFastProvisioning></CreateVdcParams>""".format(
-                        escape(vdc_name), escape(vdc_name), provider_vdc_ref
-                    )
-                    headers[
-                        "Content-Type"
-                    ] = "application/vnd.vmware.admin.createVdcParams+xml"
-                    response = self.perform_request(
-                        req_type="POST",
-                        url=add_vdc_rest_url,
-                        headers=headers,
-                        data=data,
-                    )
-
-                    # if we all ok we respond with content otherwise by default None
-                    if response.status_code == 201:
-                        return response.text
-
-        return None
-
     def get_vapp_details_rest(self, vapp_uuid=None, need_admin_access=False):
         """
         Method retrieve vapp detail from vCloud director
@@ -5675,33 +5207,6 @@
 
         return parsed_respond
 
-    def acquire_console(self, vm_uuid=None):
-        if vm_uuid is None:
-            return None
-
-        if self.client._session:
-            headers = {
-                "Accept": "application/*+xml;version=" + API_VERSION,
-                "x-vcloud-authorization": self.client._session.headers[
-                    "x-vcloud-authorization"
-                ],
-            }
-            vm_dict = self.get_vapp_details_rest(vapp_uuid=vm_uuid)
-            console_dict = vm_dict["acquireTicket"]
-            console_rest_call = console_dict["href"]
-
-            response = self.perform_request(
-                req_type="POST", url=console_rest_call, headers=headers
-            )
-
-            if response.status_code == 403:
-                response = self.retry_rest("POST", console_rest_call)
-
-            if response.status_code == requests.codes.ok:
-                return response.text
-
-        return None
-
     def modify_vm_disk(self, vapp_uuid, flavor_disk):
         """
         Method retrieve vm disk details
@@ -7042,197 +6547,6 @@
                 "affinity".format(exp)
             )
 
-    def cloud_init(self, vapp, cloud_config):
-        """
-        Method to inject ssh-key
-        vapp - vapp object
-        cloud_config a dictionary with:
-                'key-pairs': (optional) list of strings with the public key to be inserted to the default user
-                'users': (optional) list of users to be inserted, each item is a dict with:
-                    'name': (mandatory) user name,
-                    'key-pairs': (optional) list of strings with the public key to be inserted to the user
-                'user-data': (optional) can be a string with the text script to be passed directly to cloud-init,
-                    or a list of strings, each one contains a script to be passed, usually with a MIMEmultipart file
-                'config-files': (optional). List of files to be transferred. Each item is a dict with:
-                    'dest': (mandatory) string with the destination absolute path
-                    'encoding': (optional, by default text). Can be one of:
-                        'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
-                    'content' (mandatory): string with the content of the file
-                    'permissions': (optional) string with file permissions, typically octal notation '0644'
-                    'owner': (optional) file owner, string with the format 'owner:group'
-                'boot-data-drive': boolean to indicate if user-data must be passed using a boot drive (hard disk
-        """
-        try:
-            if not isinstance(cloud_config, dict):
-                raise Exception(
-                    "cloud_init : parameter cloud_config is not a dictionary"
-                )
-            else:
-                key_pairs = []
-                userdata = []
-
-                if "key-pairs" in cloud_config:
-                    key_pairs = cloud_config["key-pairs"]
-
-                if "users" in cloud_config:
-                    userdata = cloud_config["users"]
-
-                self.logger.debug("cloud_init : Guest os customization started..")
-                customize_script = self.format_script(
-                    key_pairs=key_pairs, users_list=userdata
-                )
-                customize_script = customize_script.replace("&", "&amp;")
-                self.guest_customization(vapp, customize_script)
-        except Exception as exp:
-            self.logger.error(
-                "cloud_init : exception occurred while injecting " "ssh-key"
-            )
-
-            raise vimconn.VimConnException(
-                "cloud_init : Error {} failed to inject " "ssh-key".format(exp)
-            )
-
-    def format_script(self, key_pairs=[], users_list=[]):
-        bash_script = """#!/bin/sh
-echo performing customization tasks with param $1 at `date "+DATE: %Y-%m-%d - TIME: %H:%M:%S"`>> /root/customization.log
-if [ "$1" = "precustomization" ];then
-    echo performing precustomization tasks   on `date "+DATE: %Y-%m-%d - TIME: %H:%M:%S"` >> /root/customization.log
-"""
-
-        keys = "\n".join(key_pairs)
-        if keys:
-            keys_data = """
-            if [ ! -d /root/.ssh ];then
-                mkdir /root/.ssh
-                chown root:root /root/.ssh
-                chmod 700 /root/.ssh
-                touch /root/.ssh/authorized_keys
-                chown root:root /root/.ssh/authorized_keys
-                chmod 600 /root/.ssh/authorized_keys
-                # make centos with selinux happy
-                which restorecon && restorecon -Rv /root/.ssh
-            else
-                touch /root/.ssh/authorized_keys
-                chown root:root /root/.ssh/authorized_keys
-                chmod 600 /root/.ssh/authorized_keys
-            fi
-            echo '{key}' >> /root/.ssh/authorized_keys
-            """.format(
-                key=keys
-            )
-
-            bash_script += keys_data
-
-        for user in users_list:
-            if "name" in user:
-                user_name = user["name"]
-
-            if "key-pairs" in user:
-                user_keys = "\n".join(user["key-pairs"])
-            else:
-                user_keys = None
-
-            add_user_name = """
-                useradd -d /home/{user_name} -m -g users -s /bin/bash {user_name}
-                """.format(
-                user_name=user_name
-            )
-
-            bash_script += add_user_name
-
-            if user_keys:
-                user_keys_data = """
-                mkdir /home/{user_name}/.ssh
-                chown {user_name}:{user_name} /home/{user_name}/.ssh
-                chmod 700 /home/{user_name}/.ssh
-                touch /home/{user_name}/.ssh/authorized_keys
-                chown {user_name}:{user_name} /home/{user_name}/.ssh/authorized_keys
-                chmod 600 /home/{user_name}/.ssh/authorized_keys
-                # make centos with selinux happy
-                which restorecon && restorecon -Rv /home/{user_name}/.ssh
-                echo '{user_key}' >> /home/{user_name}/.ssh/authorized_keys
-                """.format(
-                    user_name=user_name, user_key=user_keys
-                )
-                bash_script += user_keys_data
-
-        return bash_script + "\n\tfi"
-
-    def guest_customization(self, vapp, customize_script):
-        """
-        Method to customize guest os
-        vapp - Vapp object
-        customize_script - Customize script to be run at first boot of VM.
-        """
-        for vm in vapp.get_all_vms():
-            vm_id = vm.get("id").split(":")[-1]
-            vm_name = vm.get("name")
-            vm_name = vm_name.replace("_", "-")
-
-            vm_customization_url = (
-                "{}/api/vApp/vm-{}/guestCustomizationSection/".format(self.url, vm_id)
-            )
-            headers = {
-                "Accept": "application/*+xml;version=" + API_VERSION,
-                "x-vcloud-authorization": self.client._session.headers[
-                    "x-vcloud-authorization"
-                ],
-            }
-
-            headers[
-                "Content-Type"
-            ] = "application/vnd.vmware.vcloud.guestCustomizationSection+xml"
-
-            data = """<GuestCustomizationSection
-                           xmlns="http://www.vmware.com/vcloud/v1.5"
-                           xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1"
-                           ovf:required="false" href="{}"
-                             type="application/vnd.vmware.vcloud.guestCustomizationSection+xml">
-                           <ovf:Info>Specifies Guest OS Customization Settings</ovf:Info>
-                           <Enabled>true</Enabled>
-                           <ChangeSid>false</ChangeSid>
-                           <VirtualMachineId>{}</VirtualMachineId>
-                           <JoinDomainEnabled>false</JoinDomainEnabled>
-                           <UseOrgSettings>false</UseOrgSettings>
-                           <AdminPasswordEnabled>false</AdminPasswordEnabled>
-                           <AdminPasswordAuto>true</AdminPasswordAuto>
-                           <AdminAutoLogonEnabled>false</AdminAutoLogonEnabled>
-                           <AdminAutoLogonCount>0</AdminAutoLogonCount>
-                           <ResetPasswordRequired>false</ResetPasswordRequired>
-                           <CustomizationScript>{}</CustomizationScript>
-                           <ComputerName>{}</ComputerName>
-                           <Link href="{}"
-                             type="application/vnd.vmware.vcloud.guestCustomizationSection+xml" rel="edit"/>
-                       </GuestCustomizationSection>
-                   """.format(
-                vm_customization_url,
-                vm_id,
-                customize_script,
-                vm_name,
-                vm_customization_url,
-            )
-
-            response = self.perform_request(
-                req_type="PUT", url=vm_customization_url, headers=headers, data=data
-            )
-            if response.status_code == 202:
-                guest_task = self.get_task_from_response(response.text)
-                self.client.get_task_monitor().wait_for_success(task=guest_task)
-                self.logger.info(
-                    "guest_customization : customized guest os task "
-                    "completed for VM {}".format(vm_name)
-                )
-            else:
-                self.logger.error(
-                    "guest_customization : task for customized guest os"
-                    "failed for VM {}".format(vm_name)
-                )
-
-                raise vimconn.VimConnException(
-                    "guest_customization : failed to perform"
-                    "guest os customization on VM {}".format(vm_name)
-                )
-
     def add_new_disk(self, vapp_uuid, disk_size):
         """
         Method to create an empty vm disk
@@ -7762,157 +7076,6 @@
         elif exp_type == "NotFound":
             raise vimconn.VimConnNotFoundException(message=msg)
 
-    def add_sriov(self, vapp_uuid, sriov_nets, vmname_andid):
-        """
-        Method to attach SRIOV adapters to VM
-
-         Args:
-            vapp_uuid - uuid of vApp/VM
-            sriov_nets - SRIOV devices infromation as specified in VNFD (flavor)
-            vmname_andid - vmname
-
-        Returns:
-            The status of add SRIOV adapter task , vm object and
-            vcenter_conect object
-        """
-        vm_obj = None
-        vcenter_conect, content = self.get_vcenter_content()
-        vm_moref_id = self.get_vm_moref_id(vapp_uuid)
-
-        if vm_moref_id:
-            try:
-                no_of_sriov_devices = len(sriov_nets)
-                if no_of_sriov_devices > 0:
-                    # Get VM and its host
-                    host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
-                    self.logger.info(
-                        "VM {} is currently on host {}".format(vm_obj, host_obj)
-                    )
-
-                    if host_obj and vm_obj:
-                        # get SRIOV devies from host on which vapp is currently installed
-                        avilable_sriov_devices = self.get_sriov_devices(
-                            host_obj,
-                            no_of_sriov_devices,
-                        )
-
-                        if len(avilable_sriov_devices) == 0:
-                            # find other hosts with active pci devices
-                            (
-                                new_host_obj,
-                                avilable_sriov_devices,
-                            ) = self.get_host_and_sriov_devices(
-                                content,
-                                no_of_sriov_devices,
-                            )
-
-                            if (
-                                new_host_obj is not None
-                                and len(avilable_sriov_devices) > 0
-                            ):
-                                # Migrate vm to the host where SRIOV devices are available
-                                self.logger.info(
-                                    "Relocate VM {} on new host {}".format(
-                                        vm_obj, new_host_obj
-                                    )
-                                )
-                                task = self.relocate_vm(new_host_obj, vm_obj)
-
-                                if task is not None:
-                                    result = self.wait_for_vcenter_task(
-                                        task, vcenter_conect
-                                    )
-                                    self.logger.info(
-                                        "Migrate VM status: {}".format(result)
-                                    )
-                                    host_obj = new_host_obj
-                                else:
-                                    self.logger.info(
-                                        "Fail to migrate VM : {}".format(result)
-                                    )
-
-                                    raise vimconn.VimConnNotFoundException(
-                                        "Fail to migrate VM : {} to host {}".format(
-                                            vmname_andid, new_host_obj
-                                        )
-                                    )
-
-                        if (
-                            host_obj is not None
-                            and avilable_sriov_devices is not None
-                            and len(avilable_sriov_devices) > 0
-                        ):
-                            # Add SRIOV devices one by one
-                            for sriov_net in sriov_nets:
-                                network_name = sriov_net.get("net_id")
-                                self.create_dvPort_group(network_name)
-
-                                if (
-                                    sriov_net.get("type") == "VF"
-                                    or sriov_net.get("type") == "SR-IOV"
-                                ):
-                                    # add vlan ID ,Modify portgroup for vlan ID
-                                    self.configure_vlanID(
-                                        content, vcenter_conect, network_name
-                                    )
-
-                                task = self.add_sriov_to_vm(
-                                    content,
-                                    vm_obj,
-                                    host_obj,
-                                    network_name,
-                                    avilable_sriov_devices[0],
-                                )
-
-                                if task:
-                                    status = self.wait_for_vcenter_task(
-                                        task, vcenter_conect
-                                    )
-
-                                    if status:
-                                        self.logger.info(
-                                            "Added SRIOV {} to VM {}".format(
-                                                no_of_sriov_devices, str(vm_obj)
-                                            )
-                                        )
-                                else:
-                                    self.logger.error(
-                                        "Fail to add SRIOV {} to VM {}".format(
-                                            no_of_sriov_devices, str(vm_obj)
-                                        )
-                                    )
-
-                                    raise vimconn.VimConnUnexpectedResponse(
-                                        "Fail to add SRIOV adapter in VM {}".format(
-                                            str(vm_obj)
-                                        )
-                                    )
-
-                            return True, vm_obj, vcenter_conect
-                        else:
-                            self.logger.error(
-                                "Currently there is no host with"
-                                " {} number of avaialble SRIOV "
-                                "VFs required for VM {}".format(
-                                    no_of_sriov_devices, vmname_andid
-                                )
-                            )
-
-                            raise vimconn.VimConnNotFoundException(
-                                "Currently there is no host with {} "
-                                "number of avaialble SRIOV devices required for VM {}".format(
-                                    no_of_sriov_devices, vmname_andid
-                                )
-                            )
-                else:
-                    self.logger.debug(
-                        "No infromation about SRIOV devices {} ", sriov_nets
-                    )
-            except vmodl.MethodFault as error:
-                self.logger.error("Error occurred while adding SRIOV {} ", error)
-
-        return None, vm_obj, vcenter_conect
-
     def get_sriov_devices(self, host, no_of_vfs):
         """
         Method to get the details of SRIOV devices on given host
@@ -7934,173 +7097,6 @@
 
         return sriovInfo
 
-    def get_host_and_sriov_devices(self, content, no_of_vfs):
-        """
-        Method to get the details of SRIOV devices infromation on all hosts
-
-           Args:
-               content - vSphere host object
-               no_of_vfs - number of pci VFs needed on host
-
-           Returns:
-                array of SRIOV devices and host object
-        """
-        host_obj = None
-        sriov_device_objs = None
-
-        try:
-            if content:
-                container = content.viewManager.CreateContainerView(
-                    content.rootFolder, [vim.HostSystem], True
-                )
-
-                for host in container.view:
-                    devices = self.get_sriov_devices(host, no_of_vfs)
-
-                    if devices:
-                        host_obj = host
-                        sriov_device_objs = devices
-                        break
-        except Exception as exp:
-            self.logger.error(
-                "Error {} occurred while finding SRIOV devices on host: {}".format(
-                    exp, host_obj
-                )
-            )
-
-        return host_obj, sriov_device_objs
-
-    def add_sriov_to_vm(self, content, vm_obj, host_obj, network_name, sriov_device):
-        """
-        Method to add SRIOV adapter to vm
-
-           Args:
-               host_obj - vSphere host object
-               vm_obj - vSphere vm object
-               content - vCenter content object
-               network_name - name of distributed virtaul portgroup
-               sriov_device - SRIOV device info
-
-           Returns:
-                task object
-        """
-        devices = []
-        vnic_label = "sriov nic"
-
-        try:
-            dvs_portgr = self.get_dvport_group(network_name)
-            network_name = dvs_portgr.name
-            nic = vim.vm.device.VirtualDeviceSpec()
-            # VM device
-            nic.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
-            nic.device = vim.vm.device.VirtualSriovEthernetCard()
-            nic.device.addressType = "assigned"
-            # nic.device.key = 13016
-            nic.device.deviceInfo = vim.Description()
-            nic.device.deviceInfo.label = vnic_label
-            nic.device.deviceInfo.summary = network_name
-            nic.device.backing = vim.vm.device.VirtualEthernetCard.NetworkBackingInfo()
-
-            nic.device.backing.network = self.get_obj(
-                content, [vim.Network], network_name
-            )
-            nic.device.backing.deviceName = network_name
-            nic.device.backing.useAutoDetect = False
-            nic.device.connectable = vim.vm.device.VirtualDevice.ConnectInfo()
-            nic.device.connectable.startConnected = True
-            nic.device.connectable.allowGuestControl = True
-
-            nic.device.sriovBacking = (
-                vim.vm.device.VirtualSriovEthernetCard.SriovBackingInfo()
-            )
-            nic.device.sriovBacking.physicalFunctionBacking = (
-                vim.vm.device.VirtualPCIPassthrough.DeviceBackingInfo()
-            )
-            nic.device.sriovBacking.physicalFunctionBacking.id = sriov_device.id
-
-            devices.append(nic)
-            vmconf = vim.vm.ConfigSpec(deviceChange=devices)
-            task = vm_obj.ReconfigVM_Task(vmconf)
-
-            return task
-        except Exception as exp:
-            self.logger.error(
-                "Error {} occurred while adding SRIOV adapter in VM: {}".format(
-                    exp, vm_obj
-                )
-            )
-
-            return None
-
-    def create_dvPort_group(self, network_name):
-        """
-        Method to create disributed virtual portgroup
-
-           Args:
-               network_name - name of network/portgroup
-
-           Returns:
-               portgroup key
-        """
-        try:
-            new_network_name = [network_name, "-", str(uuid.uuid4())]
-            network_name = "".join(new_network_name)
-            vcenter_conect, content = self.get_vcenter_content()
-
-            dv_switch = self.get_obj(
-                content, [vim.DistributedVirtualSwitch], self.dvs_name
-            )
-
-            if dv_switch:
-                dv_pg_spec = vim.dvs.DistributedVirtualPortgroup.ConfigSpec()
-                dv_pg_spec.name = network_name
-
-                dv_pg_spec.type = (
-                    vim.dvs.DistributedVirtualPortgroup.PortgroupType.earlyBinding
-                )
-                dv_pg_spec.defaultPortConfig = (
-                    vim.dvs.VmwareDistributedVirtualSwitch.VmwarePortConfigPolicy()
-                )
-                dv_pg_spec.defaultPortConfig.securityPolicy = (
-                    vim.dvs.VmwareDistributedVirtualSwitch.SecurityPolicy()
-                )
-                dv_pg_spec.defaultPortConfig.securityPolicy.allowPromiscuous = (
-                    vim.BoolPolicy(value=False)
-                )
-                dv_pg_spec.defaultPortConfig.securityPolicy.forgedTransmits = (
-                    vim.BoolPolicy(value=False)
-                )
-                dv_pg_spec.defaultPortConfig.securityPolicy.macChanges = vim.BoolPolicy(
-                    value=False
-                )
-
-                task = dv_switch.AddDVPortgroup_Task([dv_pg_spec])
-                self.wait_for_vcenter_task(task, vcenter_conect)
-
-                dvPort_group = self.get_obj(
-                    content, [vim.dvs.DistributedVirtualPortgroup], network_name
-                )
-
-                if dvPort_group:
-                    self.logger.info(
-                        "Created disributed virtaul port group: {}".format(dvPort_group)
-                    )
-                    return dvPort_group.key
-            else:
-                self.logger.debug(
-                    "No disributed virtual switch found with name {}".format(
-                        network_name
-                    )
-                )
-
-        except Exception as exp:
-            self.logger.error(
-                "Error occurred while creating disributed virtaul port group {}"
-                " : {}".format(network_name, exp)
-            )
-
-        return None
-
     def reconfig_portgroup(self, content, dvPort_group_name, config_info={}):
         """
         Method to reconfigure disributed virtual portgroup
@@ -8142,36 +7138,6 @@
 
             return None
 
-    def destroy_dvport_group(self, dvPort_group_name):
-        """
-        Method to destroy disributed virtual portgroup
-
-           Args:
-               network_name - name of network/portgroup
-
-           Returns:
-               True if portgroup successfully got deleted else false
-        """
-        vcenter_conect, _ = self.get_vcenter_content()
-
-        try:
-            status = None
-            dvPort_group = self.get_dvport_group(dvPort_group_name)
-
-            if dvPort_group:
-                task = dvPort_group.Destroy_Task()
-                status = self.wait_for_vcenter_task(task, vcenter_conect)
-
-            return status
-        except vmodl.MethodFault as exp:
-            self.logger.error(
-                "Caught vmodl fault {} while deleting disributed virtaul port group {}".format(
-                    exp, dvPort_group_name
-                )
-            )
-
-            return None
-
     def get_dvport_group(self, dvPort_group_name):
         """
         Method to get disributed virtual portgroup
@@ -8231,97 +7197,6 @@
 
         return vlanId
 
-    def configure_vlanID(self, content, vcenter_conect, dvPort_group_name):
-        """
-        Method to configure vlanID in disributed virtual portgroup vlanID
-
-           Args:
-               network_name - name of network/portgroup
-
-           Returns:
-               None
-        """
-        vlanID = self.get_vlanID_from_dvs_portgr(dvPort_group_name)
-
-        if vlanID == 0:
-            # configure vlanID
-            vlanID = self.genrate_vlanID(dvPort_group_name)
-            config = {"vlanID": vlanID}
-            task = self.reconfig_portgroup(
-                content, dvPort_group_name, config_info=config
-            )
-
-            if task:
-                status = self.wait_for_vcenter_task(task, vcenter_conect)
-
-                if status:
-                    self.logger.info(
-                        "Reconfigured Port group {} for vlan ID {}".format(
-                            dvPort_group_name, vlanID
-                        )
-                    )
-            else:
-                self.logger.error(
-                    "Fail reconfigure portgroup {} for vlanID{}".format(
-                        dvPort_group_name, vlanID
-                    )
-                )
-
-    def genrate_vlanID(self, network_name):
-        """
-        Method to get unused vlanID
-           Args:
-               network_name - name of network/portgroup
-           Returns:
-               vlanID
-        """
-        vlan_id = None
-        used_ids = []
-
-        if self.config.get("vlanID_range") is None:
-            raise vimconn.VimConnConflictException(
-                "You must provide a 'vlanID_range' "
-                "at config value before creating sriov network with vlan tag"
-            )
-
-        if "used_vlanIDs" not in self.persistent_info:
-            self.persistent_info["used_vlanIDs"] = {}
-        else:
-            used_ids = list(self.persistent_info["used_vlanIDs"].values())
-
-        for vlanID_range in self.config.get("vlanID_range"):
-            start_vlanid, end_vlanid = vlanID_range.split("-")
-
-            if start_vlanid > end_vlanid:
-                raise vimconn.VimConnConflictException(
-                    "Invalid vlan ID range {}".format(vlanID_range)
-                )
-
-            for vid in range(int(start_vlanid), int(end_vlanid) + 1):
-                if vid not in used_ids:
-                    vlan_id = vid
-                    self.persistent_info["used_vlanIDs"][network_name] = vlan_id
-                    return vlan_id
-
-        if vlan_id is None:
-            raise vimconn.VimConnConflictException("All Vlan IDs are in use")
-
-    def get_obj(self, content, vimtype, name):
-        """
-        Get the vsphere object associated with a given text name
-        """
-        obj = None
-        container = content.viewManager.CreateContainerView(
-            content.rootFolder, vimtype, True
-        )
-
-        for item in container.view:
-            if item.name == name:
-                obj = item
-                break
-
-        return obj
-
     def insert_media_to_vm(self, vapp, image_id):
         """
         Method to insert media CD-ROM (ISO image) from catalog to vm.
diff --git a/RO-plugin/osm_ro_plugin/vimconn.py b/RO-plugin/osm_ro_plugin/vimconn.py
index bb06037..f526aef 100644
--- a/RO-plugin/osm_ro_plugin/vimconn.py
+++ b/RO-plugin/osm_ro_plugin/vimconn.py
@@ -822,273 +822,6 @@
         """
         raise VimConnNotImplemented("Should have implemented this")
 
-    def new_classification(self, name, ctype, definition):
-        """Creates a traffic classification in the VIM
-        Params:
-            'name': name of this classification
-            'ctype': type of this classification
-            'definition': definition of this classification (type-dependent free-form text)
-        Returns the VIM's classification ID on success or raises an exception on failure
-        """
-        raise VimConnNotImplemented("SFC support not implemented")
-
-    def get_classification(self, classification_id):
-        """Obtain classification details of the VIM's classification with ID='classification_id'
-        Return a dict that contains:
-            'id': VIM's classification ID (same as classification_id)
-            'name': VIM's classification name
-            'type': type of this classification
-            'definition': definition of the classification
-            'status': 'ACTIVE', 'INACTIVE', 'DOWN', 'BUILD', 'ERROR', 'VIM_ERROR', 'OTHER'
-            'error_msg': (optional) text that explains the ERROR status
-            other VIM specific fields: (optional) whenever possible
-        Raises an exception upon error or when classification is not found
-        """
-        raise VimConnNotImplemented("SFC support not implemented")
-
-    def get_classification_list(self, filter_dict={}):
-        """Obtain classifications from the VIM
-        Params:
-            'filter_dict' (optional): contains the entries to filter the classifications on and only return those that
-                match ALL:
-                id:   string => returns classifications with this VIM's classification ID, which implies a return of one
-                    classification at most
-                name: string => returns only classifications with this name
-                type: string => returns classifications of this type
-                definition: string => returns classifications that have this definition
-                tenant_id: string => returns only classifications that belong to this tenant/project
-        Returns a list of classification dictionaries, each dictionary contains:
-            'id': (mandatory) VIM's classification ID
-            'name': (mandatory) VIM's classification name
-            'type': type of this classification
-            'definition': definition of the classification
-            other VIM specific fields: (optional) whenever possible using the same naming of filter_dict param
-        List can be empty if no classification matches the filter_dict. Raise an exception only upon VIM connectivity,
-            authorization, or some other unspecific error
-        """
-        raise VimConnNotImplemented("SFC support not implemented")
-
-    def refresh_classifications_status(self, classification_list):
-        """Get the status of the classifications
-        Params: the list of classification identifiers
-        Returns a dictionary with:
-            vm_id:          #VIM id of this classifier
-                status:     #Mandatory. Text with one of:
-                            #  DELETED (not found at vim)
-                            #  VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
-                            #  OTHER (Vim reported other status not understood)
-                            #  ERROR (VIM indicates an ERROR status)
-                            #  ACTIVE,
-                            #  CREATING (on building process)
-                error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR
-                vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)
-        """
-        raise VimConnNotImplemented("Should have implemented this")
-
-    def delete_classification(self, classification_id):
-        """Deletes a classification from the VIM
-        Returns the classification ID (classification_id) or raises an exception upon error or when classification is
-           not found
-        """
-        raise VimConnNotImplemented("SFC support not implemented")
-
-    def new_sfi(self, name, ingress_ports, egress_ports, sfc_encap=True):
-        """Creates a service function instance in the VIM
-        Params:
-            'name': name of this service function instance
-            'ingress_ports': set of ingress ports (VIM's port IDs)
-            'egress_ports': set of egress ports (VIM's port IDs)
-            'sfc_encap': boolean stating whether this specific instance supports IETF SFC Encapsulation
-        Returns the VIM's service function instance ID on success or raises an exception on failure
-        """
-        raise VimConnNotImplemented("SFC support not implemented")
-
-    def get_sfi(self, sfi_id):
-        """Obtain service function instance details of the VIM's service function instance with ID='sfi_id'
-        Return a dict that contains:
-            'id': VIM's sfi ID (same as sfi_id)
-            'name': VIM's sfi name
-            'ingress_ports': set of ingress ports (VIM's port IDs)
-            'egress_ports': set of egress ports (VIM's port IDs)
-            'status': 'ACTIVE', 'INACTIVE', 'DOWN', 'BUILD', 'ERROR', 'VIM_ERROR', 'OTHER'
-            'error_msg': (optional) text that explains the ERROR status
-            other VIM specific fields: (optional) whenever possible
-        Raises an exception upon error or when service function instance is not found
-        """
-        raise VimConnNotImplemented("SFC support not implemented")
-
-    def get_sfi_list(self, filter_dict={}):
-        """Obtain service function instances from the VIM
-        Params:
-            'filter_dict' (optional): contains the entries to filter the sfis on and only return those that match ALL:
-                id:   string  => returns sfis with this VIM's sfi ID, which implies a return of one sfi at most
-                name: string  => returns only service function instances with this name
-                tenant_id: string => returns only service function instances that belong to this tenant/project
-        Returns a list of service function instance dictionaries, each dictionary contains:
-            'id': (mandatory) VIM's sfi ID
-            'name': (mandatory) VIM's sfi name
-            'ingress_ports': set of ingress ports (VIM's port IDs)
-            'egress_ports': set of egress ports (VIM's port IDs)
-            other VIM specific fields: (optional) whenever possible using the same naming of filter_dict param
-        List can be empty if no sfi matches the filter_dict. Raise an exception only upon VIM connectivity,
-            authorization, or some other unspecific error
-        """
-        raise VimConnNotImplemented("SFC support not implemented")
-
-    def delete_sfi(self, sfi_id):
-        """Deletes a service function instance from the VIM
-        Returns the service function instance ID (sfi_id) or raises an exception upon error or when sfi is not found
-        """
-        raise VimConnNotImplemented("SFC support not implemented")
-
-    def refresh_sfis_status(self, sfi_list):
-        """Get the status of the service function instances
-        Params: the list of sfi identifiers
-        Returns a dictionary with:
-            vm_id:          #VIM id of this service function instance
-                status:     #Mandatory. Text with one of:
-                            #  DELETED (not found at vim)
-                            #  VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
-                            #  OTHER (Vim reported other status not understood)
-                            #  ERROR (VIM indicates an ERROR status)
-                            #  ACTIVE,
-                            #  CREATING (on building process)
-                error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR
-                vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)
-        """
-        raise VimConnNotImplemented("Should have implemented this")
-
-    def new_sf(self, name, sfis, sfc_encap=True):
-        """Creates (an abstract) service function in the VIM
-        Params:
-            'name': name of this service function
-            'sfis': set of service function instances of this (abstract) service function
-            'sfc_encap': boolean stating whether this service function supports IETF SFC Encapsulation
-        Returns the VIM's service function ID on success or raises an exception on failure
-        """
-        raise VimConnNotImplemented("SFC support not implemented")
-
-    def get_sf(self, sf_id):
-        """Obtain service function details of the VIM's service function with ID='sf_id'
-        Return a dict that contains:
-            'id': VIM's sf ID (same as sf_id)
-            'name': VIM's sf name
-            'sfis': VIM's sf's set of VIM's service function instance IDs
-            'sfc_encap': boolean stating whether this service function supports IETF SFC Encapsulation
-            'status': 'ACTIVE', 'INACTIVE', 'DOWN', 'BUILD', 'ERROR', 'VIM_ERROR', 'OTHER'
-            'error_msg': (optional) text that explains the ERROR status
-            other VIM specific fields: (optional) whenever possible
-        Raises an exception upon error or when sf is not found
-        """
-
-    def get_sf_list(self, filter_dict={}):
-        """Obtain service functions from the VIM
-        Params:
-            'filter_dict' (optional): contains the entries to filter the sfs on and only return those that match ALL:
-                id:   string  => returns sfs with this VIM's sf ID, which implies a return of one sf at most
-                name: string  => returns only service functions with this name
-                tenant_id: string => returns only service functions that belong to this tenant/project
-        Returns a list of service function dictionaries, each dictionary contains:
-            'id': (mandatory) VIM's sf ID
-            'name': (mandatory) VIM's sf name
-            'sfis': VIM's sf's set of VIM's service function instance IDs
-            'sfc_encap': boolean stating whether this service function supports IETF SFC Encapsulation
-            other VIM specific fields: (optional) whenever possible using the same naming of filter_dict param
-        List can be empty if no sf matches the filter_dict. Raise an exception only upon VIM connectivity,
-            authorization, or some other unspecific error
-        """
-        raise VimConnNotImplemented("SFC support not implemented")
-
-    def delete_sf(self, sf_id):
-        """Deletes (an abstract) service function from the VIM
-        Returns the service function ID (sf_id) or raises an exception upon error or when sf is not found
-        """
-        raise VimConnNotImplemented("SFC support not implemented")
-
-    def refresh_sfs_status(self, sf_list):
-        """Get the status of the service functions
-        Params: the list of sf identifiers
-        Returns a dictionary with:
-            vm_id:          #VIM id of this service function
-                status:     #Mandatory. Text with one of:
-                            #  DELETED (not found at vim)
-                            #  VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
-                            #  OTHER (Vim reported other status not understood)
-                            #  ERROR (VIM indicates an ERROR status)
-                            #  ACTIVE,
-                            #  CREATING (on building process)
-                error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR
-                vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)
-        """
-        raise VimConnNotImplemented("Should have implemented this")
-
-    def new_sfp(self, name, classifications, sfs, sfc_encap=True, spi=None):
-        """Creates a service function path
-        Params:
-            'name': name of this service function path
-            'classifications': set of traffic classifications that should be matched on to get into this sfp
-            'sfs': list of every service function that constitutes this path , from first to last
-            'sfc_encap': whether this is an SFC-Encapsulated chain (i.e using NSH), True by default
-            'spi': (optional) the Service Function Path identifier (SPI: Service Path Identifier) for this path
-        Returns the VIM's sfp ID on success or raises an exception on failure
-        """
-        raise VimConnNotImplemented("SFC support not implemented")
-
-    def get_sfp(self, sfp_id):
-        """Obtain service function path details of the VIM's sfp with ID='sfp_id'
-        Return a dict that contains:
-            'id': VIM's sfp ID (same as sfp_id)
-            'name': VIM's sfp name
-            'classifications': VIM's sfp's list of VIM's classification IDs
-            'sfs': VIM's sfp's list of VIM's service function IDs
-            'status': 'ACTIVE', 'INACTIVE', 'DOWN', 'BUILD', 'ERROR', 'VIM_ERROR', 'OTHER'
-            'error_msg': (optional) text that explains the ERROR status
-            other VIM specific fields: (optional) whenever possible
-        Raises an exception upon error or when sfp is not found
-        """
-        raise VimConnNotImplemented("SFC support not implemented")
-
-    def get_sfp_list(self, filter_dict={}):
-        """Obtain service function paths from VIM
-        Params:
-            'filter_dict' (optional): contains the entries to filter the sfps on, and only return those that match ALL:
-                id:   string  => returns sfps with this VIM's sfp ID , which implies a return of one sfp at most
-                name: string  => returns only sfps with this name
-                tenant_id: string => returns only sfps that belong to this tenant/project
-        Returns a list of service function path dictionaries, each dictionary contains:
-            'id': (mandatory) VIM's sfp ID
-            'name': (mandatory) VIM's sfp name
-            'classifications': VIM's sfp's list of VIM's classification IDs
-            'sfs': VIM's sfp's list of VIM's service function IDs
-            other VIM specific fields: (optional) whenever possible using the same naming of filter_dict param
-        List can be empty if no sfp matches the filter_dict. Raise an exception only upon VIM connectivity,
-            authorization, or some other unspecific error
-        """
-        raise VimConnNotImplemented("SFC support not implemented")
-
-    def refresh_sfps_status(self, sfp_list):
-        """Get the status of the service function path
-        Params: the list of sfp identifiers
-        Returns a dictionary with:
-            vm_id:          #VIM id of this service function path
-                status:     #Mandatory. Text with one of:
-                            #  DELETED (not found at vim)
-                            #  VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
-                            #  OTHER (Vim reported other status not understood)
-                            #  ERROR (VIM indicates an ERROR status)
-                            #  ACTIVE,
-                            #  CREATING (on building process)
-                error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR
-                vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)F
-        """
-        raise VimConnNotImplemented("Should have implemented this")
-
-    def delete_sfp(self, sfp_id):
-        """Deletes a service function path from the VIM
-        Returns the sfp ID (sfp_id) or raises an exception upon error or when sf is not found
-        """
-        raise VimConnNotImplemented("SFC support not implemented")
-
     def migrate_instance(self, vm_id, compute_host=None):
         """Migrate a vdu
         Params:
@@ -1106,58 +839,3 @@
             flavor_id: flavor_id to resize the vdu to
         """
         raise VimConnNotImplemented("Should have implemented this")
-
-    # NOT USED METHODS in current version. Deprecated
-    @deprecated
-    def host_vim2gui(self, host, server_dict):
-        """Transform host dictionary from VIM format to GUI format,
-        and append to the server_dict
-        """
-        raise VimConnNotImplemented("Should have implemented this")
-
-    @deprecated
-    def get_hosts_info(self):
-        """Get the information of deployed hosts
-        Returns the hosts content"""
-        raise VimConnNotImplemented("Should have implemented this")
-
-    @deprecated
-    def get_hosts(self, vim_tenant):
-        """Get the hosts and deployed instances
-        Returns the hosts content"""
-        raise VimConnNotImplemented("Should have implemented this")
-
-    @deprecated
-    def get_processor_rankings(self):
-        """Get the processor rankings in the VIM database"""
-        raise VimConnNotImplemented("Should have implemented this")
-
-    @deprecated
-    def new_host(self, host_data):
-        """Adds a new host to VIM"""
-        """Returns status code of the VIM response"""
-        raise VimConnNotImplemented("Should have implemented this")
-
-    @deprecated
-    def new_external_port(self, port_data):
-        """Adds a external port to VIM"""
-        """Returns the port identifier"""
-        raise VimConnNotImplemented("Should have implemented this")
-
-    @deprecated
-    def new_external_network(self, net_name, net_type):
-        """Adds a external network to VIM (shared)"""
-        """Returns the network identifier"""
-        raise VimConnNotImplemented("Should have implemented this")
-
-    @deprecated
-    def connect_port_network(self, port_id, network_id, admin=False):
-        """Connects a external port to a network"""
-        """Returns status code of the VIM response"""
-        raise VimConnNotImplemented("Should have implemented this")
-
-    @deprecated
-    def new_vminstancefromJSON(self, vm_data):
-        """Adds a VM instance to VIM"""
-        """Returns the instance identifier"""
-        raise VimConnNotImplemented("Should have implemented this")
diff --git a/integration-tests/test_vimconn_gcp.py b/integration-tests/test_vimconn_gcp.py
index ea9783b..2285f21 100644
--- a/integration-tests/test_vimconn_gcp.py
+++ b/integration-tests/test_vimconn_gcp.py
@@ -35,7 +35,6 @@
 
 
 class TestGCPOperations:
-
     gcp_conn = None
     time_id = datetime.today().strftime("%Y%m%d%H%M%S")
     vim_id = "gcp-test-" + time_id
diff --git a/releasenotes/notes/Correcting_vcpu_calculation-83aa1e4c3ee72a24.yaml b/releasenotes/notes/Correcting_vcpu_calculation-83aa1e4c3ee72a24.yaml
new file mode 100644
index 0000000..d7f5f18
--- /dev/null
+++ b/releasenotes/notes/Correcting_vcpu_calculation-83aa1e4c3ee72a24.yaml
@@ -0,0 +1,21 @@
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+---
+other:
+  - |
+    Correcting invalid vcpu calculation according to numa nodes core and thread settings,
+    correcting vcpu pinning policy evaluation according to https://osm.etsi.org/gerrit/#/c/osm/IM/+/12851/.
diff --git a/releasenotes/notes/adding_vio_numa_support-6717ff4c02776dda.yaml b/releasenotes/notes/adding_vio_numa_support-6717ff4c02776dda.yaml
new file mode 100644
index 0000000..b8d6179
--- /dev/null
+++ b/releasenotes/notes/adding_vio_numa_support-6717ff4c02776dda.yaml
@@ -0,0 +1,22 @@
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+---
+fixes:
+  - |
+     Bug 2180 Adding VIO Numa support with unit tests.
+
+
diff --git a/releasenotes/notes/feature_10950_pycryptodome-b9b85cfc8ee5a5e0.yaml b/releasenotes/notes/feature_10950_pycryptodome-b9b85cfc8ee5a5e0.yaml
new file mode 100644
index 0000000..4cc88ff
--- /dev/null
+++ b/releasenotes/notes/feature_10950_pycryptodome-b9b85cfc8ee5a5e0.yaml
@@ -0,0 +1,21 @@
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+---
+features:
+  - |
+    Feature 10950: Replace pycrypto with pycryptodome. Updating requirements-dev.txt.
+
diff --git a/releasenotes/notes/fixing_black_formatting-38523eb5ec59dae6.yaml b/releasenotes/notes/fixing_black_formatting-38523eb5ec59dae6.yaml
new file mode 100644
index 0000000..a8c6c50
--- /dev/null
+++ b/releasenotes/notes/fixing_black_formatting-38523eb5ec59dae6.yaml
@@ -0,0 +1,20 @@
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+---
+fixes:
+  - |
+    Reformat files according to new black validation.
diff --git a/releasenotes/notes/re_revert_of_unused_methods-cb9ca6cc030c92c1.yaml b/releasenotes/notes/re_revert_of_unused_methods-cb9ca6cc030c92c1.yaml
new file mode 100644
index 0000000..318f335
--- /dev/null
+++ b/releasenotes/notes/re_revert_of_unused_methods-cb9ca6cc030c92c1.yaml
@@ -0,0 +1,21 @@
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+---
+other:
+  - |
+    Removing unused methods from RO module to get rid of unmaintained code
+    Removed codes are accounted to ETSI.
diff --git a/releasenotes/notes/removing_unused_methods-c35f90b924eee7d2.yaml b/releasenotes/notes/removing_unused_methods-c35f90b924eee7d2.yaml
new file mode 100644
index 0000000..9fe44e3
--- /dev/null
+++ b/releasenotes/notes/removing_unused_methods-c35f90b924eee7d2.yaml
@@ -0,0 +1,20 @@
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+---
+other:
+  - |
+    Removing unused methods from RO module to get rid of unmaintained code.
diff --git a/releasenotes/notes/update_python_dependencies-a4b78dba19059876.yaml b/releasenotes/notes/update_python_dependencies-a4b78dba19059876.yaml
new file mode 100644
index 0000000..b645eb8
--- /dev/null
+++ b/releasenotes/notes/update_python_dependencies-a4b78dba19059876.yaml
@@ -0,0 +1,20 @@
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+---
+other:
+  - |
+    Update of Python packages (using pip-compile) in preparation for Release FOURTEEN.
diff --git a/requirements-dev.in b/requirements-dev.in
index 87d042b..2e25c46 100644
--- a/requirements-dev.in
+++ b/requirements-dev.in
@@ -14,5 +14,5 @@
 # limitations under the License.
 ##
 
-git+https://osm.etsi.org/gerrit/osm/common.git@master#egg=osm-common
--r https://osm.etsi.org/gitweb/?p=osm/common.git;a=blob_plain;f=requirements.txt;hb=master
+git+https://osm.etsi.org/gerrit/osm/common.git@paas#egg=osm-common
+-r https://osm.etsi.org/gitweb/?p=osm/common.git;a=blob_plain;f=requirements.txt;hb=paas
diff --git a/requirements-dev.txt b/requirements-dev.txt
index ed7864e..0304e4f 100644
--- a/requirements-dev.txt
+++ b/requirements-dev.txt
@@ -14,19 +14,31 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #######################################################################################
-aiokafka==0.7.2
+aiokafka==0.8.0
     # via -r https://osm.etsi.org/gitweb/?p=osm/common.git;a=blob_plain;f=requirements.txt;hb=paas
+async-timeout==4.0.2
+    # via
+    #   -r https://osm.etsi.org/gitweb/?p=osm/common.git;a=blob_plain;f=requirements.txt;hb=paas
+    #   aiokafka
 dataclasses==0.6
     # via -r https://osm.etsi.org/gitweb/?p=osm/common.git;a=blob_plain;f=requirements.txt;hb=paas
 kafka-python==2.0.2
     # via
     #   -r https://osm.etsi.org/gitweb/?p=osm/common.git;a=blob_plain;f=requirements.txt;hb=paas
     #   aiokafka
+motor==1.3.1
+    # via -r https://osm.etsi.org/gitweb/?p=osm/common.git;a=blob_plain;f=requirements.txt;hb=paas
 osm-common @ git+https://osm.etsi.org/gerrit/osm/common.git@paas
     # via -r requirements-dev.in
-pycrypto==2.6.1
+packaging==23.0
+    # via
+    #   -r https://osm.etsi.org/gitweb/?p=osm/common.git;a=blob_plain;f=requirements.txt;hb=paas
+    #   aiokafka
+pycryptodome==3.17
     # via -r https://osm.etsi.org/gitweb/?p=osm/common.git;a=blob_plain;f=requirements.txt;hb=paas
-pymongo==3.12.3
-    # via -r https://osm.etsi.org/gitweb/?p=osm/common.git;a=blob_plain;f=requirements.txt;hb=paas
+pymongo==3.13.0
+    # via
+    #   -r https://osm.etsi.org/gitweb/?p=osm/common.git;a=blob_plain;f=requirements.txt;hb=paas
+    #   motor
 pyyaml==5.4.1
     # via -r https://osm.etsi.org/gitweb/?p=osm/common.git;a=blob_plain;f=requirements.txt;hb=paas
diff --git a/requirements-test.txt b/requirements-test.txt
index c7c2f90..20b69e9 100644
--- a/requirements-test.txt
+++ b/requirements-test.txt
@@ -16,7 +16,7 @@
 #######################################################################################
 -e RO-plugin
     # via -r requirements-test.in
-coverage==7.0.5
+coverage==7.1.0
     # via -r requirements-test.in
 mock==5.0.1
     # via -r requirements-test.in
diff --git a/requirements.txt b/requirements.txt
index 3f146d0..e722b78 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -32,14 +32,14 @@
     #   azure-mgmt-compute
     #   azure-mgmt-network
     #   azure-mgmt-resource
-azure-core==1.26.2
+azure-core==1.26.3
     # via
     #   azure-identity
     #   azure-mgmt-core
     #   msrest
 azure-identity==1.12.0
     # via -r RO-VIM-azure/requirements.in
-azure-mgmt-compute==29.0.0
+azure-mgmt-compute==29.1.0
     # via -r RO-VIM-azure/requirements.in
 azure-mgmt-core==1.3.2
     # via
@@ -54,7 +54,7 @@
     # via paramiko
 boto==2.49.0
     # via -r RO-VIM-aws/requirements.in
-cachetools==5.2.1
+cachetools==5.3.0
     # via google-auth
 certifi==2022.12.7
     # via
@@ -75,9 +75,9 @@
     #   osc-lib
     #   python-neutronclient
     #   python-openstackclient
-cmd2==2.4.2
+cmd2==2.4.3
     # via cliff
-cryptography==39.0.0
+cryptography==39.0.1
     # via
     #   -r NG-RO/requirements.in
     #   adal
@@ -107,7 +107,7 @@
     # via pyvcloud
 google-api-core==2.11.0
     # via google-api-python-client
-google-api-python-client==2.72.0
+google-api-python-client==2.77.0
     # via -r RO-VIM-gcp/requirements.in
 google-auth==2.16.0
     # via
@@ -148,7 +148,9 @@
 isodate==0.6.1
     # via msrest
 jaraco-functools==3.5.2
-    # via cheroot
+    # via
+    #   cheroot
+    #   tempora
 jinja2==3.1.2
     # via -r NG-RO/requirements.in
 jmespath==1.0.1
@@ -174,14 +176,14 @@
     # via -r NG-RO/requirements.in
 lxml==4.9.2
     # via pyvcloud
-markupsafe==2.1.1
+markupsafe==2.1.2
     # via jinja2
 more-itertools==9.0.0
     # via
     #   cheroot
     #   cherrypy
     #   jaraco-functools
-msal==1.20.0
+msal==1.21.0
     # via
     #   azure-identity
     #   msal-extensions
@@ -198,8 +200,6 @@
     #   msrestazure
 msrestazure==0.6.4
     # via -r RO-VIM-azure/requirements.in
-munch==2.5.0
-    # via openstacksdk
 netaddr==0.8.0
     # via
     #   -r RO-VIM-aws/requirements.in
@@ -218,7 +218,7 @@
     #   oslo-utils
 oauthlib==3.2.2
     # via requests-oauthlib
-openstacksdk==0.103.0
+openstacksdk==1.0.0
     # via
     #   os-client-config
     #   osc-lib
@@ -274,7 +274,7 @@
     # via
     #   oslo-utils
     #   python-keystoneclient
-paramiko==2.12.0
+paramiko==3.0.0
     # via
     #   -r RO-SDN-dpb/requirements.in
     #   -r RO-VIM-gcp/requirements.in
@@ -298,7 +298,7 @@
     #   stevedore
 pkgutil-resolve-name==1.3.10
     # via jsonschema
-portalocker==2.6.0
+portalocker==2.7.0
     # via msal-extensions
 portend==3.1.0
     # via cherrypy
@@ -375,7 +375,7 @@
     #   tempora
 pyvcloud==19.1.1
     # via -r RO-VIM-vmware/requirements.in
-pyvmomi==8.0.0.1.1
+pyvmomi==8.0.0.1.2
     # via -r RO-VIM-vmware/requirements.in
 pyyaml==5.4.1
     # via
@@ -431,7 +431,7 @@
     # via oslo-config
 rsa==4.9
     # via google-auth
-simplejson==3.18.1
+simplejson==3.18.3
     # via
     #   osc-lib
     #   python-cinderclient
@@ -446,8 +446,6 @@
     #   isodate
     #   keystoneauth1
     #   msrestazure
-    #   munch
-    #   paramiko
     #   python-dateutil
     #   python-keystoneclient
     #   pyvmomi
@@ -462,7 +460,7 @@
     #   python-keystoneclient
     #   python-novaclient
     #   python-openstackclient
-tempora==5.2.0
+tempora==5.2.1
     # via portend
 tqdm==4.64.1
     # via ipconflict
@@ -486,7 +484,7 @@
     #   python-glanceclient
 zc-lockfile==2.0
     # via cherrypy
-zipp==3.11.0
+zipp==3.13.0
     # via
     #   importlib-metadata
     #   importlib-resources