return_data.append(task["action_id"])
return return_data, None, True
+
+ def migrate_task(
+ self, vdu, vnf, vdu_index, action_id, nsr_id, task_index, extra_dict
+ ):
+ # The VDU is deployed on a single VIM; take its (vim id, vim_info) entry
+ target_vim, vim_info = next(iter(vdu["vim_info"].items()))
+ self._assign_vim(target_vim)
+ target_record = "vnfrs:{}:vdur.{}".format(vnf["_id"], vdu_index)
+ target_record_id = "vnfrs:{}:vdur.{}".format(vnf["_id"], vdu["id"])
+ deployment_info = {
+ "action_id": action_id,
+ "nsr_id": nsr_id,
+ "task_index": task_index,
+ }
+
+ task = Ns._create_task(
+ deployment_info=deployment_info,
+ target_id=target_vim,
+ item="migrate",
+ action="EXEC",
+ target_record=target_record,
+ target_record_id=target_record_id,
+ extra_dict=extra_dict,
+ )
+
+ return task
+
+ def migrate(self, session, indata, version, nsr_id, *args, **kwargs):
+ task_index = 0
+ extra_dict = {}
+ now = time()
+ action_id = indata.get("action_id", str(uuid4()))
+ step = ""
+ logging_text = "Task deploy nsr_id={} action_id={} ".format(nsr_id, action_id)
+ self.logger.debug(logging_text + "Enter")
+ try:
+ vnf_instance_id = indata["vnfInstanceId"]
+ step = "Getting vnfrs from db"
+ db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_instance_id})
+ vdu = indata.get("vdu")
+ migrateToHost = indata.get("migrateToHost")
+ db_new_tasks = []
+
+ with self.write_lock:
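+ # A specific VDU in the request produces a single migration task; otherwise
+ # one task is created per vdur of the VNF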
+ if vdu is not None:
+ vdu_id = indata["vdu"]["vduId"]
+ vdu_count_index = indata["vdu"].get("vduCountIndex", 0)
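+ # Find the vdur entry matching the requested vduId and count index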
+ for vdu_index, vdu in enumerate(db_vnfr["vdur"]):
+ if (
+ vdu["vdu-id-ref"] == vdu_id
+ and vdu["count-index"] == vdu_count_index
+ ):
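+ # These params are consumed by VimInteractionMigration when the RO worker executes the task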
+ extra_dict["params"] = {
+ "vim_vm_id": vdu["vim-id"],
+ "migrate_host": migrateToHost,
+ "vdu_vim_info": vdu["vim_info"],
+ }
+ step = "Creating migration task for vdu:{}".format(vdu)
+ task = self.migrate_task(
+ vdu,
+ db_vnfr,
+ vdu_index,
+ action_id,
+ nsr_id,
+ task_index,
+ extra_dict,
+ )
+ db_new_tasks.append(task)
+ task_index += 1
+ break
+ else:
+ for vdu_index, vdu in enumerate(db_vnfr["vdur"]):
+ extra_dict["params"] = {
+ "vim_vm_id": vdu["vim-id"],
+ "migrate_host": migrateToHost,
+ "vdu_vim_info": vdu["vim_info"],
+ }
+ step = "Creating migration task for vdu:{}".format(vdu)
+ task = self.migrate_task(
+ vdu,
+ db_vnfr,
+ vdu_index,
+ action_id,
+ nsr_id,
+ task_index,
+ extra_dict,
+ )
+ db_new_tasks.append(task)
+ task_index += 1
+
+ self.upload_all_tasks(
+ db_new_tasks=db_new_tasks,
+ now=now,
+ )
+
+ self.logger.debug(
+ logging_text + "Exit. Created {} tasks".format(len(db_new_tasks))
+ )
+ return (
+ {"status": "ok", "nsr_id": nsr_id, "action_id": action_id},
+ action_id,
+ True,
+ )
+ except Exception as e:
+ if isinstance(e, (DbException, NsException)):
+ self.logger.error(
+ logging_text + "Exit Exception while '{}': {}".format(step, e)
+ )
+ else:
+ e = traceback_format_exc()
+ self.logger.critical(
+ logging_text + "Exit Exception while '{}': {}".format(step, e),
+ exc_info=True,
+ )
+ raise NsException(e)
return "DONE", ro_vim_item_update_ok
+class VimInteractionMigration(VimInteractionBase):
+ def exec(self, ro_task, task_index, task_depends):
+ task = ro_task["tasks"][task_index]
+ task_id = task["task_id"]
+ db_task_update = {"retries": 0}
+ target_vim = self.my_vims[ro_task["target_id"]]
+ vim_interfaces = []
+ created = False
+ created_items = {}
+ refreshed_vim_info = {}
+
+ try:
+ if task.get("params"):
+ vim_vm_id = task["params"].get("vim_vm_id")
+ migrate_host = task["params"].get("migrate_host")
+ _, migrated_compute_node = target_vim.migrate_instance(
+ vim_vm_id, migrate_host
+ )
+
+ if migrated_compute_node:
+ # When VM is migrated, vdu["vim_info"] needs to be updated
+ vdu_old_vim_info = task["params"]["vdu_vim_info"].get(
+ ro_task["target_id"]
+ )
+
+ # Refresh VM to get new vim_info
+ vm_to_refresh_list = [vim_vm_id]
+ vim_dict = target_vim.refresh_vms_status(vm_to_refresh_list)
+ refreshed_vim_info = vim_dict[vim_vm_id]
+
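+ # Keep the interface list in its original order by matching the refreshed
+ # interfaces to the old ones via vim_interface_id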
+ if refreshed_vim_info.get("interfaces"):
+ for old_iface in vdu_old_vim_info.get("interfaces", []):
+ iface = next(
+ (
+ iface
+ for iface in refreshed_vim_info["interfaces"]
+ if old_iface["vim_interface_id"]
+ == iface["vim_interface_id"]
+ ),
+ None,
+ )
+ vim_interfaces.append(iface)
+
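+ # Update the RO record of the migrated VM with the refreshed VIM information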
+ ro_vim_item_update = {
+ "vim_id": vim_vm_id,
+ "vim_status": "ACTIVE",
+ "created": created,
+ "created_items": created_items,
+ "vim_details": None,
+ "vim_message": None,
+ }
+
+ if refreshed_vim_info and refreshed_vim_info.get("status") not in (
+ "ERROR",
+ "VIM_ERROR",
+ ):
+ ro_vim_item_update["vim_details"] = refreshed_vim_info["vim_info"]
+
+ if vim_interfaces:
+ ro_vim_item_update["interfaces"] = vim_interfaces
+
+ self.logger.debug(
+ "task={} {} vm-migration done".format(task_id, ro_task["target_id"])
+ )
+
+ return "DONE", ro_vim_item_update, db_task_update
+
+ except (vimconn.VimConnException, NsWorkerException) as e:
+ self.logger.error(
+ "task={} vim={} VM Migration:"
+ " {}".format(task_id, ro_task["target_id"], e)
+ )
+ ro_vim_item_update = {
+ "vim_status": "VIM_ERROR",
+ "created": created,
+ "vim_message": str(e),
+ }
+
+ return "FAILED", ro_vim_item_update, db_task_update
+
+
class NsWorker(threading.Thread):
REFRESH_BUILD = 5 # 5 seconds
REFRESH_ACTIVE = 60 # 1 minute
"affinity-or-anti-affinity-group": VimInteractionAffinityGroup(
self.db, self.my_vims, self.db_vims, self.logger
),
+ "migrate": VimInteractionMigration(
+ self.db, self.my_vims, self.db_vims, self.logger
+ ),
}
self.time_last_task_processed = None
# lists of tasks to delete because nsrs or vnfrs has been deleted from db
},
},
},
+ "migrate": {
+ "<ID>": {
+ "METHODS": ("POST"),
+ "ROLE_PERMISSION": "migrate:id:",
+ "<ID>": {
+ "METHODS": ("GET",),
+ "ROLE_PERMISSION": "migrate:id:id:",
+ },
+ },
+ },
}
},
}
"deploy:id:id:cancel:post": self.ns.cancel,
"recreate:id:post": self.ns.recreate,
"recreate:id:id:get": self.ns.recreate_status,
+ "migrate:id:post": self.ns.migrate,
}
def _format_in(self, kwargs):
return None
except Exception as e:
self.format_vimconn_exception(e)
+
+ def migrate_instance(self, vm_id, compute_host=None):
+ """
+ Migrate a vdu
+ Params:
+ vm_id: ID of an instance
+ compute_host: Host to migrate the vdu to
+ """
+ # TODO: Add support for migration
+ raise vimconn.VimConnNotImplemented("Not implemented")
else:
return self._default_admin_user
+ def migrate_instance(self, vm_id, compute_host=None):
+ """
+ Migrate a vdu
+ Params:
+ vm_id: ID of an instance
+ compute_host: Host to migrate the vdu to
+ """
+ # TODO: Add support for migration
+ raise vimconn.VimConnNotImplemented("Not implemented")
+
if __name__ == "__main__":
# Init logger
)
)
self._format_vimconn_exception(e)
+
+ def migrate_instance(self, vm_id, compute_host=None):
+ """
+ Migrate a vdu
+ Params:
+ vm_id: ID of an instance
+ compute_host: Host to migrate the vdu to
+ """
+ # TODO: Add support for migration
+ raise vimconn.VimConnNotImplemented("Not implemented")
import copy
from http.client import HTTPException
+import json
import logging
from pprint import pformat
import random
ConnectionError,
) as e:
self._format_exception(e)
+
+ def get_vdu_state(self, vm_id):
+ """
+ Getting the state of a vdu
+ param:
+ vm_id: ID of an instance
+ """
+ self.logger.debug("Getting the status of VM")
+ self.logger.debug("VIM VM ID %s", vm_id)
+ self._reload_connection()
+ server = self.nova.servers.find(id=vm_id)
+ server_dict = server.to_dict()
+ vdu_data = [
+ server_dict["status"],
+ server_dict["flavor"]["id"],
+ server_dict["OS-EXT-SRV-ATTR:host"],
+ server_dict["OS-EXT-AZ:availability_zone"],
+ ]
+ self.logger.debug("vdu_data %s", vdu_data)
+ return vdu_data
+
+ def check_compute_availability(self, host, server_flavor_details):
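+ """Return `host` if it has enough free RAM, disk and vCPUs for the given flavor details, otherwise None."""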
+ self._reload_connection()
+ hypervisor_search = self.nova.hypervisors.search(
+ hypervisor_match=host, servers=True
+ )
+ for hypervisor in hypervisor_search:
+ hypervisor_id = hypervisor.to_dict()["id"]
+ hypervisor_details = self.nova.hypervisors.get(hypervisor=hypervisor_id)
+ hypervisor_dict = hypervisor_details.to_dict()
+ hypervisor_temp = json.dumps(hypervisor_dict)
+ hypervisor_json = json.loads(hypervisor_temp)
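+ # Free resources on this hypervisor, in the same order as the flavor requirements: RAM (MB), disk (GB), vCPUs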
+ resources_available = [
+ hypervisor_json["free_ram_mb"],
+ hypervisor_json["disk_available_least"],
+ hypervisor_json["vcpus"] - hypervisor_json["vcpus_used"],
+ ]
+ compute_available = all(
+ x > y for x, y in zip(resources_available, server_flavor_details)
+ )
+ if compute_available:
+ return host
+
+ def check_availability_zone(
+ self, old_az, server_flavor_details, old_host, host=None
+ ):
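+ """
+ Check that the migration target stays in the availability zone of the instance
+ and select a compute host with enough free resources.
+ Returns a dict with "zone_check" (bool) and "compute_availability" (host name or None).
+ """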
+ self._reload_connection()
+ az_check = {"zone_check": False, "compute_availability": None}
+ aggregates_list = self.nova.aggregates.list()
+ for aggregate in aggregates_list:
+ aggregate_details = aggregate.to_dict()
+ aggregate_temp = json.dumps(aggregate_details)
+ aggregate_json = json.loads(aggregate_temp)
+ if aggregate_json["availability_zone"] == old_az:
+ hosts_list = aggregate_json["hosts"]
+ if host is not None:
+ if host in hosts_list:
+ az_check["zone_check"] = True
+ available_compute_id = self.check_compute_availability(
+ host, server_flavor_details
+ )
+ if available_compute_id is not None:
+ az_check["compute_availability"] = available_compute_id
+ else:
+ for check_host in hosts_list:
+ if check_host != old_host:
+ available_compute_id = self.check_compute_availability(
+ check_host, server_flavor_details
+ )
+ if available_compute_id is not None:
+ az_check["zone_check"] = True
+ az_check["compute_availability"] = available_compute_id
+ break
+ else:
+ az_check["zone_check"] = True
+ return az_check
+
+ def migrate_instance(self, vm_id, compute_host=None):
+ """
+ Migrate a vdu
+ param:
+ vm_id: ID of an instance
+ compute_host: Host to migrate the vdu to
+ """
+ self._reload_connection()
+ vm_state = False
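+ # Current status, flavor, compute host and availability zone of the instance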
+ instance_state = self.get_vdu_state(vm_id)
+ server_flavor_id = instance_state[1]
+ server_hypervisor_name = instance_state[2]
+ server_availability_zone = instance_state[3]
+ try:
+ server_flavor = self.nova.flavors.find(id=server_flavor_id).to_dict()
+ server_flavor_details = [
+ server_flavor["ram"],
+ server_flavor["disk"],
+ server_flavor["vcpus"],
+ ]
+ if compute_host == server_hypervisor_name:
+ raise vimconn.VimConnException(
+ "Unable to migrate instance '{}' to the same host '{}'".format(
+ vm_id, compute_host
+ ),
+ http_code=vimconn.HTTP_Bad_Request,
+ )
+ az_status = self.check_availability_zone(
+ server_availability_zone,
+ server_flavor_details,
+ server_hypervisor_name,
+ compute_host,
+ )
+ availability_zone_check = az_status["zone_check"]
+ available_compute_id = az_status.get("compute_availability")
+
+ if availability_zone_check is False:
+ raise vimconn.VimConnException(
+ "Unable to migrate instance '{}' to a different availability zone".format(
+ vm_id
+ ),
+ http_code=vimconn.HTTP_Bad_Request,
+ )
+ if available_compute_id is not None:
+ self.nova.servers.live_migrate(
+ server=vm_id,
+ host=available_compute_id,
+ block_migration=True,
+ disk_over_commit=False,
+ )
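+ # live_migrate is asynchronous: wait for the server to become ACTIVE again
+ # and check that it moved to the selected compute host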
+ state = "MIGRATING"
+ changed_compute_host = ""
+ if state == "MIGRATING":
+ vm_state = self.__wait_for_vm(vm_id, "ACTIVE")
+ changed_compute_host = self.get_vdu_state(vm_id)[2]
+ if vm_state and changed_compute_host == available_compute_id:
+ self.logger.debug(
+ "Instance '{}' migrated to the new compute host '{}'".format(
+ vm_id, changed_compute_host
+ )
+ )
+ return state, available_compute_id
+ else:
+ raise vimconn.VimConnException(
+ "Migration Failed. Instance '{}' not moved to the new host {}".format(
+ vm_id, available_compute_id
+ ),
+ http_code=vimconn.HTTP_Bad_Request,
+ )
+ else:
+ raise vimconn.VimConnException(
+ "Compute '{}' not available or does not have enough resources to migrate the instance".format(
+ available_compute_id
+ ),
+ http_code=vimconn.HTTP_Bad_Request,
+ )
+ except (
+ nvExceptions.BadRequest,
+ nvExceptions.ClientException,
+ nvExceptions.NotFound,
+ ) as e:
+ self._format_exception(e)
)
print(text)
return -vim_response.status_code, text
+
+ def migrate_instance(self, vm_id, compute_host=None):
+ """
+ Migrate a vdu
+ Params:
+ vm_id: ID of an instance
+ compute_host: Host to migrate the vdu to
+ """
+ # TODO: Add support for migration
+ raise vimconn.VimConnNotImplemented("Not implemented")
poweron_task = self.get_task_from_response(response.text)
return poweron_task
+
+ def migrate_instance(self, vm_id, compute_host=None):
+ """
+ Migrate a vdu
+ Params:
+ vm_id: ID of an instance
+ compute_host: Host to migrate the vdu to
+ """
+ # TODO: Add support for migration
+ raise vimconn.VimConnNotImplemented("Should have implemented this")
"""
raise VimConnNotImplemented("SFC support not implemented")
+ def migrate_instance(self, vm_id, compute_host=None):
+ """Migrate a vdu
+ Params:
+ vm_id: ID of an instance
+ compute_host: Host to migrate the vdu to
+ Returns the vm state or raises an exception upon error
+ """
+ raise VimConnNotImplemented("Should have implemented this")
+
# NOT USED METHODS in current version. Deprecated
@deprecated
def host_vim2gui(self, host, server_dict):
--- /dev/null
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+---
+features:
+ - |
+ Feature 10910 - Migration of OpenStack based VM instances from OSM
+ This feature enables live migration of VNF virtual machine instances across compute hosts.
+ Support is added for the OpenStack VIM type.