self._reload_connection()
network_dict = {"name": net_name, "admin_state_up": True}
- if net_type in ("data", "ptp"):
+ if net_type in ("data", "ptp") or provider_network_profile:
provider_physical_network = None
if provider_network_profile and provider_network_profile.get(
if numas:
numa_nodes = len(numas)
- if numa_nodes > 1:
- return -1, "Can not add flavor with more than one numa"
-
extra_specs["hw:numa_nodes"] = str(numa_nodes)
- extra_specs["hw:mem_page_size"] = "large"
- extra_specs["hw:cpu_policy"] = "dedicated"
- extra_specs["hw:numa_mempolicy"] = "strict"
if self.vim_type == "VIO":
    extra_specs["vmware:extra_config"] = '{"numa.nodeAffinity":"0"}'
    extra_specs["vmware:latency_sensitivity_level"] = "high"
for numa in numas:
+ if "id" in numa:
+ node_id = numa["id"]
+
+ if "memory" in numa:
+ memory_mb = numa["memory"] * 1024
+ memory = "hw:numa_mem.{}".format(node_id)
+ extra_specs[memory] = int(memory_mb)
+
+ if "vcpu" in numa:
+ vcpu = numa["vcpu"]
+ cpu = "hw:numa_cpus.{}".format(node_id)
+ vcpu = ",".join(map(str, vcpu))
+ extra_specs[cpu] = vcpu
+
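For reference, a minimal sketch (values illustrative, not taken from this change) of the extra_specs entries the loop above emits for a hypothetical two-node NUMA descriptor:

# Illustrative only: hypothetical two-node NUMA descriptor.
numas = [
    {"id": 0, "memory": 2, "vcpu": [0, 1]},  # memory in GiB
    {"id": 1, "memory": 4, "vcpu": [2, 3]},
]
extra_specs = {"hw:numa_nodes": str(len(numas))}
for numa in numas:
    node_id = numa["id"]
    # per-node memory in MiB, and the guest vCPUs pinned to that node
    extra_specs["hw:numa_mem.{}".format(node_id)] = int(numa["memory"] * 1024)
    extra_specs["hw:numa_cpus.{}".format(node_id)] = ",".join(map(str, numa["vcpu"]))
# extra_specs == {"hw:numa_nodes": "2",
#                 "hw:numa_mem.0": 2048, "hw:numa_cpus.0": "0,1",
#                 "hw:numa_mem.1": 4096, "hw:numa_cpus.1": "2,3"}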
# overwrite ram and vcpus
# if the numa node defines "memory", it overrides the ram value of the flavor
- if "memory" in numa:
- ram = numa["memory"] * 1024
# See for reference: https://specs.openstack.org/openstack/nova-specs/specs/mitaka/
# implemented/virt-driver-cpu-thread-pinning.html
- extra_specs["hw:cpu_sockets"] = 1
+ extra_specs["hw:cpu_sockets"] = str(numa_nodes)
if "paired-threads" in numa:
vcpus = numa["paired-threads"] * 2
"Invalid mempage-size %s. Will be ignored",
extended.get("mempage-size"),
)
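+ # Set the cpu pinning policy as specified in the descriptor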
+ if extended.get("cpu-pinning-policy"):
+ extra_specs["hw:cpu_policy"] = extended.get(
+ "cpu-pinning-policy"
+ ).lower()
+
+ # Set the cpu thread pinning policy as specified in the descriptor
+ if extended.get("cpu-thread-pinning-policy"):
+ extra_specs["hw:cpu_thread_policy"] = extended.get(
+ "cpu-thread-pinning-policy"
+ ).lower()
+
+ # Set the mem policy as specified in the descriptor
+ if extended.get("mem-policy"):
+ extra_specs["hw:numa_mempolicy"] = extended.get(
+ "mem-policy"
+ ).lower()
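As a usage sketch, assuming a descriptor extended section shaped like the fragment below (values illustrative), the three branches above translate descriptor policies into Nova extra_specs:

# Illustrative descriptor fragment; keys mirror the branches above.
extended = {
    "cpu-pinning-policy": "DEDICATED",
    "cpu-thread-pinning-policy": "PREFER",
    "mem-policy": "STRICT",
}
# After the branches above run:
# extra_specs["hw:cpu_policy"]        == "dedicated"
# extra_specs["hw:cpu_thread_policy"] == "prefer"
# extra_specs["hw:numa_mempolicy"]    == "strict"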
# create flavor
new_flavor = self.nova.flavors.create(
if disk_list:
block_device_mapping = {}
for disk in disk_list:
- if disk.get("vim_id"):
- block_device_mapping["_vd" + chr(base_disk_index)] = disk[
- "vim_id"
- ]
- existing_vim_volumes.append({"id": disk["vim_id"]})
- else:
- if "image_id" in disk:
- base_disk_index = ord("a")
+ if "image_id" in disk:
+ # persistent root volume
+ base_disk_index = ord("a")
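+ # booting from a volume, so the image reference stays empty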
+ image_id = ""
+ # use existing persistent root volume
+ if disk.get("vim_volume_id"):
+ block_device_mapping["vd" + chr(base_disk_index)] = disk[
+ "vim_volume_id"
+ ]
+ existing_vim_volumes.append({"id": disk["vim_volume_id"]})
+ # use existing persistent root volume (referenced by vim_id)
+ elif disk.get("vim_id"):
+ block_device_mapping["vd" + chr(base_disk_index)] = disk[
+ "vim_id"
+ ]
+ existing_vim_volumes.append({"id": disk["vim_id"]})
+ else:
+ # create persistent root volume
volume = self.cinder.volumes.create(
size=disk["size"],
- name=name + "_vd" + chr(base_disk_index),
+ name=name + "vd" + chr(base_disk_index),
imageRef=disk["image_id"],
# Make sure volume is in the same AZ as the VM to be attached to
availability_zone=vm_av_zone,
)
boot_volume_id = volume.id
+ created_items["volume:" + str(volume.id)] = True
+ block_device_mapping[
+ "vd" + chr(base_disk_index)
+ ] = volume.id
+ else:
+ # non-root persistent volume
+ key_id = "vim_volume_id" if "vim_volume_id" in disk else "vim_id"
+ if disk.get(key_id):
+ # use existing persistent volume
+ block_device_mapping["vd" + chr(base_disk_index)] = disk[
+ key_id
+ ]
+ existing_vim_volumes.append({"id": disk[key_id]})
else:
+ # create persistent volume
volume = self.cinder.volumes.create(
size=disk["size"],
- name=name + "_vd" + chr(base_disk_index),
+ name=name + "vd" + chr(base_disk_index),
# Make sure volume is in the same AZ as the VM to be attached to
availability_zone=vm_av_zone,
)
-
- created_items["volume:" + str(volume.id)] = True
- block_device_mapping["_vd" + chr(base_disk_index)] = volume.id
+ created_items["volume:" + str(volume.id)] = True
+ block_device_mapping[
+ "vd" + chr(base_disk_index)
+ ] = volume.id
base_disk_index += 1
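A minimal sketch of the block_device_mapping this loop builds (UUIDs hypothetical); with the leading underscore dropped, each key is a plain device name that Nova attaches the volume under:

# Illustrative result for a root volume plus one non-root volume:
block_device_mapping = {
    "vda": "0a1b2c3d-1111-2222-3333-444455556666",  # root: vim_volume_id, vim_id, or newly created
    "vdb": "7e8f9a0b-7777-8888-9999-000011112222",  # non-root persistent volume
}
# Later passed to the server create call, e.g.:
# self.nova.servers.create(..., block_device_mapping=block_device_mapping)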
)
)
server = self.nova.servers.create(
- name,
- image_id,
- flavor_id,
+ name=name,
+ image=image_id,
+ flavor=flavor_id,
nics=net_list_vim,
security_groups=self.config.get("security_groups"),
# TODO remove security_groups in future versions. Already at neutron port
try:
k_item, _, k_id = k.partition(":")
if k_item == "port":
- port_dict = self.neutron.list_ports(device_id=vm_id)
+ port_dict = self.neutron.list_ports()
existing_ports = [port["id"] for port in port_dict.get("ports", [])]
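A sketch of how this lookup is typically used (delete_port is the standard neutronclient call; checking membership first avoids failing on ports Neutron has already removed):

# Sketch, assuming k_id holds the port UUID recorded in created_items:
if k_id in existing_ports:
    self.neutron.delete_port(k_id)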
def action_vminstance(self, vm_id, action_dict, created_items={}):
"""Send and action over a VM instance from VIM
- Returns None or the console dict if the action was successfully sent to the VIM"""
+ Returns None or the console dict if the action was successfully sent to the VIM
+ """
self.logger.debug("Action over VM '%s': %s", vm_id, str(action_dict))
try:
server.resume()
elif server.status == "SHUTOFF":
server.start()
+ else:
+ self.logger.debug(
+     "ERROR: instance is not in SHUTOFF, PAUSED or SUSPENDED state"
+ )
+ raise vimconn.VimConnException(
+ "Cannot 'start' instance while it is in active state",
+ http_code=vimconn.HTTP_Bad_Request,
+ )
+
elif "pause" in action_dict:
server.pause()
elif "resume" in action_dict:
server.resume()
elif "shutoff" in action_dict or "shutdown" in action_dict:
- server.stop()
+ self.logger.debug("server status %s", server.status)
+ if server.status == "ACTIVE":
+ server.stop()
+ else:
+ self.logger.debug("ERROR: VM is not in Active state")
+ raise vimconn.VimConnException(
+ "VM is not in active state, stop operation is not allowed",
+ http_code=vimconn.HTTP_Bad_Request,
+ )
elif "forceOff" in action_dict:
server.stop() # TODO
elif "terminate" in action_dict:
nvExceptions.NotFound,
) as e:
self._format_exception(e)
+
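Usage sketch for the guards added above (the UUID is hypothetical): a "start" on an ACTIVE instance, or a "shutoff" on a non-ACTIVE one, now raises VimConnException instead of silently calling Nova:

# Illustrative call; connector is a vimconnector instance.
vm_id = "0c4b6a2f-0000-1111-2222-333344445555"  # hypothetical instance UUID
try:
    connector.action_vminstance(vm_id, {"shutoff": None})
except vimconn.VimConnException as e:
    # Raised with HTTP_Bad_Request when the instance is not ACTIVE.
    print(e.http_code, str(e))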
+ def resize_instance(self, vm_id, new_flavor_id):
+ """
+ For resizing the vm based on the given
+ flavor details
+ param:
+ vm_id : ID of an instance
+ new_flavor_id : Flavor id to be resized
+ Return the status of a resized instance
+ """
+ self._reload_connection()
+ self.logger.debug("resize the flavor of an instance")
+ instance_status, old_flavor_id, compute_host, az = self.get_vdu_state(vm_id)
+ old_flavor_disk = self.nova.flavors.get(old_flavor_id).to_dict()["disk"]
+ new_flavor_disk = self.nova.flavors.get(new_flavor_id).to_dict()["disk"]
+ try:
+ if instance_status in ("ACTIVE", "SHUTOFF"):
+ if old_flavor_disk > new_flavor_disk:
+ raise nvExceptions.BadRequest(
+ 400,
+ message="Server disk resize failed. Resize to lower disk flavor is not allowed",
+ )
+ else:
+ self.nova.servers.resize(server=vm_id, flavor=new_flavor_id)
+ vm_state = self.__wait_for_vm(vm_id, "VERIFY_RESIZE")
+ if vm_state:
+ instance_resized_status = self.confirm_resize(vm_id)
+ return instance_resized_status
+ else:
+ raise nvExceptions.BadRequest(
+ 409,
+ message="Cannot 'resize' vm_state is in ERROR",
+ )
+
+ else:
+     self.logger.debug("ERROR: instance is not in ACTIVE or SHUTOFF state")
+     raise nvExceptions.BadRequest(
+         409,
+         message="Cannot 'resize' instance while it is not in ACTIVE or SHUTOFF state",
+     )
+ except (
+ nvExceptions.BadRequest,
+ nvExceptions.ClientException,
+ nvExceptions.NotFound,
+ ) as e:
+ self._format_exception(e)
+
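Usage sketch for the new method (IDs hypothetical): resize only proceeds from ACTIVE or SHUTOFF, refuses a flavor with a smaller disk, and returns the post-confirmation status:

# Illustrative only: hypothetical instance and flavor IDs.
status = connector.resize_instance(
    "3f2b8c9e-0000-1111-2222-333344445555",  # vm_id
    "f1a2b3c4-aaaa-bbbb-cccc-ddddeeeeffff",  # new_flavor_id
)
# On success the instance passes through VERIFY_RESIZE, the resize is
# confirmed, and the returned status reflects get_vdu_state, e.g. "ACTIVE".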
+ def confirm_resize(self, vm_id):
+ """
+ Confirm the resize of an instance
+ param:
+ vm_id: ID of an instance
+ """
+ self._reload_connection()
+ self.nova.servers.confirm_resize(server=vm_id)
+ if self.get_vdu_state(vm_id)[0] == "VERIFY_RESIZE":
+ self.__wait_for_vm(vm_id, "ACTIVE")
+ instance_status = self.get_vdu_state(vm_id)[0]
+ return instance_status
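Both methods rely on __wait_for_vm, which is outside this hunk; a minimal sketch of such a poller, assuming get_vdu_state returns the status as its first element (names and timeout values are assumptions, not the module's actual implementation):

# Sketch only; the real helper in this module may differ.
import time

def __wait_for_vm(self, vm_id, status, timeout=300, interval=5):
    """Poll get_vdu_state until the instance reaches 'status' or time runs out."""
    elapsed = 0
    while elapsed < timeout:
        if self.get_vdu_state(vm_id)[0] == status:
            return True
        time.sleep(interval)
        elapsed += interval
    return False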