require_version('RwcalYang', '1.0')
require_version('RwTypes', '1.0')
require_version('RwCloudYang', '1.0')
+require_version('RwCal', '1.0')
from gi.repository import (
RwTypes,
return openmano
-def rift2openmano_vnfd(rift_vnfd):
+def rift2openmano_vnfd(rift_vnfd, rift_nsd):
openmano_vnf = {"vnf":{}}
vnf = openmano_vnf["vnf"]
raise ValueError("Internal connection point reference %s not found" % cp_ref_id)
- def rift2openmano_if_type(rift_type):
+ def rift2openmano_if_type(ext_if):
+
+ cp_ref_name = ext_if.vnfd_connection_point_ref
+ for vld in rift_nsd.vlds:
+
+ # if it is an explicit mgmt_network then check if the given
+ # cp_ref is a part of it
+ if not vld.mgmt_network:
+ continue
+
+ for vld_cp in vld.vnfd_connection_point_ref:
+ if vld_cp.vnfd_connection_point_ref == cp_ref_name:
+ return "mgmt"
+
+
+ rift_type = ext_if.virtual_interface.type_yang
+ # Retaining it for backward compatibility!
if rift_type == "OM_MGMT":
return "mgmt"
elif rift_type == "VIRTIO" or rift_type == "E1000":
vdu, ext_if = find_vdu_and_ext_if_by_cp_ref(cp.name)
connection = {
"name": cp.name,
- "type": rift2openmano_if_type(ext_if.virtual_interface.type_yang),
+ "type": rift2openmano_if_type(ext_if),
"VNFC": vdu.name,
"local_iface_name": ext_if.name,
"description": "%s iface on VDU %s" % (ext_if.name, vdu.name),
}
+  // Reusable reference to a software image: name plus optional md5
+  // checksum.  Shared by the VNFD image leafs and volume-info below.
+  grouping image-properties {
+    leaf image {
+      description
+        "Image name for the software image.
+         If the image name is found within the VNF package it will
+         be uploaded to all cloud accounts during onboarding process.
+         Otherwise, the image must be added to the cloud account with
+         the same name as entered here.
+        ";
+      type string;
+    }
+
+    leaf image-checksum {
+      description
+        "Image md5sum for the software image.
+         The md5sum, if provided, along with the image name uniquely
+         identifies an image uploaded to the CAL.
+        ";
+      type string;
+    }
+  }
+
grouping vnf-configuration {
container vnf-configuration {
rwpb:msg-new VnfConfiguration;
uses ip-profile-info;
}
}
-
+
+  // Describes one disk volume of a VDU: size, data source (empty disk,
+  // image, or pre-existing VIM volume), boot role, and how the disk is
+  // exposed to the guest.
+  grouping volume-info {
+    description "Grouping for Volume-info";
+
+    leaf description {
+      description "Description for Volume";
+      type string;
+    }
+
+    leaf size {
+      description "Size of disk in GB";
+      type uint64;
+    }
+
+    choice volume-source {
+      description
+        "Defines the source of the volume. Possible options are
+         1. Ephemeral -- Empty disk
+         2. Image -- Refer to image to be used for volume
+         3. Volume -- Reference of pre-existing volume to be used
+        ";
+
+      case ephemeral {
+        leaf ephemeral {
+          type empty;
+        }
+      }
+
+      case image {
+        // Name/checksum pair resolved against glance at instantiation time.
+        uses image-properties;
+      }
+
+      case volume {
+        leaf volume-ref {
+          description "Reference for pre-existing volume in VIM";
+          type string;
+        }
+      }
+    }
+
+    container boot-params {
+      leaf boot-volume {
+        description "This flag indicates if this is boot volume or not";
+        type boolean;
+      }
+      leaf boot-priority {
+        description "Boot priority associated with volume";
+        type int32;
+      }
+    }
+
+    // NOTE(review): the leaf names below use underscores; YANG convention
+    // is hyphenated names.  Confirm before renaming -- existing payloads
+    // and generated bindings depend on these identifiers.
+    container guest-params {
+      description "Guest virtualization parameter associated with volume";
+
+      leaf device_bus {
+        description "Type of disk-bus on which this disk is exposed to guest";
+        type enumeration {
+          enum ide;
+          enum usb;
+          enum virtio;
+          enum scsi;
+        }
+      }
+
+      leaf device_type {
+        description "The type of device as exposed to guest";
+        type enumeration {
+          enum disk;
+          enum cdrom;
+          enum floppy;
+          enum lun;
+        }
+      }
+    }
+  }
}
// replicate for pnfd container here
uses manotypes:provider-network;
+ leaf mgmt-network {
+ description "Flag indicating whether this network is a VIM management network";
+ type boolean;
+ default false;
+ }
+
choice init-params {
description "Extra parameters for VLD instantiation";
type string;
}
}
+
case vim-network-profile {
leaf ip-profile-ref {
description "Named reference to IP-profile object";
type string;
}
- }
+ }
+
}
}
type uint32;
}
+ leaf uptime {
+ description
+ "Active period of this Network Service.
+ Uptime is expressed in seconds";
+
+ type uint32;
+ }
+
list connection-point {
description
"List for external connection points.
type uint32;
}
+ leaf uptime {
+ description
+ "Active period of this Virtual Link.
+ Uptime is expressed in seconds";
+
+ type uint32;
+ }
+
leaf network-id {
description
"Identifier for the allocated network resource.";
}
}
- container vnfd-catalog {
-
- description
- "Virtual Network Function Descriptor (VNFD).";
-
- list vnfd {
- key "id";
-
+ grouping vnfd-descriptor {
leaf id {
description "Identifier for the VNFD.";
type string;
uses manotypes:alarm;
}
- leaf image {
- description
- "Image name for the software image.
- If the image name is found within the VNF packaage it will
- be uploaded to all cloud accounts during onboarding process.
- Otherwise, the image must be added to the cloud account with
- the same name as entered here.
- ";
- mandatory true;
- type string;
- }
-
- leaf image-checksum {
- description
- "Image md5sum for the software image.
- The md5sum, if provided, along with the image name uniquely
- identifies an image uploaded to the CAL.
- ";
- type string;
- }
+ uses manotypes:image-properties;
choice cloud-init-input {
description
}
uses virtual-interface;
}
+
+ list volumes {
+ key "name";
+
+ leaf name {
+ description "Name of the disk-volumes, e.g. vda, vdb etc";
+ type string;
+ }
+
+ uses manotypes:volume-info;
+ }
}
list vdu-dependency {
key "name";
uses manotypes:placement-group-info;
-
+
list member-vdus {
description
}
}
}
- }
+ }
+
+ container vnfd-catalog {
+ description
+ "Virtual Network Function Descriptor (VNFD).";
+
+ list vnfd {
+ key "id";
+
+ uses vnfd-descriptor;
+ }
}
}
type uint32;
}
- leaf vnfd-ref {
- description "Reference to VNFD";
- type leafref {
- path "/vnfd:vnfd-catalog/vnfd:vnfd/vnfd:id";
- }
+ leaf uptime {
+ description
+ "Active period of this Virtual Network Function.
+ Uptime is expressed in seconds";
+
+ type uint32;
+ }
+
+ container vnfd {
+ description "VNF descriptor used to instantiate this VNF";
+ uses vnfd:vnfd-descriptor;
}
// Use parameters provided here to configure this VNF
leaf vdu-id-ref {
type leafref {
- path "/vnfd:vnfd-catalog/vnfd:vnfd"
- + "[vnfd:id = current()/../../vnfr:vnfd-ref]"
- + "/vnfd:vdu/vnfd:id";
+ path "../../vnfd/vdu/id";
}
}
uses manotypes:hypervisor-epa;
uses manotypes:host-epa;
+ list volumes {
+ key "name";
+
+ leaf name {
+ description "Name of the disk-volumes, e.g. vda, vdb etc";
+ type string;
+ }
+
+ leaf volume-id {
+ description "VIM assigned volume id";
+ type string;
+ }
+
+ uses manotypes:volume-info;
+ }
+
list alarms {
description
"A list of the alarms that have been created for this VDU";
try:
ksconn = self._get_keystone_connection()
service_endpoint = ksconn.service_catalog.url_for(**endpoint_kwargs)
+ except (KeystoneExceptions.Unauthorized, KeystoneExceptions.AuthorizationFailure) as e:
+ raise
except Exception as e:
logger.error("OpenstackDriver: Service Catalog discovery operation failed for service_type: %s, endpoint_type: %s. Exception: %s" %(service_type, endpoint_type, str(e)))
raise
{
server_name(string) : Name of the VM/Server
flavor_id (string) : UUID of the flavor to be used for VM
- image_id (string) : UUID of the image to be used VM/Server instance
+ image_id (string) : UUID of the image to be used VM/Server instance,
+ This could be None if volumes (with images) are being used
network_list(List) : A List of network_ids. A port will be created in these networks
port_list (List) : A List of port-ids. These ports will be added to VM.
metadata (dict) : A dictionary of arbitrary key-value pairs associated with VM/server
nvconn = self._get_nova_connection()
+
try:
server = nvconn.servers.create(kwargs['name'],
kwargs['image_id'],
userdata = kwargs['userdata'],
security_groups = kwargs['security_groups'],
availability_zone = kwargs['availability_zone'],
- block_device_mapping = None,
+ block_device_mapping_v2 = kwargs['block_device_mapping_v2'],
nics = nics,
scheduler_hints = kwargs['scheduler_hints'],
config_drive = None)
logger.error("OpenstackDriver: Release Floating IP operation failed. Exception: %s" %str(e))
raise
+    def volume_list(self, server_id):
+        """
+        List of volumes attached to the server
+
+        Arguments:
+           server_id (string): UUID of the server whose attached volumes
+                               are listed
+        Returns:
+           List of dictionary objects where dictionary is representation of class (novaclient.v2.volumes.Volume)
+        """
+        nvconn = self._get_nova_connection()
+        try:
+            volumes = nvconn.volumes.get_server_volumes(server_id=server_id)
+        except Exception as e:
+            logger.error("OpenstackDriver: Get volume information failed. Exception: %s" %str(e))
+            raise
+
+        # Convert novaclient Volume objects into plain dicts for callers
+        volume_info = [v.to_dict() for v in volumes]
+        return volume_info
+
+
def group_list(self):
"""
List of Server Affinity and Anti-Affinity Groups
logger.error("Could not identity the version information for openstack service endpoints. Auth_URL should contain \"/v2\" or \"/v3\" string in it")
raise NotImplementedError("Auth URL is wrong or invalid. Only Keystone v2 & v3 supported")
+ self._mgmt_network_id = None
if mgmt_network != None:
self._mgmt_network = mgmt_network
try:
ntconn = self.neutron_drv._get_neutron_connection()
networks = ntconn.list_networks()
+ except (KeystoneExceptions.Unauthorized, KeystoneExceptions.AuthorizationFailure) as e:
+ raise
except Exception as e:
logger.error("OpenstackDriver: List Network operation failed. Exception: %s" %(str(e)))
raise
return self.nova_drv.flavor_get(flavor_id)
def nova_server_create(self, **kwargs):
+        def _verify_image(image_id):
+            # Ensure the referenced glance image exists and is in 'active'
+            # state before attempting to boot a server from it.
+            image = self.glance_drv.image_get(image_id)
+            if image['status'] != 'active':
+                raise GlanceException.NotFound("Image with image_id: %s not found in active state. Current State: %s" %(image['id'], image['status']))
+
assert kwargs['flavor_id'] == self.nova_drv.flavor_get(kwargs['flavor_id'])['id']
- image = self.glance_drv.image_get(kwargs['image_id'])
- if image['status'] != 'active':
- raise GlanceException.NotFound("Image with image_id: %s not found in active state. Current State: %s" %(image['id'], image['status']))
+
+ if kwargs['block_device_mapping_v2'] is not None:
+ for block_map in kwargs['block_device_mapping_v2']:
+ if 'uuid' in block_map:
+ _verify_image(block_map['uuid'])
+ else:
+ _verify_image(kwargs['image_id'])
# if 'network_list' in kwargs:
# kwargs['network_list'].append(self._mgmt_network_id)
def nova_server_group_list(self):
return self.nova_drv.group_list()
+ def nova_volume_list(self, server_id):
+ return self.nova_drv.volume_list(server_id)
+
def neutron_network_list(self):
return self.neutron_drv.network_list()
### Important to call create_port_metadata before assign_floating_ip_address
### since assign_floating_ip_address can wait thus delaying port_metadata creation
- ### Wait for 2 minute for server to come up -- Needs fine tuning
- wait_time = 120
- sleep_time = 1
+ ### Wait for a max of 5 minute for server to come up -- Needs fine tuning
+ wait_time = 300
+ sleep_time = 2
for i in range(int(wait_time/sleep_time)):
server = drv.nova_server_get(argument.server_id)
if server['status'] == 'ACTIVE':
import rift.cal.rwcal_status as rwcal_status
import rwlogger
import neutronclient.common.exceptions as NeutronException
+import keystoneclient.exceptions as KeystoneExceptions
from gi.repository import (
GObject,
tenant_name = account.openstack.tenant,
mgmt_network = account.openstack.mgmt_network,
cert_validate = account.openstack.cert_validate )
+ except (KeystoneExceptions.Unauthorized, KeystoneExceptions.AuthorizationFailure,
+ NeutronException.NotFound) as e:
+ raise
except Exception as e:
self.log.error("RwcalOpenstackPlugin: OpenstackDriver init failed. Exception: %s" %(str(e)))
raise
Validation Code and Details String
"""
status = RwcalYang.CloudConnectionStatus()
-
try:
with self._use_driver(account) as drv:
drv.validate_account_creds()
+ except KeystoneExceptions.Unauthorized as e:
+ self.log.error("Invalid credentials given for VIM account %s" %account.name)
+ status.status = "failure"
+ status.details = "Invalid Credentials: %s" % str(e)
+
+ except KeystoneExceptions.AuthorizationFailure as e:
+ self.log.error("Bad authentication URL given for VIM account %s. Given auth url: %s" % (
+ account.name, account.openstack.auth_url))
+ status.status = "failure"
+ status.details = "Invalid auth url: %s" % str(e)
+
+ except NeutronException.NotFound as e:
+ self.log.error("Given management network %s could not be found for VIM account %s" % (
+ account.openstack.mgmt_network, account.name))
+ status.status = "failure"
+ status.details = "mgmt network does not exist: %s" % str(e)
+
except openstack_drv.ValidationError as e:
self.log.error("RwcalOpenstackPlugin: OpenstackDriver credential validation failed. Exception: %s", str(e))
status.status = "failure"
kwargs = {}
kwargs['name'] = vminfo.vm_name
kwargs['flavor_id'] = vminfo.flavor_id
- kwargs['image_id'] = vminfo.image_id
+ if vminfo.has_field('image_id'):
+ kwargs['image_id'] = vminfo.image_id
with self._use_driver(account) as drv:
### If floating_ip is required and we don't have one, better fail before any further allocation
return link
@staticmethod
- def _fill_vdu_info(vm_info, flavor_info, mgmt_network, port_list, server_group):
+ def _fill_vdu_info(vm_info, flavor_info, mgmt_network, port_list, server_group, volume_list = None):
"""Create a GI object for VDUInfoParams
Converts VM information dictionary object returned by openstack
if flavor_info is not None:
RwcalOpenstackPlugin._fill_epa_attributes(vdu, flavor_info)
+
+ # Fill the volume information
+ if volume_list is not None:
+ for os_volume in volume_list:
+ volr = vdu.volumes.add()
+ try:
+ " Device name is of format /dev/vda"
+ vol_name = (os_volume['device']).split('/')[2]
+ except:
+ continue
+ volr.name = vol_name
+ volr.volume_id = os_volume['volumeId']
+
return vdu
@rwcalstatus(ret_on_failure=[""])
raise OpenstackCALOperationFailure("Create-flavor operation failed for cloud account: %s" %(account.name))
return flavor_id
+    def _create_vm(self, account, vduinfo, pci_assignement=None, server_group=None, port_list=None, network_list=None, imageinfo_list=None):
+        """Create a new virtual machine.
+
+        Arguments:
+            account - a cloud account
+            vduinfo - information that defines the type of VM to create
+            pci_assignement - PCI assignment string stored in VM metadata (optional)
+            server_group - id of the server (anti-)affinity group for placement (optional)
+            port_list - list of pre-created port-ids to attach to the VM (optional)
+            network_list - list of network-ids in which ports will be created (optional)
+            imageinfo_list - glance image info dicts used to resolve volume images (optional)
+
+        Returns:
+            The VM id
+        """
+        kwargs = {}
+        kwargs['name'] = vduinfo.name
+        kwargs['flavor_id'] = vduinfo.flavor_id
+        if vduinfo.has_field('image_id'):
+            kwargs['image_id'] = vduinfo.image_id
+        else:
+            # Volume-booted VMs carry no top-level image
+            kwargs['image_id'] = ""
+
+        with self._use_driver(account) as drv:
+            ### If floating_ip is required and we don't have one, better fail before any further allocation
+            if vduinfo.has_field('allocate_public_address') and vduinfo.allocate_public_address:
+                if account.openstack.has_field('floating_ip_pool'):
+                    pool_name = account.openstack.floating_ip_pool
+                else:
+                    pool_name = None
+                floating_ip = self._allocate_floating_ip(drv, pool_name)
+            else:
+                floating_ip = None
+
+        if vduinfo.has_field('vdu_init') and vduinfo.vdu_init.has_field('userdata'):
+            kwargs['userdata'] = vduinfo.vdu_init.userdata
+        else:
+            kwargs['userdata'] = ''
+
+        if account.openstack.security_groups:
+            kwargs['security_groups'] = account.openstack.security_groups
+
+        kwargs['port_list'] = port_list
+        kwargs['network_list'] = network_list
+
+        metadata = {}
+        # Add all metadata related fields
+        if vduinfo.has_field('node_id'):
+            metadata['node_id'] = vduinfo.node_id
+        if pci_assignement is not None:
+            metadata['pci_assignement'] = pci_assignement
+        kwargs['metadata'] = metadata
+
+        if vduinfo.has_field('availability_zone') and vduinfo.availability_zone.has_field('name'):
+            kwargs['availability_zone'] = vduinfo.availability_zone
+        else:
+            kwargs['availability_zone'] = None
+
+        if server_group is not None:
+            kwargs['scheduler_hints'] = {'group': server_group}
+        else:
+            kwargs['scheduler_hints'] = None
+
+        kwargs['block_device_mapping_v2'] = None
+        if vduinfo.has_field('volumes'):
+            kwargs['block_device_mapping_v2'] = []
+            # Only image->volume and blank (ephemeral) sources supported here;
+            # no driver access is needed to build the mapping.
+            for volume in vduinfo.volumes:
+                block_map = dict()
+                block_map['boot_index'] = volume.boot_params.boot_priority
+                if "image" in volume:
+                    # Match retrieved image info with the volume's image
+                    # name and (optional) checksum
+                    if volume.image is not None:
+                        matching_images = [img for img in imageinfo_list if img['name'] == volume.image]
+                        if volume.image_checksum is not None:
+                            matching_images = [img for img in matching_images if img['checksum'] == volume.image_checksum]
+                        # Guard before indexing: the previous code indexed
+                        # matching_images[0] unconditionally and raised a bare
+                        # IndexError (and formatted the non-existent field
+                        # volume.checksum) when no image matched.
+                        if not matching_images or matching_images[0]['id'] is None:
+                            raise OpenstackCALOperationFailure("Create-vdu operation failed. Volume image not found for name {} checksum {}".format(volume.name, volume.image_checksum))
+                        block_map['uuid'] = matching_images[0]['id']
+                    block_map['source_type'] = "image"
+                else:
+                    block_map['source_type'] = "blank"
+
+                block_map['device_name'] = volume.name
+                block_map['destination_type'] = "volume"
+                block_map['volume_size'] = volume.size
+                block_map['delete_on_termination'] = True
+                if volume.guest_params.has_field('device_type') and volume.guest_params.device_type == 'cdrom':
+                    block_map['device_type'] = 'cdrom'
+                if volume.guest_params.has_field('device_bus') and volume.guest_params.device_bus == 'ide':
+                    block_map['disk_bus'] = 'ide'
+                kwargs['block_device_mapping_v2'].append(block_map)
+
+        with self._use_driver(account) as drv:
+            vm_id = drv.nova_server_create(**kwargs)
+            if floating_ip:
+                self.prepare_vdu_on_boot(account, vm_id, floating_ip)
+
+        return vm_id
+
+    def get_openstack_image_info(self, account, image_name, image_checksum=None):
+        """Look up a glance image by name and optional md5 checksum.
+
+        Arguments:
+            account - a cloud account
+            image_name - name of the glance image to find
+            image_checksum - optional md5 checksum used to disambiguate
+                             same-named images
+
+        Returns:
+            The matching glance image info dict
+
+        Raises:
+            OpenstackCALOperationFailure if no image matches, or several
+            same-named images with different checksums match.
+        """
+        self.log.debug("Looking up image id for image name %s and checksum %s on cloud account: %s",
+                       image_name, image_checksum, account.name
+                       )
+
+        image_list = []
+        with self._use_driver(account) as drv:
+            image_list = drv.glance_image_list()
+        matching_images = [img for img in image_list if img['name'] == image_name]
+
+        # If the image checksum was filled in then further filter the images by the checksum
+        if image_checksum is not None:
+            matching_images = [img for img in matching_images if img['checksum'] == image_checksum]
+        else:
+            self.log.warning("Image checksum not provided. Lookup using image name (%s) only.",
+                             image_name)
+
+        if len(matching_images) == 0:
+            # Raise this plugin's failure type: ResMgrCALOperationFailure is
+            # not defined in this module; every other error path here raises
+            # the Openstack variant.
+            raise OpenstackCALOperationFailure("Could not find image name {} (using checksum: {}) for cloud account: {}".format(
+                image_name, image_checksum, account.name
+            ))
+
+        elif len(matching_images) > 1:
+            # Image entries are plain dicts ('name'/'checksum'/'id' keys), so
+            # index by key -- attribute access (i.checksum) would raise
+            # AttributeError.
+            unique_checksums = {i['checksum'] for i in matching_images}
+            if len(unique_checksums) > 1:
+                msg = ("Too many images with different checksums matched "
+                       "image name of %s for cloud account: %s" % (image_name, account.name))
+                raise OpenstackCALOperationFailure(msg)
+
+        return matching_images[0]
+
@rwcalstatus(ret_on_failure=[""])
def do_create_vdu(self, account, vdu_init):
"""Create a new virtual deployment unit
The vdu_id
"""
### First create required number of ports aka connection points
+ # Add the mgmt_ntwk by default.
+ mgmt_network_id = None
with self._use_driver(account) as drv:
+ mgmt_network_id = drv._mgmt_network_id
### If floating_ip is required and we don't have one, better fail before any further allocation
if vdu_init.has_field('allocate_public_address') and vdu_init.allocate_public_address:
if account.openstack.has_field('floating_ip_pool'):
port_list = []
network_list = []
+ imageinfo_list = []
+ is_explicit_mgmt_defined = False
for c_point in vdu_init.connection_points:
+ # if the user has specified explicit mgmt_network connection point
+ # then remove the mgmt_network from the VM list
+ if c_point.virtual_link_id == mgmt_network_id:
+ is_explicit_mgmt_defined = True
if c_point.virtual_link_id in network_list:
assert False, "Only one port per network supported. Refer: http://specs.openstack.org/openstack/nova-specs/specs/juno/implemented/nfv-multiple-if-1-net.html"
else:
if not vdu_init.has_field('flavor_id'):
vdu_init.flavor_id = self._select_resource_flavor(account,vdu_init)
- ### Check VDU Virtual Interface type and make sure VM with property exists
- if vdu_init.connection_points is not None:
- ### All virtual interfaces need to be of the same type for Openstack Accounts
- if not all(cp.type_yang == vdu_init.connection_points[0].type_yang for cp in vdu_init.connection_points):
- ### We have a mix of E1000 & VIRTIO virtual interface types in the VDU, abort instantiation.
- assert False, "Only one type of Virtual Intefaces supported for Openstack accounts. Found a mix of VIRTIO & E1000."
+ ### Obtain all images for volumes and perform validations
+ if vdu_init.has_field('volumes'):
+ for volume in vdu_init.volumes:
+ if "image" in volume:
+ image_checksum = volume.image_checksum if volume.has_field("image_checksum") else None
+ image_info = self.get_openstack_image_info(account, volume.image, image_checksum)
+ imageinfo_list.append(image_info)
+ elif vdu_init.has_field('image_id'):
+ with self._use_driver(account) as drv:
+ image_info = drv.glance_image_get(vdu_init.image_id)
+ imageinfo_list.append(image_info)
- with self._use_driver(account) as drv:
- img_info = drv.glance_image_get(vdu_init.image_id)
+ if not imageinfo_list:
+ err_str = ("VDU has no image information")
+ self.log.error(err_str)
+ raise OpenstackCALOperationFailure("Create-vdu operation failed. Error- %s" % err_str)
- virt_intf_type = vdu_init.connection_points[0].type_yang
- if virt_intf_type == 'E1000':
- if 'hw_vif_model' in img_info and img_info.hw_vif_model == 'e1000':
- self.log.debug("VDU has Virtual Interface E1000, found matching image with property hw_vif_model=e1000")
+ ### Check VDU Virtual Interface type and make sure VM with property exists
+ if vdu_init.connection_points:
+ ### All virtual interfaces need to be of the same type for Openstack Accounts
+ if not (all(cp.type_yang == 'E1000' for cp in vdu_init.connection_points) or all(cp.type_yang != 'E1000' for cp in vdu_init.connection_points)):
+ ### We have a mix of E1000 & VIRTIO/SR_IPOV virtual interface types in the VDU, abort instantiation.
+ assert False, "Only one type of Virtual Intefaces supported for Openstack accounts. Found a mix of VIRTIO/SR_IOV & E1000."
+
+            ### It is not clear if all the images need to be checked for HW properties. In the absence of model info describing each image's properties,
+            ### we shall assume that all images need to have similar properties
+ for img_info in imageinfo_list:
+
+ virt_intf_type = vdu_init.connection_points[0].type_yang
+ if virt_intf_type == 'E1000':
+ if 'hw_vif_model' in img_info and img_info.hw_vif_model == 'e1000':
+ self.log.debug("VDU has Virtual Interface E1000, found matching image with property hw_vif_model=e1000")
+ else:
+ err_str = ("VDU has Virtual Interface E1000, but image '%s' does not have property hw_vif_model=e1000" % img_info.name)
+ self.log.error(err_str)
+ raise OpenstackCALOperationFailure("Create-vdu operation failed. Error- %s" % err_str)
+ elif virt_intf_type == 'VIRTIO' or virt_intf_type == 'SR_IOV':
+ if 'hw_vif_model' in img_info:
+ err_str = ("VDU has Virtual Interface %s, but image '%s' has hw_vif_model mismatch" % virt_intf_type,img_info.name)
+ self.log.error(err_str)
+ raise OpenstackCALOperationFailure("Create-vdu operation failed. Error- %s" % err_str)
+ else:
+ self.log.debug("VDU has Virtual Interface %s, found matching image" % virt_intf_type)
else:
- err_str = ("VDU has Virtual Interface E1000, but image '%s' does not have property hw_vif_model=e1000" % img_info.name)
- self.log.error(err_str)
- raise OpenstackCALOperationFailure("Create-vdu operation failed. Error- %s" % err_str)
- elif virt_intf_type == 'VIRTIO':
- if 'hw_vif_model' in img_info:
- err_str = ("VDU has Virtual Interface VIRTIO, but image '%s' has hw_vif_model mismatch" % img_info.name)
+ err_str = ("VDU Virtual Interface '%s' not supported yet" % virt_intf_type)
self.log.error(err_str)
- raise OpenstackCALOperationFailure("Create-vdu operation failed. Error- %s" % err_str)
- else:
- self.log.debug("VDU has Virtual Interface VIRTIO, found matching image")
- else:
- err_str = ("VDU Virtual Interface '%s' not supported yet" % virt_intf_type)
- self.log.error(err_str)
- raise OpenstackCALOperationFailure("Create-vdu operation failed. Error- %s" % err_str)
+ raise OpenstackCALOperationFailure("Create-vdu operation failed. Error- %s" % err_str)
with self._use_driver(account) as drv:
### Now Create VM
- vm = RwcalYang.VMInfoItem()
- vm.vm_name = vdu_init.name
- vm.flavor_id = vdu_init.flavor_id
- vm.image_id = vdu_init.image_id
- vm_network = vm.network_list.add()
- vm_network.network_id = drv._mgmt_network_id
- if vdu_init.has_field('vdu_init') and vdu_init.vdu_init.has_field('userdata'):
- vm.cloud_init.userdata = vdu_init.vdu_init.userdata
-
- if vdu_init.has_field('node_id'):
- vm.user_tags.node_id = vdu_init.node_id;
-
- if vdu_init.has_field('availability_zone') and vdu_init.availability_zone.has_field('name'):
- vm.availability_zone = vdu_init.availability_zone.name
-
+ vm_network_list = []
+ if not is_explicit_mgmt_defined:
+ vm_network_list.append(drv._mgmt_network_id)
+
+ if vdu_init.has_field('volumes'):
+ # Only combination supported: Image->Volume
+ for volume in vdu_init.volumes:
+ if "volume" in volume:
+ err_str = ("VDU Volume source not supported yet")
+ self.log.error(err_str)
+ raise OpenstackCALOperationFailure("Create-vdu operation failed. Error- %s" % err_str)
+ if "guest_params" not in volume:
+ err_str = ("VDU Volume destination parameters '%s' not defined")
+ self.log.error(err_str)
+ raise OpenstackCALOperationFailure("Create-vdu operation failed. Error- %s" % err_str)
+ if not volume.guest_params.has_field('device_type'):
+ err_str = ("VDU Volume destination type '%s' not defined")
+ self.log.error(err_str)
+ raise OpenstackCALOperationFailure("Create-vdu operation failed. Error- %s" % err_str)
+ if volume.guest_params.device_type not in ['disk', 'cdrom'] :
+ err_str = ("VDU Volume destination type '%s' not supported" % volume.guest_params.device_type)
+ self.log.error(err_str)
+ raise OpenstackCALOperationFailure("Create-vdu operation failed. Error- %s" % err_str)
+
+
+ server_group = None
if vdu_init.has_field('server_group'):
- ### Get list of server group in openstack for name->id mapping
- openstack_group_list = drv.nova_server_group_list()
- group_id = [ i['id'] for i in openstack_group_list if i['name'] == vdu_init.server_group.name]
- if len(group_id) != 1:
- raise OpenstackServerGroupError("VM placement failed. Server Group %s not found in openstack. Available groups" %(vdu_init.server_group.name, [i['name'] for i in openstack_group_list]))
- vm.server_group = group_id[0]
-
- for port_id in port_list:
- port = vm.port_list.add()
- port.port_id = port_id
+ ### Get list of server group in openstack for name->id mapping
+ openstack_group_list = drv.nova_server_group_list()
+ group_id = [ i['id'] for i in openstack_group_list if i['name'] == vdu_init.server_group.name]
+ if len(group_id) != 1:
+ raise OpenstackServerGroupError("VM placement failed. Server Group %s not found in openstack. Available groups" %(vdu_init.server_group.name, [i['name'] for i in openstack_group_list]))
+ server_group = group_id[0]
pci_assignement = self.prepare_vpci_metadata(drv, vdu_init)
if pci_assignement != '':
vm.user_tags.pci_assignement = pci_assignement
- vm_id = self.do_create_vm(account, vm, no_rwstatus=True)
+ vm_id = self._create_vm(account, vdu_init, pci_assignement=pci_assignement, server_group=server_group, port_list=port_list, network_list=vm_network_list, imageinfo_list = imageinfo_list)
self.prepare_vdu_on_boot(account, vm_id, floating_ip)
return vm_id
Object of type RwcalYang.VDUInfoParams
"""
with self._use_driver(account) as drv:
-
- ### Get list of ports excluding the one for management network
- port_list = [p for p in drv.neutron_port_list(**{'device_id': vdu_id}) if p['network_id'] != drv.get_mgmt_network_id()]
+ port_list = drv.neutron_port_list(**{'device_id': vdu_id})
vm = drv.nova_server_get(vdu_id)
openstack_group_list = drv.nova_server_group_list()
server_group = [ i['name'] for i in openstack_group_list if vm['id'] in i['members']]
+ openstack_srv_volume_list = drv.nova_volume_list(vm['id'])
vdu_info = RwcalOpenstackPlugin._fill_vdu_info(vm,
flavor_info,
account.openstack.mgmt_network,
port_list,
- server_group)
+ server_group,
+ volume_list = openstack_srv_volume_list)
if vdu_info.state == 'active':
try:
console_info = drv.nova_server_console(vdu_info.vdu_id)
with self._use_driver(account) as drv:
vms = drv.nova_server_list()
for vm in vms:
- ### Get list of ports excluding one for management network
- port_list = [p for p in drv.neutron_port_list(**{'device_id': vm['id']}) if p['network_id'] != drv.get_mgmt_network_id()]
+ port_list = drv.neutron_port_list(**{'device_id': vm['id']})
flavor_info = None
openstack_group_list = drv.nova_server_group_list()
server_group = [ i['name'] for i in openstack_group_list if vm['id'] in i['members']]
+ openstack_srv_volume_list = drv.nova_volume_list(vm['id'])
vdu = RwcalOpenstackPlugin._fill_vdu_info(vm,
flavor_info,
account.openstack.mgmt_network,
port_list,
- server_group)
+ server_group,
+ volume_list = openstack_srv_volume_list)
if vdu.state == 'active':
try:
console_info = drv.nova_server_console(vdu.vdu_id)
uses connection-point-type;
}
+ leaf mgmt-network {
+ description
+ "Explicit mgmt-network name, otherwise the mgmt-network from
+ Cloud account is used";
+ type string;
+ }
+
leaf allocate-public-address {
description "If this VDU needs public IP address";
type boolean;
type string;
}
}
+
+ list volumes {
+ key "name";
+
+ leaf name {
+ description "Name of the disk-volumes, e.g. vda, vdb etc";
+ type string;
+ }
+ uses manotypes:volume-info;
+ }
}
container vdu-init-params {
type string;
description "Console URL from the VIM, if available";
}
+
+ list volumes {
+ key "name";
+
+ leaf name {
+ description "Name of the disk-volumes, e.g. vda, vdb etc";
+ type string;
+ }
+
+ leaf volume-id {
+ description "CAL assigned volume-id ";
+ rwpb:field-inline "true";
+ rwpb:field-string-max 64;
+ type string;
+ }
+
+ }
}
+
container vnf-resources {
rwpb:msg-new VNFResources;
config false;
openstack_info = {
'username' : 'pluto',
'password' : 'mypasswd',
- 'auth_url' : 'http://10.66.4.14:5000/v3/',
+ 'auth_url' : 'http://10.66.4.17:5000/v3/',
'project_name' : 'demo',
'mgmt_network' : 'private',
'reserved_flavor' : 'm1.medium',
- 'reserved_image' : 'rift-root-latest.qcow2',
+ 'reserved_image' : 'Fedora-x86_64-20-20131211.1-sda-ping.qcow2',
'physical_network' : None,
'network_type' : None,
'segmentation_id' : None
Creates an object for class RwcalYang.CloudAccount()
"""
account = RwcalYang.CloudAccount()
+ account.name = "Gruntxx"
account.account_type = "openstack"
account.openstack.key = openstack_info['username']
account.openstack.secret = openstack_info['password']
rc, rs = self.cal.get_network_list(self._acct)
self.assertEqual(rc, RwStatus.SUCCESS)
- networks = [ network for network in rs.networkinfo_list if (network.network_name == 'rift.cal.unittest.network' or network.network_name == 'rift.cal.virtual_link') ]
+ networks = [ network for network in rs.networkinfo_list if ((network.network_name == 'rift.cal.unittest.network') or ('rift.cal.virtual_link' in network.network_name) ) ]
for network in networks:
+ logger.debug("Openstack-CAL-Test: Deleting old VL %s", network.network_id)
self.cal.delete_virtual_link(self._acct, network.network_id)
def tearDown(self):
vlink_req = self._get_virtual_link_request_info()
rc, rsp = self.cal.create_virtual_link(self._acct, vlink_req)
- self.assertEqual(rc, RwStatus.SUCCESS)
+ self.assertEqual(rc.status, RwStatus.SUCCESS)
logger.info("Openstack-CAL-Test: Created virtual_link with Id: %s" %rsp)
vlink_id = rsp
logger.info("Openstack-CAL-Test: Test Create VDU API")
rc, rsp = self.cal.create_vdu(self._acct, vdu_req)
- self.assertEqual(rc, RwStatus.SUCCESS)
+ self.assertEqual(rc.status, RwStatus.SUCCESS)
logger.info("Openstack-CAL-Test: Created vdu with Id: %s" %rsp)
vdu_id = rsp
### Create another virtual_link
rc, rsp = self.cal.create_virtual_link(self._acct, vlink_req)
- self.assertEqual(rc, RwStatus.SUCCESS)
+ self.assertEqual(rc.status, RwStatus.SUCCESS)
logger.info("Openstack-CAL-Test: Created virtual_link with Id: %s" %rsp)
vlink_id2= rsp
logger.info("Openstack-CAL-Test: VDU/Virtual Link create-delete test successfully completed")
-
class VmData(object):
"""A convenience class that provides all the stats and EPA Attributes
from the VM provided
if __name__ == "__main__":
- logging.basicConfig(level=logging.INFO)
+ logging.basicConfig(level=logging.DEBUG)
unittest.main()
try:
path = os.path.join(self._rift_artif_dir,
'launchpad/libs',
- agent_vnfr.vnfr_msg.vnfd_ref,
+ agent_vnfr.vnfr_msg.vnfd.id,
'charms/trusty',
charm)
self._log.debug("jujuCA: Charm dir is {}".format(path))
RwLaunchpadYang as launchpadyang,
RwVnfrYang,
RwVnfdYang,
- RwNsdYang
+ RwNsdYang,
+ VnfrYang
)
store.get_vnfd = mock.MagicMock(return_value=mock_vnfd)
- mock_vnfr = RwVnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr.from_dict({
- 'id': '1',
- 'vnfd_ref': '1',
- })
+ mock_vnfr = RwVnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr.from_dict({'id': '1'})
+ mock_vnfr.vnfd = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_Vnfd.from_dict({'id': '1'})
+
store.get_vnfr = mock.MagicMock(return_value=mock_vnfr)
mock_nsr = RwNsrYang.YangData_Nsr_NsInstanceOpdata_Nsr.from_dict({
vdu_metrics.memory.utilization = 100 * vdu_metrics.memory.used / vdu_metrics.memory.total
# Storage
- vdu_metrics.storage.used = metrics.storage.used
- vdu_metrics.storage.total = 1e9 * self.vdur.vm_flavor.storage_gb
- vdu_metrics.storage.utilization = 100 * vdu_metrics.storage.used / vdu_metrics.storage.total
+ try:
+ vdu_metrics.storage.used = metrics.storage.used
+ if self.vdur.has_field('volumes'):
+ for volume in self.vdur.volumes:
+ if vdu_metrics.storage.total is None:
+ vdu_metrics.storage.total = 1e9 * volume.size
+ else:
+ vdu_metrics.storage.total += (1e9 * volume.size)
+ else:
+ vdu_metrics.storage.total = 1e9 * self.vdur.vm_flavor.storage_gb
+ utilization = 100 * vdu_metrics.storage.used / vdu_metrics.storage.total
+ if utilization > 100:
+ utilization = 100
+ vdu_metrics.storage.utilization = utilization
+ except ZeroDivisionError:
+ vdu_metrics.storage.utilization = 0
# Network (incoming)
vdu_metrics.network.incoming.packets = metrics.network.incoming.packets
# This indicates that the NSD had no mon-param config.
if not nsd.monitoring_param:
for vnfr in constituent_vnfrs:
- vnfd = store.get_vnfd(vnfr.vnfd_ref)
+ vnfd = store.get_vnfd(vnfr.vnfd.id)
for monp in vnfd.monitoring_param:
mon_params.append(NsrMonitoringParam(
monp,
# value => (value_type, value)
self.vnfr_monparams = {}
+        # create_nsr_mon_params() already validates 'is_legacy' by checking whether
+        # the NSD has 'monitoring_param', so the 'self.aggregation_type is None' check was dropped.
+ self.is_legacy = is_legacy
+
if not is_legacy:
self._msg = self._convert_nsd_msg(monp_config)
else:
"""Aggregation type"""
return self.nsr_mon_param_msg.aggregation_type
- @property
- def is_legacy(self):
- return (self.aggregation_type is None)
+ # @property
+ # def is_legacy(self):
+ # return (self.aggregation_type is None)
@classmethod
def extract_value(cls, monp):
def _convert_nsd_msg(self, nsd_monp):
"""Create initial msg without values"""
- vnfd_to_vnfr = {vnfr.vnfd_ref: vnfr_id
+ vnfd_to_vnfr = {vnfr.vnfd.id: vnfr_id
for vnfr_id, vnfr in self._constituent_vnfr_map.items()}
# First, convert the monp param ref from vnfd to vnfr terms.
vnf_mon = vnfr_core.VnfMonitorDtsHandler.from_vnf_data(
self,
vnfr,
- self.store.get_vnfd(vnfr.vnfd_ref))
+ self.store.get_vnfd(vnfr.vnfd.id))
self.vnfr_monitors[vnfr.id] = vnf_mon
self.vnfrs[vnfr.id] = vnfr
mock_vnfr = RwVnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr.from_dict({
'id': '1',
- 'vnfd_ref': '1',
'monitoring_param': ([monp.as_dict() for monp in monps] if not legacy else [])
})
+ mock_vnfr.vnfd = vnfryang.YangData_Vnfr_VnfrCatalog_Vnfr_Vnfd.from_dict({'id': '1'})
store.get_vnfr = mock.MagicMock(return_value=mock_vnfr)
mock_nsr = RwNsrYang.YangData_Nsr_NsInstanceOpdata_Nsr.from_dict({
class OpenmanoVnfr(object):
- def __init__(self, log, loop, cli_api, vnfr):
+ def __init__(self, log, loop, cli_api, vnfr, nsd):
self._log = log
self._loop = loop
self._cli_api = cli_api
self._created = False
+ self.nsd = nsd
+
@property
def vnfd(self):
return rift2openmano.RiftVNFD(self._vnfr.vnfd)
@property
def openmano_vnfd(self):
self._log.debug("Converting vnfd %s from rift to openmano", self.vnfd.id)
- openmano_vnfd = rift2openmano.rift2openmano_vnfd(self.vnfd)
+ openmano_vnfd = rift2openmano.rift2openmano_vnfd(self.vnfd, self.nsd)
return openmano_vnfd
@property
@asyncio.coroutine
def add_vlr(self, vlr):
self._vlrs.append(vlr)
+ yield from self._publisher.publish_vlr(None, vlr.vlr_msg)
yield from asyncio.sleep(1, loop=self._loop)
@asyncio.coroutine
self._cli_api.ns_vim_network_delete,
vlr.name,
vlr.om_datacenter_name)
+ yield from self._publisher.unpublish_vlr(None, vlr.vlr_msg)
yield from asyncio.sleep(1, loop=self._loop)
@asyncio.coroutine
def add_vnfr(self, vnfr):
- vnfr = OpenmanoVnfr(self._log, self._loop, self._cli_api, vnfr)
+ vnfr = OpenmanoVnfr(self._log, self._loop, self._cli_api, vnfr, nsd=self.nsd)
yield from vnfr.create()
self._vnfrs.append(vnfr)
self._cli_api = None
self._http_api = None
self._openmano_nsrs = {}
+ self._vnfr_uptime_tasks = {}
self._set_ro_account(ro_account)
self._log.debug("Attempting to publish openmano vnf: %s", vnfr_msg)
with self._dts.transaction() as xact:
yield from self._publisher.publish_vnfr(xact, vnfr_msg)
+ self._log.debug("Creating a task to update uptime for vnfr: %s", vnfr.id)
+ self._vnfr_uptime_tasks[vnfr.id] = self._loop.create_task(self.vnfr_uptime_update(vnfr))
+
+    def vnfr_uptime_update(self, vnfr):
+        """Coroutine: republish the uptime of the given VNFR every 2 seconds.
+
+        A task running this coroutine is created right after the VNFR is
+        first published and is cancelled when the VNFR is terminated
+        (see the terminate path that cancels _vnfr_uptime_tasks entries).
+        """
+        try:
+            # Partial VNFR message carrying only the id; uptime is refreshed
+            # on every iteration before publishing.
+            vnfr_ = RwVnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr.from_dict({'id': vnfr.id})
+            while True:
+                vnfr_.uptime = int(time.time()) - vnfr._create_time
+                yield from self._publisher.publish_vnfr(None, vnfr_)
+                yield from asyncio.sleep(2, loop=self._loop)
+        except asyncio.CancelledError:
+            self._log.debug("Received cancellation request for vnfr_uptime_update task")
@asyncio.coroutine
def instantiate_vl(self, nsr, vlr):
"""
nsr_id = nsr.id
openmano_nsr = self._openmano_nsrs[nsr_id]
+
yield from openmano_nsr.terminate()
yield from openmano_nsr.delete()
"""
Terminate the network service
"""
- pass
+ if vnfr.id in self._vnfr_uptime_tasks:
+ self._vnfr_uptime_tasks[vnfr.id].cancel()
@asyncio.coroutine
def terminate_vl(self, vlr):
# Update the NSR's config status
new_status = ROConfigManager.map_config_status(cm_nsr['state'])
- self._log.debug("Updating config status of NSR {} to {}({})".
- format(nsrid, new_status, cm_nsr['state']))
- yield from self.nsm.nsrs[nsrid].set_config_status(new_status, cm_nsr.get('state_details'))
+ self._log.info("Updating config status of NSR {} to {}({})".
+ format(nsrid, new_status, cm_nsr['state']))
+
+                # If a terminate-nsr request arrives while NS instantiation is still in the
+                # 'Configuring' state, self.nsm.nsrs may already be empty by the time
+                # set_config_status would run, so guard the lookup below.
+ if nsrid in self.nsm.nsrs:
+ yield from self.nsm.nsrs[nsrid].set_config_status(new_status, cm_nsr.get('state_details'))
except Exception as e:
self._log.error("Failed to process cm-state for nsr {}: {}".
class VirtualLinkRecord(object):
""" Virtual Link Records class"""
+ XPATH = "D,/vlr:vlr-catalog/vlr:vlr"
@staticmethod
@asyncio.coroutine
def create_record(dts, log, loop, nsr_name, vld_msg, cloud_account_name, om_datacenter, ip_profile, nsr_id, restart_mode=False):
self._vlr_id = str(uuid.uuid4())
self._state = VlRecordState.INIT
self._prev_state = None
+ self._create_time = int(time.time())
@property
def xpath(self):
"nsr_id_ref": self._nsr_id,
"vld_ref": self.vld_msg.id,
"name": self.name,
+ "create_time": self._create_time,
"cloud_account": self.cloud_account_name,
"om_datacenter": self.om_datacenter_name,
}
@asyncio.coroutine
def instantiate(self):
""" Instantiate this VL """
-
self._log.debug("Instaniating VLR key %s, vld %s",
self.xpath, self._vld_msg)
vlr = None
self._group_instance_id = group_instance_id
self._placement_groups = placement_groups
self._config_status = NsrYang.ConfigStates.INIT
+ self._create_time = int(time.time())
self._prev_state = VnfRecordState.INIT
self._state = VnfRecordState.INIT
vnfr_dict = {
"id": self.id,
"nsr_id_ref": self._nsr_id,
- "vnfd_ref": self.vnfd.id,
"name": self.name,
"cloud_account": self._cloud_account_name,
"om_datacenter": self._om_datacenter_name,
vnfr_dict.update(vnfd_copy_dict)
vnfr = RwVnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr.from_dict(vnfr_dict)
+
+ vnfr.vnfd = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_Vnfd.from_dict(self.vnfd.as_dict())
vnfr.member_vnf_index_ref = self.member_vnf_index
vnfr.vnf_configuration.from_dict(self._vnfd.vnf_configuration.as_dict())
cpr.vlr_ref = vlr_ref.id
self.vnfr_msg.connection_point.append(cpr)
self._log.debug("Connection point [%s] added, vnf id=%s vnfd id=%s",
- cpr, self.vnfr_msg.id, self.vnfr_msg.vnfd_ref)
+ cpr, self.vnfr_msg.id, self.vnfr_msg.vnfd.id)
if not self.restart_mode:
yield from self._dts.query_create(self.xpath,
""" Network service record """
XPATH = "D,/nsr:ns-instance-opdata/nsr:nsr"
- def __init__(self, dts, log, loop, nsm, nsm_plugin, nsr_cfg_msg, sdn_account_name, key_pairs, restart_mode=False):
+ def __init__(self, dts, log, loop, nsm, nsm_plugin, nsr_cfg_msg, sdn_account_name, key_pairs, restart_mode=False,
+ vlr_handler=None):
self._dts = dts
self._log = log
self._loop = loop
self._nsr_cfg_msg = nsr_cfg_msg
self._nsm_plugin = nsm_plugin
self._sdn_account_name = sdn_account_name
+ self._vlr_handler = vlr_handler
self._nsd = None
self._nsr_msg = None
self._is_active = False
self._vl_phase_completed = False
self._vnf_phase_completed = False
+ self.vlr_uptime_tasks = {}
# Initalise the state to init
for vlr in self._vlrs:
yield from self.nsm_plugin.instantiate_vl(self, vlr)
vlr.state = VlRecordState.ACTIVE
+ self.vlr_uptime_tasks[vlr.id] = self._loop.create_task(self.vlr_uptime_update(vlr))
+
+
+    def vlr_uptime_update(self, vlr):
+        """Coroutine: republish the uptime of the given VLR every 2 seconds.
+
+        The task is created when the VL becomes ACTIVE and cancelled when
+        the VL is terminated; on cancellation the published VLR record is
+        deleted from the operational datastore.
+        """
+        try:
+
+            # Partial VLR message carrying only the id; uptime is refreshed
+            # on every iteration before the update is published.
+            vlr_ = RwVlrYang.YangData_Vlr_VlrCatalog_Vlr.from_dict({'id': vlr.id})
+            while True:
+                vlr_.uptime = int(time.time()) - vlr._create_time
+                yield from self._vlr_handler.update(None, VirtualLinkRecord.vlr_xpath(vlr), vlr_)
+                yield from asyncio.sleep(2, loop=self._loop)
+        except asyncio.CancelledError:
+            self._log.debug("Received cancellation request for vlr_uptime_update task")
+            yield from self._vlr_handler.delete(None, VirtualLinkRecord.vlr_xpath(vlr))
+
@asyncio.coroutine
def create(self, config_xact):
""" This function creates VLs for every VLD in the NSD
associated with this NSR"""
for vld in self.nsd_msg.vld:
+
self._log.debug("Found vld %s in nsr id %s", vld, self.id)
cloud_account_list = self._extract_cloud_accounts_for_vl(vld)
for cloud_account,om_datacenter in cloud_account_list:
for vlr in self.vlrs:
yield from self.nsm_plugin.terminate_vl(vlr)
vlr.state = VlRecordState.TERMINATED
+ if vlr.id in self.vlr_uptime_tasks:
+ self.vlr_uptime_tasks[vlr.id].cancel()
self._log.debug("Terminating network service id %s", self.id)
nsr.config_status = self.map_config_status()
nsr.config_status_details = self._config_status_details
nsr.create_time = self._create_time
+ nsr.uptime = int(time.time()) - self._create_time
for cfg_prim in self.nsd_msg.service_primitive:
cfg_prim = NsrYang.YangData_Nsr_NsInstanceOpdata_Nsr_ServicePrimitive.from_dict(
nsr_msg,
sdn_account_name,
key_pairs,
- restart_mode=restart_mode
+ restart_mode=restart_mode,
+ vlr_handler=self._ro_plugin_selector._records_publisher._vlr_pub_hdlr
)
self._nsrs[nsr_msg.id] = nsr
nsm_plugin.create_nsr(nsr_msg, nsr_msg.nsd, key_pairs)
def create_virtual_compute(self, req_params):
#rc, rsp = self._rwcal.get_vdu_list(self._account)
self._log.debug("Calling get_vdu_list API")
+
rc, rsp = yield from self._loop.run_in_executor(self._executor,
self._rwcal.get_vdu_list,
self._account)
params = RwcalYang.VDUInitParams()
params.from_dict(req_params.as_dict())
- image_checksum = req_params.image_checksum if req_params.has_field("image_checksum") else None
- params.image_id = yield from self.get_image_id_from_image_info(req_params.image_name, image_checksum)
+ if 'image_name' in req_params:
+ image_checksum = req_params.image_checksum if req_params.has_field("image_checksum") else None
+ params.image_id = yield from self.get_image_id_from_image_info(req_params.image_name, image_checksum)
#rc, rs = self._rwcal.create_vdu(self._account, params)
self._log.debug("Calling create_vdu API with params %s" %(str(params)))
class Resource(object):
- def __init__(self, resource_id, resource_type):
+ def __init__(self, resource_id, resource_type, request):
self._id = resource_id
self._type = resource_type
+ self._request = request
@property
def resource_id(self):
def resource_type(self):
return self._type
+ @property
+ def request(self):
+ return self._request
+
def cleanup(self):
pass
class ComputeResource(Resource):
- def __init__(self, resource_id, resource_type):
- super(ComputeResource, self).__init__(resource_id, resource_type)
+ pass
class NetworkResource(Resource):
- def __init__(self, resource_id, resource_type):
- super(NetworkResource, self).__init__(resource_id, resource_type)
+ pass
class ResourcePoolInfo(object):
if resource_id in self._all_resources:
self._log.error("Resource with id %s name %s of type %s is already used", resource_id, request.name, resource_type)
raise ResMgrNoResourcesAvailable("Resource with name %s of type network is already used" %(resource_id))
- resource = self._resource_class(resource_id, resource_type)
+ resource = self._resource_class(resource_id, resource_type, request)
self._all_resources[resource_id] = resource
self._allocated_resources[resource_id] = resource
self._log.info("Successfully allocated virtual-network resource from CAL with resource-id: %s", resource_id)
def allocate_dynamic_resource(self, request):
#request.flavor_id = yield from self.select_resource_flavor(request)
resource_id = yield from self._cal.create_virtual_compute(request)
- resource = self._resource_class(resource_id, 'dynamic')
+ resource = self._resource_class(resource_id, 'dynamic', request)
self._all_resources[resource_id] = resource
self._allocated_resources[resource_id] = resource
self._log.info("Successfully allocated virtual-compute resource from CAL with resource-id: %s", resource_id)
@asyncio.coroutine
def get_resource_info(self, resource):
info = yield from self._cal.get_virtual_compute_info(resource.resource_id)
+
self._log.info("Successfully retrieved virtual-compute information from CAL with resource-id: %s. Info: %s",
resource.resource_id, str(info))
response = RwResourceMgrYang.VDUEventData_ResourceInfo()
return info
def _get_resource_state(self, resource_info, requested_params):
+
+
+        def conn_pts_len_equal():
+            # If an explicit mgmt network is defined, the number of allocated
+            # ports might be one more than the number requested.
+            allocated_ports = len(resource_info.connection_points)
+            requested_ports = len(requested_params.connection_points)
+
+            # NOTE(review): when no explicit mgmt network was requested, one
+            # allocated port is presumably the implicitly added mgmt port —
+            # discount it before comparing.  Confirm against CAL behavior.
+            if not requested_params.mgmt_network:
+                allocated_ports -= 1
+
+            return allocated_ports == requested_ports
+
if resource_info.state == 'failed':
self._log.error("<Compute-Resource: %s> Reached failed state.",
resource_info.name)
resource_info.name, requested_params)
return 'pending'
- if(len(requested_params.connection_points) !=
- len(resource_info.connection_points)):
+ if not conn_pts_len_equal():
self._log.warning("<Compute-Resource: %s> Waiting for requested number of ports to be assigned to virtual-compute, requested: %d, assigned: %d",
resource_info.name,
len(requested_params.connection_points),
return r_info
self._resource_table[event_id] = (r_id, cloud_account_name, resource.pool_name)
- new_resource = pool._resource_class(r_id, 'dynamic')
+ new_resource = pool._resource_class(r_id, 'dynamic', request)
if resource_type == 'compute':
requested_params = RwcalYang.VDUInitParams()
requested_params.from_dict(request.as_dict())
def monitor_vdu_state(response_xpath, pathentry):
self._log.info("Initiating VDU state monitoring for xpath: %s ", response_xpath)
- loop_cnt = 180
+ time_to_wait = 300
+ sleep_time = 2
+ loop_cnt = int(time_to_wait/sleep_time)
for i in range(loop_cnt):
- self._log.debug("VDU state monitoring for xpath: %s. Sleeping for 1 second", response_xpath)
- yield from asyncio.sleep(1, loop = self._loop)
+ self._log.debug("VDU state monitoring for xpath: %s. Sleeping for 2 second", response_xpath)
+ yield from asyncio.sleep(2, loop = self._loop)
try:
response_info = yield from self._parent.read_virtual_compute_info(pathentry.key00.event_id)
except Exception as e:
return
else:
### End of loop. This is only possible if VDU did not reach active state
- err_msg = "VDU state monitoring: VDU at xpath :{} did not reached active state in {} seconds. Aborting monitoring".format(response_xpath, loop_cnt)
+ err_msg = "VDU state monitoring: VDU at xpath :{} did not reached active state in {} seconds. Aborting monitoring".format(response_xpath, time_to_wait)
self._log.info(err_msg)
response_info = RwResourceMgrYang.VDUEventData_ResourceInfo()
response_info.resource_state = 'failed'
import rift.tasklets
import rift.package.store
import rift.package.cloud_init
+import rift.mano.dts as mano_dts
class VMResourceError(Exception):
vdud,
vnfr,
mgmt_intf,
+ mgmt_network,
cloud_account_name,
vnfd_package_store,
vdur_id=None,
self._mgmt_intf = mgmt_intf
self._cloud_account_name = cloud_account_name
self._vnfd_package_store = vnfd_package_store
+ self._mgmt_network = mgmt_network
self._vdur_id = vdur_id or str(uuid.uuid4())
self._int_intf = []
@property
def image_name(self):
""" name that should be used to lookup the image on the CMP """
+ if 'image' not in self._vdud:
+ return None
return os.path.basename(self._vdud.image)
@property
"vswitch_epa",
"hypervisor_epa",
"host_epa",
+ "volumes",
"name"]
vdu_copy_dict = {k: v for k, v in
self._vdud.as_dict().items() if k in vdu_fields}
}
if self.vm_resp is not None:
vdur_dict.update({"vim_id": self.vm_resp.vdu_id,
- "flavor_id": self.vm_resp.flavor_id,
- "image_id": self.vm_resp.image_id,
+ "flavor_id": self.vm_resp.flavor_id
})
+ if self._vm_resp.has_field('image_id'):
+ vdur_dict.update({ "image_id": self.vm_resp.image_id })
if self.management_ip is not None:
vdur_dict["management_ip"] = self.management_ip
vdur_dict.update(vdu_copy_dict)
+ if self.vm_resp is not None:
+ if self._vm_resp.has_field('volumes'):
+ for opvolume in self._vm_resp.volumes:
+ vdurvol_data = [vduvol for vduvol in vdur_dict['volumes'] if vduvol['name'] == opvolume.name]
+ if len(vdurvol_data) == 1:
+ vdurvol_data[0]["volume_id"] = opvolume.volume_id
+
icp_list = []
ii_list = []
placement_groups = []
for group in self._placement_groups:
placement_groups.append(group.as_dict())
-
vdur_dict['placement_groups_info'] = placement_groups
+
return RwVnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_Vdur.from_dict(vdur_dict)
@property
vm_create_msg_dict = {
"name": self.name,
- "image_name": self.image_name,
}
+ if self.image_name is not None:
+ vm_create_msg_dict["image_name"] = self.image_name
+
if self.image_checksum is not None:
vm_create_msg_dict["image_checksum"] = self.image_checksum
if config is not None:
vm_create_msg_dict['vdu_init'] = {'userdata': config}
+ if self._mgmt_network:
+ vm_create_msg_dict['mgmt_network'] = self._mgmt_network
+
cp_list = []
for intf, cp, vlr in self._ext_intf:
cp_info = {"name": cp,
msg.event_id = self._request_id
msg.cloud_account = self.cloud_account_name
msg.request_info.from_dict(vm_create_msg_dict)
+
+ for volume in self._vdud.volumes:
+ v = msg.request_info.volumes.add()
+ v.from_dict(volume.as_dict())
return msg
@asyncio.coroutine
class VirtualNetworkFunctionRecord(object):
""" Virtual Network Function Record """
- def __init__(self, dts, log, loop, cluster_name, vnfm, vcs_handler, vnfr_msg):
+ def __init__(self, dts, log, loop, cluster_name, vnfm, vcs_handler, vnfr_msg, mgmt_network=None):
self._dts = dts
self._log = log
self._loop = loop
self._cluster_name = cluster_name
self._vnfr_msg = vnfr_msg
self._vnfr_id = vnfr_msg.id
- self._vnfd_id = vnfr_msg.vnfd_ref
+ self._vnfd_id = vnfr_msg.vnfd.id
self._vnfm = vnfm
self._vcs_handler = vcs_handler
self._vnfr = vnfr_msg
+ self._mgmt_network = mgmt_network
- self._vnfd = None
+ self._vnfd = vnfr_msg.vnfd
self._state = VirtualNetworkFunctionRecordState.INIT
self._state_failed_reason = None
self._ext_vlrs = {} # The list of external virtual links
self._vnf_mon = None
self._config_status = vnfr_msg.config_status
self._vnfd_package_store = rift.package.store.VnfdPackageFilesystemStore(self._log)
+ self._rw_vnfd = None
+ self._vnfd_ref_count = 0
def _get_vdur_from_vdu_id(self, vdu_id):
self._log.debug("Finding vdur for vdu_id %s", vdu_id)
"FAILED": "failed", }
return op_status_map[self._state.name]
- @property
- def vnfd_xpath(self):
+ @staticmethod
+ def vnfd_xpath(vnfd_id):
""" VNFD xpath associated with this VNFR """
- return("C,/vnfd:vnfd-catalog/"
- "vnfd:vnfd[vnfd:id = '{}']".format(self._vnfd_id))
+ return "C,/vnfd:vnfd-catalog/vnfd:vnfd[vnfd:id = '{}']".format(vnfd_id)
+
+ @property
+ def vnfd_ref_count(self):
+ """ Returns the VNFD reference count associated with this VNFR """
+ return self._vnfd_ref_count
+
+    def vnfd_in_use(self):
+        """Return True if any reference is currently held on this VNFR's VNFD."""
+        # Direct boolean expression instead of 'True if ... else False'.
+        return self._vnfd_ref_count > 0
+
+ def vnfd_ref(self):
+ """ Take a reference on this object """
+ self._vnfd_ref_count += 1
+ return self._vnfd_ref_count
+
+    def vnfd_unref(self):
+        """Release a reference on this VNFR's VNFD and return the new count.
+
+        Raises VnfRecordError on underflow, i.e. an unref without a
+        matching earlier vnfd_ref().
+        """
+        if self._vnfd_ref_count < 1:
+            msg = ("Unref on a VNFD object - vnfd id %s, vnfd_ref_count = %s" %
+                   (self.vnfd.id, self._vnfd_ref_count))
+            self._log.critical(msg)
+            raise VnfRecordError(msg)
+        self._log.debug("Releasing ref on VNFD %s - curr vnfd_ref_count:%s",
+                        self.vnfd.id, self._vnfd_ref_count)
+        self._vnfd_ref_count -= 1
+        return self._vnfd_ref_count
@property
def vnfd(self):
def mgmt_intf_info(self):
""" Get Management interface info for this VNFR """
- mgmt_intf_desc = self.vnfd.msg.mgmt_interface
+ mgmt_intf_desc = self.vnfd.mgmt_interface
ip_addr = None
if mgmt_intf_desc.has_field("cp"):
ip_addr = self.cp_ip_addr(mgmt_intf_desc.cp)
def msg(self):
""" Message associated with this VNFR """
vnfd_fields = ["short_name", "vendor", "description", "version"]
- vnfd_copy_dict = {k: v for k, v in self.vnfd.msg.as_dict().items() if k in vnfd_fields}
+ vnfd_copy_dict = {k: v for k, v in self.vnfd.as_dict().items() if k in vnfd_fields}
mgmt_intf = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_MgmtInterface()
ip_address, port = self.mgmt_intf_info()
"nsr_id_ref": self._vnfr_msg.nsr_id_ref,
"name": self.name,
"member_vnf_index_ref": self.member_vnf_index,
- "vnfd_ref": self.vnfd_id,
"operational_status": self.operational_status,
"operational_status_details": self._state_failed_reason,
"cloud_account": self.cloud_account_name,
vnfr_dict.update(vnfd_copy_dict)
vnfr_msg = RwVnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr.from_dict(vnfr_dict)
+ vnfr_msg.vnfd = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_Vnfd.from_dict(self.vnfd.as_dict())
+
+ vnfr_msg.create_time = self._create_time
+ vnfr_msg.uptime = int(time.time()) - self._create_time
vnfr_msg.mgmt_interface = mgmt_intf
# Add all the VLRs to VNFR
vdur = vnfr_msg.vdur.add()
vdur.from_dict(vdu.msg.as_dict())
- if self.vnfd.msg.mgmt_interface.has_field('dashboard_params'):
+ if self.vnfd.mgmt_interface.has_field('dashboard_params'):
vnfr_msg.dashboard_url = self.dashboard_url
for cpr in self._cprs:
ip, cfg_port = self.mgmt_intf_info()
protocol = 'http'
http_port = 80
- if self.vnfd.msg.mgmt_interface.dashboard_params.has_field('https'):
- if self.vnfd.msg.mgmt_interface.dashboard_params.https is True:
+ if self.vnfd.mgmt_interface.dashboard_params.has_field('https'):
+ if self.vnfd.mgmt_interface.dashboard_params.https is True:
protocol = 'https'
http_port = 443
- if self.vnfd.msg.mgmt_interface.dashboard_params.has_field('port'):
- http_port = self.vnfd.msg.mgmt_interface.dashboard_params.port
+ if self.vnfd.mgmt_interface.dashboard_params.has_field('port'):
+ http_port = self.vnfd.mgmt_interface.dashboard_params.port
url = "{protocol}://{ip_address}:{port}/{path}".format(
protocol=protocol,
ip_address=ip,
port=http_port,
- path=self.vnfd.msg.mgmt_interface.dashboard_params.path.lstrip("/"),
+ path=self.vnfd.mgmt_interface.dashboard_params.path.lstrip("/"),
)
return url
""" Publish The VLs associated with this VNF """
self._log.debug("Publishing Internal Virtual Links for vnfd id: %s",
self.vnfd_id)
- for ivld_msg in self.vnfd.msg.internal_vld:
+ for ivld_msg in self.vnfd.internal_vld:
self._log.debug("Creating internal vld:"
" %s, int_cp_ref = %s",
ivld_msg, ivld_msg.internal_connection_point
nsr_config = yield from self.get_nsr_config()
### Step-3: Get VDU level placement groups
- for group in self.vnfd.msg.placement_groups:
+ for group in self.vnfd.placement_groups:
for member_vdu in group.member_vdus:
if member_vdu.member_vdu_ref == vdu.id:
group_info = self.resolve_placement_group_cloud_construct(group,
self._log.info("Creating VDU's for vnfd id: %s", self.vnfd_id)
- for vdu in self.vnfd.msg.vdu:
+ for vdu in self._rw_vnfd.vdu:
self._log.debug("Creating vdu: %s", vdu)
vdur_id = get_vdur_id(vdu)
vdud=vdu,
vnfr=vnfr,
mgmt_intf=self.has_mgmt_interface(vdu),
+ mgmt_network=self._mgmt_network,
cloud_account_name=self.cloud_account_name,
vnfd_package_store=self._vnfd_package_store,
vdur_id=vdur_id,
datastore.add(vdu)
# Substitute any variables contained in the cloud config script
- config = str(vdu.vdud_cloud_init)
+ config = str(vdu.vdud_cloud_init) if vdu.vdud_cloud_init is not None else ""
parts = re.split("\{\{ ([^\}]+) \}\}", config)
if len(parts) > 1:
def has_mgmt_interface(self, vdu):
# ## TODO: Support additional mgmt_interface type options
- if self.vnfd.msg.mgmt_interface.vdu_id == vdu.id:
+ if self.vnfd.mgmt_interface.vdu_id == vdu.id:
return True
return False
""" Publish the inventory associated with this VNF """
self._log.debug("Publishing inventory for VNFR id: %s", self._vnfr_id)
- for component in self.vnfd.msg.component:
+ for component in self._rw_vnfd.component:
self._log.debug("Creating inventory component %s", component)
mangled_name = VcsComponent.mangle_name(component.component_name,
self.vnf_name,
def instantiate(self, xact, restart_mode=False):
""" instantiate this VNF """
self.set_state(VirtualNetworkFunctionRecordState.VL_INIT_PHASE)
+ self._rw_vnfd = yield from self._vnfm.fetch_vnfd(self._vnfd_id)
@asyncio.coroutine
def fetch_vlrs():
cpr.vlr_ref = cp.vlr_ref
self._log.debug("Fetched VLR [%s] with path = [%s]", d, vlr_path)
- # Fetch the VNFD associated with the VNFR
- self._log.debug("VNFR-ID %s: Fetching vnfds", self._vnfr_id)
- self._vnfd = yield from self._vnfm.get_vnfd_ref(self._vnfd_id)
- self._log.debug("VNFR-ID %s: Fetched vnfd:%s", self._vnfr_id, self._vnfd)
+ # Increase the VNFD reference count
+ self.vnfd_ref()
- assert self.vnfd is not None
+ assert self.vnfd
# Fetch External VLRs
self._log.debug("VNFR-ID %s: Fetching vlrs", self._vnfr_id)
self._log.debug("VNFR-ID %s: Instantiation Done", self._vnfr_id)
+ # create task updating uptime for this vnfr
+ self._log.debug("VNFR-ID %s: Starting task to update uptime", self._vnfr_id)
+ self._loop.create_task(self.vnfr_uptime_update(xact))
+
@asyncio.coroutine
def terminate(self, xact):
""" Terminate this virtual network function """
self._log.debug("Terminated VNF id %s", self.vnfr_id)
self.set_state(VirtualNetworkFunctionRecordState.TERMINATED)
+    @asyncio.coroutine
+    def vnfr_uptime_update(self, xact):
+        """Republish this VNFR every 2 seconds so its uptime stays current.
+
+        Exits as soon as the record leaves the set of live states.
+        """
+        while True:
+            # Return when vnfr state is FAILED or TERMINATED etc
+            if self._state not in [VirtualNetworkFunctionRecordState.INIT,
+                                   VirtualNetworkFunctionRecordState.VL_INIT_PHASE,
+                                   VirtualNetworkFunctionRecordState.VM_INIT_PHASE,
+                                   VirtualNetworkFunctionRecordState.READY]:
+                return
+            yield from self.publish(xact)
+            yield from asyncio.sleep(2, loop=self._loop)
+
+
class VnfdDtsHandler(object):
""" DTS handler for VNFD config changes """
xact, action, scratch)
is_recovery = xact.xact is None and action == rwdts.AppconfAction.INSTALL
- # Create/Update a VNFD record
- for cfg in self._regh.get_xact_elements(xact):
- # Only interested in those VNFD cfgs whose ID was received in prepare callback
- if cfg.id in scratch.get('vnfds', []) or is_recovery:
- self._vnfm.update_vnfd(cfg)
-
- scratch.pop('vnfds', None)
@asyncio.coroutine
def on_prepare(dts, acg, xact, xact_info, ks_path, msg, scratch):
fref = ProtobufC.FieldReference.alloc()
fref.goto_whole_message(msg.to_pbcm())
- # Handle deletes in prepare_callback, but adds/updates in apply_callback
+ # Handle deletes in prepare_callback
if fref.is_field_deleted():
# Delete an VNFD record
self._log.debug("Deleting VNFD with id %s", msg.id)
raise VirtualNetworkFunctionDescriptorRefCountExists(err)
# Delete a VNFD record
yield from self._vnfm.delete_vnfd(msg.id)
- else:
- # Handle actual adds/updates in apply_callback,
- # just check if VNFD in use in prepare_callback
- if self._vnfm.vnfd_in_use(msg.id):
- self._log.debug("Cannot modify an VNFD in use - %s", msg)
- err = "Cannot modify an VNFD in use - %s" % msg
- raise VirtualNetworkFunctionDescriptorRefCountExists(err)
-
- # Add this VNFD to scratch to create/update in apply callback
- vnfds = scratch.setdefault('vnfds', [])
- vnfds.append(msg.id)
xact_info.respond_xpath(rwdts.XactRspCode.ACK)
)
if action == rwdts.QueryAction.CREATE:
- if not msg.has_field("vnfd_ref"):
- err = "Vnfd reference not provided"
+ if not msg.has_field("vnfd"):
+ err = "Vnfd not provided"
self._log.error(err)
raise VnfRecordError(err)
try:
yield from vnfr.terminate(xact_info.xact)
# Unref the VNFD
- vnfr.vnfd.unref()
+ vnfr.vnfd_unref()
yield from self._vnfm.delete_vnfr(xact_info.xact, vnfr)
except Exception as e:
self._log.exception(e)
self._log.debug("Deleted VNFR xact = %s, %s", xact, path)
-class VirtualNetworkFunctionDescriptor(object):
- """
- Virtual Network Function descriptor class
- """
-
- def __init__(self, dts, log, loop, vnfm, vnfd):
- self._dts = dts
- self._log = log
- self._loop = loop
-
- self._vnfm = vnfm
- self._vnfd = vnfd
- self._ref_count = 0
-
- @property
- def ref_count(self):
- """ Returns the reference count associated with
- this Virtual Network Function Descriptor"""
- return self._ref_count
-
- @property
- def id(self):
- """ Returns vnfd id """
- return self._vnfd.id
-
- @property
- def name(self):
- """ Returns vnfd name """
- return self._vnfd.name
-
- def in_use(self):
- """ Returns whether vnfd is in use or not """
- return True if self._ref_count > 0 else False
-
- def ref(self):
- """ Take a reference on this object """
- self._ref_count += 1
- return self._ref_count
-
- def unref(self):
- """ Release reference on this object """
- if self.ref_count < 1:
- msg = ("Unref on a VNFD object - vnfd id %s, ref_count = %s" %
- (self.id, self._ref_count))
- self._log.critical(msg)
- raise VnfRecordError(msg)
- self._log.debug("Releasing ref on VNFD %s - curr ref_count:%s",
- self.id, self.ref_count)
- self._ref_count -= 1
- return self._ref_count
-
- @property
- def msg(self):
- """ Return the message associated with this NetworkServiceDescriptor"""
- return self._vnfd
-
- @staticmethod
- def path_for_id(vnfd_id):
- """ Return path for the passed vnfd_id"""
- return "C,/vnfd:vnfd-catalog/vnfd:vnfd[vnfd:id = '{}']".format(vnfd_id)
-
- def path(self):
- """ Return the path associated with this NetworkServiceDescriptor"""
- return VirtualNetworkFunctionDescriptor.path_for_id(self.id)
-
- def update(self, vnfd):
- """ Update the Virtual Network Function Descriptor """
- if self.in_use():
- self._log.error("Cannot update descriptor %s in use refcnt=%d",
- self.id, self.ref_count)
-
- # The following loop is added to debug RIFT-13284
- for vnf_rec in self._vnfm._vnfrs.values():
- if vnf_rec.vnfd_id == self.id:
- self._log.error("descriptor %s in used by %s:%s",
- self.id, vnf_rec.vnfr_id, vnf_rec.msg)
- raise VirtualNetworkFunctionDescriptorRefCountExists("Cannot update descriptor in use %s" % self.id)
- self._vnfd = vnfd
-
- def delete(self):
- """ Delete the Virtual Network Function Descriptor """
- if self.in_use():
- self._log.error("Cannot delete descriptor %s in use refcnt=%d",
- self.id)
-
- # The following loop is added to debug RIFT-13284
- for vnf_rec in self._vnfm._vnfrs.values():
- if vnf_rec.vnfd_id == self.id:
- self._log.error("descriptor %s in used by %s:%s",
- self.id, vnf_rec.vnfr_id, vnf_rec.msg)
- raise VirtualNetworkFunctionDescriptorRefCountExists("Cannot delete descriptor in use %s" % self.id)
- self._vnfm.delete_vnfd(self.id)
-
-
class VnfdRefCountDtsHandler(object):
""" The VNFD Ref Count DTS handler """
XPATH = "D,/vnfr:vnfr-catalog/rw-vnfr:vnfd-ref-count"
self._vcs_handler = VcsComponentDtsHandler(dts, log, loop, self)
self._vnfr_handler = VnfrDtsHandler(dts, log, loop, self)
+ self._vnfr_ref_handler = VnfdRefCountDtsHandler(dts, log, loop, self)
+ self._nsr_handler = mano_dts.NsInstanceConfigSubscriber(log, dts, loop, callback=self.handle_nsr)
self._dts_handlers = [VnfdDtsHandler(dts, log, loop, self),
self._vnfr_handler,
self._vcs_handler,
- VnfdRefCountDtsHandler(dts, log, loop, self)]
+ self._vnfr_ref_handler,
+ self._nsr_handler]
self._vnfrs = {}
- self._vnfds = {}
+ self._vnfds_to_vnfr = {}
+ self._nsrs = {}
@property
def vnfr_handler(self):
self._log.debug("Run VNFManager - registering static DTS handlers""")
yield from self.register()
+ def handle_nsr(self, nsr, action):
+ if action in [rwdts.QueryAction.CREATE]:
+ self._nsrs[nsr.id] = nsr
+ elif action == rwdts.QueryAction.DELETE:
+ if nsr.id in self._nsrs:
+ del self._nsrs[nsr.id]
+
+ def get_linked_mgmt_network(self, vnfr):
+ """For the given VNFR get the related mgmt network from the NSD, if
+ available.
+ """
+ vnfd_id = vnfr.vnfd.id
+ nsr_id = vnfr.nsr_id_ref
+
+ # for the given related VNFR, get the corresponding NSR-config
+ nsr_obj = None
+ try:
+ nsr_obj = self._nsrs[nsr_id]
+ except KeyError:
+            raise VnfRecordError("Unable to find the NS with the ID: {}".format(nsr_id))
+
+ # for the related NSD check if a VLD exists such that it's a mgmt
+ # network
+ for vld in nsr_obj.nsd.vld:
+ if vld.mgmt_network:
+ return vld.name
+
+ return None
+
def get_vnfr(self, vnfr_id):
""" get VNFR by vnfr id """
self._log.info("Create VirtualNetworkFunctionRecord %s from vnfd_id: %s",
vnfr.id,
- vnfr.vnfd_ref)
+ vnfr.vnfd.id)
+
+ mgmt_network = self.get_linked_mgmt_network(vnfr)
self._vnfrs[vnfr.id] = VirtualNetworkFunctionRecord(
- self._dts, self._log, self._loop, self._cluster_name, self, self.vcs_handler, vnfr
+ self._dts, self._log, self._loop, self._cluster_name, self, self.vcs_handler, vnfr,
+ mgmt_network=mgmt_network
)
+
+ #Update ref count
+ if vnfr.vnfd.id in self._vnfds_to_vnfr:
+ self._vnfds_to_vnfr[vnfr.vnfd.id] += 1
+ else:
+ self._vnfds_to_vnfr[vnfr.vnfd.id] = 1
+
return self._vnfrs[vnfr.id]
@asyncio.coroutine
if vnfr.vnfr_id in self._vnfrs:
self._log.debug("Deleting VNFR id %s", vnfr.vnfr_id)
yield from self._vnfr_handler.delete(xact, vnfr.xpath)
+
+ if vnfr.vnfd.id in self._vnfds_to_vnfr:
+ if self._vnfds_to_vnfr[vnfr.vnfd.id]:
+ self._vnfds_to_vnfr[vnfr.vnfd.id] -= 1
+
del self._vnfrs[vnfr.vnfr_id]
@asyncio.coroutine
def fetch_vnfd(self, vnfd_id):
""" Fetch VNFDs based with the vnfd id"""
- vnfd_path = VirtualNetworkFunctionDescriptor.path_for_id(vnfd_id)
+ vnfd_path = VirtualNetworkFunctionRecord.vnfd_xpath(vnfd_id)
self._log.debug("Fetch vnfd with path %s", vnfd_path)
vnfd = None
return vnfd
- @asyncio.coroutine
- def get_vnfd_ref(self, vnfd_id):
- """ Get Virtual Network Function descriptor for the passed vnfd_id"""
- vnfd = yield from self.get_vnfd(vnfd_id)
- vnfd.ref()
- return vnfd
-
- @asyncio.coroutine
- def get_vnfd(self, vnfd_id):
- """ Get Virtual Network Function descriptor for the passed vnfd_id"""
- vnfd = None
- if vnfd_id not in self._vnfds:
- self._log.error("Cannot find VNFD id:%s", vnfd_id)
- vnfd = yield from self.fetch_vnfd(vnfd_id)
-
- if vnfd is None:
- self._log.error("Cannot find VNFD id:%s", vnfd_id)
- raise VirtualNetworkFunctionDescriptorError("Cannot find VNFD id:%s", vnfd_id)
-
- if vnfd.id != vnfd_id:
- self._log.error("Bad Recovery state {} found for {}".format(vnfd.id, vnfd_id))
- raise VirtualNetworkFunctionDescriptorError("Bad Recovery state {} found for {}".format(vnfd.id, vnfd_id))
-
- if vnfd.id not in self._vnfds:
- self.create_vnfd(vnfd)
-
- return self._vnfds[vnfd_id]
-
def vnfd_in_use(self, vnfd_id):
""" Is this VNFD in use """
self._log.debug("Is this VNFD in use - msg:%s", vnfd_id)
- if vnfd_id in self._vnfds:
- return self._vnfds[vnfd_id].in_use()
+ if vnfd_id in self._vnfds_to_vnfr:
+ return (self._vnfds_to_vnfr[vnfd_id] > 0)
return False
@asyncio.coroutine
path, msg)
yield from self.vnfr_handler.update(xact, path, msg)
- def create_vnfd(self, vnfd):
- """ Create a virtual network function descriptor """
- self._log.debug("Create virtual networkfunction descriptor - %s", vnfd)
- if vnfd.id in self._vnfds:
- self._log.error("Cannot create VNFD %s -VNFD id already exists", vnfd)
- raise VirtualNetworkFunctionDescriptorError("VNFD already exists-%s", vnfd.id)
-
- self._vnfds[vnfd.id] = VirtualNetworkFunctionDescriptor(self._dts,
- self._log,
- self._loop,
- self,
- vnfd)
- return self._vnfds[vnfd.id]
-
- def update_vnfd(self, vnfd):
- """ update the Virtual Network Function descriptor """
- self._log.debug("Update virtual network function descriptor - %s", vnfd)
-
- if vnfd.id not in self._vnfds:
- self._log.debug("No VNFD found - creating VNFD id = %s", vnfd.id)
- self.create_vnfd(vnfd)
- else:
- self._log.debug("Updating VNFD id = %s, vnfd = %s", vnfd.id, vnfd)
- self._vnfds[vnfd.id].update(vnfd)
-
@asyncio.coroutine
def delete_vnfd(self, vnfd_id):
""" Delete the Virtual Network Function descriptor with the passed id """
self._log.debug("Deleting the virtual network function descriptor - %s", vnfd_id)
- if vnfd_id not in self._vnfds:
- self._log.debug("Delete VNFD failed - cannot find vnfd-id %s", vnfd_id)
- raise VirtualNetworkFunctionDescriptorNotFound("Cannot find %s", vnfd_id)
-
- if self._vnfds[vnfd_id].in_use():
- self._log.debug("Cannot delete VNFD id %s reference exists %s",
- vnfd_id,
- self._vnfds[vnfd_id].ref_count)
- raise VirtualNetworkFunctionDescriptorRefCountExists(
- "Cannot delete :%s, ref_count:%s",
- vnfd_id,
- self._vnfds[vnfd_id].ref_count)
+        if vnfd_id in self._vnfds_to_vnfr:
+            if self._vnfds_to_vnfr[vnfd_id]:
+                self._log.debug("Cannot delete VNFD id %s reference exists %s",
+                                vnfd_id,
+                                self._vnfds_to_vnfr[vnfd_id])
+                raise VirtualNetworkFunctionDescriptorRefCountExists(
+                    "Cannot delete :%s, ref_count:%s",
+                    vnfd_id,
+                    self._vnfds_to_vnfr[vnfd_id])
+
+ del self._vnfds_to_vnfr[vnfd_id]
# Remove any files uploaded with VNFD and stored under $RIFT_ARTIFACTS/libs/<id>
try:
shutil.rmtree(vnfd_dir, ignore_errors=True)
except Exception as e:
self._log.error("Exception in cleaning up VNFD {}: {}".
- format(self._vnfds[vnfd_id].name, e))
+                             format(vnfd_id, e))
self._log.exception(e)
- del self._vnfds[vnfd_id]
def vnfd_refcount_xpath(self, vnfd_id):
""" xpath for ref count entry """
""" Get the vnfd_list from this VNFM"""
vnfd_list = []
if vnfd_id is None or vnfd_id == "":
- for vnfd in self._vnfds.values():
+ for vnfd in self._vnfds_to_vnfr.keys():
+ vnfd_msg = RwVnfrYang.YangData_Vnfr_VnfrCatalog_VnfdRefCount()
+ vnfd_msg.vnfd_id_ref = vnfd
+ vnfd_msg.instance_ref_count = self._vnfds_to_vnfr[vnfd]
+ vnfd_list.append((self.vnfd_refcount_xpath(vnfd), vnfd_msg))
+ elif vnfd_id in self._vnfds_to_vnfr:
vnfd_msg = RwVnfrYang.YangData_Vnfr_VnfrCatalog_VnfdRefCount()
- vnfd_msg.vnfd_id_ref = vnfd.id
- vnfd_msg.instance_ref_count = vnfd.ref_count
- vnfd_list.append((self.vnfd_refcount_xpath(vnfd.id), vnfd_msg))
- elif vnfd_id in self._vnfds:
- vnfd_msg.vnfd_id_ref = self._vnfds[vnfd_id].id
- vnfd_msg.instance_ref_count = self._vnfds[vnfd_id].ref_count
+ vnfd_msg.vnfd_id_ref = vnfd_id
+ vnfd_msg.instance_ref_count = self._vnfds_to_vnfr[vnfd_id]
vnfd_list.append((self.vnfd_refcount_xpath(vnfd_id), vnfd_msg))
return vnfd_list
if vnfr.id not in const_vnfr_ids:
continue
- vnfd = get_vnfd(vnfr.vnfd_ref)
+ vnfd = get_vnfd(vnfr.vnfd.id)
yield vnfd, vnfr
if vnfr.id not in const_vnfr_ids:
continue
- vnfd = get_vnfd(vnfr.vnfd_ref)
+ vnfd = get_vnfd(vnfr.vnfd.id)
yield vnfd, vnfr
def check_configuration_on_standby(standby_ip):