rw.core.rwvx-rwdts=${PLATFORM_VERSION} \
rw.automation.core-RWAUTO=${PLATFORM_VERSION} \
rw.core.rwvx-rwha-1.0=${PLATFORM_VERSION}
+
+ sudo apt-get install python-cinderclient
sudo chmod 777 /usr/rift /usr/rift/usr/share
require_version('RwcalYang', '1.0')
require_version('RwTypes', '1.0')
require_version('RwCloudYang', '1.0')
+require_version('RwCal', '1.0')
from gi.repository import (
RwTypes,
return openmano
-def rift2openmano_vnfd(rift_vnfd):
+def rift2openmano_vnfd(rift_vnfd, rift_nsd):
openmano_vnf = {"vnf":{}}
vnf = openmano_vnf["vnf"]
raise ValueError("Internal connection point reference %s not found" % cp_ref_id)
- def rift2openmano_if_type(rift_type):
+ def rift2openmano_if_type(ext_if):
+
+ cp_ref_name = ext_if.vnfd_connection_point_ref
+ for vld in rift_nsd.vlds:
+
+ # if it is an explicit mgmt_network then check if the given
+ # cp_ref is a part of it
+ if not vld.mgmt_network:
+ continue
+
+ for vld_cp in vld.vnfd_connection_point_ref:
+ if vld_cp.vnfd_connection_point_ref == cp_ref_name:
+ return "mgmt"
+
+
+ rift_type = ext_if.virtual_interface.type_yang
+ # Retaining it for backward compatibility!
if rift_type == "OM_MGMT":
return "mgmt"
elif rift_type == "VIRTIO" or rift_type == "E1000":
vdu, ext_if = find_vdu_and_ext_if_by_cp_ref(cp.name)
connection = {
"name": cp.name,
- "type": rift2openmano_if_type(ext_if.virtual_interface.type_yang),
+ "type": rift2openmano_if_type(ext_if),
"VNFC": vdu.name,
"local_iface_name": ext_if.name,
"description": "%s iface on VDU %s" % (ext_if.name, vdu.name),
"Derived from earlier versions of base YANG files";
}
// Discriminator for custom meta-data values: STRING values are passed through
// as-is; JSON values are decoded before being handed to the VIM
// (see the JSON branch in the CAL's create-vdu metadata handling).
typedef meta-data-type {
    type enumeration {
        enum STRING;
        enum JSON;
    }
}
+
typedef parameter-data-type {
type enumeration {
enum STRING;
}
// Shared image identification leaves, used both by the VNFD image reference
// and by volume-info's image->volume source case.
grouping image-properties {
    leaf image {
        description
            "Image name for the software image.
             If the image name is found within the VNF package it will
             be uploaded to all cloud accounts during onboarding process.
             Otherwise, the image must be added to the cloud account with
             the same name as entered here.
            ";
        type string;
    }

    leaf image-checksum {
        description
            "Image md5sum for the software image.
             The md5sum, if provided, along with the image name uniquely
             identifies an image uploaded to the CAL.
            ";
        type string;
    }
}
+
grouping vnf-configuration {
container vnf-configuration {
rwpb:msg-new VnfConfiguration;
uses ip-profile-info;
}
}
-
+
+ grouping custom-config-files {
+ description "Grouping for files needed to be mounted into an additional drive";
+ list custom-config-files {
+ description
+ "List of configuration files to be written on an additional drive";
+ key "source";
+ leaf source {
+ description "Name of the configuration file";
+ type string;
+ }
+ leaf dest {
+ description "Full path of the destination in the guest";
+ type string;
+ }
+ }
+ }
+
// Per-instance meta-data items; surfaced to the VIM as server/volume metadata.
grouping custom-meta-data {
    description "Grouping for instance-specific meta data";
    list custom-meta-data {
        description
            "List of meta-data to be associated with the instance";
        key "name";
        leaf name {
            description "Name of the meta-data parameter";
            type string;
        }

        leaf data-type {
            description "Data-type of the meta-data parameter";
            type manotypes:meta-data-type;
            default "STRING";
        }

        leaf value {
            description "Value of the meta-data parameter";
            type string;
        }
    }
}
+
+ grouping custom-boot-data {
+ description "Grouping for custom vim data";
+ container custom-boot-data {
+ uses manotypes:custom-config-files;
+ uses manotypes:custom-meta-data;
+ leaf custom-drive {
+ description "Some VIMs implement custom drives to host custom-files or meta-data";
+ type boolean;
+ default false;
+ }
+ }
+ }
+
+ grouping volume-info {
+ description "Grouping for Volume-info";
+
+ leaf description {
+ description "Description for Volume";
+ type string;
+ }
+
+ leaf size {
+ description "Size of disk in GB";
+ type uint64;
+ }
+
+ choice volume-source {
+ description
+ "Defines the source of the volume. Possible options are
+ 1. Ephemeral -- Empty disk
+ 2. Image -- Refer to image to be used for volume
+ 3. Volume -- Reference of pre-existing volume to be used
+ ";
+
+ case ephemeral {
+ leaf ephemeral {
+ type empty;
+ }
+ }
+
+ case image {
+ uses image-properties;
+ }
+
+ case volume {
+ leaf volume-ref {
+ description "Reference for pre-existing volume in VIM";
+ type string;
+ }
+ }
+ }
+
+ container boot-params {
+ leaf boot-volume {
+ description "This flag indicates if this is boot volume or not";
+ type boolean;
+ }
+ leaf boot-priority {
+ description "Boot priority associated with volume";
+ type int32;
+ }
+ }
+
+ container guest-params {
+ description "Guest virtualization parameter associated with volume";
+
+ leaf device_bus {
+ description "Type of disk-bus on which this disk is exposed to guest";
+ type enumeration {
+ enum ide;
+ enum usb;
+ enum virtio;
+ enum scsi;
+ }
+ }
+
+ leaf device_type {
+ description "The type of device as exposed to guest";
+ type enumeration {
+ enum disk;
+ enum cdrom;
+ enum floppy;
+ enum lun;
+ }
+ }
+
+ uses custom-meta-data;
+ }
+ }
}
// replicate for pnfd container here
uses manotypes:provider-network;
+ leaf mgmt-network {
+ description "Flag indicating whether this network is a VIM management network";
+ type boolean;
+ default false;
+ }
+
choice init-params {
description "Extra parameters for VLD instantiation";
type string;
}
}
+
case vim-network-profile {
leaf ip-profile-ref {
description "Named reference to IP-profile object";
type string;
}
- }
+ }
+
}
}
type uint32;
}
+ leaf uptime {
+ description
+ "Active period of this Network Service.
+ Uptime is expressed in seconds";
+
+ type uint32;
+ }
+
list connection-point {
description
"List for external connection points.
type uint32;
}
+ leaf uptime {
+ description
+ "Active period of this Virtual Link.
+ Uptime is expressed in seconds";
+
+ type uint32;
+ }
+
leaf network-id {
description
"Identifier for the allocated network resource.";
}
}
- container vnfd-catalog {
-
- description
- "Virtual Network Function Descriptor (VNFD).";
-
- list vnfd {
- key "id";
-
+ grouping vnfd-descriptor {
leaf id {
description "Identifier for the VNFD.";
type string;
uses manotypes:alarm;
}
- leaf image {
- description
- "Image name for the software image.
- If the image name is found within the VNF packaage it will
- be uploaded to all cloud accounts during onboarding process.
- Otherwise, the image must be added to the cloud account with
- the same name as entered here.
- ";
- mandatory true;
- type string;
- }
-
- leaf image-checksum {
- description
- "Image md5sum for the software image.
- The md5sum, if provided, along with the image name uniquely
- identifies an image uploaded to the CAL.
- ";
- type string;
- }
+ uses manotypes:image-properties;
choice cloud-init-input {
description
}
}
+ uses manotypes:custom-boot-data;
+
list internal-connection-point {
key "id";
description
}
uses virtual-interface;
}
+
+ list volumes {
+ key "name";
+
+ leaf name {
+ description "Name of the disk-volumes, e.g. vda, vdb etc";
+ type string;
+ }
+
+ uses manotypes:volume-info;
+ }
}
list vdu-dependency {
key "name";
uses manotypes:placement-group-info;
-
+
list member-vdus {
description
}
}
}
- }
+ }
+
+ container vnfd-catalog {
+ description
+ "Virtual Network Function Descriptor (VNFD).";
+
+ list vnfd {
+ key "id";
+
+ uses vnfd-descriptor;
+ }
}
}
type uint32;
}
- leaf vnfd-ref {
- description "Reference to VNFD";
- type leafref {
- path "/vnfd:vnfd-catalog/vnfd:vnfd/vnfd:id";
- }
+ leaf uptime {
+ description
+ "Active period of this Virtual Network Function.
+ Uptime is expressed in seconds";
+
+ type uint32;
+ }
+
+ container vnfd {
+ description "VNF descriptor used to instantiate this VNF";
+ uses vnfd:vnfd-descriptor;
}
// Use parameters provided here to configure this VNF
leaf vdu-id-ref {
type leafref {
- path "/vnfd:vnfd-catalog/vnfd:vnfd"
- + "[vnfd:id = current()/../../vnfr:vnfd-ref]"
- + "/vnfd:vdu/vnfd:id";
+ path "../../vnfd/vdu/id";
}
}
uses manotypes:hypervisor-epa;
uses manotypes:host-epa;
+ uses manotypes:custom-boot-data;
+
+ list volumes {
+ key "name";
+
+ leaf name {
+ description "Name of the disk-volumes, e.g. vda, vdb etc";
+ type string;
+ }
+
+ leaf volume-id {
+ description "VIM assigned volume id";
+ type string;
+ }
+
+ uses manotypes:volume-info;
+ }
+
list alarms {
description
"A list of the alarms that have been created for this VDU";
from neutronclient.neutron import client as ntclient
from glanceclient.v2 import client as glclient
from ceilometerclient import client as ceilo_client
+from cinderclient.v2 import client as cinder_client
# Exceptions
import novaclient.exceptions as NovaException
import keystoneclient.exceptions as KeystoneExceptions
import neutronclient.common.exceptions as NeutronException
import glanceclient.exc as GlanceException
+import cinderclient.exceptions as CinderException
logger = logging.getLogger('rwcal.openstack.drv')
logger.setLevel(logging.DEBUG)
"""
return self._tenant_name
+ def get_user_domain_name(self):
+ """
+ Returns None as this field does not exist for v2.
+ """
+ return None;
+
+ def get_project_domain_name(self):
+ """
+ Returns None as this field does not exist for v2.
+ """
+ return None;
+
def _get_keystone_connection(self):
"""
Returns object of class python-keystoneclient class
try:
ksconn = self._get_keystone_connection()
service_endpoint = ksconn.service_catalog.url_for(**endpoint_kwargs)
+ except (KeystoneExceptions.Unauthorized, KeystoneExceptions.AuthorizationFailure) as e:
+ raise
except Exception as e:
logger.error("OpenstackDriver: Service Catalog discovery operation failed for service_type: %s, endpoint_type: %s. Exception: %s" %(service_type, endpoint_type, str(e)))
raise
"""
Driver class for keystoneclient V3 APIs
"""
- def __init__(self, username, password, auth_url,tenant_name, insecure):
+ def __init__(self, username, password, auth_url,tenant_name, insecure, user_domain_name = None, project_domain_name = None):
"""
Constructor for KeystoneDriverV3 class
Arguments:
Returns: None
"""
- self._username = username
- self._password = password
- self._auth_url = auth_url
- self._tenant_name = tenant_name
- self._insecure = insecure
+ self._username = username
+ self._password = password
+ self._auth_url = auth_url
+ self._tenant_name = tenant_name
+ self._insecure = insecure
+ self._user_domain_name = user_domain_name
+ self._project_domain_name = project_domain_name
super(KeystoneDriverV3, self).__init__(ksclientv3.Client)
def _get_keystone_credentials(self):
"""
Returns the dictionary of kwargs required to instantiate python-keystoneclient class
"""
- creds = {}
- #creds['user_domain'] = self._domain_name
- creds['username'] = self._username
- creds['password'] = self._password
- creds['auth_url'] = self._auth_url
- creds['project_name'] = self._tenant_name
- creds['insecure'] = self._insecure
+ creds = {}
+ creds['username'] = self._username
+ creds['password'] = self._password
+ creds['auth_url'] = self._auth_url
+ creds['project_name'] = self._tenant_name
+ creds['insecure'] = self._insecure
+ creds['user_domain_name'] = self._user_domain_name
+ creds['project_domain_name'] = self._project_domain_name
return creds
+ def get_user_domain_name(self):
+ """
+ Returns the domain_name of the associated OpenStack user account
+ """
+ return self._user_domain_name;
+
+ def get_project_domain_name(self):
+ """
+ Returns the domain_name of the associated OpenStack project
+ """
+ return self._project_domain_name;
+
def get_auth_token(self):
"""
Returns a valid auth_token
creds['project_id'] = self.ks_drv.get_tenant_name()
creds['auth_token'] = self.ks_drv.get_auth_token()
creds['insecure'] = self.ks_drv.get_security_mode()
+ creds['user_domain_name'] = self.ks_drv.get_user_domain_name()
+ creds['project_domain_name'] = self.ks_drv.get_project_domain_name()
+
return creds
def _get_nova_connection(self):
{
server_name(string) : Name of the VM/Server
flavor_id (string) : UUID of the flavor to be used for VM
- image_id (string) : UUID of the image to be used VM/Server instance
+ image_id (string) : UUID of the image to be used VM/Server instance,
+ This could be None if volumes (with images) are being used
network_list(List) : A List of network_ids. A port will be created in these networks
port_list (List) : A List of port-ids. These ports will be added to VM.
metadata (dict) : A dictionary of arbitrary key-value pairs associated with VM/server
nvconn = self._get_nova_connection()
+
try:
server = nvconn.servers.create(kwargs['name'],
kwargs['image_id'],
kwargs['flavor_id'],
meta = kwargs['metadata'],
- files = None,
+ files = kwargs['files'],
reservation_id = None,
min_count = None,
max_count = None,
userdata = kwargs['userdata'],
security_groups = kwargs['security_groups'],
availability_zone = kwargs['availability_zone'],
- block_device_mapping = None,
+ block_device_mapping_v2 = kwargs['block_device_mapping_v2'],
nics = nics,
scheduler_hints = kwargs['scheduler_hints'],
- config_drive = None)
+ config_drive = kwargs['config_drive'])
except Exception as e:
logger.info("OpenstackDriver: Create Server operation failed. Exception: %s" %(str(e)))
raise
logger.error("OpenstackDriver: Release Floating IP operation failed. Exception: %s" %str(e))
raise
    def volume_list(self, server_id):
        """
        List of volumes attached to the server

        Arguments:
           server_id (string): UUID of the server whose attached volumes are listed
        Returns:
           List of dictionary objects where dictionary is representation of class (novaclient.v2.volumes.Volume)
        """
        nvconn = self._get_nova_connection()
        try:
            volumes = nvconn.volumes.get_server_volumes(server_id=server_id)
        except Exception as e:
            logger.error("OpenstackDriver: Get volume information failed. Exception: %s" %str(e))
            raise

        # Convert novaclient Volume objects into plain dicts for callers
        volume_info = [v.to_dict() for v in volumes]
        return volume_info
+
+
def group_list(self):
"""
List of Server Affinity and Anti-Affinity Groups
Constructor for NovaDriver
Arguments: KeystoneDriver class object
"""
- super(NovaDriverV21, self).__init__(ks_drv, 'computev21', '2.1')
+ super(NovaDriverV21, self).__init__(ks_drv, 'compute', '2.1')
class GlanceDriver(object):
"""
"""
Driver for openstack nova, neutron, glance, keystone, swift, cinder services
"""
- def __init__(self, username, password, auth_url, tenant_name, mgmt_network = None, cert_validate = False):
+ def __init__(self, username, password, auth_url, tenant_name, mgmt_network = None, cert_validate = False, user_domain_name = None, project_domain_name = None):
"""
OpenstackDriver Driver constructor
Arguments:
"""
insecure = not cert_validate
if auth_url.find('/v3') != -1:
- self.ks_drv = KeystoneDriverV3(username, password, auth_url, tenant_name, insecure)
+ self.ks_drv = KeystoneDriverV3(username, password, auth_url, tenant_name, insecure, user_domain_name, project_domain_name)
self.glance_drv = GlanceDriverV2(self.ks_drv)
self.nova_drv = NovaDriverV21(self.ks_drv)
self.neutron_drv = NeutronDriverV2(self.ks_drv)
self.ceilo_drv = CeilometerDriverV2(self.ks_drv)
+ self.cinder_drv = CinderDriverV2(self.ks_drv)
elif auth_url.find('/v2') != -1:
self.ks_drv = KeystoneDriverV2(username, password, auth_url, tenant_name, insecure)
self.glance_drv = GlanceDriverV2(self.ks_drv)
self.nova_drv = NovaDriverV2(self.ks_drv)
self.neutron_drv = NeutronDriverV2(self.ks_drv)
self.ceilo_drv = CeilometerDriverV2(self.ks_drv)
+ self.cinder_drv = CinderDriverV2(self.ks_drv)
else:
logger.error("Could not identity the version information for openstack service endpoints. Auth_URL should contain \"/v2\" or \"/v3\" string in it")
raise NotImplementedError("Auth URL is wrong or invalid. Only Keystone v2 & v3 supported")
+ self._mgmt_network_id = None
if mgmt_network != None:
self._mgmt_network = mgmt_network
try:
ntconn = self.neutron_drv._get_neutron_connection()
networks = ntconn.list_networks()
+ except (KeystoneExceptions.Unauthorized, KeystoneExceptions.AuthorizationFailure) as e:
+ raise
except Exception as e:
logger.error("OpenstackDriver: List Network operation failed. Exception: %s" %(str(e)))
raise
return self.nova_drv.flavor_get(flavor_id)
def nova_server_create(self, **kwargs):
+ def _verify_image(image_id):
+ image = self.glance_drv.image_get(image_id)
+ if image['status'] != 'active':
+ raise GlanceException.NotFound("Image with image_id: %s not found in active state. Current State: %s" %(image['id'], image['status']))
+
assert kwargs['flavor_id'] == self.nova_drv.flavor_get(kwargs['flavor_id'])['id']
- image = self.glance_drv.image_get(kwargs['image_id'])
- if image['status'] != 'active':
- raise GlanceException.NotFound("Image with image_id: %s not found in active state. Current State: %s" %(image['id'], image['status']))
+
+ if kwargs['block_device_mapping_v2'] is not None:
+ for block_map in kwargs['block_device_mapping_v2']:
+ if 'uuid' in block_map:
+ _verify_image(block_map['uuid'])
+ else:
+ _verify_image(kwargs['image_id'])
# if 'network_list' in kwargs:
# kwargs['network_list'].append(self._mgmt_network_id)
def nova_server_group_list(self):
return self.nova_drv.group_list()
+ def nova_volume_list(self, server_id):
+ return self.nova_drv.volume_list(server_id)
+
def neutron_network_list(self):
return self.neutron_drv.network_list()
def ceilo_alarm_delete(self, alarm_id):
self.ceilo_drv.client.alarms.delete(alarm_id)
+
+ def cinder_volume_list(self):
+ return self.cinder_drv.volume_list()
+
+ def cinder_volume_get(self,vol_id):
+ return self.cinder_drv.volume_get(vol_id)
+
+ def cinder_volume_set_metadata(self, volumeid, metadata):
+ return self.cinder_drv.volume_set_metadata(volumeid, metadata)
+
+ def cinder_volume_delete_metadata(self, volumeid, metadata):
+ return self.cinder_drv.volume_delete_metadata(volumeid, metadata)
+
+
+
class CinderDriver(object):
    """
    Driver for openstack cinder-client
    """
    def __init__(self, ks_drv, service_name, version):
        """
        Constructor for CinderDriver

        Arguments:
           ks_drv             : KeystoneDriver class object
           service_name (str) : cinder service type in the keystone catalog (e.g. 'volumev2')
           version            : cinder client API version
        """
        self.ks_drv = ks_drv
        self._service_name = service_name
        self._version = version

    def _get_cinder_credentials(self):
        """
        Returns a dictionary of kwargs required to instantiate python-cinderclient class

        Arguments: None

        Returns:
           A dictionary object of arguments
        """
        creds = {}
        creds['version'] = self._version
        creds['username'] = self.ks_drv.get_username()
        creds['api_key'] = self.ks_drv.get_password()
        creds['auth_url'] = self.ks_drv.get_service_endpoint("identity", "publicURL")
        creds['project_id'] = self.ks_drv.get_tenant_name()
        creds['insecure'] = self.ks_drv.get_security_mode()

        return creds

    def _get_cinder_connection(self):
        """
        Returns a cached object of class python-cinderclient, re-creating it
        when the keystone auth_token is no longer valid.
        """
        if not hasattr(self, '_cinder_connection'):
            self._cinder_connection = cinder_client.Client(**self._get_cinder_credentials())
        elif not self.ks_drv.is_auth_token_valid():
            # Reinitialize if auth_token is no longer valid
            self._cinder_connection = cinder_client.Client(**self._get_cinder_credentials())
        return self._cinder_connection

    def volume_list(self):
        """
        Returns the list of volumes visible in the project.

        Arguments: None

        Returns: List of cinderclient Volume objects.
        """
        cinderconn = self._get_cinder_connection()
        try:
            volume_info = cinderconn.volumes.list()
        except Exception as e:
            logger.error("OpenstackDriver: List volumes operation failed. Exception: %s" %(str(e)))
            raise
        return list(volume_info)

    def volume_get(self, volume_id):
        """
        Get details of a single volume.

        Arguments:
           volume_id (string): UUID of the volume

        Returns: cinderclient Volume object.
        """
        cinderconn = self._get_cinder_connection()
        try:
            vol = cinderconn.volumes.get(volume_id)
        except Exception as e:
            logger.error("OpenstackDriver: Get volume operation failed. Exception: %s" %(str(e)))
            raise
        return vol

    def volume_set_metadata(self, volume_id, metadata):
        """
        Set metadata for volume
        Metadata is a dictionary of key-value pairs

        Arguments:
           volume_id (string): UUID of the volume
           metadata (dict)   : key-value pairs to set on the volume

        Returns: None
        """
        cinderconn = self._get_cinder_connection()
        try:
            cinderconn.volumes.set_metadata(volume_id, metadata)
        except Exception as e:
            logger.error("OpenstackDriver: Set metadata operation failed. Exception: %s" %(str(e)))
            raise

    def volume_delete_metadata(self, volume_id, metadata):
        """
        Delete metadata for volume
        Metadata is a dictionary of key-value pairs

        Arguments:
           volume_id (string): UUID of the volume
           metadata (dict)   : metadata keys to remove from the volume

        Returns: None
        """
        cinderconn = self._get_cinder_connection()
        try:
            cinderconn.volumes.delete_metadata(volume_id, metadata)
        except Exception as e:
            logger.error("OpenstackDriver: Delete metadata operation failed. Exception: %s" %(str(e)))
            raise
+
class CinderDriverV2(CinderDriver):
    """
    Driver for openstack cinder-client V2
    """
    def __init__(self, ks_drv):
        # 'volumev2' is the keystone catalog service type for cinder v2 endpoints
        super(CinderDriverV2, self).__init__(ks_drv, 'volumev2', 2)
+
import argparse
import sys, os, time
import rwlogger
+import yaml
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
nvconn = drv.nova_drv._get_nova_connection()
nvconn.servers.set_meta(argument.server_id, meta_data)
+
def get_volume_id(server_vol_list, name):
    """Return the VIM volume id of the attached volume whose device name matches.

    Arguments:
        server_vol_list - list of volume-attachment dicts (as returned by
                          nova_volume_list), or None
        name            - short device name to match, e.g. "vda"

    Returns:
        The 'volumeId' of the matching attachment, or None if no match.
    """
    if server_vol_list is None:
        return None

    for os_volume in server_vol_list:
        try:
            # Device name is of the form "/dev/vda"; extract the trailing part.
            vol_name = os_volume['device'].split('/')[2]
        except (KeyError, IndexError, TypeError, AttributeError):
            # Malformed/missing device entry: skip it rather than abort.
            continue
        if name == vol_name:
            return os_volume['volumeId']
    return None
def create_volume_metadata(drv, argument):
    """Attach user-supplied custom meta-data to the volumes of a booted server.

    Parses the YAML volume description given via --vol_metadata and, for every
    volume entry carrying guest_params.custom_meta_data, sets the name/value
    pairs as cinder volume metadata on the matching attached volume.

    Arguments:
        drv      - OpenstackDriver instance
        argument - parsed argparse namespace (uses server_id and the
                   vol_metadata open file object)

    Exits the process with status 3 if a named volume is not attached.
    """
    if argument.vol_metadata is None:
        return

    yaml_vol_str = argument.vol_metadata.read()
    # safe_load: plain data only; no reason to allow arbitrary python
    # object construction from the metadata file.
    yaml_vol_cfg = yaml.safe_load(yaml_vol_str)

    srv_volume_list = drv.nova_volume_list(argument.server_id)
    for volume in yaml_vol_cfg:
        if 'guest_params' not in volume:
            continue
        if 'custom_meta_data' not in volume['guest_params']:
            continue
        vmd = dict()
        for vol_md_item in volume['guest_params']['custom_meta_data']:
            # Items without a value carry no metadata to set.
            if 'value' not in vol_md_item:
                continue
            vmd[vol_md_item['name']] = vol_md_item['value']

        # Map the YAML device name (e.g. "vda") to the VIM-assigned volume id
        vol_id = get_volume_id(srv_volume_list, volume['name'])
        if vol_id is None:
            logger.error("Server %s Could not find volume %s" %(argument.server_id, volume['name']))
            sys.exit(3)
        drv.cinder_volume_set_metadata(vol_id, vmd)
+
def prepare_vm_after_boot(drv,argument):
### Important to call create_port_metadata before assign_floating_ip_address
### since assign_floating_ip_address can wait thus delaying port_metadata creation
- ### Wait for 2 minute for server to come up -- Needs fine tuning
- wait_time = 120
- sleep_time = 1
+ ### Wait for a max of 5 minute for server to come up -- Needs fine tuning
+ wait_time = 500
+ sleep_time = 2
for i in range(int(wait_time/sleep_time)):
server = drv.nova_server_get(argument.server_id)
if server['status'] == 'ACTIVE':
sys.exit(4)
#create_port_metadata(drv, argument)
+ create_volume_metadata(drv, argument)
assign_floating_ip_address(drv, argument)
default = False,
help = "Create Port Metadata")
+ parser.add_argument("--vol_metadata", type=argparse.FileType('r'))
+
argument = parser.parse_args()
if not argument.auth_url:
import os
import subprocess
import uuid
+import tempfile
+import yaml
import rift.rwcal.openstack as openstack_drv
import rw_status
import rift.cal.rwcal_status as rwcal_status
import rwlogger
import neutronclient.common.exceptions as NeutronException
+import keystoneclient.exceptions as KeystoneExceptions
+import tornado
+import gi
+
+gi.require_version('RwSdn', '1.0')
from gi.repository import (
GObject,
self._rwlog_handler = None
RwcalOpenstackPlugin.instance_num += 1
-
@contextlib.contextmanager
def _use_driver(self, account):
if self._rwlog_handler is None:
with rwlogger.rwlog_root_handler(self._rwlog_handler):
try:
- drv = self._driver_class(username = account.openstack.key,
- password = account.openstack.secret,
- auth_url = account.openstack.auth_url,
- tenant_name = account.openstack.tenant,
- mgmt_network = account.openstack.mgmt_network,
- cert_validate = account.openstack.cert_validate )
+ drv = self._driver_class(username = account.openstack.key,
+ password = account.openstack.secret,
+ auth_url = account.openstack.auth_url,
+ tenant_name = account.openstack.tenant,
+ mgmt_network = account.openstack.mgmt_network,
+ cert_validate = account.openstack.cert_validate,
+ user_domain_name = account.openstack.user_domain,
+ project_domain_name = account.openstack.project_domain)
+ except (KeystoneExceptions.Unauthorized, KeystoneExceptions.AuthorizationFailure,
+ NeutronException.NotFound) as e:
+ raise
except Exception as e:
self.log.error("RwcalOpenstackPlugin: OpenstackDriver init failed. Exception: %s" %(str(e)))
raise
Validation Code and Details String
"""
status = RwcalYang.CloudConnectionStatus()
-
try:
with self._use_driver(account) as drv:
drv.validate_account_creds()
+ except KeystoneExceptions.Unauthorized as e:
+ self.log.error("Invalid credentials given for VIM account %s" %account.name)
+ status.status = "failure"
+ status.details = "Invalid Credentials: %s" % str(e)
+
+ except KeystoneExceptions.AuthorizationFailure as e:
+ self.log.error("Bad authentication URL given for VIM account %s. Given auth url: %s" % (
+ account.name, account.openstack.auth_url))
+ status.status = "failure"
+ status.details = "Invalid auth url: %s" % str(e)
+
+ except NeutronException.NotFound as e:
+ self.log.error("Given management network %s could not be found for VIM account %s" % (
+ account.openstack.mgmt_network, account.name))
+ status.status = "failure"
+ status.details = "mgmt network does not exist: %s" % str(e)
+
except openstack_drv.ValidationError as e:
self.log.error("RwcalOpenstackPlugin: OpenstackDriver credential validation failed. Exception: %s", str(e))
status.status = "failure"
image = drv.glance_image_get(image_id)
return RwcalOpenstackPlugin._fill_image_info(image)
+ # This is being deprecated. Please do not use for new SW development
@rwstatus(ret_on_failure=[""])
def do_create_vm(self, account, vminfo):
"""Create a new virtual machine.
kwargs = {}
kwargs['name'] = vminfo.vm_name
kwargs['flavor_id'] = vminfo.flavor_id
- kwargs['image_id'] = vminfo.image_id
+ if vminfo.has_field('image_id'):
+ kwargs['image_id'] = vminfo.image_id
with self._use_driver(account) as drv:
### If floating_ip is required and we don't have one, better fail before any further allocation
return link
@staticmethod
- def _fill_vdu_info(vm_info, flavor_info, mgmt_network, port_list, server_group):
+ def _fill_vdu_info(drv, vm_info, flavor_info, mgmt_network, port_list, server_group, volume_list = None):
"""Create a GI object for VDUInfoParams
Converts VM information dictionary object returned by openstack
for key, value in vm_info['metadata'].items():
if key == 'node_id':
vdu.node_id = value
+ else:
+ custommetadata = vdu.custom_boot_data.custom_meta_data.add()
+ custommetadata.name = key
+ custommetadata.value = str(value)
+
+ # Look for config_drive
+ if ('config_drive' in vm_info):
+ vdu.custom_boot_data.custom_drive = vm_info['config_drive']
if ('image' in vm_info) and ('id' in vm_info['image']):
vdu.image_id = vm_info['image']['id']
if ('flavor' in vm_info) and ('id' in vm_info['flavor']):
if flavor_info is not None:
RwcalOpenstackPlugin._fill_epa_attributes(vdu, flavor_info)
+
+ # Fill the volume information
+ if volume_list is not None:
+ for os_volume in volume_list:
+ volr = vdu.volumes.add()
+ try:
+ " Device name is of format /dev/vda"
+ vol_name = (os_volume['device']).split('/')[2]
+ except:
+ continue
+ volr.name = vol_name
+ volr.volume_id = os_volume['volumeId']
+ try:
+ vol_details = drv.cinder_volume_get(volr.volume_id)
+ except:
+ continue
+ if vol_details is None:
+ continue
+ for key, value in vol_details.metadata.items():
+ volmd = volr.custom_meta_data.add()
+ volmd.name = key
+ volmd.value = value
+
return vdu
@rwcalstatus(ret_on_failure=[""])
raise OpenstackCALOperationFailure("Create-flavor operation failed for cloud account: %s" %(account.name))
return flavor_id
+ def _create_vm(self, account, vduinfo, pci_assignement=None, server_group=None, port_list=None, network_list=None, imageinfo_list=None):
+ """Create a new virtual machine.
+
+ Arguments:
+ account - a cloud account
+ vminfo - information that defines the type of VM to create
+
+ Returns:
+ The image id
+ """
+ kwargs = {}
+ kwargs['name'] = vduinfo.name
+ kwargs['flavor_id'] = vduinfo.flavor_id
+ if vduinfo.has_field('image_id'):
+ kwargs['image_id'] = vduinfo.image_id
+ else:
+ kwargs['image_id'] = ""
+
+ with self._use_driver(account) as drv:
+ ### If floating_ip is required and we don't have one, better fail before any further allocation
+ if vduinfo.has_field('allocate_public_address') and vduinfo.allocate_public_address:
+ if account.openstack.has_field('floating_ip_pool'):
+ pool_name = account.openstack.floating_ip_pool
+ else:
+ pool_name = None
+ floating_ip = self._allocate_floating_ip(drv, pool_name)
+ else:
+ floating_ip = None
+
+ if vduinfo.has_field('vdu_init') and vduinfo.vdu_init.has_field('userdata'):
+ kwargs['userdata'] = vduinfo.vdu_init.userdata
+ else:
+ kwargs['userdata'] = ''
+
+ if account.openstack.security_groups:
+ kwargs['security_groups'] = account.openstack.security_groups
+
+ kwargs['port_list'] = port_list
+ kwargs['network_list'] = network_list
+
+ metadata = {}
+ files = {}
+ config_drive = False
+ # Add all metadata related fields
+ if vduinfo.has_field('node_id'):
+ metadata['node_id'] = vduinfo.node_id
+ if pci_assignement is not None:
+ metadata['pci_assignement'] = pci_assignement
+ if vduinfo.has_field('custom_boot_data'):
+ if vduinfo.custom_boot_data.has_field('custom_meta_data'):
+ for custom_meta_item in vduinfo.custom_boot_data.custom_meta_data:
+ if custom_meta_item.data_type == "STRING":
+ metadata[custom_meta_item.name] = custom_meta_item.value
+ elif custom_meta_item.data_type == "JSON":
+ metadata[custom_meta_item.name] = tornado.escape.json_decode(custom_meta_item.value)
+ else:
+ raise OpenstackCALOperationFailure("Create-vdu operation failed. Unsupported data-type {} for custom-meta-data name {} ".format(custom_meta_item.data_type, custom_meta_item.name))
+ if vduinfo.custom_boot_data.has_field('custom_config_files'):
+ for custom_config_file in vduinfo.custom_boot_data.custom_config_files:
+ files[custom_config_file.dest] = custom_config_file.source
+
+ if vduinfo.custom_boot_data.has_field('custom_drive'):
+ if vduinfo.custom_boot_data.custom_drive is True:
+ config_drive = True
+
+ kwargs['metadata'] = metadata
+ kwargs['files'] = files
+ kwargs['config_drive'] = config_drive
+
+ if vduinfo.has_field('availability_zone') and vduinfo.availability_zone.has_field('name'):
+ kwargs['availability_zone'] = vduinfo.availability_zone
+ else:
+ kwargs['availability_zone'] = None
+
+ if server_group is not None:
+ kwargs['scheduler_hints'] = {'group': server_group}
+ else:
+ kwargs['scheduler_hints'] = None
+
+ kwargs['block_device_mapping_v2'] = None
+ vol_metadata = False
+ if vduinfo.has_field('volumes') :
+ kwargs['block_device_mapping_v2'] = []
+ with self._use_driver(account) as drv:
+ # Only support image->volume
+ for volume in vduinfo.volumes:
+ block_map = dict()
+ block_map['boot_index'] = volume.boot_params.boot_priority
+ if "image" in volume:
+ # Support image->volume
+ # Match retrived image info with volume based image name and checksum
+ if volume.image is not None:
+ matching_images = [img for img in imageinfo_list if img['name'] == volume.image]
+ if volume.image_checksum is not None:
+ matching_images = [img for img in matching_images if img['checksum'] == volume.image_checksum]
+ img_id = matching_images[0]['id']
+ if img_id is None:
+ raise OpenstackCALOperationFailure("Create-vdu operation failed. Volume image not found for name {} checksum {}".format(volume.name, volume.checksum))
+ block_map['uuid'] = img_id
+ block_map['source_type'] = "image"
+ else:
+ block_map['source_type'] = "blank"
+
+ block_map['device_name'] = volume.name
+ block_map['destination_type'] = "volume"
+ block_map['volume_size'] = volume.size
+ block_map['delete_on_termination'] = True
+ if volume.guest_params.has_field('device_type') and volume.guest_params.device_type == 'cdrom':
+ block_map['device_type'] = 'cdrom'
+ if volume.guest_params.has_field('device_bus') and volume.guest_params.device_bus == 'ide':
+ block_map['disk_bus'] = 'ide'
+ kwargs['block_device_mapping_v2'].append(block_map)
+
+
+ with self._use_driver(account) as drv:
+ vm_id = drv.nova_server_create(**kwargs)
+ if floating_ip:
+ self.prepare_vdu_on_boot(account, vm_id, floating_ip, vduinfo.volumes)
+
+ return vm_id
+
+ def get_openstack_image_info(self, account, image_name, image_checksum=None):
+ self.log.debug("Looking up image id for image name %s and checksum %s on cloud account: %s",
+ image_name, image_checksum, account.name
+ )
+
+ image_list = []
+ with self._use_driver(account) as drv:
+ image_list = drv.glance_image_list()
+ matching_images = [img for img in image_list if img['name'] == image_name]
+
+ # If the image checksum was filled in then further filter the images by the checksum
+ if image_checksum is not None:
+ matching_images = [img for img in matching_images if img['checksum'] == image_checksum]
+ else:
+ self.log.warning("Image checksum not provided. Lookup using image name (%s) only.",
+ image_name)
+
+ if len(matching_images) == 0:
+ raise ResMgrCALOperationFailure("Could not find image name {} (using checksum: {}) for cloud account: {}".format(
+ image_name, image_checksum, account.name
+ ))
+
+ elif len(matching_images) > 1:
+ unique_checksums = {i.checksum for i in matching_images}
+ if len(unique_checksums) > 1:
+ msg = ("Too many images with different checksums matched "
+ "image name of %s for cloud account: %s" % (image_name, account.name))
+ raise ResMgrCALOperationFailure(msg)
+
+ return matching_images[0]
+
@rwcalstatus(ret_on_failure=[""])
def do_create_vdu(self, account, vdu_init):
"""Create a new virtual deployment unit
The vdu_id
"""
### First create required number of ports aka connection points
+ # Add the mgmt_ntwk by default.
+ mgmt_network_id = None
with self._use_driver(account) as drv:
+ mgmt_network_id = drv._mgmt_network_id
### If floating_ip is required and we don't have one, better fail before any further allocation
if vdu_init.has_field('allocate_public_address') and vdu_init.allocate_public_address:
if account.openstack.has_field('floating_ip_pool'):
port_list = []
network_list = []
+ imageinfo_list = []
+ is_explicit_mgmt_defined = False
for c_point in vdu_init.connection_points:
+ # if the user has specified explicit mgmt_network connection point
+ # then remove the mgmt_network from the VM list
+ if c_point.virtual_link_id == mgmt_network_id:
+ is_explicit_mgmt_defined = True
if c_point.virtual_link_id in network_list:
assert False, "Only one port per network supported. Refer: http://specs.openstack.org/openstack/nova-specs/specs/juno/implemented/nfv-multiple-if-1-net.html"
else:
if not vdu_init.has_field('flavor_id'):
vdu_init.flavor_id = self._select_resource_flavor(account,vdu_init)
- ### Check VDU Virtual Interface type and make sure VM with property exists
- if vdu_init.connection_points is not None:
- ### All virtual interfaces need to be of the same type for Openstack Accounts
- if not all(cp.type_yang == vdu_init.connection_points[0].type_yang for cp in vdu_init.connection_points):
- ### We have a mix of E1000 & VIRTIO virtual interface types in the VDU, abort instantiation.
- assert False, "Only one type of Virtual Intefaces supported for Openstack accounts. Found a mix of VIRTIO & E1000."
+ ### Obtain all images for volumes and perform validations
+ if vdu_init.has_field('volumes'):
+ for volume in vdu_init.volumes:
+ if "image" in volume:
+ image_checksum = volume.image_checksum if volume.has_field("image_checksum") else None
+ image_info = self.get_openstack_image_info(account, volume.image, image_checksum)
+ imageinfo_list.append(image_info)
+ elif vdu_init.has_field('image_id'):
+ with self._use_driver(account) as drv:
+ image_info = drv.glance_image_get(vdu_init.image_id)
+ imageinfo_list.append(image_info)
- with self._use_driver(account) as drv:
- img_info = drv.glance_image_get(vdu_init.image_id)
+ if not imageinfo_list:
+ err_str = ("VDU has no image information")
+ self.log.error(err_str)
+ raise OpenstackCALOperationFailure("Create-vdu operation failed. Error- %s" % err_str)
- virt_intf_type = vdu_init.connection_points[0].type_yang
- if virt_intf_type == 'E1000':
- if 'hw_vif_model' in img_info and img_info.hw_vif_model == 'e1000':
- self.log.debug("VDU has Virtual Interface E1000, found matching image with property hw_vif_model=e1000")
+ ### Check VDU Virtual Interface type and make sure VM with property exists
+ if vdu_init.connection_points:
+ ### All virtual interfaces need to be of the same type for Openstack Accounts
+ if not (all(cp.type_yang == 'E1000' for cp in vdu_init.connection_points) or all(cp.type_yang != 'E1000' for cp in vdu_init.connection_points)):
+ ### We have a mix of E1000 & VIRTIO/SR_IPOV virtual interface types in the VDU, abort instantiation.
+ assert False, "Only one type of Virtual Intefaces supported for Openstack accounts. Found a mix of VIRTIO/SR_IOV & E1000."
+
+ ## It is not clear if all the images need to checked for HW properties. In the absence of model info describing each im age's properties,
+ ### we shall assume that all images need to have similar properties
+ for img_info in imageinfo_list:
+
+ virt_intf_type = vdu_init.connection_points[0].type_yang
+ if virt_intf_type == 'E1000':
+ if 'hw_vif_model' in img_info and img_info.hw_vif_model == 'e1000':
+ self.log.debug("VDU has Virtual Interface E1000, found matching image with property hw_vif_model=e1000")
+ else:
+ err_str = ("VDU has Virtual Interface E1000, but image '%s' does not have property hw_vif_model=e1000" % img_info.name)
+ self.log.error(err_str)
+ raise OpenstackCALOperationFailure("Create-vdu operation failed. Error- %s" % err_str)
+ elif virt_intf_type == 'VIRTIO' or virt_intf_type == 'SR_IOV':
+ if 'hw_vif_model' in img_info:
+ err_str = ("VDU has Virtual Interface %s, but image '%s' has hw_vif_model mismatch" % virt_intf_type,img_info.name)
+ self.log.error(err_str)
+ raise OpenstackCALOperationFailure("Create-vdu operation failed. Error- %s" % err_str)
+ else:
+ self.log.debug("VDU has Virtual Interface %s, found matching image" % virt_intf_type)
else:
- err_str = ("VDU has Virtual Interface E1000, but image '%s' does not have property hw_vif_model=e1000" % img_info.name)
- self.log.error(err_str)
- raise OpenstackCALOperationFailure("Create-vdu operation failed. Error- %s" % err_str)
- elif virt_intf_type == 'VIRTIO':
- if 'hw_vif_model' in img_info:
- err_str = ("VDU has Virtual Interface VIRTIO, but image '%s' has hw_vif_model mismatch" % img_info.name)
+ err_str = ("VDU Virtual Interface '%s' not supported yet" % virt_intf_type)
self.log.error(err_str)
- raise OpenstackCALOperationFailure("Create-vdu operation failed. Error- %s" % err_str)
- else:
- self.log.debug("VDU has Virtual Interface VIRTIO, found matching image")
- else:
- err_str = ("VDU Virtual Interface '%s' not supported yet" % virt_intf_type)
- self.log.error(err_str)
- raise OpenstackCALOperationFailure("Create-vdu operation failed. Error- %s" % err_str)
+ raise OpenstackCALOperationFailure("Create-vdu operation failed. Error- %s" % err_str)
with self._use_driver(account) as drv:
### Now Create VM
- vm = RwcalYang.VMInfoItem()
- vm.vm_name = vdu_init.name
- vm.flavor_id = vdu_init.flavor_id
- vm.image_id = vdu_init.image_id
- vm_network = vm.network_list.add()
- vm_network.network_id = drv._mgmt_network_id
- if vdu_init.has_field('vdu_init') and vdu_init.vdu_init.has_field('userdata'):
- vm.cloud_init.userdata = vdu_init.vdu_init.userdata
-
- if vdu_init.has_field('node_id'):
- vm.user_tags.node_id = vdu_init.node_id;
-
- if vdu_init.has_field('availability_zone') and vdu_init.availability_zone.has_field('name'):
- vm.availability_zone = vdu_init.availability_zone.name
-
+ vm_network_list = []
+ if not is_explicit_mgmt_defined:
+ vm_network_list.append(drv._mgmt_network_id)
+
+ if vdu_init.has_field('volumes'):
+ # Only combination supported: Image->Volume
+ for volume in vdu_init.volumes:
+ if "volume" in volume:
+ err_str = ("VDU Volume source not supported yet")
+ self.log.error(err_str)
+ raise OpenstackCALOperationFailure("Create-vdu operation failed. Error- %s" % err_str)
+ if "guest_params" not in volume:
+ err_str = ("VDU Volume destination parameters '%s' not defined")
+ self.log.error(err_str)
+ raise OpenstackCALOperationFailure("Create-vdu operation failed. Error- %s" % err_str)
+ if not volume.guest_params.has_field('device_type'):
+ err_str = ("VDU Volume destination type '%s' not defined")
+ self.log.error(err_str)
+ raise OpenstackCALOperationFailure("Create-vdu operation failed. Error- %s" % err_str)
+ if volume.guest_params.device_type not in ['disk', 'cdrom'] :
+ err_str = ("VDU Volume destination type '%s' not supported" % volume.guest_params.device_type)
+ self.log.error(err_str)
+ raise OpenstackCALOperationFailure("Create-vdu operation failed. Error- %s" % err_str)
+
+
+ server_group = None
if vdu_init.has_field('server_group'):
- ### Get list of server group in openstack for name->id mapping
- openstack_group_list = drv.nova_server_group_list()
- group_id = [ i['id'] for i in openstack_group_list if i['name'] == vdu_init.server_group.name]
- if len(group_id) != 1:
- raise OpenstackServerGroupError("VM placement failed. Server Group %s not found in openstack. Available groups" %(vdu_init.server_group.name, [i['name'] for i in openstack_group_list]))
- vm.server_group = group_id[0]
-
- for port_id in port_list:
- port = vm.port_list.add()
- port.port_id = port_id
+ ### Get list of server group in openstack for name->id mapping
+ openstack_group_list = drv.nova_server_group_list()
+ group_id = [ i['id'] for i in openstack_group_list if i['name'] == vdu_init.server_group.name]
+ if len(group_id) != 1:
+ raise OpenstackServerGroupError("VM placement failed. Server Group %s not found in openstack. Available groups" %(vdu_init.server_group.name, [i['name'] for i in openstack_group_list]))
+ server_group = group_id[0]
pci_assignement = self.prepare_vpci_metadata(drv, vdu_init)
if pci_assignement != '':
vm.user_tags.pci_assignement = pci_assignement
- vm_id = self.do_create_vm(account, vm, no_rwstatus=True)
- self.prepare_vdu_on_boot(account, vm_id, floating_ip)
+ vm_id = self._create_vm(account, vdu_init, pci_assignement=pci_assignement, server_group=server_group, port_list=port_list, network_list=vm_network_list, imageinfo_list = imageinfo_list)
return vm_id
def prepare_vpci_metadata(self, drv, vdu_init):
- def prepare_vdu_on_boot(self, account, server_id, floating_ip):
+ def prepare_vdu_on_boot(self, account, server_id, floating_ip, volumes=None):
cmd = PREPARE_VM_CMD.format(auth_url = account.openstack.auth_url,
username = account.openstack.key,
password = account.openstack.secret,
if floating_ip is not None:
cmd += (" --floating_ip "+ floating_ip.ip)
+ vol_metadata = False
+ if volumes is not None:
+ for volume in volumes:
+ if volume.guest_params.has_field('custom_meta_data'):
+ vol_metadata = True
+ break
+
+ if vol_metadata is True:
+ tmp_file = None
+ with tempfile.NamedTemporaryFile(mode='w', delete=False) as tmp_file:
+ vol_list = list()
+ for volume in volumes:
+ vol_dict = volume.as_dict()
+ vol_list.append(vol_dict)
+
+ yaml.dump(vol_list, tmp_file)
+ cmd += (" --vol_metadata {}").format(tmp_file.name)
+
exec_path = 'python3 ' + os.path.dirname(openstack_drv.__file__)
exec_cmd = exec_path+'/'+cmd
self.log.info("Running command: %s" %(exec_cmd))
Object of type RwcalYang.VDUInfoParams
"""
with self._use_driver(account) as drv:
-
- ### Get list of ports excluding the one for management network
- port_list = [p for p in drv.neutron_port_list(**{'device_id': vdu_id}) if p['network_id'] != drv.get_mgmt_network_id()]
+ port_list = drv.neutron_port_list(**{'device_id': vdu_id})
vm = drv.nova_server_get(vdu_id)
openstack_group_list = drv.nova_server_group_list()
server_group = [ i['name'] for i in openstack_group_list if vm['id'] in i['members']]
- vdu_info = RwcalOpenstackPlugin._fill_vdu_info(vm,
+ openstack_srv_volume_list = drv.nova_volume_list(vm['id'])
+ vdu_info = RwcalOpenstackPlugin._fill_vdu_info(drv, vm,
flavor_info,
account.openstack.mgmt_network,
port_list,
- server_group)
+ server_group,
+ volume_list = openstack_srv_volume_list)
if vdu_info.state == 'active':
try:
console_info = drv.nova_server_console(vdu_info.vdu_id)
with self._use_driver(account) as drv:
vms = drv.nova_server_list()
for vm in vms:
- ### Get list of ports excluding one for management network
- port_list = [p for p in drv.neutron_port_list(**{'device_id': vm['id']}) if p['network_id'] != drv.get_mgmt_network_id()]
+ port_list = drv.neutron_port_list(**{'device_id': vm['id']})
flavor_info = None
openstack_group_list = drv.nova_server_group_list()
server_group = [ i['name'] for i in openstack_group_list if vm['id'] in i['members']]
- vdu = RwcalOpenstackPlugin._fill_vdu_info(vm,
+ openstack_srv_volume_list = drv.nova_volume_list(vm['id'])
+ vdu = RwcalOpenstackPlugin._fill_vdu_info(drv, vm,
flavor_info,
account.openstack.mgmt_network,
port_list,
- server_group)
+ server_group,
+ volume_list = openstack_srv_volume_list)
if vdu.state == 'active':
try:
console_info = drv.nova_server_console(vdu.vdu_id)
mandatory true;
}
+ leaf user-domain {
+ type string;
+ default "Default";
+ description "Domain of the OpenStack user";
+ }
+ leaf project-domain {
+ type string;
+ default "Default";
+ description "Domain of the OpenStack project";
+ }
+
leaf tenant {
type string;
mandatory true;
uses connection-point-type;
}
+ leaf mgmt-network {
+ description
+ "Explicit mgmt-network name, otherwise the mgmt-network from
+ Cloud account is used";
+ type string;
+ }
+
leaf allocate-public-address {
description "If this VDU needs public IP address";
type boolean;
type string;
}
}
+
+ uses manotypes:custom-boot-data;
+
+ list volumes {
+ key "name";
+
+ leaf name {
+ description "Name of the disk-volumes, e.g. vda, vdb etc";
+ type string;
+ }
+ uses manotypes:volume-info;
+ }
}
container vdu-init-params {
type string;
description "Console URL from the VIM, if available";
}
+
+ uses manotypes:custom-boot-data;
+
+ list volumes {
+ key "name";
+
+ leaf name {
+ description "Name of the disk-volumes, e.g. vda, vdb etc";
+ type string;
+ }
+
+ leaf volume-id {
+ description "CAL assigned volume-id ";
+ rwpb:field-inline "true";
+ rwpb:field-string-max 64;
+ type string;
+ }
+ uses manotypes:custom-meta-data;
+ }
}
+
container vnf-resources {
rwpb:msg-new VNFResources;
config false;
import time
import unittest
import hashlib
-
import novaclient.exceptions as nova_exception
import paramiko
import rw_peas
from gi.repository import RwcalYang
from gi.repository.RwTypes import RwStatus
-from rift.rwcal.openstack.openstack_drv import KeystoneDriver, NovaDriver
+from rift.rwcal.openstack.openstack_drv import KeystoneDriver, NovaDriver, KeystoneDriverV3, KeystoneDriverV2
logger = logging.getLogger('rwcal-openstack')
+PING_USERDATA = '''
+#cloud-config
+password: fedora
+chpasswd: { expire: False }
+ssh_pwauth: True
+'''
+
#
# Important information about openstack installation. This needs to be manually verified
#
openstack_info = {
'username' : 'pluto',
'password' : 'mypasswd',
- 'auth_url' : 'http://10.66.4.14:5000/v3/',
+ 'auth_url' : 'http://10.66.4.17:5000/v3/',
'project_name' : 'demo',
'mgmt_network' : 'private',
'reserved_flavor' : 'm1.medium',
- 'reserved_image' : 'rift-root-latest.qcow2',
+ 'reserved_image' : 'Fedora-x86_64-20-20131211.1-sda-ping.qcow2',
'physical_network' : None,
'network_type' : None,
- 'segmentation_id' : None
+ 'segmentation_id' : None,
+ 'user_domain_name' : 'default',
+ 'project_domain_name': 'default'
+ }
+
+openstack_V3_info = {
+ 'username' : 'riftdev_admin',
+ 'password' : 'mypasswd',
+ 'auth_url' : 'http://10.68.0.11:5000/v3/',
+ 'project_name' : 'demov3',
+ 'mgmt_network' : 'center',
+ 'physical_network' : None,
+ 'network_type' : None,
+ 'segmentation_id' : None,
+ 'user_domain_name' : 'riftdev',
+ 'project_domain_name': 'riftdev'
}
"""
Creates an object for class RwcalYang.CloudAccount()
"""
- account = RwcalYang.CloudAccount()
- account.account_type = "openstack"
- account.openstack.key = openstack_info['username']
- account.openstack.secret = openstack_info['password']
- account.openstack.auth_url = openstack_info['auth_url']
- account.openstack.tenant = openstack_info['project_name']
- account.openstack.mgmt_network = openstack_info['mgmt_network']
+ account = RwcalYang.CloudAccount()
+ account.name = "Gruntxx"
+ account.account_type = "openstack"
+ account.openstack.key = openstack_info['username']
+ account.openstack.secret = openstack_info['password']
+ account.openstack.auth_url = openstack_info['auth_url']
+ account.openstack.tenant = openstack_info['project_name']
+ account.openstack.mgmt_network = openstack_info['mgmt_network']
+ account.openstack.user_domain = openstack_info['user_domain_name']
+ account.openstack.project_domain = openstack_info['project_domain_name']
return account
def get_cal_plugin():
rc, rs = self.cal.get_network_list(self._acct)
self.assertEqual(rc, RwStatus.SUCCESS)
- networks = [ network for network in rs.networkinfo_list if (network.network_name == 'rift.cal.unittest.network' or network.network_name == 'rift.cal.virtual_link') ]
+ networks = [ network for network in rs.networkinfo_list if ((network.network_name == 'rift.cal.unittest.network') or ('rift.cal.virtual_link' in network.network_name) ) ]
for network in networks:
+ logger.debug("Openstack-CAL-Test: Deleting old VL %s", network.network_id)
self.cal.delete_virtual_link(self._acct, network.network_id)
def tearDown(self):
openstack_info['username'],
openstack_info['password'],
openstack_info['auth_url'],
+ None,
openstack_info['project_name'])
# Get hold of the client instance need for Token Manager
client = drv._get_keystone_connection()
flavors = nova.flavor_list()
self.assertTrue(len(flavors) > 1)
+    def test_v3_Keystone(self):
+        """Exercise Keystone v3 and v2 authentication paths.
+
+        Checks, in order:
+          1. v3 auth with explicit user/project domains succeeds
+          2. v3 auth with a bogus user domain ("DummyDom") fails
+          3. v3 auth against the default-domain account succeeds (backward compat)
+          4. v3 auth with no domains supplied succeeds (backward compat)
+          5. v2 auth against the v2.0 endpoint succeeds
+
+        NOTE(review): the fifth positional argument to the driver constructors
+        is always None here -- confirm its meaning against the
+        KeystoneDriverV3/KeystoneDriverV2 signatures.
+        """
+        # Keystone v3 authentication
+        auth_exp = False
+        try:
+            drv = KeystoneDriverV3(openstack_V3_info['username'],
+                                   openstack_V3_info['password'],
+                                   openstack_V3_info['auth_url'],
+                                   openstack_V3_info['project_name'],
+                                   None,
+                                   openstack_V3_info['user_domain_name'],
+                                   openstack_V3_info['project_domain_name'])
+            client = drv._get_keystone_connection()
+        except Exception:
+            auth_exp = True
+        self.assertFalse(auth_exp)
+
+        # Incorrect domain being passed to the v3 Keystone API must fail
+        auth_exp = False
+        try:
+            drv = KeystoneDriverV3(openstack_V3_info['username'],
+                                   openstack_V3_info['password'],
+                                   openstack_V3_info['auth_url'],
+                                   openstack_V3_info['project_name'],
+                                   None,
+                                   "DummyDom",
+                                   openstack_V3_info['project_domain_name'])
+            client = drv._get_keystone_connection()
+        except Exception:
+            auth_exp = True
+        self.assertTrue(auth_exp)
+
+        # Keystone v3 authentication - backward compatibility test
+        auth_exp = False
+        try:
+            drv = KeystoneDriverV3(openstack_info['username'],
+                                   openstack_info['password'],
+                                   openstack_info['auth_url'],
+                                   openstack_info['project_name'],
+                                   None,
+                                   openstack_info['user_domain_name'],
+                                   openstack_info['project_domain_name'])
+            client = drv._get_keystone_connection()
+        except Exception:
+            auth_exp = True
+        self.assertFalse(auth_exp)
+
+        # Keystone v3 authentication - backward compatibility (no domains supplied)
+        auth_exp = False
+        try:
+            drv = KeystoneDriverV3(openstack_info['username'],
+                                   openstack_info['password'],
+                                   openstack_info['auth_url'],
+                                   openstack_info['project_name'],
+                                   None,
+                                   None,
+                                   None)
+            client = drv._get_keystone_connection()
+        except Exception:
+            auth_exp = True
+        self.assertFalse(auth_exp)
+
+        # Keystone v2 authentication
+        auth_exp = False
+        try:
+            drv2 = KeystoneDriverV2(
+                openstack_info['username'],
+                openstack_info['password'],
+                'http://10.66.4.17:5000/v2.0',
+                openstack_info['project_name'],
+                None)
+            client = drv2._get_keystone_connection()
+        except Exception:
+            auth_exp = True
+        self.assertFalse(auth_exp)
+
+
@unittest.skip("Skipping test_vm_operations")
def test_vm_operations(self):
"""
vdu.node_id = OpenStackTest.NodeID
vdu.image_id = self._image.id
vdu.flavor_id = self._flavor.id
- vdu.vdu_init.userdata = ''
+ vdu.vdu_init.userdata = PING_USERDATA
vdu.allocate_public_address = True
+ meta1 = vdu.custom_boot_data.custom_meta_data.add()
+ meta1.name = "EMS_IP"
+ meta1.data_type = "STRING"
+ meta1.value = "10.5.6.6"
+ #meta2 = vdu.custom_boot_data.custom_meta_data.add()
+ #meta2.name = "Cluster_data"
+ #meta2.data_type = "JSON"
+ #meta2.value = '''{ "cluster_id": "12" , "vnfc_id": "112" }'''
+ #vdu.custom_boot_data.custom_drive = True
+ customfile1 = vdu.custom_boot_data.custom_config_files.add()
+ customfile1.source = "abcdef124"
+ customfile1.dest = "/tmp/tempfile.txt"
+ customfile2 = vdu.custom_boot_data.custom_config_files.add()
+ customfile2.source = "123456"
+ customfile2.dest = "/tmp/tempfile2.txt"
c1 = vdu.connection_points.add()
c1.name = "c_point1"
c1.virtual_link_id = virtual_link_id
vlink_req = self._get_virtual_link_request_info()
rc, rsp = self.cal.create_virtual_link(self._acct, vlink_req)
- self.assertEqual(rc, RwStatus.SUCCESS)
+ self.assertEqual(rc.status, RwStatus.SUCCESS)
logger.info("Openstack-CAL-Test: Created virtual_link with Id: %s" %rsp)
vlink_id = rsp
logger.info("Openstack-CAL-Test: Test Create VDU API")
rc, rsp = self.cal.create_vdu(self._acct, vdu_req)
- self.assertEqual(rc, RwStatus.SUCCESS)
+ self.assertEqual(rc.status, RwStatus.SUCCESS)
logger.info("Openstack-CAL-Test: Created vdu with Id: %s" %rsp)
vdu_id = rsp
### Create another virtual_link
rc, rsp = self.cal.create_virtual_link(self._acct, vlink_req)
- self.assertEqual(rc, RwStatus.SUCCESS)
+ self.assertEqual(rc.status, RwStatus.SUCCESS)
logger.info("Openstack-CAL-Test: Created virtual_link with Id: %s" %rsp)
vlink_id2= rsp
logger.info("Openstack-CAL-Test: VDU/Virtual Link create-delete test successfully completed")
-
class VmData(object):
"""A convenience class that provides all the stats and EPA Attributes
from the VM provided
if __name__ == "__main__":
- logging.basicConfig(level=logging.INFO)
+ logging.basicConfig(level=logging.DEBUG)
unittest.main()
try:
path = os.path.join(self._rift_artif_dir,
'launchpad/libs',
- agent_vnfr.vnfr_msg.vnfd_ref,
+ agent_vnfr.vnfr_msg.vnfd.id,
'charms/trusty',
charm)
self._log.debug("jujuCA: Charm dir is {}".format(path))
RwLaunchpadYang as launchpadyang,
RwVnfrYang,
RwVnfdYang,
- RwNsdYang
+ RwNsdYang,
+ VnfrYang
)
store.get_vnfd = mock.MagicMock(return_value=mock_vnfd)
- mock_vnfr = RwVnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr.from_dict({
- 'id': '1',
- 'vnfd_ref': '1',
- })
+ mock_vnfr = RwVnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr.from_dict({'id': '1'})
+ mock_vnfr.vnfd = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_Vnfd.from_dict({'id': '1'})
+
store.get_vnfr = mock.MagicMock(return_value=mock_vnfr)
mock_nsr = RwNsrYang.YangData_Nsr_NsInstanceOpdata_Nsr.from_dict({
pkg.extract_file(script_file, dest_path)
except package.ExtractError as e:
raise ScriptExtractionError("Failed to extract script %s" % script_name) from e
+
+    def read_script(self, pkg, filename):
+        """Return the contents of script *filename* from package *pkg* as a str.
+
+        Looks *filename* up among the package's script files
+        (PackageScriptExtractor.package_script_files) and returns the decoded
+        file contents.
+
+        Raises:
+            ScriptExtractionError - when extraction of the matching script
+            fails, or when no script with that name exists in the package.
+        """
+        script_files = PackageScriptExtractor.package_script_files(pkg)
+
+        for script_name, script_file in script_files.items():
+            if script_name == filename:
+                self._log.debug("Found %s script file in package at %s", filename, script_file)
+
+                try:
+                    with pkg.open(script_file) as f:
+                        userdata = f.read()
+                    self._log.info("Custom script read from file %s", userdata)
+                    # File contents are read in binary string, decode to regular string and return
+                    return userdata.decode()
+                except package.ExtractError as e:
+                    raise ScriptExtractionError("Failed to extract script %s" % script_name) from e
+
+        # If we've reached this point but not found a matching script,
+        # raise an Exception, since we got here only because there was supposed
+        # to be a script in the VDU
+        errmsg = "No script file found in the descriptor package"
+        self._log.error(errmsg)
+        raise ScriptExtractionError(errmsg)
+
vdu_metrics.memory.utilization = 100 * vdu_metrics.memory.used / vdu_metrics.memory.total
# Storage
- vdu_metrics.storage.used = metrics.storage.used
- vdu_metrics.storage.total = 1e9 * self.vdur.vm_flavor.storage_gb
- vdu_metrics.storage.utilization = 100 * vdu_metrics.storage.used / vdu_metrics.storage.total
+ try:
+ vdu_metrics.storage.used = metrics.storage.used
+ if self.vdur.has_field('volumes'):
+ for volume in self.vdur.volumes:
+ if vdu_metrics.storage.total is None:
+ vdu_metrics.storage.total = 1e9 * volume.size
+ else:
+ vdu_metrics.storage.total += (1e9 * volume.size)
+ else:
+ vdu_metrics.storage.total = 1e9 * self.vdur.vm_flavor.storage_gb
+ utilization = 100 * vdu_metrics.storage.used / vdu_metrics.storage.total
+ if utilization > 100:
+ utilization = 100
+ vdu_metrics.storage.utilization = utilization
+ except ZeroDivisionError:
+ vdu_metrics.storage.utilization = 0
# Network (incoming)
vdu_metrics.network.incoming.packets = metrics.network.incoming.packets
# This indicates that the NSD had no mon-param config.
if not nsd.monitoring_param:
for vnfr in constituent_vnfrs:
- vnfd = store.get_vnfd(vnfr.vnfd_ref)
+ vnfd = store.get_vnfd(vnfr.vnfd.id)
for monp in vnfd.monitoring_param:
mon_params.append(NsrMonitoringParam(
monp,
# value => (value_type, value)
self.vnfr_monparams = {}
+ # create_nsr_mon_params() is already validating for 'is_legacy' by checking if
+ # nsd is having 'monitoring_param'. So removing 'self.aggregation_type is None' check for is_legacy.
+ self.is_legacy = is_legacy
+
if not is_legacy:
self._msg = self._convert_nsd_msg(monp_config)
else:
"""Aggregation type"""
return self.nsr_mon_param_msg.aggregation_type
- @property
- def is_legacy(self):
- return (self.aggregation_type is None)
+ # @property
+ # def is_legacy(self):
+ # return (self.aggregation_type is None)
@classmethod
def extract_value(cls, monp):
def _convert_nsd_msg(self, nsd_monp):
"""Create initial msg without values"""
- vnfd_to_vnfr = {vnfr.vnfd_ref: vnfr_id
+ vnfd_to_vnfr = {vnfr.vnfd.id: vnfr_id
for vnfr_id, vnfr in self._constituent_vnfr_map.items()}
# First, convert the monp param ref from vnfd to vnfr terms.
vnf_mon = vnfr_core.VnfMonitorDtsHandler.from_vnf_data(
self,
vnfr,
- self.store.get_vnfd(vnfr.vnfd_ref))
+ self.store.get_vnfd(vnfr.vnfd.id))
self.vnfr_monitors[vnfr.id] = vnf_mon
self.vnfrs[vnfr.id] = vnfr
mock_vnfr = RwVnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr.from_dict({
'id': '1',
- 'vnfd_ref': '1',
'monitoring_param': ([monp.as_dict() for monp in monps] if not legacy else [])
})
+ mock_vnfr.vnfd = vnfryang.YangData_Vnfr_VnfrCatalog_Vnfr_Vnfd.from_dict({'id': '1'})
store.get_vnfr = mock.MagicMock(return_value=mock_vnfr)
mock_nsr = RwNsrYang.YangData_Nsr_NsInstanceOpdata_Nsr.from_dict({
class OpenmanoVnfr(object):
- def __init__(self, log, loop, cli_api, vnfr):
+ def __init__(self, log, loop, cli_api, vnfr, nsd):
self._log = log
self._loop = loop
self._cli_api = cli_api
self._created = False
+ self.nsd = nsd
+
@property
def vnfd(self):
return rift2openmano.RiftVNFD(self._vnfr.vnfd)
@property
def openmano_vnfd(self):
self._log.debug("Converting vnfd %s from rift to openmano", self.vnfd.id)
- openmano_vnfd = rift2openmano.rift2openmano_vnfd(self.vnfd)
+ openmano_vnfd = rift2openmano.rift2openmano_vnfd(self.vnfd, self.nsd)
return openmano_vnfd
@property
@asyncio.coroutine
def add_vlr(self, vlr):
self._vlrs.append(vlr)
+ yield from self._publisher.publish_vlr(None, vlr.vlr_msg)
yield from asyncio.sleep(1, loop=self._loop)
@asyncio.coroutine
self._cli_api.ns_vim_network_delete,
vlr.name,
vlr.om_datacenter_name)
+ yield from self._publisher.unpublish_vlr(None, vlr.vlr_msg)
yield from asyncio.sleep(1, loop=self._loop)
@asyncio.coroutine
def add_vnfr(self, vnfr):
- vnfr = OpenmanoVnfr(self._log, self._loop, self._cli_api, vnfr)
+ vnfr = OpenmanoVnfr(self._log, self._loop, self._cli_api, vnfr, nsd=self.nsd)
yield from vnfr.create()
self._vnfrs.append(vnfr)
self._cli_api = None
self._http_api = None
self._openmano_nsrs = {}
+ self._vnfr_uptime_tasks = {}
self._set_ro_account(ro_account)
self._log.debug("Attempting to publish openmano vnf: %s", vnfr_msg)
with self._dts.transaction() as xact:
yield from self._publisher.publish_vnfr(xact, vnfr_msg)
+ self._log.debug("Creating a task to update uptime for vnfr: %s", vnfr.id)
+ self._vnfr_uptime_tasks[vnfr.id] = self._loop.create_task(self.vnfr_uptime_update(vnfr))
+
+    def vnfr_uptime_update(self, vnfr):
+        """Periodically publish the uptime of *vnfr* until cancelled.
+
+        Intended to run as an asyncio task (see the create_task call at the
+        publish site): every 2 seconds it publishes a minimal VNFR record
+        carrying only the id and the uptime in seconds since the record's
+        _create_time.  Cancelling the task ends the loop quietly via
+        asyncio.CancelledError.
+        """
+        try:
+            # Minimal record: only id + uptime are published on each tick.
+            vnfr_ = RwVnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr.from_dict({'id': vnfr.id})
+            while True:
+                vnfr_.uptime = int(time.time()) - vnfr._create_time
+                yield from self._publisher.publish_vnfr(None, vnfr_)
+                yield from asyncio.sleep(2, loop=self._loop)
+        except asyncio.CancelledError:
+            self._log.debug("Received cancellation request for vnfr_uptime_update task")
@asyncio.coroutine
def instantiate_vl(self, nsr, vlr):
"""
nsr_id = nsr.id
openmano_nsr = self._openmano_nsrs[nsr_id]
+
yield from openmano_nsr.terminate()
yield from openmano_nsr.delete()
"""
Terminate the network service
"""
- pass
+ if vnfr.id in self._vnfr_uptime_tasks:
+ self._vnfr_uptime_tasks[vnfr.id].cancel()
@asyncio.coroutine
def terminate_vl(self, vlr):
# Update the NSR's config status
new_status = ROConfigManager.map_config_status(cm_nsr['state'])
- self._log.debug("Updating config status of NSR {} to {}({})".
- format(nsrid, new_status, cm_nsr['state']))
- yield from self.nsm.nsrs[nsrid].set_config_status(new_status, cm_nsr.get('state_details'))
+ self._log.info("Updating config status of NSR {} to {}({})".
+ format(nsrid, new_status, cm_nsr['state']))
+
+ # If terminate nsr request comes when NS instantiation is in 'Configuring state'; self.nsm.nsrs dict
+ # is already empty when self.nsm.nsrs[nsrid].set_config_status gets executed. So adding a check here.
+ if nsrid in self.nsm.nsrs:
+ yield from self.nsm.nsrs[nsrid].set_config_status(new_status, cm_nsr.get('state_details'))
except Exception as e:
self._log.error("Failed to process cm-state for nsr {}: {}".
class VirtualLinkRecord(object):
""" Virtual Link Records class"""
+ XPATH = "D,/vlr:vlr-catalog/vlr:vlr"
@staticmethod
@asyncio.coroutine
def create_record(dts, log, loop, nsr_name, vld_msg, cloud_account_name, om_datacenter, ip_profile, nsr_id, restart_mode=False):
self._vlr_id = str(uuid.uuid4())
self._state = VlRecordState.INIT
self._prev_state = None
+ self._create_time = int(time.time())
@property
def xpath(self):
"nsr_id_ref": self._nsr_id,
"vld_ref": self.vld_msg.id,
"name": self.name,
+ "create_time": self._create_time,
"cloud_account": self.cloud_account_name,
"om_datacenter": self.om_datacenter_name,
}
@asyncio.coroutine
def instantiate(self):
""" Instantiate this VL """
-
self._log.debug("Instaniating VLR key %s, vld %s",
self.xpath, self._vld_msg)
vlr = None
self._group_instance_id = group_instance_id
self._placement_groups = placement_groups
self._config_status = NsrYang.ConfigStates.INIT
+ self._create_time = int(time.time())
self._prev_state = VnfRecordState.INIT
self._state = VnfRecordState.INIT
vnfr_dict = {
"id": self.id,
"nsr_id_ref": self._nsr_id,
- "vnfd_ref": self.vnfd.id,
"name": self.name,
"cloud_account": self._cloud_account_name,
"om_datacenter": self._om_datacenter_name,
vnfr_dict.update(vnfd_copy_dict)
vnfr = RwVnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr.from_dict(vnfr_dict)
+
+ vnfr.vnfd = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_Vnfd.from_dict(self.vnfd.as_dict())
vnfr.member_vnf_index_ref = self.member_vnf_index
vnfr.vnf_configuration.from_dict(self._vnfd.vnf_configuration.as_dict())
cpr.vlr_ref = vlr_ref.id
self.vnfr_msg.connection_point.append(cpr)
self._log.debug("Connection point [%s] added, vnf id=%s vnfd id=%s",
- cpr, self.vnfr_msg.id, self.vnfr_msg.vnfd_ref)
+ cpr, self.vnfr_msg.id, self.vnfr_msg.vnfd.id)
if not self.restart_mode:
yield from self._dts.query_create(self.xpath,
""" Network service record """
XPATH = "D,/nsr:ns-instance-opdata/nsr:nsr"
- def __init__(self, dts, log, loop, nsm, nsm_plugin, nsr_cfg_msg, sdn_account_name, key_pairs, restart_mode=False):
+ def __init__(self, dts, log, loop, nsm, nsm_plugin, nsr_cfg_msg, sdn_account_name, key_pairs, restart_mode=False,
+ vlr_handler=None):
self._dts = dts
self._log = log
self._loop = loop
self._nsr_cfg_msg = nsr_cfg_msg
self._nsm_plugin = nsm_plugin
self._sdn_account_name = sdn_account_name
+ self._vlr_handler = vlr_handler
self._nsd = None
self._nsr_msg = None
self._is_active = False
self._vl_phase_completed = False
self._vnf_phase_completed = False
+ self.vlr_uptime_tasks = {}
# Initalise the state to init
for vlr in self._vlrs:
yield from self.nsm_plugin.instantiate_vl(self, vlr)
vlr.state = VlRecordState.ACTIVE
+ self.vlr_uptime_tasks[vlr.id] = self._loop.create_task(self.vlr_uptime_update(vlr))
+
+
+    def vlr_uptime_update(self, vlr):
+        """Periodically publish the uptime of *vlr* until cancelled.
+
+        Every 2 seconds publishes a minimal VLR record (id + uptime in seconds
+        since the VLR's _create_time) through self._vlr_handler.  On
+        cancellation the published record is deleted so stale uptime data does
+        not linger in the catalog.
+        """
+        try:
+
+            # Minimal record: only id + uptime are published on each tick.
+            vlr_ = RwVlrYang.YangData_Vlr_VlrCatalog_Vlr.from_dict({'id': vlr.id})
+            while True:
+                vlr_.uptime = int(time.time()) - vlr._create_time
+                yield from self._vlr_handler.update(None, VirtualLinkRecord.vlr_xpath(vlr), vlr_)
+                yield from asyncio.sleep(2, loop=self._loop)
+        except asyncio.CancelledError:
+            self._log.debug("Received cancellation request for vlr_uptime_update task")
+            # Remove the published record on teardown.
+            yield from self._vlr_handler.delete(None, VirtualLinkRecord.vlr_xpath(vlr))
+
@asyncio.coroutine
def create(self, config_xact):
""" This function creates VLs for every VLD in the NSD
associated with this NSR"""
for vld in self.nsd_msg.vld:
+
self._log.debug("Found vld %s in nsr id %s", vld, self.id)
cloud_account_list = self._extract_cloud_accounts_for_vl(vld)
for cloud_account,om_datacenter in cloud_account_list:
for vlr in self.vlrs:
yield from self.nsm_plugin.terminate_vl(vlr)
vlr.state = VlRecordState.TERMINATED
+ if vlr.id in self.vlr_uptime_tasks:
+ self.vlr_uptime_tasks[vlr.id].cancel()
self._log.debug("Terminating network service id %s", self.id)
nsr.config_status = self.map_config_status()
nsr.config_status_details = self._config_status_details
nsr.create_time = self._create_time
+ nsr.uptime = int(time.time()) - self._create_time
for cfg_prim in self.nsd_msg.service_primitive:
cfg_prim = NsrYang.YangData_Nsr_NsInstanceOpdata_Nsr_ServicePrimitive.from_dict(
nsr_msg,
sdn_account_name,
key_pairs,
- restart_mode=restart_mode
+ restart_mode=restart_mode,
+ vlr_handler=self._ro_plugin_selector._records_publisher._vlr_pub_hdlr
)
self._nsrs[nsr_msg.id] = nsr
nsm_plugin.create_nsr(nsr_msg, nsr_msg.nsd, key_pairs)
def create_virtual_compute(self, req_params):
#rc, rsp = self._rwcal.get_vdu_list(self._account)
self._log.debug("Calling get_vdu_list API")
+
rc, rsp = yield from self._loop.run_in_executor(self._executor,
self._rwcal.get_vdu_list,
self._account)
params = RwcalYang.VDUInitParams()
params.from_dict(req_params.as_dict())
- image_checksum = req_params.image_checksum if req_params.has_field("image_checksum") else None
- params.image_id = yield from self.get_image_id_from_image_info(req_params.image_name, image_checksum)
+ if 'image_name' in req_params:
+ image_checksum = req_params.image_checksum if req_params.has_field("image_checksum") else None
+ params.image_id = yield from self.get_image_id_from_image_info(req_params.image_name, image_checksum)
#rc, rs = self._rwcal.create_vdu(self._account, params)
self._log.debug("Calling create_vdu API with params %s" %(str(params)))
class Resource(object):
- def __init__(self, resource_id, resource_type):
+ def __init__(self, resource_id, resource_type, request):
self._id = resource_id
self._type = resource_type
+ self._request = request
@property
def resource_id(self):
def resource_type(self):
return self._type
+ @property
+ def request(self):
+ return self._request
+
def cleanup(self):
pass
class ComputeResource(Resource):
- def __init__(self, resource_id, resource_type):
- super(ComputeResource, self).__init__(resource_id, resource_type)
+ pass
class NetworkResource(Resource):
- def __init__(self, resource_id, resource_type):
- super(NetworkResource, self).__init__(resource_id, resource_type)
+ pass
class ResourcePoolInfo(object):
if resource_id in self._all_resources:
self._log.error("Resource with id %s name %s of type %s is already used", resource_id, request.name, resource_type)
raise ResMgrNoResourcesAvailable("Resource with name %s of type network is already used" %(resource_id))
- resource = self._resource_class(resource_id, resource_type)
+ resource = self._resource_class(resource_id, resource_type, request)
self._all_resources[resource_id] = resource
self._allocated_resources[resource_id] = resource
self._log.info("Successfully allocated virtual-network resource from CAL with resource-id: %s", resource_id)
def allocate_dynamic_resource(self, request):
#request.flavor_id = yield from self.select_resource_flavor(request)
resource_id = yield from self._cal.create_virtual_compute(request)
- resource = self._resource_class(resource_id, 'dynamic')
+ resource = self._resource_class(resource_id, 'dynamic', request)
self._all_resources[resource_id] = resource
self._allocated_resources[resource_id] = resource
self._log.info("Successfully allocated virtual-compute resource from CAL with resource-id: %s", resource_id)
@asyncio.coroutine
def get_resource_info(self, resource):
info = yield from self._cal.get_virtual_compute_info(resource.resource_id)
+
self._log.info("Successfully retrieved virtual-compute information from CAL with resource-id: %s. Info: %s",
resource.resource_id, str(info))
response = RwResourceMgrYang.VDUEventData_ResourceInfo()
return info
def _get_resource_state(self, resource_info, requested_params):
+
+
+ def conn_pts_len_equal():
+ """Return True when the allocated port count matches the request.
+
+ When no explicit mgmt network was requested, the platform attaches
+ an implicit mgmt port, so one allocated port is discounted before
+ comparing against the requested connection points.
+ """
+ allocated_ports = len(resource_info.connection_points)
+ requested_ports = len(requested_params.connection_points)
+
+ if not requested_params.mgmt_network:
+ # implicit mgmt port: not part of the requested connection points
+ allocated_ports -= 1
+
+ return allocated_ports == requested_ports
+
if resource_info.state == 'failed':
self._log.error("<Compute-Resource: %s> Reached failed state.",
resource_info.name)
resource_info.name, requested_params)
return 'pending'
- if(len(requested_params.connection_points) !=
- len(resource_info.connection_points)):
+ if not conn_pts_len_equal():
self._log.warning("<Compute-Resource: %s> Waiting for requested number of ports to be assigned to virtual-compute, requested: %d, assigned: %d",
resource_info.name,
len(requested_params.connection_points),
return r_info
self._resource_table[event_id] = (r_id, cloud_account_name, resource.pool_name)
- new_resource = pool._resource_class(r_id, 'dynamic')
+ new_resource = pool._resource_class(r_id, 'dynamic', request)
if resource_type == 'compute':
requested_params = RwcalYang.VDUInitParams()
requested_params.from_dict(request.as_dict())
def monitor_vdu_state(response_xpath, pathentry):
self._log.info("Initiating VDU state monitoring for xpath: %s ", response_xpath)
- loop_cnt = 180
+ time_to_wait = 300
+ sleep_time = 2
+ loop_cnt = int(time_to_wait/sleep_time)
for i in range(loop_cnt):
- self._log.debug("VDU state monitoring for xpath: %s. Sleeping for 1 second", response_xpath)
- yield from asyncio.sleep(1, loop = self._loop)
+ self._log.debug("VDU state monitoring for xpath: %s. Sleeping for 2 second", response_xpath)
+ yield from asyncio.sleep(2, loop = self._loop)
try:
response_info = yield from self._parent.read_virtual_compute_info(pathentry.key00.event_id)
except Exception as e:
return
else:
### End of loop. This is only possible if VDU did not reach active state
- err_msg = "VDU state monitoring: VDU at xpath :{} did not reached active state in {} seconds. Aborting monitoring".format(response_xpath, loop_cnt)
+ err_msg = "VDU state monitoring: VDU at xpath :{} did not reached active state in {} seconds. Aborting monitoring".format(response_xpath, time_to_wait)
self._log.info(err_msg)
response_info = RwResourceMgrYang.VDUEventData_ResourceInfo()
response_info.resource_state = 'failed'
import rift.tasklets
import rift.package.store
import rift.package.cloud_init
+import rift.package.script
+import rift.mano.dts as mano_dts
class VMResourceError(Exception):
vdud,
vnfr,
mgmt_intf,
+ mgmt_network,
cloud_account_name,
vnfd_package_store,
vdur_id=None,
self._mgmt_intf = mgmt_intf
self._cloud_account_name = cloud_account_name
self._vnfd_package_store = vnfd_package_store
+ self._mgmt_network = mgmt_network
self._vdur_id = vdur_id or str(uuid.uuid4())
self._int_intf = []
@property
def image_name(self):
""" name that should be used to lookup the image on the CMP """
+ if 'image' not in self._vdud:
+ return None
return os.path.basename(self._vdud.image)
@property
@property
def msg(self):
- """ VDU message """
+ """ Process VDU message from resmgr"""
vdu_fields = ["vm_flavor",
"guest_epa",
"vswitch_epa",
"hypervisor_epa",
"host_epa",
+ "volumes",
"name"]
vdu_copy_dict = {k: v for k, v in
self._vdud.as_dict().items() if k in vdu_fields}
}
if self.vm_resp is not None:
vdur_dict.update({"vim_id": self.vm_resp.vdu_id,
- "flavor_id": self.vm_resp.flavor_id,
- "image_id": self.vm_resp.image_id,
+ "flavor_id": self.vm_resp.flavor_id
})
+ if self._vm_resp.has_field('image_id'):
+ vdur_dict.update({ "image_id": self.vm_resp.image_id })
if self.management_ip is not None:
vdur_dict["management_ip"] = self.management_ip
vdur_dict.update(vdu_copy_dict)
+ if self.vm_resp is not None:
+ if self._vm_resp.has_field('volumes'):
+ for opvolume in self._vm_resp.volumes:
+ vdurvol_data = [vduvol for vduvol in vdur_dict['volumes'] if vduvol['name'] == opvolume.name]
+ if len(vdurvol_data) == 1:
+ vdurvol_data[0]["volume_id"] = opvolume.volume_id
+ if opvolume.has_field('custom_meta_data'):
+ metadata_list = list()
+ for metadata_item in opvolume.custom_meta_data:
+ metadata_list.append(metadata_item.as_dict())
+ if 'guest_params' not in vdurvol_data[0]:
+ vdurvol_data[0]['guest_params'] = dict()
+ vdurvol_data[0]['guest_params']['custom_meta_data'] = metadata_list
+
+ if self._vm_resp.has_field('custom_boot_data'):
+ vdur_dict['custom_boot_data'] = dict()
+ if self._vm_resp.custom_boot_data.has_field('custom_drive'):
+ vdur_dict['custom_boot_data']['custom_drive'] = self._vm_resp.custom_boot_data.custom_drive
+ if self._vm_resp.custom_boot_data.has_field('custom_meta_data'):
+ metadata_list = list()
+ for metadata_item in self._vm_resp.custom_boot_data.custom_meta_data:
+ metadata_list.append(metadata_item.as_dict())
+ vdur_dict['custom_boot_data']['custom_meta_data'] = metadata_list
+ if self._vm_resp.custom_boot_data.has_field('custom_config_files'):
+ file_list = list()
+ for file_item in self._vm_resp.custom_boot_data.custom_config_files:
+ file_list.append(file_item.as_dict())
+ vdur_dict['custom_boot_data']['custom_config_files'] = file_list
+
icp_list = []
ii_list = []
placement_groups = []
for group in self._placement_groups:
placement_groups.append(group.as_dict())
-
vdur_dict['placement_groups_info'] = placement_groups
+
return RwVnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_Vdur.from_dict(vdur_dict)
@property
self._log.info("Ignoring placement group with cloud construct for cloud-type: %s", cloud_type)
return
+ def process_custom_bootdata(self, vm_create_msg_dict):
+ """Inline custom config file contents into the VM create message.
+
+ For each entry under custom_boot_data.custom_config_files, replaces
+ the 'source' value (a script path inside the VNFD package) with the
+ file's actual contents, read via PackageScriptExtractor. Mutates
+ vm_create_msg_dict in place; no-op when no custom_config_files exist.
+
+ Raises:
+ VirtualDeploymentUnitRecordError if a source script cannot be
+ extracted from the stored VNFD package.
+ """
+ if 'custom_config_files' not in vm_create_msg_dict['custom_boot_data']:
+ return
+
+ stored_package = self._vnfd_package_store.get_package(self._vnfr.vnfd_id)
+ script_extractor = rift.package.script.PackageScriptExtractor(self._log)
+ for custom_file_item in vm_create_msg_dict['custom_boot_data']['custom_config_files']:
+ # entries without both endpoints are silently skipped (best effort)
+ if 'source' not in custom_file_item or 'dest' not in custom_file_item:
+ continue
+ source = custom_file_item['source']
+ # Find source file in scripts dir of VNFD
+ self._vnfd_package_store.refresh()
+ self._log.debug("Checking for source config file at %s", source)
+ try:
+ source_file_str = script_extractor.read_script(stored_package, source)
+ except rift.package.script.ScriptExtractionError as e:
+ raise VirtualDeploymentUnitRecordError(e)
+ # Update source file location with file contents
+ custom_file_item['source'] = source_file_str
+
+ return
+
def resmgr_msg(self, config=None):
vdu_fields = ["vm_flavor",
"guest_epa",
"vswitch_epa",
"hypervisor_epa",
- "host_epa"]
+ "host_epa",
+ "volumes",
+ "custom_boot_data"]
self._log.debug("Creating params based on VDUD: %s", self._vdud)
vdu_copy_dict = {k: v for k, v in self._vdud.as_dict().items() if k in vdu_fields}
vm_create_msg_dict = {
"name": self.name,
- "image_name": self.image_name,
}
+ if self.image_name is not None:
+ vm_create_msg_dict["image_name"] = self.image_name
+
if self.image_checksum is not None:
vm_create_msg_dict["image_checksum"] = self.image_checksum
if config is not None:
vm_create_msg_dict['vdu_init'] = {'userdata': config}
+ if self._mgmt_network:
+ vm_create_msg_dict['mgmt_network'] = self._mgmt_network
+
cp_list = []
for intf, cp, vlr in self._ext_intf:
cp_info = {"name": cp,
vm_create_msg_dict.update(vdu_copy_dict)
self.process_placement_groups(vm_create_msg_dict)
+ if 'custom_boot_data' in vm_create_msg_dict:
+ self.process_custom_bootdata(vm_create_msg_dict)
msg = RwResourceMgrYang.VDUEventData()
msg.event_id = self._request_id
msg.cloud_account = self.cloud_account_name
msg.request_info.from_dict(vm_create_msg_dict)
+
return msg
@asyncio.coroutine
class VirtualNetworkFunctionRecord(object):
""" Virtual Network Function Record """
- def __init__(self, dts, log, loop, cluster_name, vnfm, vcs_handler, vnfr_msg):
+ def __init__(self, dts, log, loop, cluster_name, vnfm, vcs_handler, vnfr_msg, mgmt_network=None):
self._dts = dts
self._log = log
self._loop = loop
self._cluster_name = cluster_name
self._vnfr_msg = vnfr_msg
self._vnfr_id = vnfr_msg.id
- self._vnfd_id = vnfr_msg.vnfd_ref
+ self._vnfd_id = vnfr_msg.vnfd.id
self._vnfm = vnfm
self._vcs_handler = vcs_handler
self._vnfr = vnfr_msg
+ self._mgmt_network = mgmt_network
- self._vnfd = None
+ self._vnfd = vnfr_msg.vnfd
self._state = VirtualNetworkFunctionRecordState.INIT
self._state_failed_reason = None
self._ext_vlrs = {} # The list of external virtual links
self._vnf_mon = None
self._config_status = vnfr_msg.config_status
self._vnfd_package_store = rift.package.store.VnfdPackageFilesystemStore(self._log)
+ self._rw_vnfd = None
+ self._vnfd_ref_count = 0
def _get_vdur_from_vdu_id(self, vdu_id):
self._log.debug("Finding vdur for vdu_id %s", vdu_id)
"FAILED": "failed", }
return op_status_map[self._state.name]
- @property
- def vnfd_xpath(self):
+ @staticmethod
+ def vnfd_xpath(vnfd_id):
""" VNFD xpath associated with this VNFR """
- return("C,/vnfd:vnfd-catalog/"
- "vnfd:vnfd[vnfd:id = '{}']".format(self._vnfd_id))
+ return "C,/vnfd:vnfd-catalog/vnfd:vnfd[vnfd:id = '{}']".format(vnfd_id)
+
+ @property
+ def vnfd_ref_count(self):
+ """ Returns the reference count this VNFR holds on its VNFD """
+ return self._vnfd_ref_count
+
+ def vnfd_in_use(self):
+ """ Returns True when this VNFR still holds a reference on its VNFD """
+ return True if self._vnfd_ref_count > 0 else False
+
+ def vnfd_ref(self):
+ """ Take a reference on the VNFD; returns the new count """
+ self._vnfd_ref_count += 1
+ return self._vnfd_ref_count
+
+ def vnfd_unref(self):
+ """ Release a reference on the VNFD; returns the new count.
+
+ Raises VnfRecordError on unref with no outstanding reference
+ (ref-count underflow indicates a lifecycle bug in the caller).
+ """
+ if self._vnfd_ref_count < 1:
+ msg = ("Unref on a VNFD object - vnfd id %s, vnfd_ref_count = %s" %
+ (self.vnfd.id, self._vnfd_ref_count))
+ self._log.critical(msg)
+ raise VnfRecordError(msg)
+ self._log.debug("Releasing ref on VNFD %s - curr vnfd_ref_count:%s",
+ self.vnfd.id, self._vnfd_ref_count)
+ self._vnfd_ref_count -= 1
+ return self._vnfd_ref_count
@property
def vnfd(self):
def mgmt_intf_info(self):
""" Get Management interface info for this VNFR """
- mgmt_intf_desc = self.vnfd.msg.mgmt_interface
+ mgmt_intf_desc = self.vnfd.mgmt_interface
ip_addr = None
if mgmt_intf_desc.has_field("cp"):
ip_addr = self.cp_ip_addr(mgmt_intf_desc.cp)
def msg(self):
""" Message associated with this VNFR """
vnfd_fields = ["short_name", "vendor", "description", "version"]
- vnfd_copy_dict = {k: v for k, v in self.vnfd.msg.as_dict().items() if k in vnfd_fields}
+ vnfd_copy_dict = {k: v for k, v in self.vnfd.as_dict().items() if k in vnfd_fields}
mgmt_intf = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_MgmtInterface()
ip_address, port = self.mgmt_intf_info()
"nsr_id_ref": self._vnfr_msg.nsr_id_ref,
"name": self.name,
"member_vnf_index_ref": self.member_vnf_index,
- "vnfd_ref": self.vnfd_id,
"operational_status": self.operational_status,
"operational_status_details": self._state_failed_reason,
"cloud_account": self.cloud_account_name,
vnfr_dict.update(vnfd_copy_dict)
vnfr_msg = RwVnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr.from_dict(vnfr_dict)
+ vnfr_msg.vnfd = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_Vnfd.from_dict(self.vnfd.as_dict())
+
+ vnfr_msg.create_time = self._create_time
+ vnfr_msg.uptime = int(time.time()) - self._create_time
vnfr_msg.mgmt_interface = mgmt_intf
# Add all the VLRs to VNFR
vdur = vnfr_msg.vdur.add()
vdur.from_dict(vdu.msg.as_dict())
- if self.vnfd.msg.mgmt_interface.has_field('dashboard_params'):
+ if self.vnfd.mgmt_interface.has_field('dashboard_params'):
vnfr_msg.dashboard_url = self.dashboard_url
for cpr in self._cprs:
ip, cfg_port = self.mgmt_intf_info()
protocol = 'http'
http_port = 80
- if self.vnfd.msg.mgmt_interface.dashboard_params.has_field('https'):
- if self.vnfd.msg.mgmt_interface.dashboard_params.https is True:
+ if self.vnfd.mgmt_interface.dashboard_params.has_field('https'):
+ if self.vnfd.mgmt_interface.dashboard_params.https is True:
protocol = 'https'
http_port = 443
- if self.vnfd.msg.mgmt_interface.dashboard_params.has_field('port'):
- http_port = self.vnfd.msg.mgmt_interface.dashboard_params.port
+ if self.vnfd.mgmt_interface.dashboard_params.has_field('port'):
+ http_port = self.vnfd.mgmt_interface.dashboard_params.port
url = "{protocol}://{ip_address}:{port}/{path}".format(
protocol=protocol,
ip_address=ip,
port=http_port,
- path=self.vnfd.msg.mgmt_interface.dashboard_params.path.lstrip("/"),
+ path=self.vnfd.mgmt_interface.dashboard_params.path.lstrip("/"),
)
return url
""" Publish The VLs associated with this VNF """
self._log.debug("Publishing Internal Virtual Links for vnfd id: %s",
self.vnfd_id)
- for ivld_msg in self.vnfd.msg.internal_vld:
+ for ivld_msg in self.vnfd.internal_vld:
self._log.debug("Creating internal vld:"
" %s, int_cp_ref = %s",
ivld_msg, ivld_msg.internal_connection_point
nsr_config = yield from self.get_nsr_config()
### Step-3: Get VDU level placement groups
- for group in self.vnfd.msg.placement_groups:
+ for group in self.vnfd.placement_groups:
for member_vdu in group.member_vdus:
if member_vdu.member_vdu_ref == vdu.id:
group_info = self.resolve_placement_group_cloud_construct(group,
self._log.info("Creating VDU's for vnfd id: %s", self.vnfd_id)
- for vdu in self.vnfd.msg.vdu:
+ for vdu in self._rw_vnfd.vdu:
self._log.debug("Creating vdu: %s", vdu)
vdur_id = get_vdur_id(vdu)
vdud=vdu,
vnfr=vnfr,
mgmt_intf=self.has_mgmt_interface(vdu),
+ mgmt_network=self._mgmt_network,
cloud_account_name=self.cloud_account_name,
vnfd_package_store=self._vnfd_package_store,
vdur_id=vdur_id,
datastore.add(vdu)
# Substitute any variables contained in the cloud config script
- config = str(vdu.vdud_cloud_init)
+ config = str(vdu.vdud_cloud_init) if vdu.vdud_cloud_init is not None else ""
parts = re.split("\{\{ ([^\}]+) \}\}", config)
if len(parts) > 1:
def has_mgmt_interface(self, vdu):
# ## TODO: Support additional mgmt_interface type options
- if self.vnfd.msg.mgmt_interface.vdu_id == vdu.id:
+ if self.vnfd.mgmt_interface.vdu_id == vdu.id:
return True
return False
""" Publish the inventory associated with this VNF """
self._log.debug("Publishing inventory for VNFR id: %s", self._vnfr_id)
- for component in self.vnfd.msg.component:
+ for component in self._rw_vnfd.component:
self._log.debug("Creating inventory component %s", component)
mangled_name = VcsComponent.mangle_name(component.component_name,
self.vnf_name,
def instantiate(self, xact, restart_mode=False):
""" instantiate this VNF """
self.set_state(VirtualNetworkFunctionRecordState.VL_INIT_PHASE)
+ self._rw_vnfd = yield from self._vnfm.fetch_vnfd(self._vnfd_id)
@asyncio.coroutine
def fetch_vlrs():
cpr.vlr_ref = cp.vlr_ref
self._log.debug("Fetched VLR [%s] with path = [%s]", d, vlr_path)
- # Fetch the VNFD associated with the VNFR
- self._log.debug("VNFR-ID %s: Fetching vnfds", self._vnfr_id)
- self._vnfd = yield from self._vnfm.get_vnfd_ref(self._vnfd_id)
- self._log.debug("VNFR-ID %s: Fetched vnfd:%s", self._vnfr_id, self._vnfd)
+ # Increase the VNFD reference count
+ self.vnfd_ref()
- assert self.vnfd is not None
+ assert self.vnfd
# Fetch External VLRs
self._log.debug("VNFR-ID %s: Fetching vlrs", self._vnfr_id)
self._log.debug("VNFR-ID %s: Instantiation Done", self._vnfr_id)
+ # create task updating uptime for this vnfr
+ self._log.debug("VNFR-ID %s: Starting task to update uptime", self._vnfr_id)
+ self._loop.create_task(self.vnfr_uptime_update(xact))
+
@asyncio.coroutine
def terminate(self, xact):
""" Terminate this virtual network function """
self._log.debug("Terminated VNF id %s", self.vnfr_id)
self.set_state(VirtualNetworkFunctionRecordState.TERMINATED)
+ @asyncio.coroutine
+ def vnfr_uptime_update(self, xact):
+ """Coroutine task: republish this VNFR every 2 seconds to refresh uptime.
+
+ Runs until the VNFR leaves the set of live states (INIT, VL_INIT_PHASE,
+ VM_INIT_PHASE, READY), i.e. stops on FAILED/TERMINATED etc.
+ """
+ while True:
+ # Return when vnfr state is FAILED or TERMINATED etc
+ if self._state not in [VirtualNetworkFunctionRecordState.INIT,
+ VirtualNetworkFunctionRecordState.VL_INIT_PHASE,
+ VirtualNetworkFunctionRecordState.VM_INIT_PHASE,
+ VirtualNetworkFunctionRecordState.READY]:
+ return
+ # publish() recomputes uptime from create_time on each call
+ yield from self.publish(xact)
+ yield from asyncio.sleep(2, loop=self._loop)
+
+
+
+
class VnfdDtsHandler(object):
""" DTS handler for VNFD config changes """
xact, action, scratch)
is_recovery = xact.xact is None and action == rwdts.AppconfAction.INSTALL
- # Create/Update a VNFD record
- for cfg in self._regh.get_xact_elements(xact):
- # Only interested in those VNFD cfgs whose ID was received in prepare callback
- if cfg.id in scratch.get('vnfds', []) or is_recovery:
- self._vnfm.update_vnfd(cfg)
-
- scratch.pop('vnfds', None)
@asyncio.coroutine
def on_prepare(dts, acg, xact, xact_info, ks_path, msg, scratch):
fref = ProtobufC.FieldReference.alloc()
fref.goto_whole_message(msg.to_pbcm())
- # Handle deletes in prepare_callback, but adds/updates in apply_callback
+ # Handle deletes in prepare_callback
if fref.is_field_deleted():
# Delete an VNFD record
self._log.debug("Deleting VNFD with id %s", msg.id)
raise VirtualNetworkFunctionDescriptorRefCountExists(err)
# Delete a VNFD record
yield from self._vnfm.delete_vnfd(msg.id)
- else:
- # Handle actual adds/updates in apply_callback,
- # just check if VNFD in use in prepare_callback
- if self._vnfm.vnfd_in_use(msg.id):
- self._log.debug("Cannot modify an VNFD in use - %s", msg)
- err = "Cannot modify an VNFD in use - %s" % msg
- raise VirtualNetworkFunctionDescriptorRefCountExists(err)
-
- # Add this VNFD to scratch to create/update in apply callback
- vnfds = scratch.setdefault('vnfds', [])
- vnfds.append(msg.id)
xact_info.respond_xpath(rwdts.XactRspCode.ACK)
)
if action == rwdts.QueryAction.CREATE:
- if not msg.has_field("vnfd_ref"):
- err = "Vnfd reference not provided"
+ if not msg.has_field("vnfd"):
+ err = "Vnfd not provided"
self._log.error(err)
raise VnfRecordError(err)
try:
yield from vnfr.terminate(xact_info.xact)
# Unref the VNFD
- vnfr.vnfd.unref()
+ vnfr.vnfd_unref()
yield from self._vnfm.delete_vnfr(xact_info.xact, vnfr)
except Exception as e:
self._log.exception(e)
self._log.debug("Deleted VNFR xact = %s, %s", xact, path)
-class VirtualNetworkFunctionDescriptor(object):
- """
- Virtual Network Function descriptor class
- """
-
- def __init__(self, dts, log, loop, vnfm, vnfd):
- self._dts = dts
- self._log = log
- self._loop = loop
-
- self._vnfm = vnfm
- self._vnfd = vnfd
- self._ref_count = 0
-
- @property
- def ref_count(self):
- """ Returns the reference count associated with
- this Virtual Network Function Descriptor"""
- return self._ref_count
-
- @property
- def id(self):
- """ Returns vnfd id """
- return self._vnfd.id
-
- @property
- def name(self):
- """ Returns vnfd name """
- return self._vnfd.name
-
- def in_use(self):
- """ Returns whether vnfd is in use or not """
- return True if self._ref_count > 0 else False
-
- def ref(self):
- """ Take a reference on this object """
- self._ref_count += 1
- return self._ref_count
-
- def unref(self):
- """ Release reference on this object """
- if self.ref_count < 1:
- msg = ("Unref on a VNFD object - vnfd id %s, ref_count = %s" %
- (self.id, self._ref_count))
- self._log.critical(msg)
- raise VnfRecordError(msg)
- self._log.debug("Releasing ref on VNFD %s - curr ref_count:%s",
- self.id, self.ref_count)
- self._ref_count -= 1
- return self._ref_count
-
- @property
- def msg(self):
- """ Return the message associated with this NetworkServiceDescriptor"""
- return self._vnfd
-
- @staticmethod
- def path_for_id(vnfd_id):
- """ Return path for the passed vnfd_id"""
- return "C,/vnfd:vnfd-catalog/vnfd:vnfd[vnfd:id = '{}']".format(vnfd_id)
-
- def path(self):
- """ Return the path associated with this NetworkServiceDescriptor"""
- return VirtualNetworkFunctionDescriptor.path_for_id(self.id)
-
- def update(self, vnfd):
- """ Update the Virtual Network Function Descriptor """
- if self.in_use():
- self._log.error("Cannot update descriptor %s in use refcnt=%d",
- self.id, self.ref_count)
-
- # The following loop is added to debug RIFT-13284
- for vnf_rec in self._vnfm._vnfrs.values():
- if vnf_rec.vnfd_id == self.id:
- self._log.error("descriptor %s in used by %s:%s",
- self.id, vnf_rec.vnfr_id, vnf_rec.msg)
- raise VirtualNetworkFunctionDescriptorRefCountExists("Cannot update descriptor in use %s" % self.id)
- self._vnfd = vnfd
-
- def delete(self):
- """ Delete the Virtual Network Function Descriptor """
- if self.in_use():
- self._log.error("Cannot delete descriptor %s in use refcnt=%d",
- self.id)
-
- # The following loop is added to debug RIFT-13284
- for vnf_rec in self._vnfm._vnfrs.values():
- if vnf_rec.vnfd_id == self.id:
- self._log.error("descriptor %s in used by %s:%s",
- self.id, vnf_rec.vnfr_id, vnf_rec.msg)
- raise VirtualNetworkFunctionDescriptorRefCountExists("Cannot delete descriptor in use %s" % self.id)
- self._vnfm.delete_vnfd(self.id)
-
-
class VnfdRefCountDtsHandler(object):
""" The VNFD Ref Count DTS handler """
XPATH = "D,/vnfr:vnfr-catalog/rw-vnfr:vnfd-ref-count"
self._vcs_handler = VcsComponentDtsHandler(dts, log, loop, self)
self._vnfr_handler = VnfrDtsHandler(dts, log, loop, self)
+ self._vnfr_ref_handler = VnfdRefCountDtsHandler(dts, log, loop, self)
+ self._nsr_handler = mano_dts.NsInstanceConfigSubscriber(log, dts, loop, callback=self.handle_nsr)
self._dts_handlers = [VnfdDtsHandler(dts, log, loop, self),
self._vnfr_handler,
self._vcs_handler,
- VnfdRefCountDtsHandler(dts, log, loop, self)]
+ self._vnfr_ref_handler,
+ self._nsr_handler]
self._vnfrs = {}
- self._vnfds = {}
+ self._vnfds_to_vnfr = {}
+ self._nsrs = {}
@property
def vnfr_handler(self):
self._log.debug("Run VNFManager - registering static DTS handlers""")
yield from self.register()
+ def handle_nsr(self, nsr, action):
+ """Callback from NsInstanceConfigSubscriber: cache NSR-configs by id.
+
+ CREATE adds the NSR-config to self._nsrs; DELETE evicts it. The cache
+ is consulted by get_linked_mgmt_network(). Other actions are ignored.
+ """
+ if action in [rwdts.QueryAction.CREATE]:
+ self._nsrs[nsr.id] = nsr
+ elif action == rwdts.QueryAction.DELETE:
+ if nsr.id in self._nsrs:
+ del self._nsrs[nsr.id]
+
+
+ def get_linked_mgmt_network(self, vnfr):
+ """For the given VNFR get the related mgmt network from the NSD, if
+ available.
+ """
+ vnfd_id = vnfr.vnfd.id
+ nsr_id = vnfr.nsr_id_ref
+
+ # for the given related VNFR, get the corresponding NSR-config
+ nsr_obj = None
+ try:
+ nsr_obj = self._nsrs[nsr_id]
+ except KeyError:
+ raise("Unable to find the NS with the ID: {}".format(nsr_id))
+
+ # for the related NSD check if a VLD exists such that it's a mgmt
+ # network
+ for vld in nsr_obj.nsd.vld:
+ if vld.mgmt_network:
+ return vld.name
+
+ return None
+
def get_vnfr(self, vnfr_id):
""" get VNFR by vnfr id """
self._log.info("Create VirtualNetworkFunctionRecord %s from vnfd_id: %s",
vnfr.id,
- vnfr.vnfd_ref)
+ vnfr.vnfd.id)
+
+ mgmt_network = self.get_linked_mgmt_network(vnfr)
self._vnfrs[vnfr.id] = VirtualNetworkFunctionRecord(
- self._dts, self._log, self._loop, self._cluster_name, self, self.vcs_handler, vnfr
+ self._dts, self._log, self._loop, self._cluster_name, self, self.vcs_handler, vnfr,
+ mgmt_network=mgmt_network
)
+
+ #Update ref count
+ if vnfr.vnfd.id in self._vnfds_to_vnfr:
+ self._vnfds_to_vnfr[vnfr.vnfd.id] += 1
+ else:
+ self._vnfds_to_vnfr[vnfr.vnfd.id] = 1
+
return self._vnfrs[vnfr.id]
@asyncio.coroutine
if vnfr.vnfr_id in self._vnfrs:
self._log.debug("Deleting VNFR id %s", vnfr.vnfr_id)
yield from self._vnfr_handler.delete(xact, vnfr.xpath)
+
+ if vnfr.vnfd.id in self._vnfds_to_vnfr:
+ if self._vnfds_to_vnfr[vnfr.vnfd.id]:
+ self._vnfds_to_vnfr[vnfr.vnfd.id] -= 1
+
del self._vnfrs[vnfr.vnfr_id]
@asyncio.coroutine
def fetch_vnfd(self, vnfd_id):
""" Fetch VNFDs based with the vnfd id"""
- vnfd_path = VirtualNetworkFunctionDescriptor.path_for_id(vnfd_id)
+ vnfd_path = VirtualNetworkFunctionRecord.vnfd_xpath(vnfd_id)
self._log.debug("Fetch vnfd with path %s", vnfd_path)
vnfd = None
return vnfd
- @asyncio.coroutine
- def get_vnfd_ref(self, vnfd_id):
- """ Get Virtual Network Function descriptor for the passed vnfd_id"""
- vnfd = yield from self.get_vnfd(vnfd_id)
- vnfd.ref()
- return vnfd
-
- @asyncio.coroutine
- def get_vnfd(self, vnfd_id):
- """ Get Virtual Network Function descriptor for the passed vnfd_id"""
- vnfd = None
- if vnfd_id not in self._vnfds:
- self._log.error("Cannot find VNFD id:%s", vnfd_id)
- vnfd = yield from self.fetch_vnfd(vnfd_id)
-
- if vnfd is None:
- self._log.error("Cannot find VNFD id:%s", vnfd_id)
- raise VirtualNetworkFunctionDescriptorError("Cannot find VNFD id:%s", vnfd_id)
-
- if vnfd.id != vnfd_id:
- self._log.error("Bad Recovery state {} found for {}".format(vnfd.id, vnfd_id))
- raise VirtualNetworkFunctionDescriptorError("Bad Recovery state {} found for {}".format(vnfd.id, vnfd_id))
-
- if vnfd.id not in self._vnfds:
- self.create_vnfd(vnfd)
-
- return self._vnfds[vnfd_id]
-
def vnfd_in_use(self, vnfd_id):
""" Is this VNFD in use """
self._log.debug("Is this VNFD in use - msg:%s", vnfd_id)
- if vnfd_id in self._vnfds:
- return self._vnfds[vnfd_id].in_use()
+ if vnfd_id in self._vnfds_to_vnfr:
+ return (self._vnfds_to_vnfr[vnfd_id] > 0)
return False
@asyncio.coroutine
path, msg)
yield from self.vnfr_handler.update(xact, path, msg)
- def create_vnfd(self, vnfd):
- """ Create a virtual network function descriptor """
- self._log.debug("Create virtual networkfunction descriptor - %s", vnfd)
- if vnfd.id in self._vnfds:
- self._log.error("Cannot create VNFD %s -VNFD id already exists", vnfd)
- raise VirtualNetworkFunctionDescriptorError("VNFD already exists-%s", vnfd.id)
-
- self._vnfds[vnfd.id] = VirtualNetworkFunctionDescriptor(self._dts,
- self._log,
- self._loop,
- self,
- vnfd)
- return self._vnfds[vnfd.id]
-
- def update_vnfd(self, vnfd):
- """ update the Virtual Network Function descriptor """
- self._log.debug("Update virtual network function descriptor - %s", vnfd)
-
- if vnfd.id not in self._vnfds:
- self._log.debug("No VNFD found - creating VNFD id = %s", vnfd.id)
- self.create_vnfd(vnfd)
- else:
- self._log.debug("Updating VNFD id = %s, vnfd = %s", vnfd.id, vnfd)
- self._vnfds[vnfd.id].update(vnfd)
-
@asyncio.coroutine
def delete_vnfd(self, vnfd_id):
""" Delete the Virtual Network Function descriptor with the passed id """
self._log.debug("Deleting the virtual network function descriptor - %s", vnfd_id)
- if vnfd_id not in self._vnfds:
- self._log.debug("Delete VNFD failed - cannot find vnfd-id %s", vnfd_id)
- raise VirtualNetworkFunctionDescriptorNotFound("Cannot find %s", vnfd_id)
-
- if self._vnfds[vnfd_id].in_use():
- self._log.debug("Cannot delete VNFD id %s reference exists %s",
- vnfd_id,
- self._vnfds[vnfd_id].ref_count)
- raise VirtualNetworkFunctionDescriptorRefCountExists(
- "Cannot delete :%s, ref_count:%s",
- vnfd_id,
- self._vnfds[vnfd_id].ref_count)
+ if vnfd_id in self._vnfds_to_vnfr:
+ if self._vnfds_to_vnfr[vnfd_id]:
+ self._log.debug("Cannot delete VNFD id %s reference exists %s",
+ vnfd_id,
+ self._vnfds_to_vnfr[vnfd_id].vnfd_ref_count)
+ raise VirtualNetworkFunctionDescriptorRefCountExists(
+ "Cannot delete :%s, ref_count:%s",
+ vnfd_id,
+ self._vnfds_to_vnfr[vnfd_id].vnfd_ref_count)
+
+ del self._vnfds_to_vnfr[vnfd_id]
# Remove any files uploaded with VNFD and stored under $RIFT_ARTIFACTS/libs/<id>
try:
shutil.rmtree(vnfd_dir, ignore_errors=True)
except Exception as e:
self._log.error("Exception in cleaning up VNFD {}: {}".
- format(self._vnfds[vnfd_id].name, e))
+ format(self._vnfds_to_vnfr[vnfd_id].vnfd.name, e))
self._log.exception(e)
- del self._vnfds[vnfd_id]
def vnfd_refcount_xpath(self, vnfd_id):
""" xpath for ref count entry """
""" Get the vnfd_list from this VNFM"""
vnfd_list = []
if vnfd_id is None or vnfd_id == "":
- for vnfd in self._vnfds.values():
+ for vnfd in self._vnfds_to_vnfr.keys():
+ vnfd_msg = RwVnfrYang.YangData_Vnfr_VnfrCatalog_VnfdRefCount()
+ vnfd_msg.vnfd_id_ref = vnfd
+ vnfd_msg.instance_ref_count = self._vnfds_to_vnfr[vnfd]
+ vnfd_list.append((self.vnfd_refcount_xpath(vnfd), vnfd_msg))
+ elif vnfd_id in self._vnfds_to_vnfr:
vnfd_msg = RwVnfrYang.YangData_Vnfr_VnfrCatalog_VnfdRefCount()
- vnfd_msg.vnfd_id_ref = vnfd.id
- vnfd_msg.instance_ref_count = vnfd.ref_count
- vnfd_list.append((self.vnfd_refcount_xpath(vnfd.id), vnfd_msg))
- elif vnfd_id in self._vnfds:
- vnfd_msg.vnfd_id_ref = self._vnfds[vnfd_id].id
- vnfd_msg.instance_ref_count = self._vnfds[vnfd_id].ref_count
+ vnfd_msg.vnfd_id_ref = vnfd_id
+ vnfd_msg.instance_ref_count = self._vnfds_to_vnfr[vnfd_id]
vnfd_list.append((self.vnfd_refcount_xpath(vnfd_id), vnfd_msg))
return vnfd_list
if vnfr.id not in const_vnfr_ids:
continue
- vnfd = get_vnfd(vnfr.vnfd_ref)
+ vnfd = get_vnfd(vnfr.vnfd.id)
yield vnfd, vnfr
if vnfr.id not in const_vnfr_ids:
continue
- vnfd = get_vnfd(vnfr.vnfd_ref)
+ vnfd = get_vnfd(vnfr.vnfd.id)
yield vnfd, vnfr
def check_configuration_on_standby(standby_ip):