FILES
rift/rwcal/openstack/__init__.py
rift/rwcal/openstack/openstack_drv.py
- rift/rwcal/openstack/openstack_utils.py
rift/rwcal/openstack/prepare_vm.py
+ rift/rwcal/openstack/keystone/__init__.py
+ rift/rwcal/openstack/keystone/keystone_drv.py
+ rift/rwcal/openstack/nova/nova_drv.py
+ rift/rwcal/openstack/nova/__init__.py
+ rift/rwcal/openstack/neutron/__init__.py
+ rift/rwcal/openstack/neutron/neutron_drv.py
+ rift/rwcal/openstack/glance/__init__.py
+ rift/rwcal/openstack/glance/glance_drv.py
+ rift/rwcal/openstack/cinder/__init__.py
+ rift/rwcal/openstack/cinder/cinder_drv.py
+ rift/rwcal/openstack/ceilometer/__init__.py
+ rift/rwcal/openstack/ceilometer/ceilometer_drv.py
+ rift/rwcal/openstack/session/__init__.py
+ rift/rwcal/openstack/session/session_drv.py
+ rift/rwcal/openstack/session/auth_drv.py
+ rift/rwcal/openstack/portchain/__init__.py
+ rift/rwcal/openstack/portchain/portchain_drv.py
+ rift/rwcal/openstack/utils/__init__.py
+ rift/rwcal/openstack/utils/flavor.py
+ rift/rwcal/openstack/utils/network.py
+ rift/rwcal/openstack/utils/compute.py
+ rift/rwcal/openstack/utils/image.py
+
PYTHON3_ONLY
COMPONENT ${PKG_LONG_NAME})
#
from .openstack_drv import (
- OpenstackDriver,
- ValidationError
- )
-from .openstack_utils import OpenstackExtraSpecUtils
+ OpenstackDriver,
+ ValidationError
+)
+
+
--- /dev/null
+
+#
+# Copyright 2017 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from .ceilometer_drv import (
+ CeilometerDriver,
+)
--- /dev/null
+#!/usr/bin/python
+
+#
+# Copyright 2017 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import logging
+import json
+
+from ceilometerclient import client as ceclient
+
+
class CeilometerAPIVersionException(Exception):
    """Aggregate exception raised when no supported Ceilometer API version
    could be selected during API discovery.

    Attributes:
        errors - list of exceptions collected while trying each version
    """
    def __init__(self, errors):
        self.errors = errors
        super(CeilometerAPIVersionException, self).__init__("Multiple Exception Received")

    def __str__(self):
        return self.__repr__()

    def __repr__(self):
        # Bug fix: the original message said "Neutron API discovery" —
        # a copy-paste from neutron_drv. This is Ceilometer discovery.
        msg = "{} : Following Exception(s) have occurred during Ceilometer API discovery".format(self.__class__)
        for n, e in enumerate(self.errors):
            msg += "\n"
            msg += " {}: {}".format(n, str(e))
        return msg
+
class CeilometerDriver(object):
    """
    CeilometerDriver class for OpenStack metering (NFVI metrics collection).

    Note: the original docstring said "image management" — a copy-paste from
    the glance driver; this driver only queries meters/samples.
    """
    ### List of supported API versions in prioritized order
    supported_versions = ["2"]

    def __init__(self,
                 sess_handle,
                 region_name='RegionOne',
                 service_type='metering',
                 logger=None):
        """
        Constructor for CeilometerDriver class
        Arguments:
          sess_handle (instance of class SessionDriver)
          region_name (string) : Region name
          service_type (string): Service type name
          logger (instance of logging.Logger)
        """
        if logger is None:
            self.log = logging.getLogger('rwcal.openstack.ceilometer')
            # Bug fix: the original called logger.setLevel() here, but on this
            # branch logger is None (AttributeError). Set level on self.log.
            self.log.setLevel(logging.DEBUG)
        else:
            self.log = logger

        self._sess_handle = sess_handle

        #### Attempt to use API versions in prioritized order defined in
        #### CeilometerDriver.supported_versions
        def select_version(version):
            try:
                self.log.info("Attempting to use Ceilometer v%s APIs", version)
                cedrv = ceclient.Client(version=version,
                                        region_name=region_name,
                                        service_type=service_type,
                                        session=self._sess_handle.session)
            except Exception as e:
                self.log.info(str(e))
                raise
            else:
                self.log.info("Ceilometer API v%s selected", version)
                return (version, cedrv)

        errors = []
        for v in CeilometerDriver.supported_versions:
            try:
                (self._version, self._ce_drv) = select_version(v)
            except Exception as e:
                errors.append(e)
            else:
                break
        else:
            # No supported version worked; surface all collected errors.
            raise CeilometerAPIVersionException(errors)

    @property
    def ceilometer_endpoint(self):
        """Endpoint URL of the selected ceilometer service."""
        return self._ce_drv.http_client.get_endpoint()

    def _get_ceilometer_connection(self):
        """
        Returns instance of object ceilometerclient.client.Client
        Use for DEBUG ONLY
        """
        return self._ce_drv

    @property
    def client(self):
        """
        Returns instance of object ceilometerclient.client.Client
        Use for DEBUG ONLY
        """
        return self._ce_drv

    @property
    def meters(self):
        """A list of the available meters"""
        try:
            return self.client.meters.list()
        except Exception as e:
            self.log.exception("List meters operation failed. Exception: %s", str(e))
            raise

    @property
    def alarms(self):
        """The ceilometer client alarms manager"""
        return self.client.alarms

    def nfvi_metrics(self, vim_id):
        """Returns a dict of NFVI metrics for a given VM

        Arguments:
            vim_id - the VIM ID of the VM to retrieve the metrics for

        Returns:
            A dict of NFVI metrics (keys: memory_usage, disk_usage, cpu_util;
            a key is absent when no sample is available for that counter)
        """
        def query_latest_sample(counter_name):
            # Most recent sample for counter_name on this resource, or None
            # when no sample exists or the query fails (best-effort).
            try:
                query = json.dumps({
                    "and": [
                        {"=": {"resource": vim_id}},
                        {"=": {"counter_name": counter_name}}
                    ]
                })
                orderby = json.dumps([{"timestamp": "DESC"}])
                result = self.client.query_samples.query(filter=query,
                                                         orderby=orderby,
                                                         limit=1)
                return result[0]
            except IndexError:
                # No samples for this counter.
                pass
            except Exception as e:
                self.log.exception("Got exception while querying ceilometer, exception details:%s", str(e))
            return None

        memory_usage = query_latest_sample("memory.usage")
        disk_usage = query_latest_sample("disk.usage")
        cpu_util = query_latest_sample("cpu_util")

        metrics = dict()

        if memory_usage is not None:
            # Scale by 1e6 — presumably MB -> bytes; TODO confirm against
            # the consumer of these metrics.
            memory_usage.volume = 1e6 * memory_usage.volume
            metrics["memory_usage"] = memory_usage.to_dict()

        if disk_usage is not None:
            metrics["disk_usage"] = disk_usage.to_dict()

        if cpu_util is not None:
            metrics["cpu_util"] = cpu_util.to_dict()
            # RIFT-14041 when ceilometer returns value of more than 100, make it 100
            if metrics["cpu_util"]["volume"] > 100:
                metrics["cpu_util"]["volume"] = 100

        return metrics

    def query_samples(self, vim_instance_id, counter_name, limit=1):
        """Returns a list of samples

        Arguments:
            vim_instance_id - the ID of the VIM that the samples are from
            counter_name    - the counter that the samples will come from
            limit           - a limit on the number of samples to return
                              (default: 1)

        Returns:
            A list of samples (empty on failure)
        """
        try:
            query = json.dumps({
                "and": [
                    {"=": {"resource": vim_instance_id}},
                    {"=": {"counter_name": counter_name}}
                ]
            })
            result = self.client.query_samples.query(filter=query, limit=limit)
            return result[-limit:]
        except Exception as e:
            # Bug fix: the original logged the query failure in an inner
            # handler, then referenced the undefined 'result', producing a
            # NameError that the outer handler silently caught. Log once and
            # fall through to the empty-list return.
            self.log.exception("Query samples operation failed. Exception: %s", str(e))
        return []
--- /dev/null
+
+#
+# Copyright 2017 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from .cinder_drv import (
+ CinderDriver,
+)
--- /dev/null
+#!/usr/bin/python
+
+#
+# Copyright 2017 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import logging
+from cinderclient import client as ciclient
+import cinderclient.exceptions as CinderException
+
+
class CinderAPIVersionException(Exception):
    """Aggregate exception raised when no supported Cinder API version
    could be selected during API discovery.

    Attributes:
        errors - list of exceptions collected while trying each version
    """
    def __init__(self, errors):
        self.errors = errors
        super(CinderAPIVersionException, self).__init__("Multiple Exception Received")

    def __str__(self):
        return self.__repr__()

    def __repr__(self):
        # Bug fix: the original message said "Neutron API discovery" —
        # a copy-paste from neutron_drv. This is Cinder discovery.
        msg = "{} : Following Exception(s) have occurred during Cinder API discovery".format(self.__class__)
        for n, e in enumerate(self.errors):
            msg += "\n"
            msg += " {}: {}".format(n, str(e))
        return msg
+
class CinderDriver(object):
    """
    CinderDriver class for OpenStack volume management.

    Note: the original docstring said "image management" — a copy-paste from
    the glance driver; cinder is the block-storage (volume) service.
    """
    ### List of supported API versions in prioritized order
    supported_versions = ["2"]

    def __init__(self,
                 sess_handle,
                 region_name='RegionOne',
                 service_type='volume',
                 logger=None):
        """
        Constructor for CinderDriver class
        Arguments:
          sess_handle (instance of class SessionDriver)
          region_name (string) : Region name
          service_type (string): Service type name
          logger (instance of logging.Logger)
        """
        if logger is None:
            self.log = logging.getLogger('rwcal.openstack.cinder')
            self.log.setLevel(logging.DEBUG)
        else:
            self.log = logger

        self._sess_handle = sess_handle

        #### Attempt to use API versions in prioritized order defined in
        #### CinderDriver.supported_versions
        def select_version(version):
            try:
                self.log.info("Attempting to use Cinder v%s APIs", version)
                cidrv = ciclient.Client(version=version,
                                       region_name=region_name,
                                       service_type=service_type,
                                       session=self._sess_handle.session)
            except Exception as e:
                self.log.info(str(e))
                raise
            else:
                self.log.info("Cinder API v%s selected", version)
                return (version, cidrv)

        errors = []
        for v in CinderDriver.supported_versions:
            try:
                (self._version, self._ci_drv) = select_version(v)
            except Exception as e:
                errors.append(e)
            else:
                break
        else:
            # No supported version worked; surface all collected errors.
            raise CinderAPIVersionException(errors)

    @property
    def cinder_endpoint(self):
        """Endpoint URL of the selected cinder service."""
        return self._ci_drv.client.get_endpoint()

    @property
    def project_id(self):
        """Project (tenant) id of the underlying session."""
        return self._sess_handle.project_id

    @property
    def quota(self):
        """
        Returns CinderDriver Quota (a dictionary) for project
        """
        try:
            quota = self._ci_drv.quotas.get(self.project_id)
        except Exception as e:
            self.log.exception("Get Cinder quota operation failed. Exception: %s", str(e))
            raise
        return quota

    def _get_cinder_connection(self):
        """
        Returns instance of object cinderclient.client.Client
        Use for DEBUG ONLY
        """
        return self._ci_drv

    def volume_list(self):
        """
        Returns a list of volume objects visible to the project.

        Arguments: None

        Returns: List of volume objects.
        """
        try:
            volume_info = self._ci_drv.volumes.list()
        except Exception as e:
            self.log.error("List volumes operation failed. Exception: %s", str(e))
            raise
        # Materialize the (possibly lazy) listing into a plain list.
        return list(volume_info)

    def volume_get(self, volume_id):
        """
        Get details of a single volume.

        Arguments:
           volume_id (string): UUID of the volume

        Returns: a volume object.
        """
        try:
            vol = self._ci_drv.volumes.get(volume_id)
        except Exception as e:
            self.log.error("Get volume operation failed. Exception: %s", str(e))
            raise
        return vol

    def volume_set_metadata(self, volume_id, metadata):
        """
        Set metadata for volume
        Metadata is a dictionary of key-value pairs

        Arguments:
           volume_id (string): UUID of the volume
           metadata (dict)   : key-value pairs to set

        Returns: None
        """
        try:
            self._ci_drv.volumes.set_metadata(volume_id, metadata)
        except Exception as e:
            self.log.error("Set metadata operation failed. Exception: %s", str(e))
            raise

    def volume_delete_metadata(self, volume_id, metadata):
        """
        Delete metadata for volume
        Metadata is a dictionary of key-value pairs

        Arguments:
           volume_id (string): UUID of the volume
           metadata (dict)   : keys to delete

        Returns: None
        """
        try:
            self._ci_drv.volumes.delete_metadata(volume_id, metadata)
        except Exception as e:
            self.log.error("Delete metadata operation failed. Exception: %s", str(e))
            raise
--- /dev/null
+
+#
+# Copyright 2017 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from .glance_drv import (
+ GlanceDriver,
+)
--- /dev/null
+#!/usr/bin/python
+
+#
+# Copyright 2017 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import logging
+from glanceclient import client as glclient
+import glanceclient.exc as GlanceException
+import time
+
+
+
class GlanceAPIVersionException(Exception):
    """Aggregate exception raised when no supported Glance API version
    could be selected during API discovery.

    Attributes:
        errors - list of exceptions collected while trying each version
    """
    def __init__(self, errors):
        self.errors = errors
        super(GlanceAPIVersionException, self).__init__("Multiple Exception Received")

    def __str__(self):
        return self.__repr__()

    def __repr__(self):
        # Bug fix: the original message said "Neutron API discovery" —
        # a copy-paste from neutron_drv. This is Glance discovery.
        msg = "{} : Following Exception(s) have occurred during Glance API discovery".format(self.__class__)
        for n, e in enumerate(self.errors):
            msg += "\n"
            msg += " {}: {}".format(n, str(e))
        return msg
+
class GlanceDriver(object):
    """
    GlanceDriver Class for image management
    """
    ### List of supported API versions in prioritized order
    supported_versions = ["2"]

    def __init__(self,
                 sess_handle,
                 region_name='RegionOne',
                 service_type='image',
                 logger=None):
        """
        Constructor for GlanceDriver class
        Arguments:
          sess_handle (instance of class SessionDriver)
          region_name (string) : Region name
          service_type (string): Service type name
          logger (instance of logging.Logger)
        """
        self._sess_handle = sess_handle

        if logger is None:
            self.log = logging.getLogger('rwcal.openstack.glance')
            self.log.setLevel(logging.DEBUG)
        else:
            self.log = logger

        #### Attempt to use API versions in prioritized order defined in
        #### GlanceDriver.supported_versions
        def select_version(version):
            try:
                self.log.info("Attempting to use Glance v%s APIs", version)
                gldrv = glclient.Client(version=version,
                                        region_name=region_name,
                                        service_type=service_type,
                                        session=self._sess_handle.session)
            except Exception as e:
                self.log.info(str(e))
                raise
            else:
                self.log.info("Glance API v%s selected", version)
                return (version, gldrv)

        errors = []
        for v in GlanceDriver.supported_versions:
            try:
                (self._version, self._gl_drv) = select_version(v)
            except Exception as e:
                errors.append(e)
            else:
                break
        else:
            # No supported version worked; surface all collected errors.
            raise GlanceAPIVersionException(errors)

    @property
    def glance_endpoint(self):
        """Endpoint URL of the selected glance service."""
        return self._gl_drv.http_client.get_endpoint()

    @property
    def project_id(self):
        """Project (tenant) id of the underlying session."""
        return self._sess_handle.project_id

    def _get_glance_connection(self):
        """
        Returns instance of object glanceclient.client.Client
        Use for DEBUG ONLY
        """
        return self._gl_drv

    def image_list(self):
        """
        Returns list of dictionaries. Each dictionary contains attributes associated with
        image

        Arguments: None

        Returns: List of dictionaries.
        """
        try:
            image_info = self._gl_drv.images.list()
        except Exception as e:
            self.log.exception("List Image operation failed. Exception: %s", str(e))
            raise
        # Materialize the (possibly lazy) listing into a plain list.
        return list(image_info)

    def image_create(self, **kwargs):
        """
        Creates an image
        Arguments:
           A dictionary of kwargs with following keys
           {
              'name'(string)            : Name of the image
              'location'(string)        : URL (http://....) where image is located
              'disk_format'(string)     : Disk format
                    Possible values are 'ami', 'ari', 'aki', 'vhd', 'vmdk', 'raw', 'qcow2', 'vdi', 'iso'
              'container_format'(string): Container format
                    Possible values are 'ami', 'ari', 'aki', 'bare', 'ovf'
              'tags'                    : A list of user tags
              'checksum'                : The image md5 checksum
           }
        Returns:
           image_id (string)  : UUID of the image
        """
        try:
            image = self._gl_drv.images.create(**kwargs)
        except Exception as e:
            self.log.exception("Create Image operation failed. Exception: %s", str(e))
            raise

        return image.id

    def image_upload(self, image_id, fd):
        """
        Upload the image

        Arguments:
            image_id: UUID of the image
            fd      : File descriptor for the image file
        Returns: None
        """
        try:
            self._gl_drv.images.upload(image_id, fd)
        except Exception as e:
            self.log.exception("Image upload operation failed. Exception: %s", str(e))
            raise

    def image_add_location(self, image_id, location, metadata):
        """
        Add image URL location

        Arguments:
            image_id : UUID of the image
            location : http URL for the image
            metadata : dict of metadata to attach to the location

        Returns: None
        """
        try:
            self._gl_drv.images.add_location(image_id, location, metadata)
        except Exception as e:
            self.log.exception("Image location add operation failed. Exception: %s", str(e))
            raise

    def image_update(self, image_id, remove_props=None, **kwargs):
        """
        Update an image

        Arguments:
            image_id: UUID of the image
            remove_props: list of property names to remove
              [
                'my_custom_property1',
                'my_custom_property2'
              ]
            kwargs: A dictionary of kwargs with the image attributes and their new values
              {
                'my_custom_property'(name of property) : Value of the custom property
              }

        If remove_props is not None, it is assumed that the function is called to
        remove the specified property from the image, and kwargs is None.
        Otherwise, the image properties are updated with kwargs. Its either-or.
        """
        # NOTE(review): assert is stripped under python -O; kept to preserve
        # the existing AssertionError contract for callers.
        assert image_id == self._image_get(image_id)['id']
        try:
            if remove_props is not None:
                self._gl_drv.images.update(image_id, remove_props=remove_props)
            else:
                self._gl_drv.images.update(image_id, **kwargs)
        except Exception as e:
            self.log.exception("Update Image operation failed for image_id : %s. Exception: %s", image_id, str(e))
            raise

    def image_delete(self, image_id):
        """
        Delete an image

        Arguments:
           image_id: UUID of the image

        Returns: None
        """
        # NOTE(review): assert is stripped under python -O; kept to preserve
        # the existing AssertionError contract for callers.
        assert image_id == self._image_get(image_id)['id']
        try:
            self._gl_drv.images.delete(image_id)
        except Exception as e:
            self.log.exception("Delete Image operation failed for image_id : %s. Exception: %s", image_id, str(e))
            raise

    def _image_get(self, image_id):
        """
        Returns a dictionary object of VM image attributes

        Arguments:
           image_id (string): UUID of the image

        Returns:
           A dictionary of the image attributes
        """
        # RIFT-14241: The get image request occasionally returns the below message.
        # Retry in case of bad request exception.
        # Error code 400.: Message: Bad request syntax ('0').: Error code explanation: 400 = Bad request syntax or unsupported method. (HTTP 400)
        #
        # Bug fix: the original decremented max_retry after an always-true
        # "max_retry > 0" check and issued exactly one unguarded retry, so at
        # most 2 attempts were ever made and the else-branch was unreachable.
        # This loop genuinely retries up to max_retry attempts.
        max_retry = 5
        for attempt in range(max_retry):
            try:
                return self._gl_drv.images.get(image_id)
            except GlanceException.HTTPBadRequest as e:
                if attempt == max_retry - 1:
                    self.log.exception("Get Image operation failed for image_id : %s. Exception: %s", image_id, str(e))
                    raise
                self.log.warning("Got bad request response during get_image request. Retrying.")
                time.sleep(2)
            except Exception as e:
                self.log.exception("Get Image operation failed for image_id : %s. Exception: %s", image_id, str(e))
                raise

    def image_get(self, image_id):
        """
        Returns a dictionary object of VM image attributes

        Arguments:
           image_id (string): UUID of the image

        Returns:
           A dictionary of the image attributes
        """
        return self._image_get(image_id)

    def image_verify(self, image_id):
        """
        Verifies if image with image-id exists and is in active state

        Arguments:
          image_id(string): UUID of the image

        Returns:
          None
          Raises except if image not found or not in active state
        """
        img = self.image_get(image_id)
        if img['status'] != 'active':
            raise GlanceException.NotFound("Image with image_id: %s not found in active state. Current State: %s"
                                           %(img['id'], img['status']))
+
--- /dev/null
+
+#
+# Copyright 2017 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from .keystone_drv import (
+ KeystoneDriver,
+ KeystoneVersionDiscover
+)
--- /dev/null
+#!/usr/bin/python
+
+#
+# Copyright 2017 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import logging
+from keystoneclient import client as ksclient
+from keystoneclient import discover
+import keystoneclient.exceptions as KeystoneExceptions
+
+
class KsDrvAPIVersionException(Exception):
    """Raised when keystone API discovery fails for every supported version;
    carries the individual per-version errors in ``self.errors``."""

    def __init__(self, errors):
        self.errors = errors
        super(KsDrvAPIVersionException, self).__init__("Multiple Exception Received")

    def __str__(self):
        return self.__repr__()

    def __repr__(self):
        header = "{} : Following Exception(s) have occured during keystone API discovery".format(self.__class__)
        details = [" {}: {}".format(idx, str(err)) for idx, err in enumerate(self.errors)]
        return "\n".join([header] + details)
+
class KeystoneVersionDiscover(object):
    """
    Class for keystone version discovery
    """
    supported_versions = [(2, ), (3, )]

    def __init__(self, auth_url, logger=None):
        """
        Constructor for class
        Arguments
           auth_url(string): Keystone Auth URL
           logger (instance of logging.Logger)
        """
        if logger is None:
            self.log = logging.getLogger('rwcal.openstack.keystone')
            self.log.setLevel(logging.DEBUG)
        else:
            self.log = logger

        try:
            self._discover = discover.Discover(auth_url=auth_url)
        except Exception as e:
            self.log.exception(str(e))
            self._discover = None
            raise

    def get_version(self):
        """Return (major, minor) of the first supported keystone API version.

        Raises:
            KeystoneExceptions.NotFound when discovery is unavailable or no
            supported version is offered by the server.
        """
        if self._discover:
            for v in KeystoneVersionDiscover.supported_versions:
                try:
                    rsp = self._discover._calculate_version(v, unstable=False)
                except KeystoneExceptions.VersionNotAvailable as e:
                    self.log.debug(str(e))
                    self.log.info("Keystone API version %d not available", v[0])
                else:
                    (major, minor) = rsp['version']
                    self.log.info("Found Keystone API major version: %d, minor version: %d", major, minor)
                    return major, minor
        # Bug fix: previously this raise was inside the "if self._discover"
        # block, so a missing discover handle made the method silently return
        # None; now failure is always signalled with an exception.
        raise KeystoneExceptions.NotFound("No supported keystone API version found")
+
+
+
class KeystoneDriver(object):
    """
    Driver class for openstack keystone
    """

    def __init__(self,
                 version,
                 sess_handle,
                 logger=None):
        """
        Constructor for KeystoneDriver class
        Arguments:
          version (str): Keystone API version
          sess_handle (instance of class SessionDriver)
          logger (instance of logging.Logger)
        """
        if logger is not None:
            self.log = logger
        else:
            self.log = logging.getLogger('rwcal.openstack.keystone')
            self.log.setLevel(logging.DEBUG)

        self._version = int(float(version))
        self._sess = sess_handle
        self._ks_drv = ksclient.Client(version=(self._version, ),
                                       session=sess_handle.session)

    @property
    def keystone_endpoint(self):
        """Auth URL used by the underlying session."""
        return self._sess.auth_url

    def _get_keystone_connection(self):
        """
        Returns instance of object keystoneclient.client.Client
        Use for DEBUG ONLY
        """
        return self._ks_drv

    def list_users(self):
        """Return the list of users visible to this session."""
        return self._ks_drv.users.list()

    def list_projects(self):
        """Return the list of projects visible to this session."""
        return self._ks_drv.projects.list()

    def list_roles(self):
        """Return the list of roles visible to this session."""
        return self._ks_drv.roles.list()

    def list_regions(self):
        """Return the list of regions visible to this session."""
        return self._ks_drv.regions.list()

    def list_domains(self):
        """Return the list of domains visible to this session."""
        return self._ks_drv.domains.list()
+
+
+
+
--- /dev/null
+
+#
+# Copyright 2017 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from .neutron_drv import (
+ NeutronDriver,
+)
--- /dev/null
+#!/usr/bin/python
+
+#
+# Copyright 2017 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import logging
+import ipaddress
+from neutronclient.neutron import client as ntclient
+
+import neutronclient.common.exceptions as NeutronException
+
+
class NeutronAPIVersionException(Exception):
    """Raised when Neutron API discovery fails for every supported version;
    carries the individual per-version errors in ``self.errors``."""

    def __init__(self, errors):
        self.errors = errors
        super(NeutronAPIVersionException, self).__init__("Multiple Exception Received")

    def __str__(self):
        return self.__repr__()

    def __repr__(self):
        header = "{} : Following Exception(s) have occured during Neutron API discovery".format(self.__class__)
        details = [" {}: {}".format(idx, str(err)) for idx, err in enumerate(self.errors)]
        return "\n".join([header] + details)
+
+
+class NeutronDriver(object):
+ """
+ NeutronDriver Class for network orchestration
+ """
+ ### List of supported API versions in prioritized order
+ supported_versions = ["2"]
+
+ def __init__(self,
+ sess_handle,
+ region_name = 'RegionOne',
+ service_type = 'network',
+ logger = None):
+ """
+ Constructor for NeutronDriver class
+ Arguments:
+ sess_handle (instance of class SessionDriver)
+ region_name (string): Region Name
+ service_type(string): Type of service in service catalog
+ logger (instance of logging.Logger)
+ """
+
+ if logger is None:
+ self.log = logging.getLogger('rwcal.openstack.neutron')
+ self.log.setLevel(logging.DEBUG)
+ else:
+ self.log = logger
+
+ self._sess_handle = sess_handle
+
+ #### Attempt to use API versions in prioritized order defined in
+ #### NeutronDriver.supported_versions
+ def select_version(version):
+ try:
+ self.log.info("Attempting to use Neutron v%s APIs", version)
+ ntdrv = ntclient.Client(api_version = version,
+ region_name = region_name,
+ service_type = service_type,
+ session = self._sess_handle.session,
+ logger = self.log)
+ except Exception as e:
+ self.log.info(str(e))
+ raise
+ else:
+ self.log.info("Neutron API v%s selected", version)
+ return (version, ntdrv)
+
+ errors = []
+ for v in NeutronDriver.supported_versions:
+ try:
+ (self._version, self._nt_drv) = select_version(v)
+ except Exception as e:
+ errors.append(e)
+ else:
+ break
+ else:
+ raise NeutronAPIVersionException(errors)
+
+ @property
+ def neutron_endpoint(self):
+ return self._nt_drv.get_auth_info()['endpoint_url']
+
    @property
    def project_id(self):
        # Project (tenant) id comes straight from the shared session handle.
        return self._sess_handle.project_id
+
+ @property
+ def neutron_quota(self):
+ """
+ Returns Neutron Quota (a dictionary) for project
+ """
+ try:
+ quota = self._nt_drv.show_quota(self.project_id)
+ except Exception as e:
+ self.log.exception("Get Neutron quota operation failed. Exception: %s", str(e))
+ raise
+ return quota
+
+ def extensions_list(self):
+ """
+ Returns a list of available neutron extensions.
+ Arguments:
+ None
+ Returns:
+ A list of dictionaries. Each dictionary contains attributes for a single Neutron extension
+ """
+ try:
+ extensions = self._nt_drv.list_extensions()
+ except Exception as e:
+ self.log.exception("List extension operation failed. Exception: %s", str(e))
+ raise
+ if 'extensions' in extensions:
+ return extensions['extensions']
+ return list()
+
+
    def _get_neutron_connection(self):
        """
        Returns instance of object neutronclient.neutron.client.Client
        Use for DEBUG ONLY
        """
        return self._nt_drv
+
+ def _network_find(self, **kwargs):
+ """
+ Returns a network object dictionary based on the filters provided in kwargs
+
+ Arguments:
+ kwargs (dictionary): A dictionary of key-value pair filters
+
+ Returns:
+ One or more dictionary object associated with network
+ """
+ try:
+ networks = self._nt_drv.list_networks(**kwargs)['networks']
+ except Exception as e:
+ self.log.exception("List network operation failed. Exception: %s", str(e))
+ raise
+ return networks
+
+ def network_list(self):
+ """
+ Returns list of dictionaries. Each dictionary contains the attributes for a network
+ under project
+
+ Arguments: None
+
+ Returns:
+ A list of dictionaries
+ """
+ return self._network_find(**{'tenant_id':self.project_id}) + self._network_find(**{'shared':True})
+
+
+ def network_create(self, **kwargs):
+ """
+ Creates a new network for the project
+
+ Arguments:
+ A dictionary with following key-values
+ {
+ name (string) : Name of the network
+ admin_state_up(Boolean) : True/False (Defaults: True)
+ external_router(Boolean) : Connectivity with external router. True/False (Defaults: False)
+ shared(Boolean) : Shared among tenants. True/False (Defaults: False)
+ physical_network(string) : The physical network where this network object is implemented (optional).
+ network_type : The type of physical network that maps to this network resource (optional).
+ Possible values are: 'flat', 'vlan', 'vxlan', 'gre'
+ segmentation_id : An isolated segment on the physical network. The network_type attribute
+ defines the segmentation model. For example, if the network_type value
+ is vlan, this ID is a vlan identifier. If the network_type value is gre,
+ this ID is a gre key.
+ }
+ """
+ params = {'network':
+ {'name' : kwargs['name'],
+ 'admin_state_up' : kwargs['admin_state_up'],
+ 'tenant_id' : self.project_id,
+ 'shared' : kwargs['shared'],
+ #'port_security_enabled': port_security_enabled,
+ 'router:external' : kwargs['external_router']}}
+
+ if 'physical_network' in kwargs:
+ params['network']['provider:physical_network'] = kwargs['physical_network']
+ if 'network_type' in kwargs:
+ params['network']['provider:network_type'] = kwargs['network_type']
+ if 'segmentation_id' in kwargs:
+ params['network']['provider:segmentation_id'] = kwargs['segmentation_id']
+
+ try:
+ self.log.debug("Calling neutron create_network() with params: %s", str(params))
+ net = self._nt_drv.create_network(params)
+ except Exception as e:
+ self.log.exception("Create Network operation failed. Exception: %s", str(e))
+ raise
+
+ network_id = net['network']['id']
+ if not network_id:
+ raise Exception("Empty network id returned from create_network. (params: %s)" % str(params))
+
+ return network_id
+
+ def network_delete(self, network_id):
+ """
+ Deletes a network identified by network_id
+
+ Arguments:
+ network_id (string): UUID of the network
+
+ Returns: None
+ """
+ try:
+ self._nt_drv.delete_network(network_id)
+ except Exception as e:
+ self.log.exception("Delete Network operation failed. Exception: %s",str(e))
+ raise
+
+
+ def network_get(self, network_id='', network_name=''):
+ """
+ Returns a dictionary object describing the attributes of the network
+
+ Arguments:
+ network_id (string): UUID of the network
+
+ Returns:
+ A dictionary object of the network attributes
+ """
+ networks = self._network_find(**{'id': network_id, 'name': network_name})
+ if not networks:
+ raise NeutronException.NotFound("Could not find network. Network id: %s, Network name: %s " %(network_id, network_name))
+ return networks[0]
+
+
    def subnet_create(self, **kwargs):
        """
        Creates a subnet on the network

        Arguments:
          A dictionary with following key value pairs
          {
            network_id(string) : UUID of the network where subnet needs to be created
            cidr(string)       : IPv4 address prefix (e.g. '1.1.1.0/24') for the subnet
                                 (used only when 'subnetpool_id' is absent)
            ip_version (integer): 4 for IPv4 and 6 for IPv6
            subnetpool_id(string): (optional) UUID of a subnet prefix pool; takes
                                 precedence over 'cidr'
            gateway_ip(string) : (optional) gateway address for the subnet
            dhcp_params(dict)  : (optional) {'enable_dhcp': bool,
                                 'start_address': str, 'count': int}
            dns_server(list)   : (optional) list of DNS nameserver addresses
          }

        Returns:
            subnet_id (string): UUID of the created subnet
        """
        params = {}
        params['network_id'] = kwargs['network_id']
        params['ip_version'] = kwargs['ip_version']

        # if params['ip_version'] == 6:
        #     assert 0, "IPv6 is not supported"

        # A subnet pool id takes precedence over an explicit CIDR.
        if 'subnetpool_id' in kwargs:
            params['subnetpool_id'] = kwargs['subnetpool_id']
        else:
            params['cidr'] = kwargs['cidr']

        # NOTE(review): presumably passing gateway_ip=None suppresses the
        # default gateway assignment -- verify against the neutron API.
        if 'gateway_ip' in kwargs:
            params['gateway_ip'] = kwargs['gateway_ip']
        else:
            params['gateway_ip'] = None

        if 'dhcp_params' in kwargs:
            params['enable_dhcp'] = kwargs['dhcp_params']['enable_dhcp']
            if 'start_address' in kwargs['dhcp_params'] and 'count' in kwargs['dhcp_params']:
                # NOTE(review): end = start + count gives an inclusive pool of
                # count+1 addresses -- confirm intended pool size.
                end_address = (ipaddress.IPv4Address(kwargs['dhcp_params']['start_address']) + kwargs['dhcp_params']['count']).compressed
                params['allocation_pools'] = [ {'start': kwargs['dhcp_params']['start_address'] ,
                                                'end' : end_address} ]

        if 'dns_server' in kwargs:
            params['dns_nameservers'] = []
            for server in kwargs['dns_server']:
                params['dns_nameservers'].append(server)

        try:
            # Bulk-create form: a list with a single subnet definition.
            subnet = self._nt_drv.create_subnet({'subnets': [params]})
        except Exception as e:
            self.log.exception("Create Subnet operation failed. Exception: %s",str(e))
            raise

        return subnet['subnets'][0]['id']
+
+ def subnet_list(self, **kwargs):
+ """
+ Returns a list of dictionaries. Each dictionary contains attributes describing the subnet
+
+ Arguments: None
+
+ Returns:
+ A dictionary of the objects of subnet attributes
+ """
+ try:
+ subnets = self._nt_drv.list_subnets(**kwargs)['subnets']
+ except Exception as e:
+ self.log.exception("List Subnet operation failed. Exception: %s", str(e))
+ raise
+ return subnets
+
    def _subnet_get(self, subnet_id):
        """
        Returns a dictionary object describing the attributes of a subnet.

        Arguments:
            subnet_id (string): UUID of the subnet

        Returns:
            A dictionary object of the subnet attributes. When the subnet
            does not exist the error is logged and a stub ``{'cidr': ''}``
            is returned instead of raising (best-effort behavior -- see the
            commented-out raise below).
        """
        subnets = self._nt_drv.list_subnets(id=subnet_id)
        if not subnets['subnets']:
            self.log.error("Get subnet operation failed for subnet_id: %s", subnet_id)
            #raise NeutronException.NotFound("Could not find subnet_id %s" %(subnet_id))
            return {'cidr': ''}
        else:
            return subnets['subnets'][0]
+
+ def subnet_get(self, subnet_id):
+ """
+ Returns a dictionary object describing the attributes of a subnet.
+
+ Arguments:
+ subnet_id (string): UUID of the subnet
+
+ Returns:
+ A dictionary object of the subnet attributes
+ """
+ return self._subnet_get(subnet_id)
+
+ def subnet_delete(self, subnet_id):
+ """
+ Deletes a subnet identified by subnet_id
+
+ Arguments:
+ subnet_id (string): UUID of the subnet to be deleted
+
+ Returns: None
+ """
+ assert subnet_id == self._subnet_get(self,subnet_id)
+ try:
+ self._nt_drv.delete_subnet(subnet_id)
+ except Exception as e:
+ self.log.exception("Delete Subnet operation failed for subnet_id : %s. Exception: %s", subnet_id, str(e))
+ raise
+
+ def port_list(self, **kwargs):
+ """
+ Returns a list of dictionaries. Each dictionary contains attributes describing the port
+
+ Arguments:
+ kwargs (dictionary): A dictionary for filters for port_list operation
+
+ Returns:
+ A dictionary of the objects of port attributes
+
+ """
+ ports = []
+
+ kwargs['tenant_id'] = self.project_id
+
+ try:
+ ports = self._nt_drv.list_ports(**kwargs)
+ except Exception as e:
+ self.log.exception("List Port operation failed. Exception: %s",str(e))
+ raise
+ return ports['ports']
+
+ def port_create(self, ports):
+ """
+ Create a port in network
+
+ Arguments:
+ Ports
+ List of dictionaries of following
+ {
+ name (string) : Name of the port
+ network_id(string) : UUID of the network_id identifying the network to which port belongs
+ ip_address(string) : (Optional) Static IP address to assign to the port
+ vnic_type(string) : Possible values are "normal", "direct", "macvtap"
+ admin_state_up : True/False
+ port_security_enabled : True/False
+ security_groups : A List of Neutron security group Ids
+ }
+ Returns:
+ A list of port_id (string)
+ """
+ params = dict()
+ params['ports'] = ports
+ self.log.debug("Port create params: {}".format(params))
+ try:
+ ports = self._nt_drv.create_port(params)
+ except Exception as e:
+ self.log.exception("Ports Create operation failed. Exception: %s",str(e))
+ raise
+ return [ p['id'] for p in ports['ports'] ]
+
+
    def port_update(self, port_id, no_security_groups=None,port_security_enabled=None):
        """
        Update a port in network

        Arguments:
            port_id (string): UUID of the port to update
            no_security_groups: when truthy, clears the port's security-group list
            port_security_enabled: when equal to True/False, sets the
                port_security_enabled flag; left untouched when None

        Returns:
            port_id (string) of the updated port
        """
        params = {}
        params["port"] = {}
        if no_security_groups:
            params["port"]["security_groups"] = []
        # NOTE(review): "==" comparison (not "is") means non-bool values
        # comparing equal to 0/1 also toggle the flag -- confirm intended.
        if port_security_enabled == False:
            params["port"]["port_security_enabled"] = False
        elif port_security_enabled == True:
            params["port"]["port_security_enabled"] = True

        try:
            port = self._nt_drv.update_port(port_id,params)
        except Exception as e:
            self.log.exception("Port Update operation failed. Exception: %s", str(e))
            raise
        return port['port']['id']
+
+ def _port_get(self, port_id):
+ """
+ Returns a dictionary object describing the attributes of the port
+
+ Arguments:
+ port_id (string): UUID of the port
+
+ Returns:
+ A dictionary object of the port attributes
+ """
+ port = self._nt_drv.list_ports(id=port_id)['ports']
+ if not port:
+ raise NeutronException.NotFound("Could not find port_id %s" %(port_id))
+ return port[0]
+
+ def port_get(self, port_id):
+ """
+ Returns a dictionary object describing the attributes of the port
+
+ Arguments:
+ port_id (string): UUID of the port
+
+ Returns:
+ A dictionary object of the port attributes
+ """
+ return self._port_get(port_id)
+
+ def port_delete(self, port_id):
+ """
+ Deletes a port identified by port_id
+
+ Arguments:
+ port_id (string) : UUID of the port
+
+ Returns: None
+ """
+ assert port_id == self._port_get(port_id)['id']
+ try:
+ self._nt_drv.delete_port(port_id)
+ except Exception as e:
+ self.log.exception("Port Delete operation failed for port_id : %s. Exception: %s",port_id, str(e))
+ raise
+
+ def security_group_list(self, **kwargs):
+ """
+ Returns a list of dictionaries. Each dictionary contains attributes describing the security group
+
+ Arguments:
+ None
+
+ Returns:
+ A dictionary of the objects of security group attributes
+ """
+ try:
+ kwargs['tenant_id'] = self.project_id
+ group_list = self._nt_drv.list_security_groups(**kwargs)
+ except Exception as e:
+ self.log.exception("List Security group operation, Exception: %s", str(e))
+ raise
+ return group_list['security_groups']
+
+
+ def subnetpool_list(self, **kwargs):
+ """
+ Returns a list of dictionaries. Each dictionary contains attributes describing a subnet prefix pool
+
+ Arguments:
+ None
+
+ Returns:
+ A dictionary of the objects of subnet prefix pool
+ """
+ try:
+ pool_list = self._nt_drv.list_subnetpools(**kwargs)
+ except Exception as e:
+ self.log.exception("List SubnetPool operation, Exception: %s",str(e))
+ raise
+
+ if 'subnetpools' in pool_list:
+ return pool_list['subnetpools']
+ else:
+ return []
+
--- /dev/null
+
+#
+# Copyright 2017 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from .nova_drv import (
+ NovaDriver,
+)
--- /dev/null
+#!/usr/bin/python
+
+#
+# Copyright 2017 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import logging
+from novaclient import client as nvclient
+
+
class NovaDrvAPIVersionException(Exception):
    """
    Raised when none of the supported Nova API versions could be used;
    aggregates the per-version discovery exceptions in self.errors.
    """
    def __init__(self, errors):
        self.errors = errors
        super(NovaDrvAPIVersionException, self).__init__("Multiple Exception Received")

    def __str__(self):
        return self.__repr__()

    def __repr__(self):
        # Typo fix: "occured" -> "occurred" in the rendered message.
        msg = "{} : Following Exception(s) have occurred during Nova API discovery".format(self.__class__)
        for n,e in enumerate(self.errors):
            msg += "\n"
            msg += " {}: {}".format(n, str(e))
        return msg
+
+
class NovaDriver(object):
    """
    NovaDriver Class for compute orchestration
    """
    ### List of supported API versions in prioritized order
    supported_versions = ["2.1", "2.0"]

    def __init__(self,
                 sess_handle,
                 region_name = 'RegionOne',
                 service_type = 'compute',
                 logger = None):
        """
        Constructor for NovaDriver class
        Arguments:
        sess_handle (instance of class SessionDriver)
        region_name (string): Region Name
        service_type(string): Type of service in service catalog
        logger (instance of logging.Logger)
        """

        if logger is None:
            self.log = logging.getLogger('rwcal.openstack.nova')
            self.log.setLevel(logging.DEBUG)
        else:
            self.log = logger

        self._sess_handle = sess_handle

        #### Attempt to use API versions in prioritized order defined in
        #### NovaDriver.supported_versions
        def select_version(version):
            try:
                self.log.info("Attempting to use Nova v%s APIs", version)
                nvdrv = nvclient.Client(version=version,
                                        region_name = region_name,
                                        service_type = service_type,
                                        session = self._sess_handle.session,
                                        logger = self.log)
            except Exception as e:
                self.log.info(str(e))
                raise
            else:
                self.log.info("Nova API v%s selected", version)
                return (version, nvdrv)

        errors = []
        for v in NovaDriver.supported_versions:
            try:
                (self._version, self._nv_drv) = select_version(v)
            except Exception as e:
                errors.append(e)
            else:
                break
        else:
            # None of the supported versions worked; surface every error.
            raise NovaDrvAPIVersionException(errors)

    @property
    def project_id(self):
        """UUID of the project associated with the session."""
        return self._sess_handle.project_id

    @property
    def nova_endpoint(self):
        """Endpoint URL resolved for the nova service."""
        return self._nv_drv.client.get_endpoint()

    @property
    def nova_quota(self):
        """
        Returns Nova Quota (a dictionary) for project
        """
        try:
            quota = self._nv_drv.quotas.get(self.project_id)
        except Exception as e:
            self.log.exception("Get Nova quota operation failed. Exception: %s", str(e))
            raise
        return quota.to_dict()

    def extensions_list(self):
        """
        Returns a list of available nova extensions.
        Arguments:
          None
        Returns:
          A list of dictionaries. Each dictionary contains attributes for a single NOVA extension
        """
        try:
            extensions = self._nv_drv.list_extensions.show_all()
        except Exception as e:
            self.log.exception("List extension operation failed. Exception: %s", str(e))
            raise
        return [ ext.to_dict() for ext in extensions ]


    def _get_nova_connection(self):
        """
        Returns instance of object novaclient.client.Client
        Use for DEBUG ONLY
        """
        return self._nv_drv

    def _flavor_extra_spec_get(self, flavor):
        """
        Get extra_specs associated with a flavor
        Arguments:
           flavor: Object of novaclient.v2.flavors.Flavor

        Returns:
           A dictionary of extra_specs (key-value pairs)
        """
        try:
            extra_specs = flavor.get_keys()
        except Exception as e:
            self.log.exception("Could not get the EPA attributes for flavor with flavor_id : %s. Exception: %s",
                               flavor.id, str(e))
            raise
        return extra_specs

    def _flavor_get(self, flavor_id):
        """
        Get flavor by flavor_id
        Arguments:
           flavor_id(string): UUID of flavor_id

        Returns:
           dictionary of flavor parameters (with 'extra_specs' merged in)
        """
        try:
            flavor = self._nv_drv.flavors.get(flavor_id)
        except Exception as e:
            self.log.exception("Did not find flavor with flavor_id : %s. Exception: %s",flavor_id, str(e))
            raise
        # Bug fix: the original contained a second, unreachable copy of this
        # logic after the return statement; it has been removed.
        response = flavor.to_dict()
        response['extra_specs'] = self._flavor_extra_spec_get(flavor)
        return response

    def flavor_get(self, flavor_id):
        """
        Get flavor by flavor_id
        Arguments:
           flavor_id(string): UUID of flavor_id

        Returns:
           dictionary of flavor parameters
        """
        return self._flavor_get(flavor_id)

    def flavor_find(self, **kwargs):
        """
        Returns list of all flavors (dictionary) matching the filters provided in kwargs

        Arguments:
          A dictionary in following keys
             {
               "vcpus": Number of vcpus required
               "ram"  : Memory in MB
               "disk" : Secondary storage in GB
             }
        Returns:
           A list of dictionaries. Each dictionary contains attributes for a single flavor instance
        """
        try:
            flavor_list = self._nv_drv.flavors.findall(**kwargs)
        except Exception as e:
            self.log.exception("Find Flavor operation failed. Exception: %s",str(e))
            raise

        flavor_info = list()
        for f in flavor_list:
            flavor = f.to_dict()
            flavor['extra_specs'] = self._flavor_extra_spec_get(f)
            flavor_info.append(flavor)

        return flavor_info

    def flavor_list(self):
        """
        Returns list of all flavors (dictionary per flavor)

        Arguments:
           None
        Returns:
           A list of dictionaries. Each dictionary contains attributes for a single flavor instance
        """
        try:
            flavors = self._nv_drv.flavors.list()
        except Exception as e:
            self.log.exception("List Flavor operation failed. Exception: %s",str(e))
            raise
        # An empty flavor list simply yields an empty result list.
        return [ self.flavor_get(flv.id) for flv in flavors ]

    def flavor_create(self, name, ram, vcpu, disk, extra_specs):
        """
        Create a new flavor

        Arguments:
           name   (string): Name of the new flavor
           ram    (int)   : Memory in MB
           vcpu   (int)   : Number of VCPUs
           disk   (int)   : Secondary storage size in GB
           extra_specs (dictionary): EPA attributes dictionary

        Returns:
           flavor_id (string): UUID of flavor created
        """
        try:
            flavor = self._nv_drv.flavors.create(name = name,
                                                 ram = ram,
                                                 vcpus = vcpu,
                                                 disk = disk,
                                                 flavorid = 'auto',
                                                 ephemeral = 0,
                                                 swap = 0,
                                                 rxtx_factor = 1.0,
                                                 is_public = True)
        except Exception as e:
            self.log.exception("Create Flavor operation failed. Exception: %s",str(e))
            raise

        if extra_specs:
            try:
                flavor.set_keys(extra_specs)
            except Exception as e:
                self.log.exception("Set Key operation failed for flavor: %s. Exception: %s",
                                   flavor.id, str(e))
                raise
        return flavor.id

    def flavor_delete(self, flavor_id):
        """
        Deletes a flavor identified by flavor_id

        Arguments:
           flavor_id (string): UUID of flavor to be deleted

        Returns: None
        """
        # Sanity-check that the flavor exists before attempting deletion.
        assert flavor_id == self._flavor_get(flavor_id)['id']
        try:
            self._nv_drv.flavors.delete(flavor_id)
        except Exception as e:
            self.log.exception("Delete flavor operation failed for flavor: %s. Exception: %s",
                               flavor_id, str(e))
            raise


    def server_list(self):
        """
        Returns a list of available VMs for the project

        Arguments: None

        Returns:
           A list of dictionaries. Each dictionary contains attributes associated
           with individual VM
        """
        try:
            servers = self._nv_drv.servers.list()
        except Exception as e:
            self.log.exception("List Server operation failed. Exception: %s", str(e))
            raise
        return [ server.to_dict() for server in servers ]

    def _nova_server_get(self, server_id):
        """
        Returns a dictionary of attributes associated with VM identified by service_id

        Arguments:
          server_id (string): UUID of the VM/server for which information is requested

        Returns:
          A dictionary object with attributes associated with VM identified by server_id
        """
        try:
            server = self._nv_drv.servers.get(server = server_id)
        except Exception as e:
            self.log.exception("Get Server operation failed for server_id: %s. Exception: %s",
                               server_id, str(e))
            raise
        else:
            return server.to_dict()

    def server_get(self, server_id):
        """
        Returns a dictionary of attributes associated with VM identified by service_id

        Arguments:
          server_id (string): UUID of the VM/server for which information is requested

        Returns:
          A dictionary object with attributes associated with VM identified by server_id
        """
        return self._nova_server_get(server_id)

    def server_create(self, **kwargs):
        """
        Creates a new VM/server instance

        Arguments:
          A dictionary of following key-value pairs
         {
           name (string)              : Name of the VM/Server
           flavor_id  (string)        : UUID of the flavor to be used for VM
           image_id   (string)        : UUID of the image to be used VM/Server instance,
                                        This could be None if volumes (with images) are being used
           network_list(List)         : A List of network_ids. A port will be created in these networks
           port_list (List)           : A List of port-ids. These ports will be added to VM.
           metadata (dict)            : A dictionary of arbitrary key-value pairs associated with VM/server
           userdata (string)          : A script which shall be executed during first boot of the VM
           availability_zone (string) : A name of the availability zone where instance should be launched
           scheduler_hints (string)   : Openstack scheduler_hints to be passed to nova scheduler
         }
        Returns:
          server_id (string): UUID of the VM/server created

        """
        nics = []
        if 'network_list' in kwargs:
            for network_id in kwargs['network_list']:
                nics.append({'net-id': network_id})

        if 'port_list' in kwargs:
            for port_id in kwargs['port_list']:
                nics.append({'port-id': port_id})

        try:
            # kwargs.get(key) is equivalent to the original
            # "kwargs[key] if key in kwargs else None" for every optional key.
            server = self._nv_drv.servers.create(
                kwargs['name'],
                kwargs['image_id'],
                kwargs['flavor_id'],
                meta                    = kwargs.get('metadata'),
                files                   = kwargs.get('files'),
                reservation_id          = None,
                min_count               = None,
                max_count               = None,
                userdata                = kwargs.get('userdata'),
                security_groups         = kwargs.get('security_groups'),
                availability_zone       = kwargs.get('availability_zone'),
                block_device_mapping_v2 = kwargs.get('block_device_mapping_v2'),
                nics                    = nics,
                scheduler_hints         = kwargs.get('scheduler_hints'),
                config_drive            = kwargs.get('config_drive')
            )

        except Exception as e:
            self.log.exception("Create Server operation failed. Exception: %s", str(e))
            raise
        return server.to_dict()['id']

    def server_delete(self, server_id):
        """
        Deletes a server identified by server_id

        Arguments:
           server_id (string): UUID of the server to be deleted

        Returns: None
        """
        try:
            self._nv_drv.servers.delete(server_id)
        except Exception as e:
            self.log.exception("Delete server operation failed for server_id: %s. Exception: %s",
                               server_id, str(e))
            raise

    def server_start(self, server_id):
        """
        Starts a server identified by server_id

        Arguments:
           server_id (string): UUID of the server to be started

        Returns: None
        """
        try:
            self._nv_drv.servers.start(server_id)
        except Exception as e:
            self.log.exception("Start Server operation failed for server_id : %s. Exception: %s",
                               server_id, str(e))
            raise

    def server_stop(self, server_id):
        """
        Arguments:
           server_id (string): UUID of the server to be stopped

        Returns: None
        """
        try:
            self._nv_drv.servers.stop(server_id)
        except Exception as e:
            self.log.exception("Stop Server operation failed for server_id : %s. Exception: %s",
                               server_id, str(e))
            raise

    def server_pause(self, server_id):
        """
        Arguments:
           server_id (string): UUID of the server to be paused

        Returns: None
        """
        try:
            self._nv_drv.servers.pause(server_id)
        except Exception as e:
            self.log.exception("Pause Server operation failed for server_id : %s. Exception: %s",
                               server_id, str(e))
            raise

    def server_unpause(self, server_id):
        """
        Arguments:
           server_id (string): UUID of the server to be unpaused

        Returns: None
        """
        try:
            self._nv_drv.servers.unpause(server_id)
        except Exception as e:
            self.log.exception("Resume Server operation failed for server_id : %s. Exception: %s",
                               server_id, str(e))
            raise


    def server_suspend(self, server_id):
        """
        Arguments:
           server_id (string): UUID of the server to be suspended

        Returns: None
        """
        try:
            self._nv_drv.servers.suspend(server_id)
        except Exception as e:
            self.log.exception("Suspend Server operation failed for server_id : %s. Exception: %s",
                               server_id, str(e))
            # Bug fix: re-raise for consistency with every other server_*
            # operation; the original silently swallowed the exception.
            raise


    def server_resume(self, server_id):
        """
        Arguments:
           server_id (string): UUID of the server to be resumed

        Returns: None
        """
        try:
            self._nv_drv.servers.resume(server_id)
        except Exception as e:
            self.log.exception("Resume Server operation failed for server_id : %s. Exception: %s",
                               server_id, str(e))
            raise

    def server_reboot(self, server_id, reboot_type):
        """
        Arguments:
           server_id (string) : UUID of the server to be rebooted
           reboot_type(string):
                         'SOFT': Soft Reboot
                         'HARD': Hard Reboot
        Returns: None
        """
        try:
            self._nv_drv.servers.reboot(server_id, reboot_type)
        except Exception as e:
            self.log.exception("Reboot Server operation failed for server_id: %s. Exception: %s",
                               server_id, str(e))
            raise

    def server_console(self, server_id, console_type = 'novnc'):
        """
        Arguments:
           server_id (string) : UUID of the server
           console_type(string):
                         'novnc',
                         'xvpvnc'
        Returns:
          A dictionary object response for console information
        """
        try:
            console_info = self._nv_drv.servers.get_vnc_console(server_id, console_type)
        except Exception as e:
            self.log.exception("Server Get-Console operation failed for server_id: %s. Exception: %s",
                               server_id, str(e))
            raise
        return console_info

    def server_rebuild(self, server_id, image_id):
        """
        Arguments:
           server_id (string) : UUID of the server to be rebuilt
           image_id (string)  : UUID of the image to use
        Returns: None
        """

        try:
            self._nv_drv.servers.rebuild(server_id, image_id)
        except Exception as e:
            self.log.exception("Rebuild Server operation failed for server_id: %s. Exception: %s",
                               server_id, str(e))
            raise


    def server_add_port(self, server_id, port_id):
        """
        Arguments:
           server_id (string): UUID of the server
           port_id   (string): UUID of the port to be attached

        Returns: None
        """
        try:
            self._nv_drv.servers.interface_attach(server_id,
                                                  port_id,
                                                  net_id = None,
                                                  fixed_ip = None)
        except Exception as e:
            self.log.exception("Server Port Add operation failed for server_id : %s, port_id : %s. Exception: %s",
                               server_id, port_id, str(e))
            raise

    def server_delete_port(self, server_id, port_id):
        """
        Arguments:
           server_id (string): UUID of the server
           port_id   (string): UUID of the port to be deleted
        Returns: None

        """
        try:
            self._nv_drv.servers.interface_detach(server_id, port_id)
        except Exception as e:
            self.log.exception("Server Port Delete operation failed for server_id : %s, port_id : %s. Exception: %s",
                               server_id, port_id, str(e))
            raise

    def floating_ip_list(self):
        """
        Arguments:
            None
        Returns:
            List of objects of floating IP nova class (novaclient.v2.floating_ips.FloatingIP)
        """
        try:
            ip_list = self._nv_drv.floating_ips.list()
        except Exception as e:
            self.log.exception("Floating IP List operation failed. Exception: %s", str(e))
            raise

        return ip_list

    def floating_ip_create(self, pool):
        """
        Arguments:
           pool (string): Name of the pool (optional)
        Returns:
           An object of floating IP nova class (novaclient.v2.floating_ips.FloatingIP)
        """
        try:
            floating_ip = self._nv_drv.floating_ips.create(pool)
        except Exception as e:
            self.log.exception("Floating IP Create operation failed. Exception: %s", str(e))
            raise

        return floating_ip

    def floating_ip_delete(self, floating_ip):
        """
        Arguments:
           floating_ip: An object of floating IP nova class (novaclient.v2.floating_ips.FloatingIP)
        Returns:
           None
        """
        try:
            # The delete result was previously assigned to an unused local.
            self._nv_drv.floating_ips.delete(floating_ip)
        except Exception as e:
            self.log.exception("Floating IP Delete operation failed. Exception: %s", str(e))
            raise

    def floating_ip_assign(self, server_id, floating_ip, fixed_ip):
        """
        Arguments:
           server_id (string)  : UUID of the server
           floating_ip (string): IP address string for floating-ip
           fixed_ip (string)   : IP address string for the fixed-ip with which floating ip will be associated
        Returns:
           None
        """
        try:
            self._nv_drv.servers.add_floating_ip(server_id, floating_ip, fixed_ip)
        except Exception as e:
            self.log.exception("Assign Floating IP operation failed. Exception: %s", str(e))
            raise

    def floating_ip_release(self, server_id, floating_ip):
        """
        Arguments:
           server_id (string)  : UUID of the server
           floating_ip (string): IP address string for floating-ip
        Returns:
           None
        """
        try:
            self._nv_drv.servers.remove_floating_ip(server_id, floating_ip)
        except Exception as e:
            self.log.exception("Release Floating IP operation failed. Exception: %s", str(e))
            raise

    def volume_list(self, server_id):
        """
        List of volumes attached to the server

        Arguments:
            server_id (string): UUID of the server whose volumes are listed
        Returns:
            List of dictionary objects where dictionary is representation of class (novaclient.v2.volumes.Volume)
        """
        try:
            volumes = self._nv_drv.volumes.get_server_volumes(server_id=server_id)
        except Exception as e:
            self.log.exception("Get volume information failed. Exception: %s", str(e))
            raise

        volume_info = [v.to_dict() for v in volumes]
        return volume_info


    def group_list(self):
        """
        List of Server Affinity and Anti-Affinity Groups

        Arguments:
            None
        Returns:
            List of dictionary objects where dictionary is representation of class (novaclient.v2.server_groups.ServerGroup)
        """
        try:
            group_list = self._nv_drv.server_groups.list()
        except Exception as e:
            self.log.exception("Server Group List operation failed. Exception: %s", str(e))
            raise

        group_info = [ group.to_dict() for group in group_list ]
        return group_info


    def security_group_list(self):
        """
        List of Security Group
        Arguments:
           None
        Returns:
           List of dictionary objects representating novaclient.v2.security_groups.SecurityGroup class
        """
        try:
            sec_groups = self._nv_drv.security_groups.list()
        except Exception as e:
            self.log.exception("Security Group List operation failed. Exception: %s", str(e))
            raise
        sec_info = [ sec_group.to_dict() for sec_group in sec_groups]
        return sec_info
+
# limitations under the License.
#
-import json
import logging
-import ipaddress
-from keystoneclient import v3 as ksclientv3
-from keystoneclient.v2_0 import client as ksclientv2
-from novaclient import client as nova_client
-from neutronclient.neutron import client as ntclient
-from glanceclient.v2 import client as glclient
-from ceilometerclient import client as ceilo_client
-from cinderclient.v2 import client as cinder_client
+from . import session as sess_drv
+from . import keystone as ks_drv
+from . import nova as nv_drv
+from . import neutron as nt_drv
+from . import glance as gl_drv
+from . import ceilometer as ce_drv
+from . import cinder as ci_drv
+from . import portchain as port_drv
+from . import utils as drv_utils
# Exceptions
-import novaclient.exceptions as NovaException
import keystoneclient.exceptions as KeystoneExceptions
-import neutronclient.common.exceptions as NeutronException
-import glanceclient.exc as GlanceException
-import cinderclient.exceptions as CinderException
-logger = logging.getLogger('rwcal.openstack.drv')
-logger.setLevel(logging.DEBUG)
class ValidationError(Exception):
pass
-class KeystoneDriver(object):
+class DriverUtilities(object):
"""
- Driver base-class for keystoneclient APIs
+ Class with utility methods
"""
- def __init__(self, ksclient):
+ def __init__(self, driver):
"""
- Constructor for KeystoneDriver base class
- Arguments: None
- Returns: None
- """
- self.ksclient = ksclient
-
- def get_username(self):
- """
- Returns the username associated with keystoneclient connection
- """
- return self._username
-
- def get_password(self):
- """
- Returns the password associated with keystoneclient connection
- """
- return self._password
-
- def get_tenant_name(self):
- """
- Returns the tenant name associated with keystoneclient connection
- """
- return self._tenant_name
-
- def get_user_domain_name(self):
- """
- Returns None as this field does not exist for v2.
- """
- return None;
-
- def get_project_domain_name(self):
- """
- Returns None as this field does not exist for v2.
- """
- return None;
-
- def _get_keystone_connection(self):
- """
- Returns object of class python-keystoneclient class
- """
- if not hasattr(self, '_keystone_connection'):
- self._keystone_connection = self.ksclient(**self._get_keystone_credentials())
- return self._keystone_connection
-
- def is_auth_token_valid(self, token_expiry, time_fmt):
- """
- Performs validity on auth_token
- Arguments:
- token_expiry (string): Expiry time for token
- time_fmt (string) : Format for expiry string in auth_ref
-
- Returns:
- True/False (Boolean): (auth_token is valid or auth_token is invalid)
- """
- import time
- import datetime
- import dateutil.parser
- try:
- now = datetime.datetime.timetuple(datetime.datetime.utcnow())
- expires_at = dateutil.parser.parse(token_expiry)
- t_now = time.mktime(now)
- t_expiry = time.mktime(expires_at.timetuple())
-
- if (t_expiry <= t_now) or ((t_expiry - t_now) < 300 ):
- ### Token has expired or about to expire (5 minute)
- delattr(self, '_keystone_connection')
- return False
- else:
- return True
- except Exception as e:
- logger.error("Received except %s during auth_token validity check" %str(e))
- logger.info("Can not validate the auth_token. Assuming invalid")
- return False
-
-
- def get_service_endpoint(self, service_type, endpoint_type):
- """
- Returns requested type of endpoint for requested service type
- Arguments:
- service_type (string): Service Type (e.g. computev3, image, network)
- endpoint_type(string): Endpoint Type (e.g. publicURL,adminURL,internalURL)
- Returns:
- service_endpoint(string): Service endpoint string
- """
- endpoint_kwargs = {'service_type' : service_type,
- 'endpoint_type' : endpoint_type}
- try:
- ksconn = self._get_keystone_connection()
- service_endpoint = ksconn.service_catalog.url_for(**endpoint_kwargs)
- except (KeystoneExceptions.Unauthorized, KeystoneExceptions.AuthorizationFailure) as e:
- raise
- except Exception as e:
- logger.error("OpenstackDriver: Service Catalog discovery operation failed for service_type: %s, endpoint_type: %s. Exception: %s" %(service_type, endpoint_type, str(e)))
- raise
- return service_endpoint
-
-
- def get_raw_token(self):
- """
- Returns a valid raw_auth_token string
-
- Returns (string): raw_auth_token string
- """
- ksconn = self._get_keystone_connection()
- try:
- raw_token = ksconn.get_raw_token_from_identity_service(auth_url = self._auth_url,
- token = self.get_auth_token())
- except KeystoneExceptions.AuthorizationFailure as e:
- logger.error("OpenstackDriver: get_raw_token_from_identity_service Failure. Exception: %s" %(str(e)))
- return None
-
- except Exception as e:
- logger.error("OpenstackDriver: Could not retrieve raw_token. Exception: %s" %(str(e)))
-
- return raw_token
-
- def get_tenant_id(self):
- """
- Returns tenant_id for the project/tenant. Tenant name is provided during
- class instantiation
-
- Returns (string): Tenant ID
- """
- ksconn = self._get_keystone_connection()
- return ksconn.tenant_id
-
- def get_security_mode(self):
- """
- Returns certificate_validation policy in case of SSL/TLS connection.
- This policy is provided during class instantiation
-
- Returns (boolean):
- The boolean returned are designed to match the python-client class instantiation ("insecure") value.
- for nova/neutron/glance/keystone clients
-
- True: No certificate validation required -- Insecure mode
- False: Certificate validation required -- Secure mode
- """
- return self._insecure
-
- def tenant_list(self):
- """
- Returns list of tenants
- """
- pass
-
- def tenant_create(self, name):
- """
- Create a new tenant
- """
- pass
-
- def tenant_delete(self, tenant_id):
- """
- Deletes a tenant identified by tenant_id
- """
- pass
-
- def roles_list(self):
- pass
-
- def roles_create(self):
- pass
-
- def roles_delete(self):
- pass
-
-class KeystoneDriverV2(KeystoneDriver):
- """
- Driver class for keystoneclient V2 APIs
- """
- def __init__(self, username, password, auth_url,tenant_name, insecure, region):
- """
- Constructor for KeystoneDriverV3 class
- Arguments:
- username (string) : Username
- password (string) : Password
- auth_url (string) : Authentication URL
- tenant_name(string): Tenant Name
- region (string) : Region name
- Returns: None
- """
- self._username = username
- self._password = password
- self._auth_url = auth_url
- self._tenant_name = tenant_name
- self._insecure = insecure
- self._region = region
- super(KeystoneDriverV2, self).__init__(ksclientv2.Client)
-
- def _get_keystone_credentials(self):
- """
- Returns the dictionary of kwargs required to instantiate python-keystoneclient class
- """
- creds = {}
- #creds['user_domain'] = self._domain_name
- creds['username'] = self._username
- creds['password'] = self._password
- creds['auth_url'] = self._auth_url
- creds['tenant_name'] = self._tenant_name
- creds['insecure'] = self.get_security_mode()
- creds['region_name'] = self._region
- return creds
-
- def get_auth_token(self):
- """
- Returns a valid auth_token
-
- Returns (string): auth_token string
- """
- ksconn = self._get_keystone_connection()
- return ksconn.auth_token
-
- def is_auth_token_valid(self):
- """
- Performs validity on auth_token
- Arguments:
-
- Returns:
- True/False (Boolean): (auth_token is valid or auth_token is invalid)
- """
- ksconn = self._get_keystone_connection()
- result = super(KeystoneDriverV2, self).is_auth_token_valid(ksconn.auth_ref['token']['expires'],
- "%Y-%m-%dT%H:%M:%SZ")
- return result
-
-
-class KeystoneDriverV3(KeystoneDriver):
- """
- Driver class for keystoneclient V3 APIs
- """
- def __init__(self, username,
- password,
- auth_url,
- tenant_name,
- insecure,
- user_domain_name = None,
- project_domain_name = None,
- region = None):
- """
- Constructor for KeystoneDriverV3 class
- Arguments:
- username (string) : Username
- password (string) : Password
- auth_url (string) : Authentication URL
- tenant_name(string): Tenant Name
- user_domain_name (string) : User domain name
- project_domain_name (string): Project domain name
- region (string) : Region name
- Returns: None
- """
- self._username = username
- self._password = password
- self._auth_url = auth_url
- self._tenant_name = tenant_name
- self._insecure = insecure
- self._user_domain_name = user_domain_name
- self._project_domain_name = project_domain_name
- self._region = region
- super(KeystoneDriverV3, self).__init__(ksclientv3.Client)
-
- def _get_keystone_credentials(self):
- """
- Returns the dictionary of kwargs required to instantiate python-keystoneclient class
- """
- creds = {}
- creds['username'] = self._username
- creds['password'] = self._password
- creds['auth_url'] = self._auth_url
- creds['project_name'] = self._tenant_name
- creds['insecure'] = self._insecure
- creds['user_domain_name'] = self._user_domain_name
- creds['project_domain_name'] = self._project_domain_name
- creds['region_name'] = self._region
- return creds
-
- def get_user_domain_name(self):
- """
- Returns the domain_name of the associated OpenStack user account
- """
- return self._user_domain_name;
-
- def get_project_domain_name(self):
- """
- Returns the domain_name of the associated OpenStack project
- """
- return self._project_domain_name;
-
- def get_auth_token(self):
- """
- Returns a valid auth_token
-
- Returns (string): auth_token string
- """
- ksconn = self._get_keystone_connection()
- return ksconn.auth_ref['auth_token']
-
- def is_auth_token_valid(self):
- """
- Performs validity on auth_token
- Arguments:
-
- Returns:
- True/False (Boolean): (auth_token is valid or auth_token is invalid)
- """
- ksconn = self._get_keystone_connection()
- result = super(KeystoneDriverV3, self).is_auth_token_valid(ksconn.auth_ref['expires_at'],
- "%Y-%m-%dT%H:%M:%S.%fZ")
- return result
-
-class NovaDriver(object):
- """
- Driver for openstack nova_client
- """
- def __init__(self, ks_drv, service_name, version):
- """
- Constructor for NovaDriver
- Arguments: KeystoneDriver class object
- """
- self.ks_drv = ks_drv
- self._service_name = service_name
- self._version = version
-
- def _get_nova_credentials(self):
- """
- Returns a dictionary of kwargs required to instantiate python-novaclient class
- """
- creds = {}
- creds['version'] = self._version
- creds['bypass_url'] = self.ks_drv.get_service_endpoint(self._service_name, "publicURL")
- creds['username'] = self.ks_drv.get_username()
- creds['project_id'] = self.ks_drv.get_tenant_name()
- creds['auth_token'] = self.ks_drv.get_auth_token()
- creds['insecure'] = self.ks_drv.get_security_mode()
- #creds['user_domain_name'] = self.ks_drv.get_user_domain_name()
- #creds['project_domain_name'] = self.ks_drv.get_project_domain_name()
-
- return creds
-
- def _get_nova_connection(self):
- """
- Returns an object of class python-novaclient
- """
- if not hasattr(self, '_nova_connection'):
- self._nova_connection = nova_client.Client(**self._get_nova_credentials())
- else:
- # Reinitialize if auth_token is no longer valid
- if not self.ks_drv.is_auth_token_valid():
- self._nova_connection = nova_client.Client(**self._get_nova_credentials())
- return self._nova_connection
-
- def _flavor_get(self, flavor_id):
- """
- Get flavor by flavor_id
- Arguments:
- flavor_id(string): UUID of flavor_id
-
- Returns:
- dictionary of flavor parameters
- """
- nvconn = self._get_nova_connection()
- try:
- flavor = nvconn.flavors.get(flavor_id)
- except Exception as e:
- logger.info("OpenstackDriver: Did not find flavor with flavor_id : %s. Exception: %s"%(flavor_id, str(e)))
- raise
-
- try:
- extra_specs = flavor.get_keys()
- except Exception as e:
- logger.info("OpenstackDriver: Could not get the EPA attributes for flavor with flavor_id : %s. Exception: %s"%(flavor_id, str(e)))
- raise
-
- response = flavor.to_dict()
- assert 'extra_specs' not in response, "Key extra_specs present as flavor attribute"
- response['extra_specs'] = extra_specs
- return response
-
- def flavor_get(self, flavor_id):
- """
- Get flavor by flavor_id
- Arguments:
- flavor_id(string): UUID of flavor_id
-
- Returns:
- dictionary of flavor parameters
- """
- return self._flavor_get(flavor_id)
-
- def flavor_list(self):
- """
- Returns list of all flavors (dictionary per flavor)
-
- Arguments:
- None
- Returns:
- A list of dictionaries. Each dictionary contains attributes for a single flavor instance
- """
- flavors = []
- flavor_info = []
- nvconn = self._get_nova_connection()
- try:
- flavors = nvconn.flavors.list()
- except Exception as e:
- logger.error("OpenstackDriver: List Flavor operation failed. Exception: %s"%(str(e)))
- raise
- if flavors:
- flavor_info = [ self.flavor_get(flv.id) for flv in flavors ]
- return flavor_info
-
- def flavor_create(self, name, ram, vcpu, disk, extra_specs):
- """
- Create a new flavor
-
- Arguments:
- name (string): Name of the new flavor
- ram (int) : Memory in MB
- vcpus (int) : Number of VCPUs
- disk (int) : Secondary storage size in GB
- extra_specs (dictionary): EPA attributes dictionary
-
- Returns:
- flavor_id (string): UUID of flavor created
- """
- nvconn = self._get_nova_connection()
- try:
- flavor = nvconn.flavors.create(name = name,
- ram = ram,
- vcpus = vcpu,
- disk = disk,
- flavorid = 'auto',
- ephemeral = 0,
- swap = 0,
- rxtx_factor = 1.0,
- is_public = True)
- except Exception as e:
- logger.error("OpenstackDriver: Create Flavor operation failed. Exception: %s"%(str(e)))
- raise
-
- if extra_specs:
- try:
- flavor.set_keys(extra_specs)
- except Exception as e:
- logger.error("OpenstackDriver: Set Key operation failed for flavor: %s. Exception: %s" %(flavor.id, str(e)))
- raise
- return flavor.id
-
- def flavor_delete(self, flavor_id):
- """
- Deletes a flavor identified by flavor_id
-
- Arguments:
- flavor_id (string): UUID of flavor to be deleted
-
- Returns: None
- """
- assert flavor_id == self._flavor_get(flavor_id)['id']
- nvconn = self._get_nova_connection()
- try:
- nvconn.flavors.delete(flavor_id)
- except Exception as e:
- logger.error("OpenstackDriver: Delete flavor operation failed for flavor: %s. Exception: %s" %(flavor_id, str(e)))
- raise
-
-
- def server_list(self):
- """
- Returns a list of available VMs for the project
-
- Arguments: None
-
- Returns:
- A list of dictionaries. Each dictionary contains attributes associated
- with individual VM
- """
- servers = []
- server_info = []
- nvconn = self._get_nova_connection()
- try:
- servers = nvconn.servers.list()
- except Exception as e:
- logger.error("OpenstackDriver: List Server operation failed. Exception: %s" %(str(e)))
- raise
- server_info = [ server.to_dict() for server in servers]
- return server_info
-
- def _nova_server_get(self, server_id):
- """
- Returns a dictionary of attributes associated with VM identified by service_id
-
- Arguments:
- server_id (string): UUID of the VM/server for which information is requested
-
- Returns:
- A dictionary object with attributes associated with VM identified by server_id
- """
- nvconn = self._get_nova_connection()
- try:
- server = nvconn.servers.get(server = server_id)
- except Exception as e:
- logger.info("OpenstackDriver: Get Server operation failed for server_id: %s. Exception: %s" %(server_id, str(e)))
- raise
- else:
- return server.to_dict()
-
- def server_get(self, server_id):
- """
- Returns a dictionary of attributes associated with VM identified by service_id
-
- Arguments:
- server_id (string): UUID of the VM/server for which information is requested
-
- Returns:
- A dictionary object with attributes associated with VM identified by server_id
- """
- return self._nova_server_get(server_id)
-
- def server_create(self, **kwargs):
- """
- Creates a new VM/server instance
-
- Arguments:
- A dictionary of following key-value pairs
- {
- server_name(string) : Name of the VM/Server
- flavor_id (string) : UUID of the flavor to be used for VM
- image_id (string) : UUID of the image to be used VM/Server instance,
- This could be None if volumes (with images) are being used
- network_list(List) : A List of network_ids. A port will be created in these networks
- port_list (List) : A List of port-ids. These ports will be added to VM.
- metadata (dict) : A dictionary of arbitrary key-value pairs associated with VM/server
- userdata (string) : A script which shall be executed during first boot of the VM
- availability_zone (string) : A name of the availability zone where instance should be launched
- scheduler_hints (string) : Openstack scheduler_hints to be passed to nova scheduler
- }
- Returns:
- server_id (string): UUID of the VM/server created
-
- """
- nics = []
- if 'network_list' in kwargs:
- for network_id in kwargs['network_list']:
- nics.append({'net-id': network_id})
-
- if 'port_list' in kwargs:
- for port_id in kwargs['port_list']:
- nics.append({'port-id': port_id})
-
- nvconn = self._get_nova_connection()
-
-
- try:
- server = nvconn.servers.create(kwargs['name'],
- kwargs['image_id'],
- kwargs['flavor_id'],
- meta = kwargs['metadata'],
- files = kwargs['files'],
- reservation_id = None,
- min_count = None,
- max_count = None,
- userdata = kwargs['userdata'],
- security_groups = kwargs['security_groups'],
- availability_zone = kwargs['availability_zone'],
- block_device_mapping_v2 = kwargs['block_device_mapping_v2'],
- nics = nics,
- scheduler_hints = kwargs['scheduler_hints'],
- config_drive = kwargs['config_drive'])
- except Exception as e:
- logger.info("OpenstackDriver: Create Server operation failed. Exception: %s" %(str(e)))
- raise
- return server.to_dict()['id']
-
- def server_delete(self, server_id):
- """
- Deletes a server identified by server_id
-
- Arguments:
- server_id (string): UUID of the server to be deleted
-
- Returns: None
- """
- nvconn = self._get_nova_connection()
- try:
- nvconn.servers.delete(server_id)
- except Exception as e:
- logger.error("OpenstackDriver: Delete server operation failed for server_id: %s. Exception: %s" %(server_id, str(e)))
- raise
-
- def server_start(self, server_id):
- """
- Starts a server identified by server_id
-
- Arguments:
- server_id (string): UUID of the server to be started
-
- Returns: None
- """
- nvconn = self._get_nova_connection()
- try:
- nvconn.servers.start(server_id)
- except Exception as e:
- logger.error("OpenstackDriver: Start Server operation failed for server_id : %s. Exception: %s" %(server_id, str(e)))
- raise
-
- def server_stop(self, server_id):
- """
- Arguments:
- server_id (string): UUID of the server to be stopped
-
- Returns: None
- """
- nvconn = self._get_nova_connection()
- try:
- nvconn.servers.stop(server_id)
- except Exception as e:
- logger.error("OpenstackDriver: Stop Server operation failed for server_id : %s. Exception: %s" %(server_id, str(e)))
- raise
-
- def server_pause(self, server_id):
- """
- Arguments:
- server_id (string): UUID of the server to be paused
-
- Returns: None
- """
- nvconn = self._get_nova_connection()
- try:
- nvconn.servers.pause(server_id)
- except Exception as e:
- logger.error("OpenstackDriver: Pause Server operation failed for server_id : %s. Exception: %s" %(server_id, str(e)))
- raise
-
- def server_unpause(self, server_id):
- """
- Arguments:
- server_id (string): UUID of the server to be unpaused
-
- Returns: None
- """
- nvconn = self._get_nova_connection()
- try:
- nvconn.servers.unpause(server_id)
- except Exception as e:
- logger.error("OpenstackDriver: Resume Server operation failed for server_id : %s. Exception: %s" %(server_id, str(e)))
- raise
-
-
- def server_suspend(self, server_id):
- """
- Arguments:
- server_id (string): UUID of the server to be suspended
-
- Returns: None
- """
- nvconn = self._get_nova_connection()
- try:
- nvconn.servers.suspend(server_id)
- except Exception as e:
- logger.error("OpenstackDriver: Suspend Server operation failed for server_id : %s. Exception: %s" %(server_id, str(e)))
-
-
- def server_resume(self, server_id):
- """
- Arguments:
- server_id (string): UUID of the server to be resumed
-
- Returns: None
- """
- nvconn = self._get_nova_connection()
- try:
- nvconn.servers.resume(server_id)
- except Exception as e:
- logger.error("OpenstackDriver: Resume Server operation failed for server_id : %s. Exception: %s" %(server_id, str(e)))
- raise
-
- def server_reboot(self, server_id, reboot_type):
- """
- Arguments:
- server_id (string) : UUID of the server to be rebooted
- reboot_type(string):
- 'SOFT': Soft Reboot
- 'HARD': Hard Reboot
- Returns: None
- """
- nvconn = self._get_nova_connection()
- try:
- nvconn.servers.reboot(server_id, reboot_type)
- except Exception as e:
- logger.error("OpenstackDriver: Reboot Server operation failed for server_id: %s. Exception: %s" %(server_id, str(e)))
- raise
-
- def server_console(self, server_id, console_type = 'novnc'):
- """
- Arguments:
- server_id (string) : UUID of the server to be rebooted
- console_type(string):
- 'novnc',
- 'xvpvnc'
- Returns:
- A dictionary object response for console information
- """
- nvconn = self._get_nova_connection()
- try:
- console_info = nvconn.servers.get_vnc_console(server_id, console_type)
- except Exception as e:
- logger.error("OpenstackDriver: Server Get-Console operation failed for server_id: %s. Exception: %s" %(server_id, str(e)))
- raise
- return console_info
-
- def server_rebuild(self, server_id, image_id):
- """
- Arguments:
- server_id (string) : UUID of the server to be rebooted
- image_id (string) : UUID of the image to use
- Returns: None
- """
-
- nvconn = self._get_nova_connection()
- try:
- nvconn.servers.rebuild(server_id, image_id)
- except Exception as e:
- logger.error("OpenstackDriver: Rebuild Server operation failed for server_id: %s. Exception: %s" %(server_id, str(e)))
- raise
-
-
- def server_add_port(self, server_id, port_id):
- """
- Arguments:
- server_id (string): UUID of the server
- port_id (string): UUID of the port to be attached
-
- Returns: None
- """
- nvconn = self._get_nova_connection()
- try:
- nvconn.servers.interface_attach(server_id,
- port_id,
- net_id = None,
- fixed_ip = None)
- except Exception as e:
- logger.error("OpenstackDriver: Server Port Add operation failed for server_id : %s, port_id : %s. Exception: %s" %(server_id, port_id, str(e)))
- raise
-
- def server_delete_port(self, server_id, port_id):
- """
- Arguments:
- server_id (string): UUID of the server
- port_id (string): UUID of the port to be deleted
- Returns: None
-
- """
- nvconn = self._get_nova_connection()
- try:
- nvconn.servers.interface_detach(server_id, port_id)
- except Exception as e:
- logger.error("OpenstackDriver: Server Port Delete operation failed for server_id : %s, port_id : %s. Exception: %s" %(server_id, port_id, str(e)))
- raise
-
- def floating_ip_list(self):
- """
- Arguments:
- None
- Returns:
- List of objects of floating IP nova class (novaclient.v2.floating_ips.FloatingIP)
- """
- nvconn = self._get_nova_connection()
- try:
- ip_list = nvconn.floating_ips.list()
- except Exception as e:
- logger.error("OpenstackDriver: Floating IP List operation failed. Exception: %s" %str(e))
- raise
-
- return ip_list
-
- def floating_ip_create(self, pool):
- """
- Arguments:
- pool (string): Name of the pool (optional)
- Returns:
- An object of floating IP nova class (novaclient.v2.floating_ips.FloatingIP)
- """
- nvconn = self._get_nova_connection()
- try:
- floating_ip = nvconn.floating_ips.create(pool)
- except Exception as e:
- logger.error("OpenstackDriver: Floating IP Create operation failed. Exception: %s" %str(e))
- raise
-
- return floating_ip
-
- def floating_ip_delete(self, floating_ip):
- """
- Arguments:
- floating_ip: An object of floating IP nova class (novaclient.v2.floating_ips.FloatingIP)
- Returns:
- None
- """
- nvconn = self._get_nova_connection()
- try:
- floating_ip = nvconn.floating_ips.delete(floating_ip)
- except Exception as e:
- logger.error("OpenstackDriver: Floating IP Delete operation failed. Exception: %s" %str(e))
- raise
-
- def floating_ip_assign(self, server_id, floating_ip, fixed_ip):
- """
- Arguments:
- server_id (string) : UUID of the server
- floating_ip (string): IP address string for floating-ip
- fixed_ip (string) : IP address string for the fixed-ip with which floating ip will be associated
- Returns:
- None
- """
- nvconn = self._get_nova_connection()
- try:
- nvconn.servers.add_floating_ip(server_id, floating_ip, fixed_ip)
- except Exception as e:
- logger.error("OpenstackDriver: Assign Floating IP operation failed. Exception: %s" %str(e))
- raise
-
- def floating_ip_release(self, server_id, floating_ip):
- """
- Arguments:
- server_id (string) : UUID of the server
- floating_ip (string): IP address string for floating-ip
- Returns:
- None
- """
- nvconn = self._get_nova_connection()
- try:
- nvconn.servers.remove_floating_ip(server_id, floating_ip)
- except Exception as e:
- logger.error("OpenstackDriver: Release Floating IP operation failed. Exception: %s" %str(e))
- raise
-
- def volume_list(self, server_id):
- """
- List of volumes attached to the server
-
- Arguments:
- None
- Returns:
- List of dictionary objects where dictionary is representation of class (novaclient.v2.volumes.Volume)
- """
- nvconn = self._get_nova_connection()
- try:
- volumes = nvconn.volumes.get_server_volumes(server_id=server_id)
- except Exception as e:
- logger.error("OpenstackDriver: Get volume information failed. Exception: %s" %str(e))
- raise
-
- volume_info = [v.to_dict() for v in volumes]
- return volume_info
-
-
- def group_list(self):
- """
- List of Server Affinity and Anti-Affinity Groups
-
- Arguments:
- None
- Returns:
- List of dictionary objects where dictionary is representation of class (novaclient.v2.server_groups.ServerGroup)
- """
- nvconn = self._get_nova_connection()
- try:
- group_list = nvconn.server_groups.list()
- except Exception as e:
- logger.error("OpenstackDriver: Server Group List operation failed. Exception: %s" %str(e))
- raise
-
- group_info = [ group.to_dict() for group in group_list ]
- return group_info
-
-
-
-class NovaDriverV2(NovaDriver):
- """
- Driver class for novaclient V2 APIs
- """
- def __init__(self, ks_drv):
- """
- Constructor for NovaDriver
- Arguments: KeystoneDriver class object
- """
- super(NovaDriverV2, self).__init__(ks_drv, 'compute', '2.0')
-
-class NovaDriverV21(NovaDriver):
- """
- Driver class for novaclient V2 APIs
- """
- def __init__(self, ks_drv):
- """
- Constructor for NovaDriver
- Arguments: KeystoneDriver class object
- """
- super(NovaDriverV21, self).__init__(ks_drv, 'compute', '2.1')
-
-class GlanceDriver(object):
- """
- Driver for openstack glance-client
- """
- def __init__(self, ks_drv, service_name, version):
- """
- Constructor for GlanceDriver
- Arguments: KeystoneDriver class object
- """
- self.ks_drv = ks_drv
- self._service_name = service_name
- self._version = version
-
- def _get_glance_credentials(self):
- """
- Returns a dictionary of kwargs required to instantiate python-glanceclient class
-
- Arguments: None
-
- Returns:
- A dictionary object of arguments
- """
- creds = {}
- creds['version'] = self._version
- creds['endpoint'] = self.ks_drv.get_service_endpoint(self._service_name, 'publicURL')
- creds['token'] = self.ks_drv.get_auth_token()
- creds['insecure'] = self.ks_drv.get_security_mode()
- return creds
-
- def _get_glance_connection(self):
- """
- Returns a object of class python-glanceclient
- """
- if not hasattr(self, '_glance_connection'):
- self._glance_connection = glclient.Client(**self._get_glance_credentials())
- else:
- # Reinitialize if auth_token is no longer valid
- if not self.ks_drv.is_auth_token_valid():
- self._glance_connection = glclient.Client(**self._get_glance_credentials())
- return self._glance_connection
-
- def image_list(self):
- """
- Returns list of dictionaries. Each dictionary contains attributes associated with
- image
-
- Arguments: None
-
- Returns: List of dictionaries.
- """
- glconn = self._get_glance_connection()
- images = []
- try:
- image_info = glconn.images.list()
- except Exception as e:
- logger.error("OpenstackDriver: List Image operation failed. Exception: %s" %(str(e)))
- raise
- images = [ img for img in image_info ]
- return images
-
- def image_create(self, **kwargs):
- """
- Creates an image
- Arguments:
- A dictionary of kwargs with following keys
- {
- 'name'(string) : Name of the image
- 'location'(string) : URL (http://....) where image is located
- 'disk_format'(string) : Disk format
- Possible values are 'ami', 'ari', 'aki', 'vhd', 'vmdk', 'raw', 'qcow2', 'vdi', 'iso'
- 'container_format'(string): Container format
- Possible values are 'ami', 'ari', 'aki', 'bare', 'ovf'
- 'tags' : A list of user tags
- 'checksum' : The image md5 checksum
- }
- Returns:
- image_id (string) : UUID of the image
-
- """
- glconn = self._get_glance_connection()
- try:
- image = glconn.images.create(**kwargs)
- except Exception as e:
- logger.error("OpenstackDriver: Create Image operation failed. Exception: %s" %(str(e)))
- raise
-
- return image.id
-
- def image_upload(self, image_id, fd):
- """
- Upload the image
-
- Arguments:
- image_id: UUID of the image
- fd : File descriptor for the image file
- Returns: None
- """
- glconn = self._get_glance_connection()
- try:
- glconn.images.upload(image_id, fd)
- except Exception as e:
- logger.error("OpenstackDriver: Image upload operation failed. Exception: %s" %(str(e)))
- raise
-
- def image_add_location(self, image_id, location, metadata):
- """
- Add image URL location
-
- Arguments:
- image_id : UUID of the image
- location : http URL for the image
-
- Returns: None
- """
- glconn = self._get_glance_connection()
- try:
- image = glconn.images.add_location(image_id, location, metadata)
- except Exception as e:
- logger.error("OpenstackDriver: Image location add operation failed. Exception: %s" %(str(e)))
- raise
-
- def image_update(self):
- pass
-
- def image_delete(self, image_id):
- """
- Delete an image
-
- Arguments:
- image_id: UUID of the image
-
- Returns: None
-
- """
- assert image_id == self._image_get(image_id)['id']
- glconn = self._get_glance_connection()
- try:
- glconn.images.delete(image_id)
- except Exception as e:
- logger.error("OpenstackDriver: Delete Image operation failed for image_id : %s. Exception: %s" %(image_id, str(e)))
- raise
-
-
- def _image_get(self, image_id):
- """
- Returns a dictionary object of VM image attributes
-
- Arguments:
- image_id (string): UUID of the image
-
- Returns:
- A dictionary of the image attributes
- """
- glconn = self._get_glance_connection()
- try:
- image = glconn.images.get(image_id)
- except GlanceException.HTTPBadRequest:
- # RIFT-14241: The get image request occasionally returns the below message. Retry in case of bad request exception.
- # Error code 400.: Message: Bad request syntax ('0').: Error code explanation: 400 = Bad request syntax or unsupported method. (HTTP 400)
- logger.warning("OpenstackDriver: Got bad request response during get_image request. Retrying.")
- image = glconn.images.get(image_id)
- except Exception as e:
- logger.error("OpenstackDriver: Get Image operation failed for image_id : %s. Exception: %s" %(image_id, str(e)))
- raise
-
- return image
-
- def image_get(self, image_id):
- """
- Returns a dictionary object of VM image attributes
-
- Arguments:
- image_id (string): UUID of the image
-
- Returns:
- A dictionary of the image attributes
- """
- return self._image_get(image_id)
-
-class GlanceDriverV2(GlanceDriver):
- """
- Driver for openstack glance-client V2
- """
- def __init__(self, ks_drv):
- super(GlanceDriverV2, self).__init__(ks_drv, 'image', 2)
-
-class NeutronDriver(object):
- """
- Driver for openstack neutron neutron-client
- """
- def __init__(self, ks_drv, service_name, version):
- """
- Constructor for NeutronDriver
- Arguments: KeystoneDriver class object
- """
- self.ks_drv = ks_drv
- self._service_name = service_name
- self._version = version
-
- def _get_neutron_credentials(self):
- """
- Returns a dictionary of kwargs required to instantiate python-neutronclient class
-
- Returns:
- Dictionary of kwargs
- """
- creds = {}
- creds['api_version'] = self._version
- creds['endpoint_url'] = self.ks_drv.get_service_endpoint(self._service_name, 'publicURL')
- creds['token'] = self.ks_drv.get_auth_token()
- creds['tenant_name'] = self.ks_drv.get_tenant_name()
- creds['insecure'] = self.ks_drv.get_security_mode()
- return creds
-
- def _get_neutron_connection(self):
- """
- Returns an object of class python-neutronclient
- """
- if not hasattr(self, '_neutron_connection'):
- self._neutron_connection = ntclient.Client(**self._get_neutron_credentials())
- else:
- # Reinitialize if auth_token is no longer valid
- if not self.ks_drv.is_auth_token_valid():
- self._neutron_connection = ntclient.Client(**self._get_neutron_credentials())
- return self._neutron_connection
-
- def network_list(self):
- """
- Returns list of dictionaries. Each dictionary contains the attributes for a network
- under project
-
- Arguments: None
-
- Returns:
- A list of dictionaries
- """
- networks = []
- ntconn = self._get_neutron_connection()
- try:
- networks = ntconn.list_networks()
- except Exception as e:
- logger.error("OpenstackDriver: List Network operation failed. Exception: %s" %(str(e)))
- raise
- return networks['networks']
-
- def network_create(self, **kwargs):
- """
- Creates a new network for the project
-
- Arguments:
- A dictionary with following key-values
- {
- name (string) : Name of the network
- admin_state_up(Boolean) : True/False (Defaults: True)
- external_router(Boolean) : Connectivity with external router. True/False (Defaults: False)
- shared(Boolean) : Shared among tenants. True/False (Defaults: False)
- physical_network(string) : The physical network where this network object is implemented (optional).
- network_type : The type of physical network that maps to this network resource (optional).
- Possible values are: 'flat', 'vlan', 'vxlan', 'gre'
- segmentation_id : An isolated segment on the physical network. The network_type attribute
- defines the segmentation model. For example, if the network_type value
- is vlan, this ID is a vlan identifier. If the network_type value is gre,
- this ID is a gre key.
- }
- """
- params = {'network':
- {'name' : kwargs['name'],
- 'admin_state_up' : kwargs['admin_state_up'],
- 'tenant_id' : self.ks_drv.get_tenant_id(),
- 'shared' : kwargs['shared'],
- #'port_security_enabled': port_security_enabled,
- 'router:external' : kwargs['external_router']}}
-
- if 'physical_network' in kwargs:
- params['network']['provider:physical_network'] = kwargs['physical_network']
- if 'network_type' in kwargs:
- params['network']['provider:network_type'] = kwargs['network_type']
- if 'segmentation_id' in kwargs:
- params['network']['provider:segmentation_id'] = kwargs['segmentation_id']
-
- ntconn = self._get_neutron_connection()
- try:
- logger.debug("Calling neutron create_network() with params: %s", str(params))
- net = ntconn.create_network(params)
- except Exception as e:
- logger.error("OpenstackDriver: Create Network operation failed. Exception: %s" %(str(e)))
- raise
- logger.debug("Got create_network response from neutron connection: %s", str(net))
- network_id = net['network']['id']
- if not network_id:
- raise Exception("Empty network id returned from create_network. (params: %s)" % str(params))
-
- return network_id
-
- def network_delete(self, network_id):
- """
- Deletes a network identified by network_id
-
- Arguments:
- network_id (string): UUID of the network
-
- Returns: None
- """
- assert network_id == self._network_get(network_id)['id']
- ntconn = self._get_neutron_connection()
- try:
- ntconn.delete_network(network_id)
- except Exception as e:
- logger.error("OpenstackDriver: Delete Network operation failed. Exception: %s" %(str(e)))
- raise
-
- def _network_get(self, network_id):
- """
- Returns a dictionary object describing the attributes of the network
-
- Arguments:
- network_id (string): UUID of the network
-
- Returns:
- A dictionary object of the network attributes
- """
- ntconn = self._get_neutron_connection()
- network = ntconn.list_networks(id = network_id)['networks']
- if not network:
- raise NeutronException.NotFound("Network with id %s not found"%(network_id))
-
- return network[0]
-
- def network_get(self, network_id):
- """
- Returns a dictionary object describing the attributes of the network
-
- Arguments:
- network_id (string): UUID of the network
-
- Returns:
- A dictionary object of the network attributes
- """
- return self._network_get(network_id)
-
- def subnet_create(self, **kwargs):
- """
- Creates a subnet on the network
-
+ Constructor of DriverUtilities class
Arguments:
- A dictionary with following key value pairs
- {
- network_id(string) : UUID of the network where subnet needs to be created
- subnet_cidr(string) : IPv4 address prefix (e.g. '1.1.1.0/24') for the subnet
- ip_version (integer): 4 for IPv4 and 6 for IPv6
-
- }
-
- Returns:
- subnet_id (string): UUID of the created subnet
+ driver: Object of OpenstackDriver
"""
- params = {}
- params['network_id'] = kwargs['network_id']
- params['ip_version'] = kwargs['ip_version']
-
- # if params['ip_version'] == 6:
- # assert 0, "IPv6 is not supported"
+ self.flavor_utils = drv_utils.FlavorUtils(driver)
+ self.network_utils = drv_utils.NetworkUtils(driver)
+ self.image_utils = drv_utils.ImageUtils(driver)
+ self.compute_utils = drv_utils.ComputeUtils(driver)
- if 'subnetpool_id' in kwargs:
- params['subnetpool_id'] = kwargs['subnetpool_id']
- else:
- params['cidr'] = kwargs['cidr']
-
- if 'gateway_ip' in kwargs:
- params['gateway_ip'] = kwargs['gateway_ip']
- else:
- params['gateway_ip'] = None
-
- if 'dhcp_params' in kwargs:
- params['enable_dhcp'] = kwargs['dhcp_params']['enable_dhcp']
- if 'start_address' in kwargs['dhcp_params'] and 'count' in kwargs['dhcp_params']:
- end_address = (ipaddress.IPv4Address(kwargs['dhcp_params']['start_address']) + kwargs['dhcp_params']['count']).compressed
- params['allocation_pools'] = [ {'start': kwargs['dhcp_params']['start_address'] ,
- 'end' : end_address} ]
-
- if 'dns_server' in kwargs:
- params['dns_nameservers'] = []
- for server in kwargs['dns_server']:
- params['dns_nameservers'].append(server)
-
- ntconn = self._get_neutron_connection()
- try:
- subnet = ntconn.create_subnet({'subnets': [params]})
- except Exception as e:
- logger.error("OpenstackDriver: Create Subnet operation failed. Exception: %s" %(str(e)))
- raise
-
- return subnet['subnets'][0]['id']
-
- def subnet_list(self):
- """
- Returns a list of dictionaries. Each dictionary contains attributes describing the subnet
-
- Arguments: None
-
- Returns:
- A dictionary of the objects of subnet attributes
- """
- ntconn = self._get_neutron_connection()
- try:
- subnets = ntconn.list_subnets()['subnets']
- except Exception as e:
- logger.error("OpenstackDriver: List Subnet operation failed. Exception: %s" %(str(e)))
- raise
- return subnets
-
- def _subnet_get(self, subnet_id):
- """
- Returns a dictionary object describing the attributes of a subnet.
-
- Arguments:
- subnet_id (string): UUID of the subnet
-
- Returns:
- A dictionary object of the subnet attributes
- """
- ntconn = self._get_neutron_connection()
- subnets = ntconn.list_subnets(id=subnet_id)
- if not subnets['subnets']:
- logger.error("OpenstackDriver: Get subnet operation failed for subnet_id: %s" %(subnet_id))
- #raise NeutronException.NotFound("Could not find subnet_id %s" %(subnet_id))
- return {'cidr': ''}
- else:
- return subnets['subnets'][0]
-
- def subnet_get(self, subnet_id):
- """
- Returns a dictionary object describing the attributes of a subnet.
-
- Arguments:
- subnet_id (string): UUID of the subnet
-
- Returns:
- A dictionary object of the subnet attributes
- """
- return self._subnet_get(subnet_id)
-
- def subnet_delete(self, subnet_id):
- """
- Deletes a subnet identified by subnet_id
-
- Arguments:
- subnet_id (string): UUID of the subnet to be deleted
-
- Returns: None
- """
- ntconn = self._get_neutron_connection()
- assert subnet_id == self._subnet_get(self,subnet_id)
- try:
- ntconn.delete_subnet(subnet_id)
- except Exception as e:
- logger.error("OpenstackDriver: Delete Subnet operation failed for subnet_id : %s. Exception: %s" %(subnet_id, str(e)))
- raise
-
- def port_list(self, **kwargs):
- """
- Returns a list of dictionaries. Each dictionary contains attributes describing the port
-
- Arguments:
- kwargs (dictionary): A dictionary for filters for port_list operation
-
- Returns:
- A dictionary of the objects of port attributes
-
- """
- ports = []
- ntconn = self._get_neutron_connection()
-
- kwargs['tenant_id'] = self.ks_drv.get_tenant_id()
-
- try:
- ports = ntconn.list_ports(**kwargs)
- except Exception as e:
- logger.info("OpenstackDriver: List Port operation failed. Exception: %s" %(str(e)))
- raise
- return ports['ports']
-
- def port_create(self, **kwargs):
- """
- Create a port in network
-
- Arguments:
- A dictionary of following
- {
- name (string) : Name of the port
- network_id(string) : UUID of the network_id identifying the network to which port belongs
- subnet_id(string) : UUID of the subnet_id from which IP-address will be assigned to port
- vnic_type(string) : Possible values are "normal", "direct", "macvtap"
- }
- Returns:
- port_id (string) : UUID of the port
- """
- params = {
- "port": {
- "admin_state_up" : kwargs['admin_state_up'],
- "name" : kwargs['name'],
- "network_id" : kwargs['network_id'],
- "fixed_ips" : [ {"subnet_id": kwargs['subnet_id']}],
- "binding:vnic_type" : kwargs['port_type']}}
- if 'port_security_enabled' in kwargs:
- params["port"]["port_security_enabled"] = kwargs['port_security_enabled']
-
- ntconn = self._get_neutron_connection()
- try:
- port = ntconn.create_port(params)
- except Exception as e:
- logger.error("OpenstackDriver: Port Create operation failed. Exception: %s" %(str(e)))
- raise
- return port['port']['id']
-
- def _port_get(self, port_id):
- """
- Returns a dictionary object describing the attributes of the port
-
- Arguments:
- port_id (string): UUID of the port
-
- Returns:
- A dictionary object of the port attributes
- """
- ntconn = self._get_neutron_connection()
- port = ntconn.list_ports(id=port_id)['ports']
- if not port:
- raise NeutronException.NotFound("Could not find port_id %s" %(port_id))
- return port[0]
-
- def port_get(self, port_id):
- """
- Returns a dictionary object describing the attributes of the port
-
- Arguments:
- port_id (string): UUID of the port
-
- Returns:
- A dictionary object of the port attributes
- """
- return self._port_get(port_id)
-
- def port_delete(self, port_id):
- """
- Deletes a port identified by port_id
-
- Arguments:
- port_id (string) : UUID of the port
+ @property
+ def flavor(self):
+ return self.flavor_utils
- Returns: None
- """
- assert port_id == self._port_get(port_id)['id']
- ntconn = self._get_neutron_connection()
- try:
- ntconn.delete_port(port_id)
- except Exception as e:
- logger.error("Port Delete operation failed for port_id : %s. Exception: %s" %(port_id, str(e)))
- raise
+ @property
+ def compute(self):
+ return self.compute_utils
+
+ @property
+ def network(self):
+ return self.network_utils
+
+ @property
+ def image(self):
+ return self.image_utils
- def security_group_list(self):
+
+class OpenstackDriver(object):
+ """
+ Driver for openstack nova, neutron, glance, keystone, swift, cinder services
+ """
+ def __init__(self, logger = None, **kwargs):
"""
- Returns a list of dictionaries. Each dictionary contains attributes describing the security group
-
+ OpenstackDriver Driver constructor
Arguments:
- None
-
- Returns:
- A dictionary of the objects of security group attributes
- """
- ntconn = self._get_neutron_connection()
- try:
- group_list = ntconn.list_security_groups(tenant_id=self.ks_drv.get_tenant_id())
- except Exception as e:
- logger.error("List Security group operation, Exception: %s" %(str(e)))
- raise
-
- if 'security_groups' in group_list:
- return group_list['security_groups']
+ logger: (instance of logging.Logger)
+ kwargs: A dictionary of
+ {
+ username (string) : Username for project/tenant.
+ password (string) : Password
+ auth_url (string) : Keystone Authentication URL.
+ project (string) : Openstack project name
+ mgmt_network(string, optional) : Management network name. Each VM created with this cloud-account will
+ have a default interface into management network.
+ cert_validate (boolean, optional) : In case of SSL/TLS connection if certificate validation is required or not.
+ user_domain : Domain name for user
+ project_domain : Domain name for project
+                  region_name (string, optional) : Region name (the constructor reads the 'region_name' key, not 'region')
+ }
+ """
+
+ if logger is None:
+ self.log = logging.getLogger('rwcal.openstack.driver')
+ self.log.setLevel(logging.DEBUG)
else:
- return []
-
- def subnetpool_list(self, **kwargs):
- """
- Returns a list of dictionaries. Each dictionary contains attributes describing a subnet prefix pool
+ self.log = logger
+
+ args = dict(auth_url = kwargs['auth_url'],
+ username = kwargs['username'],
+ password = kwargs['password'],
+ project_name = kwargs['project'],
+ project_domain_name = kwargs['project_domain'] if 'project_domain' in kwargs else None,
+ user_domain_name = kwargs['user_domain'] if 'user_domain' in kwargs else None,)
+
+ cert_validate = kwargs['cert_validate'] if 'cert_validate' in kwargs else False
+ region = kwargs['region_name'] if 'region_name' in kwargs else False
+ mgmt_network = kwargs['mgmt_network'] if 'mgmt_network' in kwargs else None
+
+ discover = ks_drv.KeystoneVersionDiscover(kwargs['auth_url'], logger = self.log)
+ (major, minor) = discover.get_version()
+
+ self.sess_drv = sess_drv.SessionDriver(auth_method = 'password',
+ version = str(major),
+ cert_validate = cert_validate,
+ logger = self.log,
+ **args)
+
+ self.ks_drv = ks_drv.KeystoneDriver(str(major),
+ self.sess_drv,
+ logger = self.log)
+
+ self.nova_drv = nv_drv.NovaDriver(self.sess_drv,
+ region_name = region,
+ logger = self.log)
+
+ self.neutron_drv = nt_drv.NeutronDriver(self.sess_drv,
+ region_name = region,
+ logger = self.log)
+
+ self.glance_drv = gl_drv.GlanceDriver(self.sess_drv,
+ region_name = region,
+ logger = self.log)
+
+ self.cinder_drv = ci_drv.CinderDriver(self.sess_drv,
+ region_name = region,
+ logger = self.log)
+
+ self.ceilo_drv = ce_drv.CeilometerDriver(self.sess_drv,
+ region_name = region,
+ logger = self.log)
+
+ self.portchain_drv = port_drv.L2PortChainDriver(self.sess_drv,
+ self.neutron_drv,
+ logger = self.log)
+ self.utils = DriverUtilities(self)
+
+ self._mgmt_network = mgmt_network
+
+ self._cache = dict(neutron = dict(),
+ nova = dict(),
+ cinder = dict(),
+ glance = dict())
+ self.build_resource_cache()
- Arguments:
- None
+ @property
+ def nova_cache(self):
+ return self._cache['nova']
- Returns:
- A dictionary of the objects of subnet prefix pool
- """
- ntconn = self._get_neutron_connection()
- try:
- pool_list = ntconn.list_subnetpools(**kwargs)
- except Exception as e:
- logger.error("List SubnetPool operation, Exception: %s" %(str(e)))
- raise
+ @property
+ def neutron_cache(self):
+ return self._cache['neutron']
+
+ @property
+ def glance_cache(self):
+ return self._cache['glance']
- if 'subnetpools' in pool_list:
- return pool_list['subnetpools']
+ @property
+ def cinder_cache(self):
+ return self._cache['cinder']
+
+ def build_resource_cache(self):
+ self.build_network_resource_cache()
+ self.build_nova_resource_cache()
+ self.build_cinder_resource_cache()
+ self.build_glance_resource_cache()
+
+ def _cache_populate(self, method, datatype, *args, **kwargs):
+ try:
+ rsp = method(*args, **kwargs)
+ except Exception as e:
+ self.log.exception("Exception %s occured during execution of %s",
+ str(e), method)
+ return datatype
else:
- return []
+ return rsp
-class NeutronDriverV2(NeutronDriver):
- """
- Driver for openstack neutron neutron-client v2
- """
- def __init__(self, ks_drv):
- """
- Constructor for NeutronDriver
- Arguments: KeystoneDriver class object
- """
- super(NeutronDriverV2, self).__init__(ks_drv, 'network', '2.0')
-
+ def _build_nova_security_group_list(self):
+ self.log.info("Building Nova security group cache")
+ self.nova_cache['security_groups'] = self._cache_populate(self.nova_drv.security_group_list,
+ list())
+ return self.nova_cache['security_groups']
+
+ def _build_nova_affinity_group_list(self):
+ self.log.info("Building Nova affinity/anti-affinity group cache")
+ self.nova_cache['affinity_groups'] = self._cache_populate(self.nova_server_group_list,
+ list())
+ return self.nova_cache['affinity_groups']
+
+ def _build_neutron_security_group_list(self):
+ self.log.info("Discovering neutron security group")
+ self.neutron_cache['security_groups'] = self._cache_populate(self.neutron_security_group_list,
+ list())
+ return self.neutron_cache['security_groups']
+
+ def _build_neutron_subnet_prefix_list(self):
+ self.log.info("Discovering subnet prefix pools")
+ self.neutron_cache['subnet_pool'] = self._cache_populate(self.neutron_subnetpool_list,
+ list())
+ return self.neutron_cache['subnet_pool']
+
+ def _get_neutron_mgmt_network(self):
+ if self._mgmt_network:
+ self.log.info("Discovering management network %s", self._mgmt_network)
+ network_list = self._cache_populate(self.neutron_drv.network_get,
+ None,
+ **{'network_name':self._mgmt_network})
+ if network_list:
+ self.neutron_cache['mgmt_net'] = network_list['id']
+ else:
+ raise KeyError("Error")
+
+ def _build_glance_image_list(self):
+ self.log.info("Discovering images")
+ self.glance_cache['images'] = self._cache_populate(self.glance_image_list,
+ list())
+ return self.glance_cache['images']
+
+
+ def build_nova_resource_cache(self):
+ self.log.info("Building nova resource cache")
+ self._build_nova_security_group_list()
+ self._build_nova_affinity_group_list()
+
+
+ def build_network_resource_cache(self):
+ self.log.info("Building network resource cache")
+ self._get_neutron_mgmt_network()
+ self._build_neutron_security_group_list()
+ self._build_neutron_subnet_prefix_list()
-class CeilometerDriver(object):
- """
- Driver for openstack ceilometer client
- """
+ def build_cinder_resource_cache(self):
+ pass
- def __init__(self, ks_drv, service_name, version):
- """
- Constructor for CeilometerDriver
- Arguments: KeystoneDriver class object
- """
- self.ks_drv = ks_drv
- self._service_name = service_name
- self._version = version
- self._client = None
- @property
- def version(self):
- """The version of the ceilometer client used by the driver"""
- return self._version
+ def build_glance_resource_cache(self):
+ self.log.info("Building glance resource cache")
+ self._build_glance_image_list()
+
@property
- def client(self):
- """The instance of ceilometer client used by the driver"""
- if self._client is None or not self.ks_drv.is_auth_token_valid():
- self._client = ceilo_client.Client(**self.credentials)
-
- return self._client
+ def _nova_affinity_group(self):
+ if 'affinity_groups' in self.nova_cache:
+ return self.nova_cache['affinity_groups']
+ else:
+ return self._build_nova_affinity_group_list()
@property
- def auth_token(self):
- """The authorization token for the ceilometer client"""
- try:
- return self.ks_drv.get_auth_token()
- except KeystoneExceptions.EndpointNotFound as e:
- logger.error("OpenstackDriver: unable to get authorization token for ceilometer. Exception: %s" %(str(e)))
- raise
-
+ def _nova_security_groups(self):
+ if 'security_groups' in self.nova_cache:
+ return self.nova_cache['security_groups']
+ else:
+ return self._build_nova_security_group_list()
+
@property
- def security_mode(self):
- """The security mode for the ceilometer client"""
- try:
- return self.ks_drv.get_security_mode()
- except KeystoneExceptions.EndpointNotFound as e:
- logger.error("OpenstackDriver: unable to get security mode for ceilometer. Exception: %s" %(str(e)))
- raise
-
+ def mgmt_network(self):
+ return self._mgmt_network
+
@property
- def endpoint(self):
- """The service endpoint for the ceilometer client"""
- try:
- return self.ks_drv.get_service_endpoint(self._service_name, "publicURL")
- except KeystoneExceptions.EndpointNotFound as e:
- logger.error("OpenstackDriver: unable to get endpoint for ceilometer. Exception: %s" %(str(e)))
- raise
+ def _mgmt_network_id(self):
+ if 'mgmt_net' in self.neutron_cache:
+ return self.neutron_cache['mgmt_net']
+ else:
+ return list()
@property
- def credentials(self):
- """A dictionary of credentials for the ceilometer client"""
- return dict(
- version=self.version,
- endpoint=self.endpoint,
- token=self.auth_token,
- insecure=self.security_mode,
- )
+ def _neutron_security_groups(self):
+ if 'security_groups' in self.neutron_cache:
+ return self.neutron_cache['security_groups']
+ else:
+ return self._build_neutron_security_group_list()
@property
- def meters(self):
- """A list of the available meters"""
- try:
- return self.client.meters.list()
- except Exception as e:
- logger.error("OpenstackDriver: List meters operation failed. Exception: %s" %(str(e)))
- raise
-
+ def _neutron_subnet_prefix_pool(self):
+ if 'subnet_pool' in self.neutron_cache:
+ return self.neutron_cache['subnet_pool']
+ else:
+ return self._build_neutron_subnet_prefix_list()
+
@property
- def alarms(self):
- """The ceilometer client alarms manager"""
- return self.client.alarms
-
- def query_samples(self, vim_instance_id, counter_name, limit=1):
- """Returns a list of samples
-
- Arguments:
- vim_instance_id - the ID of the VIM that the samples are from
- counter_name - the counter that the samples will come from
- limit - a limit on the number of samples to return
- (default: 1)
-
- Returns:
- A list of samples
-
- """
- try:
- filter = json.dumps({
- "and": [
- {"=": {"resource": vim_instance_id}},
- {"=": {"counter_name": counter_name}}
- ]
- })
- result = self.client.query_samples.query(filter=filter, limit=limit)
- return result[-limit:]
-
- except Exception as e:
- logger.exception(e)
-
- return []
-
-
-class CeilometerDriverV2(CeilometerDriver):
- """
- Driver for openstack ceilometer ceilometer-client
- """
- def __init__(self, ks_drv):
- """
- Constructor for CeilometerDriver
- Arguments: CeilometerDriver class object
- """
- super(CeilometerDriverV2, self).__init__(ks_drv, 'metering', '2')
-
-class OpenstackDriver(object):
- """
- Driver for openstack nova, neutron, glance, keystone, swift, cinder services
- """
- def __init__(self, username,
- password,
- auth_url,
- tenant_name,
- mgmt_network = None,
- cert_validate = False,
- user_domain_name = None,
- project_domain_name = None,
- region = None):
- """
- OpenstackDriver Driver constructor
- Arguments:
- username (string) : Username for project/tenant.
- password (string) : Password
- auth_url (string) : Keystone Authentication URL.
- tenant_name (string) : Openstack project name
- mgmt_network(string, optional) : Management network name. Each VM created with this cloud-account will
- have a default interface into management network.
- cert_validate (boolean, optional) : In case of SSL/TLS connection if certificate validation is required or not.
- user_domain_name : Domain name for user
- project_domain_name : Domain name for project
- region : Region name
- """
- insecure = not cert_validate
- if auth_url.find('/v3') != -1:
- self.ks_drv = KeystoneDriverV3(username,
- password,
- auth_url,
- tenant_name,
- insecure,
- user_domain_name,
- project_domain_name,
- region)
- self.glance_drv = GlanceDriverV2(self.ks_drv)
- self.nova_drv = NovaDriverV21(self.ks_drv)
- self.neutron_drv = NeutronDriverV2(self.ks_drv)
- self.ceilo_drv = CeilometerDriverV2(self.ks_drv)
- self.cinder_drv = CinderDriverV2(self.ks_drv)
- elif auth_url.find('/v2') != -1:
-
- self.ks_drv = KeystoneDriverV2(username,
- password,
- auth_url,
- tenant_name,
- insecure,
- region)
- self.glance_drv = GlanceDriverV2(self.ks_drv)
- self.nova_drv = NovaDriverV2(self.ks_drv)
- self.neutron_drv = NeutronDriverV2(self.ks_drv)
- self.ceilo_drv = CeilometerDriverV2(self.ks_drv)
- self.cinder_drv = CinderDriverV2(self.ks_drv)
+ def _glance_image_list(self):
+ if 'images' in self.glance_cache:
+ return self.glance_cache['images']
else:
- logger.error("Could not identity the version information for openstack service endpoints. Auth_URL should contain \"/v2\" or \"/v3\" string in it")
- raise NotImplementedError("Auth URL is wrong or invalid. Only Keystone v2 & v3 supported")
-
- self._mgmt_network_id = None
- if mgmt_network != None:
- self._mgmt_network = mgmt_network
-
- networks = []
- try:
- ntconn = self.neutron_drv._get_neutron_connection()
- networks = ntconn.list_networks()
- except (KeystoneExceptions.Unauthorized, KeystoneExceptions.AuthorizationFailure) as e:
- raise
- except Exception as e:
- logger.error("OpenstackDriver: List Network operation failed. Exception: %s" %(str(e)))
- raise
-
- network_list = [ network for network in networks['networks'] if network['name'] == mgmt_network ]
-
- if not network_list:
- raise NeutronException.NotFound("Could not find network %s" %(mgmt_network))
- self._mgmt_network_id = network_list[0]['id']
-
+ return self._build_glance_image_list()
+
def validate_account_creds(self):
try:
- ksconn = self.ks_drv._get_keystone_connection()
+ self.sess_drv.invalidate_auth_token()
+ self.sess_drv.auth_token
+ self.build_resource_cache()
except KeystoneExceptions.AuthorizationFailure as e:
- logger.error("OpenstackDriver: Unable to authenticate or validate the existing credentials. Exception: %s" %(str(e)))
+ self.log.error("Unable to authenticate or validate the existing credentials. Exception: %s", str(e))
raise ValidationError("Invalid Credentials: "+ str(e))
except Exception as e:
- logger.error("OpenstackDriver: Could not connect to Openstack. Exception: %s" %(str(e)))
+ self.log.error("Could not connect to Openstack. Exception: %s", str(e))
raise ValidationError("Connection Error: "+ str(e))
- def get_mgmt_network_id(self):
- return self._mgmt_network_id
-
+
def glance_image_create(self, **kwargs):
if not 'disk_format' in kwargs:
kwargs['disk_format'] = 'qcow2'
def glance_image_add_location(self, image_id, location):
self.glance_drv.image_add_location(image_id, location)
+ def glance_image_update(self, image_id, remove_props = None, **kwargs):
+ self.glance_drv.image_update(image_id, remove_props=remove_props, **kwargs)
+
def glance_image_delete(self, image_id):
self.glance_drv.image_delete(image_id)
def glance_image_get(self, image_id):
return self.glance_drv.image_get(image_id)
-
def nova_flavor_list(self):
return self.nova_drv.flavor_list()
- def nova_flavor_create(self, name, ram, vcpus, disk, epa_specs):
- extra_specs = epa_specs if epa_specs else {}
+ def nova_flavor_find(self, **kwargs):
+ return self.nova_drv.flavor_find(**kwargs)
+
+ def nova_flavor_create(self, name, ram, vcpus, disk, epa_specs = dict()):
return self.nova_drv.flavor_create(name,
ram = ram,
vcpu = vcpus,
disk = disk,
- extra_specs = extra_specs)
+ extra_specs = epa_specs)
def nova_flavor_delete(self, flavor_id):
self.nova_drv.flavor_delete(flavor_id)
return self.nova_drv.flavor_get(flavor_id)
def nova_server_create(self, **kwargs):
- def _verify_image(image_id):
- image = self.glance_drv.image_get(image_id)
- if image['status'] != 'active':
- raise GlanceException.NotFound("Image with image_id: %s not found in active state. Current State: %s" %(image['id'], image['status']))
-
- assert kwargs['flavor_id'] == self.nova_drv.flavor_get(kwargs['flavor_id'])['id']
-
- if kwargs['block_device_mapping_v2'] is not None:
- for block_map in kwargs['block_device_mapping_v2']:
- if 'uuid' in block_map:
- _verify_image(block_map['uuid'])
- else:
- _verify_image(kwargs['image_id'])
-
- # if 'network_list' in kwargs:
- # kwargs['network_list'].append(self._mgmt_network_id)
- # else:
- # kwargs['network_list'] = [self._mgmt_network_id]
-
if 'security_groups' not in kwargs:
- nvconn = self.nova_drv._get_nova_connection()
- sec_groups = nvconn.security_groups.list()
- if sec_groups:
- ## Should we add VM in all availability security_groups ???
- kwargs['security_groups'] = [x.name for x in sec_groups]
- else:
- kwargs['security_groups'] = None
-
+ kwargs['security_groups'] = [ s['name'] for s in self._nova_security_groups ]
return self.nova_drv.server_create(**kwargs)
def nova_server_add_port(self, server_id, port_id):
return self.neutron_drv.network_list()
def neutron_network_get(self, network_id):
- return self.neutron_drv.network_get(network_id)
+ return self.neutron_drv.network_get(network_id=network_id)
def neutron_network_create(self, **kwargs):
return self.neutron_drv.network_create(**kwargs)
self.neutron_drv.network_delete(network_id)
def neutron_subnet_list(self):
- return self.neutron_drv.subnet_list()
+ return self.neutron_drv.subnet_list(**{})
def neutron_subnet_get(self, subnet_id):
return self.neutron_drv.subnet_get(subnet_id)
return pool_list[0]
else:
return None
-
+
def neutron_port_list(self, **kwargs):
return self.neutron_drv.port_list(**kwargs)
return self.neutron_drv.port_get(port_id)
def neutron_port_create(self, **kwargs):
- subnets = [subnet for subnet in self.neutron_drv.subnet_list() if subnet['network_id'] == kwargs['network_id']]
- assert len(subnets) == 1
- kwargs['subnet_id'] = subnets[0]['id']
- if not 'admin_state_up' in kwargs:
- kwargs['admin_state_up'] = True
- port_id = self.neutron_drv.port_create(**kwargs)
-
+ port_id = self.neutron_drv.port_create([kwargs])[0]
if 'vm_id' in kwargs:
self.nova_server_add_port(kwargs['vm_id'], port_id)
return port_id
+ def neutron_multi_port_create(self, ports):
+ return self.neutron_drv.port_create(ports)
+
def neutron_security_group_list(self):
- return self.neutron_drv.security_group_list()
+ return self.neutron_drv.security_group_list(**{})
def neutron_security_group_by_name(self, group_name):
- group_list = self.neutron_drv.security_group_list()
- groups = [group for group in group_list if group['name'] == group_name]
- if groups:
- return groups[0]
+ group_list = self.neutron_drv.security_group_list(**{'name': group_name})
+ if group_list:
+ return group_list[0]
else:
return None
A dict of NFVI metrics
"""
- def query_latest_sample(counter_name):
- try:
- filter = json.dumps({
- "and": [
- {"=": {"resource": vim_id}},
- {"=": {"counter_name": counter_name}}
- ]
- })
- orderby = json.dumps([{"timestamp": "DESC"}])
- result = self.ceilo_drv.client.query_samples.query(
- filter=filter,
- orderby=orderby,
- limit=1,
- )
- return result[0]
-
- except IndexError:
- pass
-
- except Exception as e:
- logger.error("Got exception while querying ceilometer, exception details:%s " %str(e))
-
- return None
-
- memory_usage = query_latest_sample("memory.usage")
- disk_usage = query_latest_sample("disk.usage")
- cpu_util = query_latest_sample("cpu_util")
-
- metrics = dict()
-
- if memory_usage is not None:
- memory_usage.volume = 1e6 * memory_usage.volume
- metrics["memory_usage"] = memory_usage.to_dict()
-
- if disk_usage is not None:
- metrics["disk_usage"] = disk_usage.to_dict()
-
- if cpu_util is not None:
- metrics["cpu_util"] = cpu_util.to_dict()
-
- return metrics
+ return self.ceilo_drv.nfvi_metrics(vim_id)
def ceilo_alarm_list(self):
"""Returns a list of ceilometer alarms"""
alarm_actions = actions.get('alarm') if actions is not None else None
insufficient_data_actions = actions.get('insufficient_data') if actions is not None else None
- return self.ceilo_drv.client.alarms.create(
- name=name,
- meter_name=meter,
- statistic=statistic,
- comparison_operator=operation,
- threshold=threshold,
- period=period,
- evaluation_periods=evaluations,
- severity=severity,
- repeat_actions=repeat,
- enabled=enabled,
- ok_actions=ok_actions,
- alarm_actions=alarm_actions,
- insufficient_data_actions=insufficient_data_actions,
- **kwargs
- )
+ return self.ceilo_drv.client.alarms.create(name=name,
+ meter_name=meter,
+ statistic=statistic,
+ comparison_operator=operation,
+ threshold=threshold,
+ period=period,
+ evaluation_periods=evaluations,
+ severity=severity,
+ repeat_actions=repeat,
+ enabled=enabled,
+ ok_actions=ok_actions,
+ alarm_actions=alarm_actions,
+ insufficient_data_actions=insufficient_data_actions,
+ **kwargs)
def ceilo_alarm_update(self, alarm_id, **kwargs):
"""Updates an existing alarm
def ceilo_alarm_delete(self, alarm_id):
self.ceilo_drv.client.alarms.delete(alarm_id)
def create_port_chain(self, name, port_lists):
    """Create a port chain from ordered (ingress, egress) port pairs.

    Arguments:
        name       - base name; derived names are used for pairs and groups
        port_lists - iterable of (ingress_port, egress_port) tuples

    Returns: id of the newly created port chain
    """
    group_ids = list()
    for idx, (ingress, egress) in enumerate(port_lists):
        # SFC ports must have security groups and port security disabled.
        self.neutron_drv.port_update(ingress,
                                     no_security_groups=True,
                                     port_security_enabled=False)
        if egress != ingress:
            self.neutron_drv.port_update(egress,
                                         no_security_groups=True,
                                         port_security_enabled=False)

        pair_id = self.portchain_drv.create_port_pair(name + 'ppair' + str(idx),
                                                      ingress,
                                                      egress)
        # Each port pair group currently wraps exactly one port pair.
        group_id = self.portchain_drv.create_port_pair_group(name + '_ppgrp_' + str(idx),
                                                             [pair_id])
        group_ids.append(group_id)

    return self.portchain_drv.create_port_chain(name, group_ids)
+
def delete_port_chain(self, port_chain_id):
    """Delete a port chain together with its port pair groups and port pairs.

    Best effort: any failure is logged and swallowed rather than propagated.

    Arguments:
        port_chain_id - id of the port chain to remove
    """
    try:
        result = self.portchain_drv.get_port_chain(port_chain_id)
        port_chain = result.json()
        self.log.debug("Port chain result is %s", port_chain)
        port_pair_groups = port_chain["port_chain"]["port_pair_groups"]
        # Delete the chain first so its groups/pairs are no longer referenced.
        self.portchain_drv.delete_port_chain(port_chain_id)

        # Collect the port pairs from each group, then delete the groups.
        port_pairs = list()
        self.log.debug("Port pair groups during delete is %s", port_pair_groups)
        for port_pair_group_id in port_pair_groups:
            result = self.portchain_drv.get_port_pair_group(port_pair_group_id)
            port_pair_group = result.json()
            self.log.debug("Port pair group result is %s", port_pair_group)
            port_pairs.extend(port_pair_group["port_pair_group"]["port_pairs"])
            self.portchain_drv.delete_port_pair_group(port_pair_group_id)

        self.log.debug("Port pairs during delete is %s", port_pairs)

        # Finally delete the now-unreferenced port pairs (stray `pass` removed).
        for port_pair_id in port_pairs:
            self.portchain_drv.delete_port_pair(port_pair_id)
    except Exception as e:
        self.log.error("Error while delete port chain with id %s, exception %s", port_chain_id, str(e))
+
def update_port_chain(self, port_chain_id, flow_classifier_list):
    """Append flow classifiers to an existing port chain.

    Arguments:
        port_chain_id        - id of the chain to update
        flow_classifier_list - classifier ids to add to the chain

    Returns: id of the updated port chain
    """
    result = self.portchain_drv.get_port_chain(port_chain_id)
    result.raise_for_status()
    port_chain = result.json()['port_chain']

    # Keep classifiers already attached to the chain, then add the new ones.
    if port_chain and port_chain['flow_classifiers']:
        classifiers = list(port_chain['flow_classifiers'])
    else:
        classifiers = []
    classifiers.extend(flow_classifier_list)

    return self.portchain_drv.update_port_chain(port_chain['id'],
                                                flow_classifiers=classifiers)
+
def create_flow_classifer(self, classifier_name, classifier_dict):
    """Create a flow classifier and return its id.

    NOTE: the method name keeps the historical 'classifer' spelling so
    existing callers keep working.
    """
    return self.portchain_drv.create_flow_classifier(classifier_name,
                                                     classifier_dict)
+
def delete_flow_classifier(self, classifier_id):
    """Delete a flow classifier.

    Best effort: failures are logged and swallowed.
    (Docstring fixed: it previously said "Create flow classifier".)
    """
    try:
        self.portchain_drv.delete_flow_classifier(classifier_id)
    except Exception as e:
        self.log.error("Error while deleting flow classifier with id %s, exception %s", classifier_id, str(e))
+
def get_port_chain_list(self):
    """Return the list of port chains, or None when the response has none."""
    response = self.portchain_drv.get_port_chain_list().json()
    if 'port_chains' in response:
        return response['port_chains']
+
def cinder_volume_list(self):
return self.cinder_drv.volume_list()
-class CinderDriver(object):
- """
- Driver for openstack cinder-client
- """
- def __init__(self, ks_drv, service_name, version):
- """
- Constructor for CinderDriver
- Arguments: KeystoneDriver class object
- """
- self.ks_drv = ks_drv
- self._service_name = service_name
- self._version = version
-
- def _get_cinder_credentials(self):
- """
- Returns a dictionary of kwargs required to instantiate python-cinderclient class
-
- Arguments: None
-
- Returns:
- A dictionary object of arguments
- """
- creds = {}
- creds['version'] = self._version
- creds['username'] = self.ks_drv.get_username()
- creds['api_key'] = self.ks_drv.get_password()
- creds['auth_url'] = self.ks_drv.get_service_endpoint("identity", "publicURL")
- creds['project_id'] = self.ks_drv.get_tenant_name()
- creds['insecure'] = self.ks_drv.get_security_mode()
-
- return creds
-
- def _get_cinder_connection(self):
- """
- Returns a object of class python-cinderclient
- """
- if not hasattr(self, '_cinder_connection'):
- self._cinder_connection = cinder_client.Client(**self._get_cinder_credentials())
- else:
- # Reinitialize if auth_token is no longer valid
- if not self.ks_drv.is_auth_token_valid():
- self._cinder_connection = cinder_client.Client(**self._get_cinder_credentials())
- return self._cinder_connection
-
- def volume_list(self):
- """
- Returns list of dictionaries. Each dictionary contains attributes associated with
- volumes
-
- Arguments: None
-
- Returns: List of dictionaries.
- """
- cinderconn = self._get_cinder_connection()
- volumes = []
- try:
- volume_info = cinderconn.volumes.list()
- except Exception as e:
- logger.error("OpenstackDriver: List volumes operation failed. Exception: %s" %(str(e)))
- raise
- volumes = [ volume for volume in volume_info ]
- return volumes
-
- def volume_get(self, volume_id):
- """
- Get details volume
-
- Arguments: None
-
- Returns: List of dictionaries.
- """
- cinderconn = self._get_cinder_connection()
- try:
- vol = cinderconn.volumes.get(volume_id)
- except Exception as e:
- logger.error("OpenstackDriver: Get volume operation failed. Exception: %s" %(str(e)))
- raise
- return vol
-
- def volume_set_metadata(self, volume_id, metadata):
- """
- Set metadata for volume
- Metadata is a dictionary of key-value pairs
-
- Arguments: None
-
- Returns: List of dictionaries.
- """
- cinderconn = self._get_cinder_connection()
- try:
- cinderconn.volumes.set_metadata(volume_id, metadata)
- except Exception as e:
- logger.error("OpenstackDriver: Set metadata operation failed. Exception: %s" %(str(e)))
- raise
-
- def volume_delete_metadata(self, volume_id, metadata):
- """
- Delete metadata for volume
- Metadata is a dictionary of key-value pairs
-
- Arguments: None
-
- Returns: List of dictionaries.
- """
- cinderconn = self._get_cinder_connection()
- try:
- cinderconn.volumes.delete_metadata(volume_id, metadata)
- except Exception as e:
- logger.error("OpenstackDriver: Delete metadata operation failed. Exception: %s" %(str(e)))
- raise
-
-class CinderDriverV2(CinderDriver):
- """
- Driver for openstack cinder-client V2
- """
- def __init__(self, ks_drv):
- super(CinderDriverV2, self).__init__(ks_drv, 'volumev2', 2)
-
+++ /dev/null
-#!/usr/bin/env python3
-
-#
-# Copyright 2016 RIFT.IO Inc
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-import re
-
-class OpenstackGuestEPAUtils(object):
- """
- Utility class for Host EPA to Openstack flavor extra_specs conversion routines
- """
- def __init__(self):
- self._mano_to_espec_cpu_pinning_policy = {
- 'DEDICATED' : 'dedicated',
- 'SHARED' : 'shared',
- 'ANY' : 'any',
- }
-
- self._espec_to_mano_cpu_pinning_policy = {
- 'dedicated' : 'DEDICATED',
- 'shared' : 'SHARED',
- 'any' : 'ANY',
- }
-
- self._mano_to_espec_mempage_size = {
- 'LARGE' : 'large',
- 'SMALL' : 'small',
- 'SIZE_2MB' : 2048,
- 'SIZE_1GB' : 1048576,
- 'PREFER_LARGE' : 'large',
- }
-
- self._espec_to_mano_mempage_size = {
- 'large' : 'LARGE',
- 'small' : 'SMALL',
- 2048 : 'SIZE_2MB',
- 1048576 : 'SIZE_1GB',
- 'large' : 'PREFER_LARGE',
- }
-
- self._mano_to_espec_cpu_thread_pinning_policy = {
- 'AVOID' : 'avoid',
- 'SEPARATE' : 'separate',
- 'ISOLATE' : 'isolate',
- 'PREFER' : 'prefer',
- }
-
- self._espec_to_mano_cpu_thread_pinning_policy = {
- 'avoid' : 'AVOID',
- 'separate' : 'SEPARATE',
- 'isolate' : 'ISOLATE',
- 'prefer' : 'PREFER',
- }
-
- self._espec_to_mano_numa_memory_policy = {
- 'strict' : 'STRICT',
- 'preferred': 'PREFERRED'
- }
-
- self._mano_to_espec_numa_memory_policy = {
- 'STRICT' : 'strict',
- 'PREFERRED': 'preferred'
- }
-
- def mano_to_extra_spec_cpu_pinning_policy(self, cpu_pinning_policy):
- if cpu_pinning_policy in self._mano_to_espec_cpu_pinning_policy:
- return self._mano_to_espec_cpu_pinning_policy[cpu_pinning_policy]
- else:
- return None
-
- def extra_spec_to_mano_cpu_pinning_policy(self, cpu_pinning_policy):
- if cpu_pinning_policy in self._espec_to_mano_cpu_pinning_policy:
- return self._espec_to_mano_cpu_pinning_policy[cpu_pinning_policy]
- else:
- return None
-
- def mano_to_extra_spec_mempage_size(self, mempage_size):
- if mempage_size in self._mano_to_espec_mempage_size:
- return self._mano_to_espec_mempage_size[mempage_size]
- else:
- return None
-
- def extra_spec_to_mano_mempage_size(self, mempage_size):
- if mempage_size in self._espec_to_mano_mempage_size:
- return self._espec_to_mano_mempage_size[mempage_size]
- else:
- return None
-
- def mano_to_extra_spec_cpu_thread_pinning_policy(self, cpu_thread_pinning_policy):
- if cpu_thread_pinning_policy in self._mano_to_espec_cpu_thread_pinning_policy:
- return self._mano_to_espec_cpu_thread_pinning_policy[cpu_thread_pinning_policy]
- else:
- return None
-
- def extra_spec_to_mano_cpu_thread_pinning_policy(self, cpu_thread_pinning_policy):
- if cpu_thread_pinning_policy in self._espec_to_mano_cpu_thread_pinning_policy:
- return self._espec_to_mano_cpu_thread_pinning_policy[cpu_thread_pinning_policy]
- else:
- return None
-
- def mano_to_extra_spec_trusted_execution(self, trusted_execution):
- if trusted_execution:
- return 'trusted'
- else:
- return 'untrusted'
-
- def extra_spec_to_mano_trusted_execution(self, trusted_execution):
- if trusted_execution == 'trusted':
- return True
- elif trusted_execution == 'untrusted':
- return False
- else:
- return None
-
- def mano_to_extra_spec_numa_node_count(self, numa_node_count):
- return numa_node_count
-
- def extra_specs_to_mano_numa_node_count(self, numa_node_count):
- return int(numa_node_count)
-
- def mano_to_extra_spec_numa_memory_policy(self, numa_memory_policy):
- if numa_memory_policy in self._mano_to_espec_numa_memory_policy:
- return self._mano_to_espec_numa_memory_policy[numa_memory_policy]
- else:
- return None
-
- def extra_to_mano_spec_numa_memory_policy(self, numa_memory_policy):
- if numa_memory_policy in self._espec_to_mano_numa_memory_policy:
- return self._espec_to_mano_numa_memory_policy[numa_memory_policy]
- else:
- return None
-
-
-
-
-class OpenstackHostEPAUtils(object):
- """
- Utility class for Host EPA to Openstack flavor extra_specs conversion routines
- """
- def __init__(self):
- self._mano_to_espec_cpumodel = {
- "PREFER_WESTMERE" : "Westmere",
- "REQUIRE_WESTMERE" : "Westmere",
- "PREFER_SANDYBRIDGE" : "SandyBridge",
- "REQUIRE_SANDYBRIDGE" : "SandyBridge",
- "PREFER_IVYBRIDGE" : "IvyBridge",
- "REQUIRE_IVYBRIDGE" : "IvyBridge",
- "PREFER_HASWELL" : "Haswell",
- "REQUIRE_HASWELL" : "Haswell",
- "PREFER_BROADWELL" : "Broadwell",
- "REQUIRE_BROADWELL" : "Broadwell",
- "PREFER_NEHALEM" : "Nehalem",
- "REQUIRE_NEHALEM" : "Nehalem",
- "PREFER_PENRYN" : "Penryn",
- "REQUIRE_PENRYN" : "Penryn",
- "PREFER_CONROE" : "Conroe",
- "REQUIRE_CONROE" : "Conroe",
- "PREFER_CORE2DUO" : "Core2Duo",
- "REQUIRE_CORE2DUO" : "Core2Duo",
- }
-
- self._espec_to_mano_cpumodel = {
- "Westmere" : "REQUIRE_WESTMERE",
- "SandyBridge" : "REQUIRE_SANDYBRIDGE",
- "IvyBridge" : "REQUIRE_IVYBRIDGE",
- "Haswell" : "REQUIRE_HASWELL",
- "Broadwell" : "REQUIRE_BROADWELL",
- "Nehalem" : "REQUIRE_NEHALEM",
- "Penryn" : "REQUIRE_PENRYN",
- "Conroe" : "REQUIRE_CONROE",
- "Core2Duo" : "REQUIRE_CORE2DUO",
- }
-
- self._mano_to_espec_cpuarch = {
- "PREFER_X86" : "x86",
- "REQUIRE_X86" : "x86",
- "PREFER_X86_64" : "x86_64",
- "REQUIRE_X86_64" : "x86_64",
- "PREFER_I686" : "i686",
- "REQUIRE_I686" : "i686",
- "PREFER_IA64" : "ia64",
- "REQUIRE_IA64" : "ia64",
- "PREFER_ARMV7" : "ARMv7",
- "REQUIRE_ARMV7" : "ARMv7",
- "PREFER_ARMV8" : "ARMv8-A",
- "REQUIRE_ARMV8" : "ARMv8-A",
- }
-
- self._espec_to_mano_cpuarch = {
- "x86" : "REQUIRE_X86",
- "x86_64" : "REQUIRE_X86_64",
- "i686" : "REQUIRE_I686",
- "ia64" : "REQUIRE_IA64",
- "ARMv7-A" : "REQUIRE_ARMV7",
- "ARMv8-A" : "REQUIRE_ARMV8",
- }
-
- self._mano_to_espec_cpuvendor = {
- "PREFER_INTEL" : "Intel",
- "REQUIRE_INTEL" : "Intel",
- "PREFER_AMD" : "AMD",
- "REQUIRE_AMD" : "AMD",
- }
-
- self._espec_to_mano_cpuvendor = {
- "Intel" : "REQUIRE_INTEL",
- "AMD" : "REQUIRE_AMD",
- }
-
- self._mano_to_espec_cpufeatures = {
- "PREFER_AES" : "aes",
- "REQUIRE_AES" : "aes",
- "REQUIRE_VME" : "vme",
- "PREFER_VME" : "vme",
- "REQUIRE_DE" : "de",
- "PREFER_DE" : "de",
- "REQUIRE_PSE" : "pse",
- "PREFER_PSE" : "pse",
- "REQUIRE_TSC" : "tsc",
- "PREFER_TSC" : "tsc",
- "REQUIRE_MSR" : "msr",
- "PREFER_MSR" : "msr",
- "REQUIRE_PAE" : "pae",
- "PREFER_PAE" : "pae",
- "REQUIRE_MCE" : "mce",
- "PREFER_MCE" : "mce",
- "REQUIRE_CX8" : "cx8",
- "PREFER_CX8" : "cx8",
- "REQUIRE_APIC" : "apic",
- "PREFER_APIC" : "apic",
- "REQUIRE_SEP" : "sep",
- "PREFER_SEP" : "sep",
- "REQUIRE_MTRR" : "mtrr",
- "PREFER_MTRR" : "mtrr",
- "REQUIRE_PGE" : "pge",
- "PREFER_PGE" : "pge",
- "REQUIRE_MCA" : "mca",
- "PREFER_MCA" : "mca",
- "REQUIRE_CMOV" : "cmov",
- "PREFER_CMOV" : "cmov",
- "REQUIRE_PAT" : "pat",
- "PREFER_PAT" : "pat",
- "REQUIRE_PSE36" : "pse36",
- "PREFER_PSE36" : "pse36",
- "REQUIRE_CLFLUSH" : "clflush",
- "PREFER_CLFLUSH" : "clflush",
- "REQUIRE_DTS" : "dts",
- "PREFER_DTS" : "dts",
- "REQUIRE_ACPI" : "acpi",
- "PREFER_ACPI" : "acpi",
- "REQUIRE_MMX" : "mmx",
- "PREFER_MMX" : "mmx",
- "REQUIRE_FXSR" : "fxsr",
- "PREFER_FXSR" : "fxsr",
- "REQUIRE_SSE" : "sse",
- "PREFER_SSE" : "sse",
- "REQUIRE_SSE2" : "sse2",
- "PREFER_SSE2" : "sse2",
- "REQUIRE_SS" : "ss",
- "PREFER_SS" : "ss",
- "REQUIRE_HT" : "ht",
- "PREFER_HT" : "ht",
- "REQUIRE_TM" : "tm",
- "PREFER_TM" : "tm",
- "REQUIRE_IA64" : "ia64",
- "PREFER_IA64" : "ia64",
- "REQUIRE_PBE" : "pbe",
- "PREFER_PBE" : "pbe",
- "REQUIRE_RDTSCP" : "rdtscp",
- "PREFER_RDTSCP" : "rdtscp",
- "REQUIRE_PNI" : "pni",
- "PREFER_PNI" : "pni",
- "REQUIRE_PCLMULQDQ": "pclmulqdq",
- "PREFER_PCLMULQDQ" : "pclmulqdq",
- "REQUIRE_DTES64" : "dtes64",
- "PREFER_DTES64" : "dtes64",
- "REQUIRE_MONITOR" : "monitor",
- "PREFER_MONITOR" : "monitor",
- "REQUIRE_DS_CPL" : "ds_cpl",
- "PREFER_DS_CPL" : "ds_cpl",
- "REQUIRE_VMX" : "vmx",
- "PREFER_VMX" : "vmx",
- "REQUIRE_SMX" : "smx",
- "PREFER_SMX" : "smx",
- "REQUIRE_EST" : "est",
- "PREFER_EST" : "est",
- "REQUIRE_TM2" : "tm2",
- "PREFER_TM2" : "tm2",
- "REQUIRE_SSSE3" : "ssse3",
- "PREFER_SSSE3" : "ssse3",
- "REQUIRE_CID" : "cid",
- "PREFER_CID" : "cid",
- "REQUIRE_FMA" : "fma",
- "PREFER_FMA" : "fma",
- "REQUIRE_CX16" : "cx16",
- "PREFER_CX16" : "cx16",
- "REQUIRE_XTPR" : "xtpr",
- "PREFER_XTPR" : "xtpr",
- "REQUIRE_PDCM" : "pdcm",
- "PREFER_PDCM" : "pdcm",
- "REQUIRE_PCID" : "pcid",
- "PREFER_PCID" : "pcid",
- "REQUIRE_DCA" : "dca",
- "PREFER_DCA" : "dca",
- "REQUIRE_SSE4_1" : "sse4_1",
- "PREFER_SSE4_1" : "sse4_1",
- "REQUIRE_SSE4_2" : "sse4_2",
- "PREFER_SSE4_2" : "sse4_2",
- "REQUIRE_X2APIC" : "x2apic",
- "PREFER_X2APIC" : "x2apic",
- "REQUIRE_MOVBE" : "movbe",
- "PREFER_MOVBE" : "movbe",
- "REQUIRE_POPCNT" : "popcnt",
- "PREFER_POPCNT" : "popcnt",
- "REQUIRE_TSC_DEADLINE_TIMER" : "tsc_deadline_timer",
- "PREFER_TSC_DEADLINE_TIMER" : "tsc_deadline_timer",
- "REQUIRE_XSAVE" : "xsave",
- "PREFER_XSAVE" : "xsave",
- "REQUIRE_AVX" : "avx",
- "PREFER_AVX" : "avx",
- "REQUIRE_F16C" : "f16c",
- "PREFER_F16C" : "f16c",
- "REQUIRE_RDRAND" : "rdrand",
- "PREFER_RDRAND" : "rdrand",
- "REQUIRE_FSGSBASE" : "fsgsbase",
- "PREFER_FSGSBASE" : "fsgsbase",
- "REQUIRE_BMI1" : "bmi1",
- "PREFER_BMI1" : "bmi1",
- "REQUIRE_HLE" : "hle",
- "PREFER_HLE" : "hle",
- "REQUIRE_AVX2" : "avx2",
- "PREFER_AVX2" : "avx2",
- "REQUIRE_SMEP" : "smep",
- "PREFER_SMEP" : "smep",
- "REQUIRE_BMI2" : "bmi2",
- "PREFER_BMI2" : "bmi2",
- "REQUIRE_ERMS" : "erms",
- "PREFER_ERMS" : "erms",
- "REQUIRE_INVPCID" : "invpcid",
- "PREFER_INVPCID" : "invpcid",
- "REQUIRE_RTM" : "rtm",
- "PREFER_RTM" : "rtm",
- "REQUIRE_MPX" : "mpx",
- "PREFER_MPX" : "mpx",
- "REQUIRE_RDSEED" : "rdseed",
- "PREFER_RDSEED" : "rdseed",
- "REQUIRE_ADX" : "adx",
- "PREFER_ADX" : "adx",
- "REQUIRE_SMAP" : "smap",
- "PREFER_SMAP" : "smap",
- }
-
- self._espec_to_mano_cpufeatures = {
- "aes" : "REQUIRE_AES",
- "vme" : "REQUIRE_VME",
- "de" : "REQUIRE_DE",
- "pse" : "REQUIRE_PSE",
- "tsc" : "REQUIRE_TSC",
- "msr" : "REQUIRE_MSR",
- "pae" : "REQUIRE_PAE",
- "mce" : "REQUIRE_MCE",
- "cx8" : "REQUIRE_CX8",
- "apic" : "REQUIRE_APIC",
- "sep" : "REQUIRE_SEP",
- "mtrr" : "REQUIRE_MTRR",
- "pge" : "REQUIRE_PGE",
- "mca" : "REQUIRE_MCA",
- "cmov" : "REQUIRE_CMOV",
- "pat" : "REQUIRE_PAT",
- "pse36" : "REQUIRE_PSE36",
- "clflush" : "REQUIRE_CLFLUSH",
- "dts" : "REQUIRE_DTS",
- "acpi" : "REQUIRE_ACPI",
- "mmx" : "REQUIRE_MMX",
- "fxsr" : "REQUIRE_FXSR",
- "sse" : "REQUIRE_SSE",
- "sse2" : "REQUIRE_SSE2",
- "ss" : "REQUIRE_SS",
- "ht" : "REQUIRE_HT",
- "tm" : "REQUIRE_TM",
- "ia64" : "REQUIRE_IA64",
- "pbe" : "REQUIRE_PBE",
- "rdtscp" : "REQUIRE_RDTSCP",
- "pni" : "REQUIRE_PNI",
- "pclmulqdq": "REQUIRE_PCLMULQDQ",
- "dtes64" : "REQUIRE_DTES64",
- "monitor" : "REQUIRE_MONITOR",
- "ds_cpl" : "REQUIRE_DS_CPL",
- "vmx" : "REQUIRE_VMX",
- "smx" : "REQUIRE_SMX",
- "est" : "REQUIRE_EST",
- "tm2" : "REQUIRE_TM2",
- "ssse3" : "REQUIRE_SSSE3",
- "cid" : "REQUIRE_CID",
- "fma" : "REQUIRE_FMA",
- "cx16" : "REQUIRE_CX16",
- "xtpr" : "REQUIRE_XTPR",
- "pdcm" : "REQUIRE_PDCM",
- "pcid" : "REQUIRE_PCID",
- "dca" : "REQUIRE_DCA",
- "sse4_1" : "REQUIRE_SSE4_1",
- "sse4_2" : "REQUIRE_SSE4_2",
- "x2apic" : "REQUIRE_X2APIC",
- "movbe" : "REQUIRE_MOVBE",
- "popcnt" : "REQUIRE_POPCNT",
- "tsc_deadline_timer" : "REQUIRE_TSC_DEADLINE_TIMER",
- "xsave" : "REQUIRE_XSAVE",
- "avx" : "REQUIRE_AVX",
- "f16c" : "REQUIRE_F16C",
- "rdrand" : "REQUIRE_RDRAND",
- "fsgsbase" : "REQUIRE_FSGSBASE",
- "bmi1" : "REQUIRE_BMI1",
- "hle" : "REQUIRE_HLE",
- "avx2" : "REQUIRE_AVX2",
- "smep" : "REQUIRE_SMEP",
- "bmi2" : "REQUIRE_BMI2",
- "erms" : "REQUIRE_ERMS",
- "invpcid" : "REQUIRE_INVPCID",
- "rtm" : "REQUIRE_RTM",
- "mpx" : "REQUIRE_MPX",
- "rdseed" : "REQUIRE_RDSEED",
- "adx" : "REQUIRE_ADX",
- "smap" : "REQUIRE_SMAP",
- }
-
- def mano_to_extra_spec_cpu_model(self, cpu_model):
- if cpu_model in self._mano_to_espec_cpumodel:
- return self._mano_to_espec_cpumodel[cpu_model]
- else:
- return None
-
- def extra_specs_to_mano_cpu_model(self, cpu_model):
- if cpu_model in self._espec_to_mano_cpumodel:
- return self._espec_to_mano_cpumodel[cpu_model]
- else:
- return None
-
- def mano_to_extra_spec_cpu_arch(self, cpu_arch):
- if cpu_arch in self._mano_to_espec_cpuarch:
- return self._mano_to_espec_cpuarch[cpu_arch]
- else:
- return None
-
- def extra_specs_to_mano_cpu_arch(self, cpu_arch):
- if cpu_arch in self._espec_to_mano_cpuarch:
- return self._espec_to_mano_cpuarch[cpu_arch]
- else:
- return None
-
- def mano_to_extra_spec_cpu_vendor(self, cpu_vendor):
- if cpu_vendor in self._mano_to_espec_cpuvendor:
- return self._mano_to_espec_cpuvendor[cpu_vendor]
- else:
- return None
-
- def extra_spec_to_mano_cpu_vendor(self, cpu_vendor):
- if cpu_vendor in self._espec_to_mano_cpuvendor:
- return self._espec_to_mano_cpuvendor[cpu_vendor]
- else:
- return None
-
- def mano_to_extra_spec_cpu_socket_count(self, cpu_sockets):
- return cpu_sockets
-
- def extra_spec_to_mano_cpu_socket_count(self, cpu_sockets):
- return int(cpu_sockets)
-
- def mano_to_extra_spec_cpu_core_count(self, cpu_core_count):
- return cpu_core_count
-
- def extra_spec_to_mano_cpu_core_count(self, cpu_core_count):
- return int(cpu_core_count)
-
- def mano_to_extra_spec_cpu_core_thread_count(self, core_thread_count):
- return core_thread_count
-
- def extra_spec_to_mano_cpu_core_thread_count(self, core_thread_count):
- return int(core_thread_count)
-
- def mano_to_extra_spec_cpu_features(self, features):
- cpu_features = []
- epa_feature_str = None
- for f in features:
- if f in self._mano_to_espec_cpufeatures:
- cpu_features.append(self._mano_to_espec_cpufeatures[f])
-
- if len(cpu_features) > 1:
- epa_feature_str = '<all-in> '+ " ".join(cpu_features)
- elif len(cpu_features) == 1:
- epa_feature_str = " ".join(cpu_features)
-
- return epa_feature_str
-
- def extra_spec_to_mano_cpu_features(self, features):
- oper_symbols = ['=', '<in>', '<all-in>', '==', '!=', '>=', '<=', 's==', 's!=', 's<', 's<=', 's>', 's>=']
- cpu_features = []
- result = None
- for oper in oper_symbols:
- regex = '^'+oper+' (.*?)$'
- result = re.search(regex, features)
- if result is not None:
- break
-
- if result is not None:
- feature_list = result.group(1)
- else:
- feature_list = features
-
- for f in feature_list.split():
- if f in self._espec_to_mano_cpufeatures:
- cpu_features.append(self._espec_to_mano_cpufeatures[f])
-
- return cpu_features
-
-
-class OpenstackExtraSpecUtils(object):
- """
- General utility class for flavor Extra Specs processing
- """
- def __init__(self):
- self.host = OpenstackHostEPAUtils()
- self.guest = OpenstackGuestEPAUtils()
- self.extra_specs_keywords = [ 'hw:cpu_policy',
- 'hw:cpu_threads_policy',
- 'hw:mem_page_size',
- 'hw:numa_nodes',
- 'hw:numa_mempolicy',
- 'hw:numa_cpus',
- 'hw:numa_mem',
- 'trust:trusted_host',
- 'pci_passthrough:alias',
- 'capabilities:cpu_info:model',
- 'capabilities:cpu_info:arch',
- 'capabilities:cpu_info:vendor',
- 'capabilities:cpu_info:topology:sockets',
- 'capabilities:cpu_info:topology:cores',
- 'capabilities:cpu_info:topology:threads',
- 'capabilities:cpu_info:features',
- ]
- self.extra_specs_regex = re.compile("^"+"|^".join(self.extra_specs_keywords))
--- /dev/null
+
+#
+# Copyright 2017 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from .portchain_drv import (
+ L2PortChainDriver,
+)
--- /dev/null
+#!/usr/bin/python
+
+#
+# Copyright 2017 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import logging
+import json
+import requests
+
+
+class L2PortChainDriver(object):
+ """
+ Driver for openstack neutron neutron-client v2
+ """
+ PORT_PAIRS_URL='/sfc/port_pairs'
+ PORT_PAIR_GROUPS_URL='/sfc/port_pair_groups'
+ PORT_CHAINS_URL='/sfc/port_chains'
+ FLOW_CLASSIFIERS_URL='/sfc/flow_classifiers'
+
def __init__(self, sess_handle, neutron_drv, logger=None):
    """
    Constructor for L2PortChainDriver class

    Arguments:
       sess_handle (instance of class SessionDriver)
       neutron_drv (neutron driver providing the neutron endpoint URL)
       logger (instance of logging.Logger); a DEBUG-level default logger
       named 'rwcal.openstack.portchain' is created when omitted
    """
    if logger is not None:
        self.log = logger
    else:
        self.log = logging.getLogger('rwcal.openstack.portchain')
        self.log.setLevel(logging.DEBUG)

    self._sess = sess_handle
    self.neutron_drv = neutron_drv
    # Cache the endpoint so every REST call does not re-query the driver.
    self._neutron_base_url = neutron_drv.neutron_endpoint
+
@property
def neutron_base_url(self):
    """Neutron endpoint URL cached at construction time."""
    return self._neutron_base_url

@property
def tenant_id(self):
    """Project id taken from the keystone session."""
    return self._sess.project_id

@property
def auth_token(self):
    """Auth token taken from the keystone session."""
    return self._sess.auth_token
+
def rest_api_handler(self, url, method, payload=None, refresh_token=True):
    """
    Issue a REST request against the neutron SFC endpoint.

    Arguments:
        url           - path relative to the neutron base URL
        method        - one of 'GET', 'POST', 'PUT', 'DELETE'
        payload       - JSON-encoded request body (POST/PUT only)
        refresh_token - on a 401 response, invalidate the session token and
                        retry the call exactly once

    Returns: requests.Response object
    Raises: ValueError for an unsupported method,
            requests.exceptions.HTTPError for non-retryable HTTP failures
    """
    if method not in ('GET', 'POST', 'PUT', 'DELETE'):
        # Bug fix: the original did `raise("Invalid method name %s", method)`,
        # which raises "TypeError: exceptions must derive from BaseException"
        # instead of a meaningful error.
        raise ValueError("Invalid method name %s" % method)

    headers = {"X-Auth-Token": self.auth_token,
               "Content-Type": "application/json"}
    try:
        if method == 'GET':
            result = requests.get(self.neutron_base_url + url, headers=headers)
        elif method == 'POST':
            self.log.debug("POST request being sent for url %s has payload %s",
                           self.neutron_base_url + url, payload)
            result = requests.post(self.neutron_base_url + url,
                                   headers=headers,
                                   data=payload)
        elif method == 'PUT':
            result = requests.put(self.neutron_base_url + url,
                                  headers=headers,
                                  data=payload)
        else:  # DELETE
            result = requests.delete(self.neutron_base_url + url,
                                     headers=headers)

        result.raise_for_status()

    except requests.exceptions.HTTPError as e:
        if result.status_code == 401 and refresh_token:
            # Token expired: refresh it and retry once (refresh_token=False
            # prevents an infinite retry loop).
            self._sess.invalidate_auth_token()
            result = self.rest_api_handler(url, method, payload=payload, refresh_token=False)
        else:
            self.log.exception(e)
            raise

    return result
+
def create_port_pair(self, name, ingress_port, egress_port):
    """
    Create a port pair and return its id.

    If a pair with the same ingress/egress ports already exists, the id of
    the existing pair is returned instead of raising.
    """
    port_pair = {
        "port_pair": {
            "name": name,
            "tenant_id": self.tenant_id,
            "ingress": ingress_port,
            "egress": egress_port,
        }
    }
    port_pair_json = json.dumps(port_pair)

    try:
        result = self.rest_api_handler(L2PortChainDriver.PORT_PAIRS_URL, 'POST', port_pair_json)
        result.raise_for_status()
    except requests.exceptions.HTTPError as e:
        # Bug fix: rest_api_handler re-raises HTTPError before `result` is
        # assigned in this scope, so the original duplicate-handling branch
        # died with a NameError. Use the response carried by the exception.
        result = e.response
        if (result is not None and result.status_code == 400
                and 'NeutronError' in result.json()
                and result.json()['NeutronError']['type'] == 'PortPairIngressEgressInUse'):
            self.log.info("Port pair with same ingress and egress port already exists")
            listing = self.get_port_pair_list().json()['port_pairs']
            matches = [pp['id'] for pp in listing
                       if pp['ingress'] == ingress_port and pp['egress'] == egress_port]
            return matches[0]
        else:
            self.log.exception(e)
            raise

    self.log.debug("Port Pair response received is status code: %s, response: %s",
                   result.status_code, result.json())
    return result.json()['port_pair']['id']
+
def delete_port_pair(self, port_pair_id):
    """Delete a port pair; a 409 'PortPairInUse' error is logged and ignored."""
    try:
        result = self.rest_api_handler(L2PortChainDriver.PORT_PAIRS_URL + '/{}'.format(port_pair_id), 'DELETE')
        result.raise_for_status()
    except requests.exceptions.HTTPError as e:
        # Bug fix: `result` is unbound here when rest_api_handler raises, so
        # the original 409 handling died with a NameError. Use e.response.
        result = e.response
        if (result is not None and result.status_code == 409
                and 'NeutronError' in result.json()
                and result.json()['NeutronError']['type'] == 'PortPairInUse'):
            self.log.info("Port pair is in use")
        else:
            self.log.exception(e)
            raise
    self.log.debug("Delete Port Pair response received is status code: %s", result.status_code)
+
def get_port_pair(self, port_pair_id):
    """Fetch one port pair; returns the requests.Response object."""
    url = L2PortChainDriver.PORT_PAIRS_URL + '/{}'.format(port_pair_id)
    result = self.rest_api_handler(url, 'GET')
    result.raise_for_status()
    self.log.debug("Get Port Pair response received is status code: %s, response: %s",
                   result.status_code,
                   result.json())
    return result
+
def get_port_pair_list(self):
    """List all port pairs; returns the requests.Response object."""
    result = self.rest_api_handler(L2PortChainDriver.PORT_PAIRS_URL, 'GET')
    result.raise_for_status()
    self.log.debug("Get Port Pair list response received is status code: %s, response: %s",
                   result.status_code,
                   result.json())
    return result
+
def create_port_pair_group(self, name, port_pairs):
    """
    Create a port pair group and return its id.

    If a group holding the same port pairs already exists, its id is
    returned instead of raising.
    """
    port_pair_group = {
        "port_pair_group": {
            "name": name,
            "tenant_id": self.tenant_id,
            "port_pairs": list(port_pairs),
        }
    }
    port_pair_group_json = json.dumps(port_pair_group)

    try:
        result = self.rest_api_handler(L2PortChainDriver.PORT_PAIR_GROUPS_URL, 'POST', port_pair_group_json)
        result.raise_for_status()
    except requests.exceptions.HTTPError as e:
        # Bug fix: `result` is unbound here when rest_api_handler raises, so
        # the original duplicate handling died with a NameError. Use e.response.
        result = e.response
        if (result is not None and result.status_code == 409
                and 'NeutronError' in result.json()
                and result.json()['NeutronError']['type'] == 'PortPairInUse'):
            self.log.info("Port pair group with same port pair already exists")
            listing = self.get_port_pair_group_list().json()['port_pair_groups']
            matching = [ppg['id'] for ppg in listing
                        if ppg['port_pairs'] == port_pairs]
            return matching[0]
        else:
            self.log.exception(e)
            raise

    self.log.debug("Create Port Pair group response received is status code: %s, response: %s",
                   result.status_code,
                   result.json())
    return result.json()['port_pair_group']['id']
+
def delete_port_pair_group(self, port_pair_group_id):
    """Delete a port pair group; a 409 'PortPairGroupInUse' is logged and ignored."""
    try:
        result = self.rest_api_handler(L2PortChainDriver.PORT_PAIR_GROUPS_URL + '/{}'.format(port_pair_group_id), 'DELETE')
        result.raise_for_status()
    except requests.exceptions.HTTPError as e:
        # Bug fix: `result` is unbound here when rest_api_handler raises, so
        # the original 409 handling died with a NameError. Use e.response.
        result = e.response
        if (result is not None and result.status_code == 409
                and 'NeutronError' in result.json()
                and result.json()['NeutronError']['type'] == 'PortPairGroupInUse'):
            self.log.info("Port pair group is in use")
        else:
            self.log.exception(e)
            raise
    self.log.debug("Delete Port Pair group response received is status code: %s",
                   result.status_code)
+
def get_port_pair_group(self, port_pair_group_id):
    """Fetch one port pair group; returns the requests.Response object."""
    url = L2PortChainDriver.PORT_PAIR_GROUPS_URL + '/{}'.format(port_pair_group_id)
    result = self.rest_api_handler(url, 'GET')
    result.raise_for_status()
    self.log.debug("Get Port Pair group response received is status code: %s, response: %s",
                   result.status_code,
                   result.json())
    return result
+
def get_port_pair_group_list(self):
    """List all port pair groups; returns the requests.Response object."""
    result = self.rest_api_handler(L2PortChainDriver.PORT_PAIR_GROUPS_URL, 'GET')
    result.raise_for_status()
    self.log.debug("Get Port Pair group list response received is status code: %s, response: %s",
                   result.status_code,
                   result.json())
    return result
+
def create_port_chain(self, name, port_pair_groups, flow_classifiers=None):
    """
    Create a port chain and return its id.

    If a chain with the same port pair groups already exists, its id is
    returned instead of raising.
    """
    port_chain_dict = {}
    port_chain_dict["name"] = name
    port_chain_dict['tenant_id'] = self.tenant_id
    port_chain_dict['port_pair_groups'] = list(port_pair_groups)
    if flow_classifiers:
        port_chain_dict['flow_classifiers'] = list(flow_classifiers)
    port_chain_json = json.dumps({"port_chain": port_chain_dict})

    try:
        result = self.rest_api_handler(L2PortChainDriver.PORT_CHAINS_URL, 'POST', port_chain_json)
        result.raise_for_status()
    except requests.exceptions.HTTPError as e:
        # Bug fix: `result` is unbound here when rest_api_handler raises, so
        # the original duplicate handling died with a NameError. Use e.response.
        result = e.response
        if (result is not None and result.status_code == 409
                and 'NeutronError' in result.json()
                and result.json()['NeutronError']['type'] == 'InvalidPortPairGroups'):
            self.log.info("Port chain with same port pair group already exists")
            result = self.get_port_chain_list()
            port_chain_list = result.json()['port_chains']
            port_chain_ids = [pc['id'] for pc in port_chain_list
                              if pc['port_pair_groups'] == port_pair_groups]
            return port_chain_ids[0]
        else:
            self.log.exception(e)
            # Bug fix: was `raise()`, which raises "TypeError: exceptions must
            # derive from BaseException"; a bare raise re-raises the HTTPError.
            raise

    self.log.debug("Create Port chain response received is status code: %s, response: %s",
                   result.status_code,
                   result.json())

    return result.json()['port_chain']['id']
+
def delete_port_chain(self, port_chain_id):
    """Delete a port chain; HTTP failures propagate as HTTPError."""
    url = L2PortChainDriver.PORT_CHAINS_URL + '/{}'.format(port_chain_id)
    result = self.rest_api_handler(url, 'DELETE')
    result.raise_for_status()
    self.log.debug("Delete Port chain response received is status code: %s", result.status_code)
+
+ def get_port_chain(self,port_chain_id):
+ result = self.rest_api_handler(L2PortChainDriver.PORT_CHAINS_URL+'/{}'.format(port_chain_id), 'GET')
+ result.raise_for_status()
+ self.log.debug("Get Port Chain response received is status code: %s, response: %s",
+ result.status_code,
+ result.json())
+ return result
+
+ def get_port_chain_list(self):
+ result = self.rest_api_handler(L2PortChainDriver.PORT_CHAINS_URL, 'GET')
+ result.raise_for_status()
+ self.log.debug("Get Port Chain list response received is status code: %s, response: %s",
+ result.status_code,
+ result.json())
+ return result
+
+ def update_port_chain(self,port_chain_id,port_pair_groups=None,flow_classifiers=None):
+ port_chain_dict = {}
+ if flow_classifiers:
+ port_chain_dict['flow_classifiers'] = list()
+ port_chain_dict['flow_classifiers'].extend(flow_classifiers)
+ if port_pair_groups:
+ port_chain_dict['port_pair_groups'] = list()
+ port_chain_dict['port_pair_groups'].extend(port_pair_groups)
+ port_chain = {}
+ port_chain["port_chain"] = port_chain_dict
+ port_chain_json = json.dumps(port_chain)
+
+ result = self.rest_api_handler(L2PortChainDriver.PORT_CHAINS_URL+'/{}'.format(port_chain_id), 'PUT', port_chain_json)
+ result.raise_for_status()
+ self.log.debug("Update Port chain response received is status code: %s, response: %s",
+ result.status_code,
+ result.json())
+ return result.json()['port_chain']['id']
+
+ def create_flow_classifier(self,name,classifier_dict):
+ """
+ Create flow classifier
+ """
+ classifier_fields = [ 'ethertype',
+ 'protocol',
+ 'source_port_range_min',
+ 'source_port_range_max',
+ 'destination_port_range_min',
+ 'destination_port_range_max',
+ 'source_ip_prefix',
+ 'destination_ip_prefix',
+ 'logical_source_port' ]
+
+ flow_classifier_dict = {}
+ flow_classifier_dict = {k: v for k, v in classifier_dict.items()
+ if k in classifier_fields}
+ flow_classifier_dict["name"]= name
+ flow_classifier_dict['tenant_id']= self.tenant_id
+
+ #flow_classifier_dict['ethertype']= 'IPv4'
+ #flow_classifier_dict['protocol']= 'TCP'
+ #flow_classifier_dict['source_port_range_min']= 80
+ #flow_classifier_dict['source_port_range_max']= 80
+ #flow_classifier_dict['destination_port_range_min']= 80
+ #flow_classifier_dict['destination_port_range_max']= 80
+ #flow_classifier_dict['source_ip_prefix']= '11.0.6.5/32'
+ #flow_classifier_dict['destination_ip_prefix']= '11.0.6.6/32'
+ #flow_classifier_dict['logical_source_port']= source_neutron_port
+ #flow_classifier_dict['logical_destination_port']= ''
+ flow_classifier = {}
+ flow_classifier["flow_classifier"] = flow_classifier_dict
+ flow_classifier_json = json.dumps(flow_classifier)
+
+ result = self.rest_api_handler(L2PortChainDriver.FLOW_CLASSIFIERS_URL, 'POST', flow_classifier_json)
+ result.raise_for_status()
+ self.log.debug("Create flow classifier response received is status code: %s, response: %s",
+ result.status_code,
+ result.json())
+ return result.json()['flow_classifier']['id']
+
+ def delete_flow_classifier(self,flow_classifier_id):
+ result = self.rest_api_handler(L2PortChainDriver.FLOW_CLASSIFIERS_URL+'/{}'.format(flow_classifier_id), 'DELETE')
+ result.raise_for_status()
+ self.log.debug("Delete flow classifier response received is status code: %s",
+ result.status_code)
+
+ def get_flow_classifier(self,flow_classifier_id):
+ result = self.rest_api_handler(L2PortChainDriver.FLOW_CLASSIFIERS_URL+'/{}'.format(flow_classifier_id), 'GET')
+ result.raise_for_status()
+ self.log.debug("Get flow classifier response received is status code: %s, response: %s",
+ result.status_code,
+ result.json())
+ return result
logger.error("fork failed: %d (%s)\n" % (e.errno, e.strerror))
sys.exit(2)
-
- drv = openstack_drv.OpenstackDriver(username = argument.username,
- password = argument.password,
- auth_url = argument.auth_url,
- tenant_name = argument.tenant_name,
- mgmt_network = argument.mgmt_network,
- user_domain_name = argument.user_domain,
- project_domain_name = argument.project_domain,
- region = argument.region)
-
+ kwargs = dict(username = argument.username,
+ password = argument.password,
+ auth_url = argument.auth_url,
+ project = argument.tenant_name,
+ mgmt_network = argument.mgmt_network,
+ cert_validate = False,
+ user_domain = argument.user_domain,
+ project_domain = argument.project_domain,
+ region = argument.region)
+
+ drv = openstack_drv.OpenstackDriver(logger = logger, **kwargs)
prepare_vm_after_boot(drv, argument)
sys.exit(0)
--- /dev/null
+
+#
+# Copyright 2017 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from .session_drv import (
+ SessionDriver,
+)
--- /dev/null
+#!/usr/bin/python
+
+#
+# Copyright 2017 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+from keystoneauth1.identity import v3
+from keystoneauth1.identity import v2
+import logging
+
+
class TokenDriver(object):
    """
    Token-based authentication driver for openstack.

    Placeholder only: it exposes the same interface as the other auth
    drivers but does not implement a real token plugin yet, so its
    auth_handle is always None.
    """
    def __init__(self, version, logger=None, **kwargs):
        """
        Arguments:
            version: Keystone API version (unused by this placeholder)
            logger (logging.Logger): optional logger; when None a default
                DEBUG-level logger is created
            kwargs: additional auth parameters (ignored)
        """
        if logger is not None:
            self.log = logger
        else:
            self.log = logging.getLogger('rwcal.openstack.keystone.token')
            self.log.setLevel(logging.DEBUG)

    @property
    def auth_handle(self):
        """Authentication plugin handle; always None for this placeholder."""
        return None
+
class PasswordDriver(object):
    """
    Class for password based authentication for openstack
    """
    def __init__(self, version, logger=None, **kwargs):
        """
        Constructor for class
        Arguments:
          version (str): Keystone API version to use
          logger (instance of logging.Logger)
          A dictionary of following key-value pairs
          {
            auth_url (string)            : Keystone Auth URL
            username (string)            : Username for authentication
            password (string)            : Password for authentication
            project_name (string)        : Name of the project or tenant
            project_domain_name (string) : Name of the project domain
            user_domain_name (string)    : Name of the user domain
          }
        Raises:
          ValueError if the keystone version is neither 2 nor 3
        Returns:
          None
        """
        if logger is None:
            self.log = logging.getLogger('rwcal.openstack.keystone.password')
            self.log.setLevel(logging.DEBUG)
        else:
            self.log = logger
        # BUG FIX: the original unconditionally re-assigned "self.log = logger"
        # after the branch above, clobbering the fallback logger with None
        # whenever no logger was supplied.

        version = int(float(version))

        if version == 3:
            self.log.info("Using keystone version 3 for authentication at URL: %s", kwargs['auth_url'])
            self._auth = v3.Password(auth_url = kwargs['auth_url'],
                                     username = kwargs['username'],
                                     password = kwargs['password'],
                                     project_name = kwargs['project_name'],
                                     project_domain_name = kwargs['project_domain_name'],
                                     user_domain_name = kwargs['user_domain_name'])
        elif version == 2:
            self.log.info("Using keystone version 2 for authentication at URL: %s", kwargs['auth_url'])
            self._auth = v2.Password(auth_url = kwargs['auth_url'],
                                     username = kwargs['username'],
                                     password = kwargs['password'],
                                     tenant_name = kwargs['project_name'])
        else:
            # ROBUSTNESS: previously an unsupported version silently left
            # self._auth unset and the failure surfaced later as an
            # AttributeError on auth_handle access.
            self.log.error("Unsupported keystone version: %s", version)
            raise ValueError("Unsupported keystone version: %s" % version)

    @property
    def auth_handle(self):
        """keystoneauth1 identity plugin used to build a session."""
        return self._auth
+
+
class AuthDriver(object):
    """
    Driver class for handling authentication plugins for openstack
    """
    # Maps supported auth_type strings to their driver classes.
    AuthMethod = dict(
        password=PasswordDriver,
        token=TokenDriver,
    )

    def __init__(self, auth_type, version, logger=None, **kwargs):
        """
        auth_type (string): At this point, only "password" based
                            authentication is supported.
        version (string): Keystone API version
        logger (instance of logging.Logger)

        kwargs a dictionary of following key/value pairs
        {
          username (string)            : Username
          password (string)            : Password
          auth_url (string)            : Authentication URL
          tenant_name (string)         : Tenant Name
          user_domain_name (string)    : User domain name
          project_domain_name (string) : Project domain name
          region (string)              : Region name
        }

        Raises:
          KeyError if auth_type is not a supported authentication method
        """
        if logger is None:
            self.log = logging.getLogger('rwcal.openstack.auth')
            self.log.setLevel(logging.DEBUG)
        else:
            self.log = logger

        self.log.info("Using %s authentication method", auth_type)
        if auth_type not in AuthDriver.AuthMethod:
            self.log.error("Unsupported authentication method %s", auth_type)
            # BUG FIX: the original passed the format string and the argument
            # as two separate KeyError args, so the message was never
            # interpolated; interpolate it explicitly.
            raise KeyError("Unsupported authentication method %s" % auth_type)
        else:
            self._auth_method = AuthDriver.AuthMethod[auth_type](version, self.log, **kwargs)

    @property
    def auth_handle(self):
        """Authentication handle of the underlying auth-method driver."""
        return self._auth_method.auth_handle
--- /dev/null
+#!/usr/bin/python
+
+#
+# Copyright 2017 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import logging
+from .auth_drv import AuthDriver
+from keystoneauth1 import session
+
+
class SessionDriver(object):
    """
    Authentication session class for openstack.

    Wraps a keystoneauth1 Session built from an AuthDriver plugin and
    exposes token, project and user helpers on top of it.
    """
    def __init__(self, auth_method, version, cert_validate, logger=None, **kwargs):
        """
        Constructor for class SessionDriver
        auth_method (string): At this point, only "password" based
                              authentication is supported. See AuthDriver.AuthMethod
                              for more details
        version (string): Keystone API version
        cert_validate (boolean): Boolean to indicate if certificate validation is required
        logger (instance of logging.Logger)
        kwargs a dictionary of following key/value pairs
        {
          username (string)            : Username
          password (string)            : Password
          auth_url (string)            : Authentication URL
          tenant_name (string)         : Tenant Name
          user_domain_name (string)    : User domain name
          project_domain_name (string) : Project domain name
          region (string)              : Region name
        }
        """
        if logger is not None:
            self.log = logger
        else:
            self.log = logging.getLogger('rwcal.openstack.session')
            self.log.setLevel(logging.DEBUG)

        self._auth_url = kwargs['auth_url']

        self._auth = AuthDriver(auth_method, version, logger, **kwargs)
        self._sess = session.Session(auth=self._auth.auth_handle,
                                     verify=cert_validate)

    @property
    def session(self):
        """The underlying keystoneauth1 Session object."""
        return self._sess

    @property
    def auth_token(self):
        """
        Returns a valid Auth-Token, invalidating the session first when the
        current token is close to expiry.
        """
        if self._sess.auth.get_auth_state() and self.will_expire_after():
            self._sess.invalidate()
        return self._sess.get_token()

    @property
    def auth_url(self):
        """The keystone authentication URL this session was built with."""
        return self._auth_url

    def invalidate_auth_token(self):
        """
        Invalidate the cached token so the next request fetches a fresh one
        (used after an HTTP 401 response).
        """
        self._sess.invalidate()

    @property
    def auth_header(self):
        """Authentication headers for raw REST calls."""
        return self._sess.auth.get_headers(self._sess)

    @property
    def project_id(self):
        """Id of the project/tenant this session is scoped to."""
        return self._sess.get_project_id()

    @property
    def user_id(self):
        """Id of the authenticated user."""
        return self._sess.get_user_id()

    def get_auth_state(self):
        """Return the session's current authentication state."""
        return self._sess.auth.get_auth_state()

    def will_expire_after(self, timeout=180):
        """True when the current token expires within *timeout* seconds."""
        return self._sess.auth.auth_ref.will_expire_soon(stale_duration=timeout)
--- /dev/null
+
+#
+# Copyright 2017 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from .flavor import (
+ FlavorUtils,
+)
+
+from .network import (
+ NetworkUtils,
+)
+
+from .image import (
+ ImageUtils,
+)
+
+from .compute import(
+ ComputeUtils,
+)
--- /dev/null
+#!/usr/bin/python
+
+#
+# Copyright 2017 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import uuid
+import gi
+gi.require_version('RwcalYang', '1.0')
+from gi.repository import RwcalYang
+
+
class ImageValidateError(Exception):
    """Raised when a VDU image cannot be resolved or validated
    (see ComputeUtils.resolve_image_n_validate)."""
    pass
+
class VolumeValidateError(Exception):
    """Raised when a VDU volume specification is invalid or unsupported
    (see ComputeUtils.make_vdu_volume_args)."""
    pass
+
class AffinityGroupError(Exception):
    """Raised when a named affinity/anti-affinity server group cannot be
    found (see ComputeUtils._select_affinity_group)."""
    pass
+
+
class ComputeUtils(object):
    """
    Utility class for compute operations
    """
    # Top-level VDUInitParams fields that carry EPA/flavor requirements.
    epa_types = ['vm_flavor',
                 'guest_epa',
                 'host_epa',
                 'host_aggregate',
                 'hypervisor_epa',
                 'vswitch_epa']

    def __init__(self, driver):
        """
        Constructor for class
        Arguments:
           driver: object of OpenstackDriver()
        """
        self._driver = driver
        self.log = driver.log

    @property
    def driver(self):
        """The OpenstackDriver instance this utility operates on."""
        return self._driver

    def search_vdu_flavor(self, vdu_params):
        """
        Function to search a matching flavor for VDU instantiation
        from already existing flavors

        Arguments:
          vdu_params: Protobuf GI object RwcalYang.VDUInitParams()

        Returns:
           flavor_id(string): Flavor id for VDU instantiation
           None if no flavor could be found
        """
        kwargs = {'vcpus': vdu_params.vm_flavor.vcpu_count,
                  'ram': vdu_params.vm_flavor.memory_mb,
                  'disk': vdu_params.vm_flavor.storage_gb, }

        flavors = self.driver.nova_flavor_find(**kwargs)
        flavor_list = list()
        for flv in flavors:
            flavor_list.append(self.driver.utils.flavor.parse_flavor_info(flv))

        flavor_id = self.driver.utils.flavor.match_resource_flavor(vdu_params, flavor_list)
        return flavor_id

    def select_vdu_flavor(self, vdu_params):
        """
        This function attempts to find a pre-existing flavor matching required
        parameters for VDU instantiation. If no such flavor is found, a new one
        is created.

        Arguments:
          vdu_params: Protobuf GI object RwcalYang.VDUInitParams()

        Returns:
           flavor_id(string): Flavor id for VDU instantiation
        """
        flavor_id = self.search_vdu_flavor(vdu_params)
        if flavor_id is not None:
            self.log.info("Found flavor with id: %s matching requirements for VDU: %s",
                          flavor_id, vdu_params.name)
            return flavor_id

        flavor = RwcalYang.FlavorInfoItem()
        # Flavor names must be unique; a UUID avoids collisions.
        flavor.name = str(uuid.uuid4())

        epa_dict = {k: v for k, v in vdu_params.as_dict().items()
                    if k in ComputeUtils.epa_types}

        flavor.from_dict(epa_dict)

        flavor_id = self.driver.nova_flavor_create(name=flavor.name,
                                                   ram=flavor.vm_flavor.memory_mb,
                                                   vcpus=flavor.vm_flavor.vcpu_count,
                                                   disk=flavor.vm_flavor.storage_gb,
                                                   epa_specs=self.driver.utils.flavor.get_extra_specs(flavor))
        return flavor_id

    def make_vdu_flavor_args(self, vdu_params):
        """
        Creates flavor related arguments for VDU operation
        Arguments:
          vdu_params: Protobuf GI object RwcalYang.VDUInitParams()

        Returns:
          A dictionary {'flavor_id': <flavor-id>}
        """
        return {'flavor_id': self.select_vdu_flavor(vdu_params)}

    def make_vdu_image_args(self, vdu_params):
        """
        Creates image related arguments for VDU operation
        Arguments:
          vdu_params: Protobuf GI object RwcalYang.VDUInitParams()

        Returns:
          A dictionary {'image_id': <image-id>}
        """
        kwargs = dict()
        if vdu_params.has_field('image_name'):
            kwargs['image_id'] = self.resolve_image_n_validate(vdu_params.image_name,
                                                               vdu_params.image_checksum)
        elif vdu_params.has_field('image_id'):
            kwargs['image_id'] = vdu_params.image_id

        return kwargs

    def resolve_image_n_validate(self, image_name, checksum=None):
        """
        Resolve the image_name to image-object by matching image_name and checksum

        Arguments:
          image_name (string): Name of image
          checksum (string): Checksum associated with image

        Raises ImageValidateError in case of Errors
        """
        image_info = [i for i in self.driver._glance_image_list if i['name'] == image_name]

        if not image_info:
            self.log.error("No image with name: %s found", image_name)
            raise ImageValidateError("No image with name %s found" % (image_name))

        for image in image_info:
            if 'status' not in image or image['status'] != 'active':
                self.log.error("Image %s not in active state. Current state: %s",
                               image_name, image['status'])
                raise ImageValidateError("Image with name %s found in incorrect (%s) state"
                                         % (image_name, image['status']))
            # Accept the first active image that matches the checksum (or any
            # active image when no checksum was requested).
            if not checksum or checksum == image['checksum']:
                break
        else:
            self.log.info("No image found with matching name: %s and checksum: %s",
                          image_name, checksum)
            raise ImageValidateError("No image found with matching name: %s and checksum: %s"
                                     % (image_name, checksum))
        return image['id']

    def make_vdu_volume_args(self, volume, vdu_params):
        """
        Arguments:
          volume:  Protobuf GI object RwcalYang.VDUInitParams_Volumes()
          vdu_params: Protobuf GI object RwcalYang.VDUInitParams()

        Returns:
          A dictionary required to create volume for VDU

        Raises VolumeValidateError in case of Errors
        """
        kwargs = dict()

        if volume.has_field('volume_ref'):
            self.log.error("Unsupported option <Volume Reference> found for volume: %s", volume.name)
            raise VolumeValidateError("Unsupported option <Volume Reference> found for volume: %s"
                                      % (volume.name))

        kwargs['boot_index'] = volume.boot_priority
        if "image" in volume:
            # Support image->volume
            if volume.image is not None:
                kwargs['source_type'] = "image"
                kwargs['uuid'] = self.resolve_image_n_validate(volume.image, volume.image_checksum)
        else:
            # Support blank->volume
            kwargs['source_type'] = "blank"
        kwargs['device_name'] = volume.name
        kwargs['destination_type'] = "volume"
        kwargs['volume_size'] = volume.size
        kwargs['delete_on_termination'] = True

        if volume.has_field('device_type'):
            if volume.device_type == 'cdrom':
                kwargs['device_type'] = 'cdrom'
            elif volume.device_bus == 'ide':
                kwargs['disk_bus'] = 'ide'
            else:
                self.log.error("Unsupported device_type <%s> found for volume: %s",
                               volume.device_type, volume.name)
                raise VolumeValidateError("Unsupported device_type <%s> found for volume: %s"
                                          % (volume.device_type, volume.name))
        else:
            self.log.error("Mandatory field <device_type> not specified for volume: %s",
                           volume.name)
            raise VolumeValidateError("Mandatory field <device_type> not specified for volume: %s"
                                      % (volume.name))
        return kwargs

    def make_vdu_storage_args(self, vdu_params):
        """
        Creates volume related arguments for VDU operation

        Arguments:
          vdu_params: Protobuf GI object RwcalYang.VDUInitParams()

        Returns:
          A dictionary required for volumes creation for VDU instantiation
        """
        kwargs = dict()
        if vdu_params.has_field('volumes'):
            kwargs['block_device_mapping_v2'] = list()
            for volume in vdu_params.volumes:
                kwargs['block_device_mapping_v2'].append(self.make_vdu_volume_args(volume, vdu_params))
        return kwargs

    def make_vdu_network_args(self, vdu_params):
        """
        Creates VDU network related arguments for VDU operation
        Arguments:
          vdu_params: Protobuf GI object RwcalYang.VDUInitParams()

        Returns:
          A dictionary {'port_list' : [ports], 'network_list': [networks]}
        """
        kwargs = dict()
        kwargs['port_list'], kwargs['network_list'] = self.driver.utils.network.setup_vdu_networking(vdu_params)
        return kwargs

    def make_vdu_boot_config_args(self, vdu_params):
        """
        Creates VDU boot config related arguments for VDU operation
        Arguments:
          vdu_params: Protobuf GI object RwcalYang.VDUInitParams()

        Returns:
          A dictionary {
             'userdata'     : <cloud-init>,
             'config_drive' : True/False,
             'files'        : [ file name ],
             'metadata'     : <metadata string>
          }
        """
        kwargs = dict()
        if vdu_params.has_field('vdu_init') and vdu_params.vdu_init.has_field('userdata'):
            kwargs['userdata'] = vdu_params.vdu_init.userdata
        else:
            kwargs['userdata'] = ''

        if not vdu_params.has_field('supplemental_boot_data'):
            return kwargs

        if vdu_params.supplemental_boot_data.has_field('config_file'):
            files = dict()
            for cf in vdu_params.supplemental_boot_data.config_file:
                files[cf.dest] = cf.source
            kwargs['files'] = files

        if vdu_params.supplemental_boot_data.has_field('boot_data_drive'):
            kwargs['config_drive'] = vdu_params.supplemental_boot_data.boot_data_drive
        else:
            kwargs['config_drive'] = False

        if vdu_params.supplemental_boot_data.has_field('custom_meta_data'):
            metadata = dict()
            for cm in vdu_params.supplemental_boot_data.custom_meta_data:
                # BUG FIX: the original used the list item object itself as the
                # dict key (metadata[cm]); the key must be the item's name,
                # parallel to the config_file loop above.
                metadata[cm.name] = cm.value
            kwargs['metadata'] = metadata

        return kwargs

    def _select_affinity_group(self, group_name):
        """
        Selects the affinity group based on name and return its id
        Arguments:
          group_name (string): Name of the Affinity/Anti-Affinity group
        Returns:
          Id of the matching group

        Raises exception AffinityGroupError if no matching group is found
        """
        groups = [g['id'] for g in self.driver._nova_affinity_group if g['name'] == group_name]
        if not groups:
            self.log.error("No affinity/anti-affinity group with name: %s found", group_name)
            raise AffinityGroupError("No affinity/anti-affinity group with name: %s found" % (group_name))
        return groups[0]

    def make_vdu_server_placement_args(self, vdu_params):
        """
        Function to create kwargs required for nova server placement

        Arguments:
          vdu_params: Protobuf GI object RwcalYang.VDUInitParams()

        Returns:
          A dictionary { 'availability_zone' : < Zone >, 'scheduler_hints': <group-id> }
        """
        kwargs = dict()

        if vdu_params.has_field('availability_zone') \
           and vdu_params.availability_zone.has_field('name'):
            kwargs['availability_zone'] = vdu_params.availability_zone

        if vdu_params.has_field('server_group'):
            kwargs['scheduler_hints'] = {
                'group': self._select_affinity_group(vdu_params.server_group)
            }
        return kwargs

    def make_vdu_server_security_args(self, vdu_params, account):
        """
        Function to create kwargs required for nova security group

        Arguments:
          vdu_params: Protobuf GI object RwcalYang.VDUInitParams()
          account: Protobuf GI object RwcalYang.CloudAccount()

        Returns:
          A dictionary {'security_groups' : < group > }
        """
        kwargs = dict()
        if account.openstack.security_groups:
            kwargs['security_groups'] = account.openstack.security_groups
        return kwargs

    def make_vdu_create_args(self, vdu_params, account):
        """
        Function to create kwargs required for nova_server_create API

        Arguments:
          vdu_params: Protobuf GI object RwcalYang.VDUInitParams()
          account: Protobuf GI object RwcalYang.CloudAccount()

        Returns:
          A kwargs dictionary for VDU create operation
        """
        kwargs = dict()

        kwargs['name'] = vdu_params.name

        kwargs.update(self.make_vdu_flavor_args(vdu_params))
        kwargs.update(self.make_vdu_storage_args(vdu_params))
        kwargs.update(self.make_vdu_image_args(vdu_params))
        kwargs.update(self.make_vdu_network_args(vdu_params))
        kwargs.update(self.make_vdu_boot_config_args(vdu_params))
        kwargs.update(self.make_vdu_server_placement_args(vdu_params))
        kwargs.update(self.make_vdu_server_security_args(vdu_params, account))
        return kwargs

    def _parse_vdu_mgmt_address_info(self, vm_info):
        """
        Get management_ip and public_ip for VDU

        Arguments:
          vm_info : A dictionary object return by novaclient library listing VM attributes

        Returns:
          A tuple of mgmt_ip (string) and public_ip (string)
        """
        mgmt_ip = None
        public_ip = None
        if 'addresses' in vm_info:
            for network_name, network_info in vm_info['addresses'].items():
                if network_info and network_name == self.driver.mgmt_network:
                    for interface in network_info:
                        if 'OS-EXT-IPS:type' in interface:
                            if interface['OS-EXT-IPS:type'] == 'fixed':
                                mgmt_ip = interface['addr']
                            elif interface['OS-EXT-IPS:type'] == 'floating':
                                public_ip = interface['addr']
        return (mgmt_ip, public_ip)

    def get_vdu_epa_info(self, vm_info):
        """
        Get flavor information (including EPA) for VDU

        Arguments:
          vm_info : A dictionary returned by novaclient library listing VM attributes
        Returns:
          flavor_info: A dictionary object returned by novaclient library listing flavor attributes;
          an empty dict when the flavor is missing or cannot be fetched
        """
        if 'flavor' in vm_info and 'id' in vm_info['flavor']:
            try:
                flavor_info = self.driver.nova_flavor_get(vm_info['flavor']['id'])
                return flavor_info
            except Exception as e:
                self.log.exception("Exception %s occured during get-flavor", str(e))
        # ROBUSTNESS: previously the no-flavor path fell off the end and
        # implicitly returned None; always return a dict.
        return dict()

    def _parse_vdu_cp_info(self, vdu_id):
        """
        Get connection point information for VDU identified by vdu_id
        Arguments:
          vdu_id (string): VDU Id (vm_info['id'])
        Returns:
          A List of object RwcalYang.VDUInfoParams_ConnectionPoints()
        """
        cp_list = []
        # Fill the port information
        port_list = self.driver.neutron_port_list(**{'device_id': vdu_id})
        for port in port_list:
            cp_info = self.driver.utils.network._parse_cp(port)
            cp = RwcalYang.VDUInfoParams_ConnectionPoints()
            cp.from_dict(cp_info.as_dict())
            cp_list.append(cp)
        return cp_list

    def _parse_vdu_state_info(self, vm_info):
        """
        Get VDU state information

        Arguments:
          vm_info : A dictionary returned by novaclient library listing VM attributes

        Returns:
          state (string): One of 'active', 'failed', 'inactive', 'unknown'
        """
        if 'status' in vm_info:
            if vm_info['status'] == 'ACTIVE':
                vdu_state = 'active'
            elif vm_info['status'] == 'ERROR':
                vdu_state = 'failed'
            else:
                vdu_state = 'inactive'
        else:
            vdu_state = 'unknown'
        return vdu_state

    def _parse_vdu_server_group_info(self, vm_info):
        """
        Get VDU server group information
        Arguments:
          vm_info : A dictionary returned by novaclient library listing VM attributes

        Returns:
          server_group_name (string): Name of the server group to which VM belongs, else empty string
        """
        server_group = [v['name']
                        for v in self.driver.nova_server_group_list()
                        if vm_info['id'] in v['members']]
        if server_group:
            return server_group[0]
        else:
            return str()

    def _parse_vdu_volume_info(self, vm_info):
        """
        Get VDU volume information
        Arguments:
          vm_info : A dictionary returned by novaclient library listing VM attributes

        Returns:
          List of RwcalYang.VDUInfoParams_Volumes()
        """
        volumes = list()

        try:
            volume_list = self.driver.nova_volume_list(vm_info['id'])
        except Exception as e:
            self.log.exception("Exception %s occured during nova-volume-list", str(e))
            return volumes

        for v in volume_list:
            volume = RwcalYang.VDUInfoParams_Volumes()
            try:
                volume.name = (v['device']).split('/')[2]
                volume.volume_id = v['volumeId']
                details = self.driver.cinder_volume_get(volume.volume_id)
                # Renamed from (k, v) to avoid shadowing the outer loop
                # variable v.
                for meta_name, meta_value in details.metadata.items():
                    vd = volume.custom_meta_data.add()
                    vd.name = meta_name
                    vd.value = meta_value
            except Exception as e:
                self.log.exception("Exception %s occured during volume list parsing", str(e))
                continue
            else:
                volumes.append(volume)
        return volumes

    def _parse_vdu_console_url(self, vm_info):
        """
        Get VDU console URL
        Arguments:
          vm_info : A dictionary returned by novaclient library listing VM attributes

        Returns:
          console_url(string): Console URL for VM, or None when the VM is not
          active or the lookup fails
        """
        console_url = None
        # BUG FIX: the original compared against 'ACTIVE', but
        # _parse_vdu_state_info returns lower-case states, so the console URL
        # was never fetched.
        if self._parse_vdu_state_info(vm_info) == 'active':
            try:
                console_url = self.driver.nova_server_console(vm_info['id'])
            except Exception as e:
                self.log.exception("Exception %s occured during console url lookup", str(e))
        return console_url

    def parse_cloud_vdu_info(self, vm_info):
        """
        Parse vm_info dictionary (return by python-client) and put values in GI object for VDU

        Arguments:
          vm_info : A dictionary object return by novaclient library listing VM attributes

        Returns:
          Protobuf GI Object of type RwcalYang.VDUInfoParams()
        """
        vdu = RwcalYang.VDUInfoParams()
        vdu.name = vm_info['name']
        vdu.vdu_id = vm_info['id']
        vdu.cloud_type = 'openstack'

        if 'config_drive' in vm_info:
            vdu.supplemental_boot_data.boot_data_drive = vm_info['config_drive']

        if 'image' in vm_info and 'id' in vm_info['image']:
            vdu.image_id = vm_info['image']['id']

        if 'availability_zone' in vm_info:
            vdu.availability_zone = vm_info['availability_zone']

        vdu.state = self._parse_vdu_state_info(vm_info)
        management_ip, public_ip = self._parse_vdu_mgmt_address_info(vm_info)

        if management_ip:
            vdu.management_ip = management_ip

        if public_ip:
            vdu.public_ip = public_ip

        if 'flavor' in vm_info and 'id' in vm_info['flavor']:
            vdu.flavor_id = vm_info['flavor']['id']
            flavor_info = self.get_vdu_epa_info(vm_info)
            vm_flavor = self.driver.utils.flavor.parse_vm_flavor_epa_info(flavor_info)
            guest_epa = self.driver.utils.flavor.parse_guest_epa_info(flavor_info)
            host_epa = self.driver.utils.flavor.parse_host_epa_info(flavor_info)
            host_aggregates = self.driver.utils.flavor.parse_host_aggregate_epa_info(flavor_info)

            vdu.vm_flavor.from_dict(vm_flavor.as_dict())
            vdu.guest_epa.from_dict(guest_epa.as_dict())
            vdu.host_epa.from_dict(host_epa.as_dict())
            for aggr in host_aggregates:
                ha = vdu.host_aggregate.add()
                ha.from_dict(aggr.as_dict())

        cp_list = self._parse_vdu_cp_info(vdu.vdu_id)
        for cp in cp_list:
            vdu.connection_points.append(cp)

        vdu.server_group.name = self._parse_vdu_server_group_info(vm_info)

        for v in self._parse_vdu_volume_info(vm_info):
            vdu.volumes.append(v)

        vdu.console_url = self._parse_vdu_console_url(vm_info)
        return vdu

    def perform_vdu_network_cleanup(self, vdu_id):
        """
        This function cleans up networking resources related to VDU
        Arguments:
          vdu_id(string): VDU id
        Returns:
          None
        """
        ### Get list of floating_ips associated with this instance and delete them
        floating_ips = [f for f in self.driver.nova_floating_ip_list() if f.instance_id == vdu_id]
        for f in floating_ips:
            self.driver.nova_floating_ip_delete(f)

        ### Get list of ports on VM and delete ports not owned by a compute service.
        port_list = self.driver.neutron_port_list(**{'device_id': vdu_id})

        for port in port_list:
            if ((port['device_owner'] == 'compute:None') or (port['device_owner'] == '')):
                self.driver.neutron_port_delete(port['id'])
+
--- /dev/null
+#!/usr/bin/python
+
+#
+# Copyright 2017 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import re
+import gi
+
+gi.require_version('RwcalYang', '1.0')
+from gi.repository import RwcalYang
+
+
class GuestEPAUtils(object):
    """
    Utility class for Guest EPA to Openstack flavor extra_specs conversion
    routines.

    Each mapping pair translates MANO enum names to the values used in Nova
    flavor extra_specs and back. Unknown inputs convert to None.
    """
    def __init__(self):
        self._mano_to_espec_cpu_pinning_policy = {
            'DEDICATED' : 'dedicated',
            'SHARED'    : 'shared',
            'ANY'       : 'any',
        }

        self._espec_to_mano_cpu_pinning_policy = {
            'dedicated' : 'DEDICATED',
            'shared'    : 'SHARED',
            'any'       : 'ANY',
        }

        # Page sizes are either symbolic ('large'/'small') or explicit KiB values.
        self._mano_to_espec_mempage_size = {
            'LARGE'        : 'large',
            'SMALL'        : 'small',
            'SIZE_2MB'     : 2048,
            'SIZE_1GB'     : 1048576,
            'PREFER_LARGE' : 'large',
        }

        # NOTE(review): the original literal had a duplicate 'large' key
        # ('large': 'LARGE' silently shadowed by 'large': 'PREFER_LARGE').
        # The dead first entry is removed here; behavior is unchanged
        # ('large' -> 'PREFER_LARGE', 'LARGE' is never produced) -- confirm
        # that 'PREFER_LARGE' is indeed the intended reverse mapping.
        self._espec_to_mano_mempage_size = {
            'small'   : 'SMALL',
            2048      : 'SIZE_2MB',
            1048576   : 'SIZE_1GB',
            'large'   : 'PREFER_LARGE',
        }

        self._mano_to_espec_cpu_thread_pinning_policy = {
            'AVOID'    : 'avoid',
            'SEPARATE' : 'separate',
            'ISOLATE'  : 'isolate',
            'PREFER'   : 'prefer',
        }

        self._espec_to_mano_cpu_thread_pinning_policy = {
            'avoid'    : 'AVOID',
            'separate' : 'SEPARATE',
            'isolate'  : 'ISOLATE',
            'prefer'   : 'PREFER',
        }

        self._espec_to_mano_numa_memory_policy = {
            'strict'   : 'STRICT',
            'preferred': 'PREFERRED',
        }

        self._mano_to_espec_numa_memory_policy = {
            'STRICT'   : 'strict',
            'PREFERRED': 'preferred',
        }

    def mano_to_extra_spec_cpu_pinning_policy(self, cpu_pinning_policy):
        """Return the extra_spec value for a MANO cpu pinning policy, or None."""
        return self._mano_to_espec_cpu_pinning_policy.get(cpu_pinning_policy)

    def extra_spec_to_mano_cpu_pinning_policy(self, cpu_pinning_policy):
        """Return the MANO cpu pinning policy for an extra_spec value, or None."""
        return self._espec_to_mano_cpu_pinning_policy.get(cpu_pinning_policy)

    def mano_to_extra_spec_mempage_size(self, mempage_size):
        """Return the extra_spec mem page size for a MANO value, or None."""
        return self._mano_to_espec_mempage_size.get(mempage_size)

    def extra_spec_to_mano_mempage_size(self, mempage_size):
        """Return the MANO mem page size for an extra_spec value, or None."""
        return self._espec_to_mano_mempage_size.get(mempage_size)

    def mano_to_extra_spec_cpu_thread_pinning_policy(self, cpu_thread_pinning_policy):
        """Return the extra_spec thread pinning policy for a MANO value, or None."""
        return self._mano_to_espec_cpu_thread_pinning_policy.get(cpu_thread_pinning_policy)

    def extra_spec_to_mano_cpu_thread_pinning_policy(self, cpu_thread_pinning_policy):
        """Return the MANO thread pinning policy for an extra_spec value, or None."""
        return self._espec_to_mano_cpu_thread_pinning_policy.get(cpu_thread_pinning_policy)

    def mano_to_extra_spec_trusted_execution(self, trusted_execution):
        """Map a boolean trusted-execution flag to 'trusted'/'untrusted'."""
        return 'trusted' if trusted_execution else 'untrusted'

    def extra_spec_to_mano_trusted_execution(self, trusted_execution):
        """Map 'trusted'/'untrusted' back to a boolean; None for anything else."""
        if trusted_execution == 'trusted':
            return True
        if trusted_execution == 'untrusted':
            return False
        return None

    def mano_to_extra_spec_numa_node_count(self, numa_node_count):
        """NUMA node count passes through unchanged to the extra_spec."""
        return numa_node_count

    def extra_specs_to_mano_numa_node_count(self, numa_node_count):
        """Extra_spec values arrive as strings; coerce to int for MANO."""
        return int(numa_node_count)

    def mano_to_extra_spec_numa_memory_policy(self, numa_memory_policy):
        """Return the extra_spec NUMA memory policy for a MANO value, or None."""
        return self._mano_to_espec_numa_memory_policy.get(numa_memory_policy)

    def extra_to_mano_spec_numa_memory_policy(self, numa_memory_policy):
        """Return the MANO NUMA memory policy for an extra_spec value, or None."""
        return self._espec_to_mano_numa_memory_policy.get(numa_memory_policy)
+
+
+
+
class HostEPAUtils(object):
    """
    Utility class for Host EPA to Openstack flavor extra_specs conversion
    routines.

    The MANO-side names come in PREFER_/REQUIRE_ pairs that both map to the
    same OpenStack value, and the reverse mapping always yields the REQUIRE_
    form. The original hand-written tables duplicated every entry (and had
    already drifted, e.g. the ARMv7 arch strings); the pairs are now generated
    from single base lists so the two directions cannot diverge.
    """

    # OpenStack cpu_info model names. MANO keys are PREFER_/REQUIRE_ + upper-case.
    _CPU_MODELS = [
        "Westmere", "SandyBridge", "IvyBridge", "Haswell", "Broadwell",
        "Nehalem", "Penryn", "Conroe", "Core2Duo",
    ]

    # OpenStack cpu feature flag names. MANO keys are PREFER_/REQUIRE_ + upper-case.
    _CPU_FEATURES = [
        "aes", "vme", "de", "pse", "tsc", "msr", "pae", "mce", "cx8",
        "apic", "sep", "mtrr", "pge", "mca", "cmov", "pat", "pse36",
        "clflush", "dts", "acpi", "mmx", "fxsr", "sse", "sse2", "ss",
        "ht", "tm", "ia64", "pbe", "rdtscp", "pni", "pclmulqdq",
        "dtes64", "monitor", "ds_cpl", "vmx", "smx", "est", "tm2",
        "ssse3", "cid", "fma", "cx16", "xtpr", "pdcm", "pcid", "dca",
        "sse4_1", "sse4_2", "x2apic", "movbe", "popcnt",
        "tsc_deadline_timer", "xsave", "avx", "f16c", "rdrand",
        "fsgsbase", "bmi1", "hle", "avx2", "smep", "bmi2", "erms",
        "invpcid", "rtm", "mpx", "rdseed", "adx", "smap",
    ]

    def __init__(self):
        self._mano_to_espec_cpumodel = {}
        for model in self._CPU_MODELS:
            self._mano_to_espec_cpumodel['PREFER_' + model.upper()] = model
            self._mano_to_espec_cpumodel['REQUIRE_' + model.upper()] = model

        self._espec_to_mano_cpumodel = {
            model: 'REQUIRE_' + model.upper() for model in self._CPU_MODELS
        }

        # Arch strings are not symmetric, so they stay as explicit tables.
        self._mano_to_espec_cpuarch = {
            "PREFER_X86"     : "x86",
            "REQUIRE_X86"    : "x86",
            "PREFER_X86_64"  : "x86_64",
            "REQUIRE_X86_64" : "x86_64",
            "PREFER_I686"    : "i686",
            "REQUIRE_I686"   : "i686",
            "PREFER_IA64"    : "ia64",
            "REQUIRE_IA64"   : "ia64",
            "PREFER_ARMV7"   : "ARMv7",
            "REQUIRE_ARMV7"  : "ARMv7",
            "PREFER_ARMV8"   : "ARMv8-A",
            "REQUIRE_ARMV8"  : "ARMv8-A",
        }

        # NOTE(review): the forward table emits "ARMv7" but this reverse table
        # expects "ARMv7-A", so an ARMv7 spec written by this module will not
        # round-trip -- confirm which string Nova actually reports.
        self._espec_to_mano_cpuarch = {
            "x86"     : "REQUIRE_X86",
            "x86_64"  : "REQUIRE_X86_64",
            "i686"    : "REQUIRE_I686",
            "ia64"    : "REQUIRE_IA64",
            "ARMv7-A" : "REQUIRE_ARMV7",
            "ARMv8-A" : "REQUIRE_ARMV8",
        }

        self._mano_to_espec_cpuvendor = {
            "PREFER_INTEL"  : "Intel",
            "REQUIRE_INTEL" : "Intel",
            "PREFER_AMD"    : "AMD",
            "REQUIRE_AMD"   : "AMD",
        }

        self._espec_to_mano_cpuvendor = {
            "Intel" : "REQUIRE_INTEL",
            "AMD"   : "REQUIRE_AMD",
        }

        self._mano_to_espec_cpufeatures = {}
        for feat in self._CPU_FEATURES:
            self._mano_to_espec_cpufeatures['PREFER_' + feat.upper()] = feat
            self._mano_to_espec_cpufeatures['REQUIRE_' + feat.upper()] = feat

        self._espec_to_mano_cpufeatures = {
            feat: 'REQUIRE_' + feat.upper() for feat in self._CPU_FEATURES
        }

    def mano_to_extra_spec_cpu_model(self, cpu_model):
        """Return the Nova cpu_info model for a MANO cpu model, or None."""
        return self._mano_to_espec_cpumodel.get(cpu_model)

    def extra_specs_to_mano_cpu_model(self, cpu_model):
        """Return the MANO (REQUIRE_*) cpu model for a Nova value, or None."""
        return self._espec_to_mano_cpumodel.get(cpu_model)

    def mano_to_extra_spec_cpu_arch(self, cpu_arch):
        """Return the Nova cpu_info arch for a MANO cpu arch, or None."""
        return self._mano_to_espec_cpuarch.get(cpu_arch)

    def extra_specs_to_mano_cpu_arch(self, cpu_arch):
        """Return the MANO (REQUIRE_*) cpu arch for a Nova value, or None."""
        return self._espec_to_mano_cpuarch.get(cpu_arch)

    def mano_to_extra_spec_cpu_vendor(self, cpu_vendor):
        """Return the Nova cpu_info vendor for a MANO cpu vendor, or None."""
        return self._mano_to_espec_cpuvendor.get(cpu_vendor)

    def extra_spec_to_mano_cpu_vendor(self, cpu_vendor):
        """Return the MANO (REQUIRE_*) cpu vendor for a Nova value, or None."""
        return self._espec_to_mano_cpuvendor.get(cpu_vendor)

    def mano_to_extra_spec_cpu_socket_count(self, cpu_sockets):
        """Socket count passes through unchanged to the extra_spec."""
        return cpu_sockets

    def extra_spec_to_mano_cpu_socket_count(self, cpu_sockets):
        """Extra_spec values arrive as strings; coerce to int for MANO."""
        return int(cpu_sockets)

    def mano_to_extra_spec_cpu_core_count(self, cpu_core_count):
        """Core count passes through unchanged to the extra_spec."""
        return cpu_core_count

    def extra_spec_to_mano_cpu_core_count(self, cpu_core_count):
        """Extra_spec values arrive as strings; coerce to int for MANO."""
        return int(cpu_core_count)

    def mano_to_extra_spec_cpu_core_thread_count(self, core_thread_count):
        """Thread count passes through unchanged to the extra_spec."""
        return core_thread_count

    def extra_spec_to_mano_cpu_core_thread_count(self, core_thread_count):
        """Extra_spec values arrive as strings; coerce to int for MANO."""
        return int(core_thread_count)

    def mano_to_extra_spec_cpu_features(self, features):
        """
        Build the cpu_info features extra_spec string from MANO feature names.

        Unknown names are skipped. Multiple features are joined with spaces
        and prefixed with the '<all-in>' set operator; a single feature is
        emitted bare; no recognized features yields None.
        """
        cpu_features = [self._mano_to_espec_cpufeatures[f] for f in features
                        if f in self._mano_to_espec_cpufeatures]
        if len(cpu_features) > 1:
            return '<all-in> ' + " ".join(cpu_features)
        if len(cpu_features) == 1:
            return cpu_features[0]
        return None

    def extra_spec_to_mano_cpu_features(self, features):
        """
        Convert a cpu_info features extra_spec string to MANO REQUIRE_* names.

        A single leading comparison/set operator (e.g. '<all-in>', '==') is
        stripped if present; each remaining whitespace-separated flag is
        mapped, skipping unknown flags.
        """
        oper_symbols = ['=', '<in>', '<all-in>', '==', '!=', '>=', '<=',
                        's==', 's!=', 's<', 's<=', 's>', 's>=']
        feature_list = features
        for oper in oper_symbols:
            result = re.search('^' + oper + ' (.*?)$', features)
            if result is not None:
                feature_list = result.group(1)
                break

        return [self._espec_to_mano_cpufeatures[f]
                for f in feature_list.split()
                if f in self._espec_to_mano_cpufeatures]
+
+
class ExtraSpecUtils(object):
    """
    General utility class for flavor Extra Specs processing.
    """
    def __init__(self):
        self.host = HostEPAUtils()
        self.guest = GuestEPAUtils()
        # EPA-related extra_spec key names handled by this module.
        self.extra_specs_keywords = [
            'hw:cpu_policy',
            'hw:cpu_threads_policy',
            'hw:mem_page_size',
            'hw:numa_nodes',
            'hw:numa_mempolicy',
            'hw:numa_cpus',
            'hw:numa_mem',
            'trust:trusted_host',
            'pci_passthrough:alias',
            'capabilities:cpu_info:model',
            'capabilities:cpu_info:arch',
            'capabilities:cpu_info:vendor',
            'capabilities:cpu_info:topology:sockets',
            'capabilities:cpu_info:topology:cores',
            'capabilities:cpu_info:topology:threads',
            'capabilities:cpu_info:features',
        ]
        # Matches any spec name that starts with one of the keywords above.
        self.extra_specs_regex = re.compile(
            "|".join("^" + keyword for keyword in self.extra_specs_keywords))
+
+
+
+class FlavorUtils(object):
+ """
+ Utility class for handling the flavor
+ """
+ def __init__(self, driver):
+ """
+ Constructor for class
+ Arguments:
+ driver: object of OpenstackDriver()
+ """
+ self._epa = ExtraSpecUtils()
+ self._driver = driver
+ self.log = driver.log
+
    @property
    def driver(self):
        """The OpenstackDriver instance this utility class delegates to."""
        return self._driver
+
+ def _get_guest_epa_specs(self, guest_epa):
+ """
+ Returns EPA Specs dictionary for guest_epa attributes
+ """
+ epa_specs = dict()
+ if guest_epa.has_field('mempage_size'):
+ mempage_size = self._epa.guest.mano_to_extra_spec_mempage_size(guest_epa.mempage_size)
+ if mempage_size is not None:
+ epa_specs['hw:mem_page_size'] = mempage_size
+
+ if guest_epa.has_field('cpu_pinning_policy'):
+ cpu_pinning_policy = self._epa.guest.mano_to_extra_spec_cpu_pinning_policy(guest_epa.cpu_pinning_policy)
+ if cpu_pinning_policy is not None:
+ epa_specs['hw:cpu_policy'] = cpu_pinning_policy
+
+ if guest_epa.has_field('cpu_thread_pinning_policy'):
+ cpu_thread_pinning_policy = self._epa.guest.mano_to_extra_spec_cpu_thread_pinning_policy(guest_epa.cpu_thread_pinning_policy)
+ if cpu_thread_pinning_policy is None:
+ epa_specs['hw:cpu_threads_policy'] = cpu_thread_pinning_policy
+
+ if guest_epa.has_field('trusted_execution'):
+ trusted_execution = self._epa.guest.mano_to_extra_spec_trusted_execution(guest_epa.trusted_execution)
+ if trusted_execution is not None:
+ epa_specs['trust:trusted_host'] = trusted_execution
+
+ if guest_epa.has_field('numa_node_policy'):
+ if guest_epa.numa_node_policy.has_field('node_cnt'):
+ numa_node_count = self._epa.guest.mano_to_extra_spec_numa_node_count(guest_epa.numa_node_policy.node_cnt)
+ if numa_node_count is not None:
+ epa_specs['hw:numa_nodes'] = numa_node_count
+
+ if guest_epa.numa_node_policy.has_field('mem_policy'):
+ numa_memory_policy = self._epa.guest.mano_to_extra_spec_numa_memory_policy(guest_epa.numa_node_policy.mem_policy)
+ if numa_memory_policy is not None:
+ epa_specs['hw:numa_mempolicy'] = numa_memory_policy
+
+ if guest_epa.numa_node_policy.has_field('node'):
+ for node in guest_epa.numa_node_policy.node:
+ if node.has_field('vcpu') and node.vcpu:
+ epa_specs['hw:numa_cpus.'+str(node.id)] = ','.join([str(j.id) for j in node.vcpu])
+ if node.memory_mb:
+ epa_specs['hw:numa_mem.'+str(node.id)] = str(node.memory_mb)
+
+ if guest_epa.has_field('pcie_device'):
+ pci_devices = []
+ for device in guest_epa.pcie_device:
+ pci_devices.append(device.device_id +':'+str(device.count))
+ epa_specs['pci_passthrough:alias'] = ','.join(pci_devices)
+
+ return epa_specs
+
+ def _get_host_epa_specs(self,host_epa):
+ """
+ Returns EPA Specs dictionary for host_epa attributes
+ """
+ epa_specs = dict()
+
+ if host_epa.has_field('cpu_model'):
+ cpu_model = self._epa.host.mano_to_extra_spec_cpu_model(host_epa.cpu_model)
+ if cpu_model is not None:
+ epa_specs['capabilities:cpu_info:model'] = cpu_model
+
+ if host_epa.has_field('cpu_arch'):
+ cpu_arch = self._epa.host.mano_to_extra_spec_cpu_arch(host_epa.cpu_arch)
+ if cpu_arch is not None:
+ epa_specs['capabilities:cpu_info:arch'] = cpu_arch
+
+ if host_epa.has_field('cpu_vendor'):
+ cpu_vendor = self._epa.host.mano_to_extra_spec_cpu_vendor(host_epa.cpu_vendor)
+ if cpu_vendor is not None:
+ epa_specs['capabilities:cpu_info:vendor'] = cpu_vendor
+
+ if host_epa.has_field('cpu_socket_count'):
+ cpu_socket_count = self._epa.host.mano_to_extra_spec_cpu_socket_count(host_epa.cpu_socket_count)
+ if cpu_socket_count is not None:
+ epa_specs['capabilities:cpu_info:topology:sockets'] = cpu_socket_count
+
+ if host_epa.has_field('cpu_core_count'):
+ cpu_core_count = self._epa.host.mano_to_extra_spec_cpu_core_count(host_epa.cpu_core_count)
+ if cpu_core_count is not None:
+ epa_specs['capabilities:cpu_info:topology:cores'] = cpu_core_count
+
+ if host_epa.has_field('cpu_core_thread_count'):
+ cpu_core_thread_count = self._epa.host.mano_to_extra_spec_cpu_core_thread_count(host_epa.cpu_core_thread_count)
+ if cpu_core_thread_count is not None:
+ epa_specs['capabilities:cpu_info:topology:threads'] = cpu_core_thread_count
+
+ if host_epa.has_field('cpu_feature'):
+ cpu_features = []
+ espec_cpu_features = []
+ for feature in host_epa.cpu_feature:
+ cpu_features.append(feature.feature)
+ espec_cpu_features = self._epa.host.mano_to_extra_spec_cpu_features(cpu_features)
+ if espec_cpu_features is not None:
+ epa_specs['capabilities:cpu_info:features'] = espec_cpu_features
+ return epa_specs
+
+ def _get_hypervisor_epa_specs(self,guest_epa):
+ """
+ Returns EPA Specs dictionary for hypervisor_epa attributes
+ """
+ hypervisor_epa = dict()
+ return hypervisor_epa
+
+ def _get_vswitch_epa_specs(self, guest_epa):
+ """
+ Returns EPA Specs dictionary for vswitch_epa attributes
+ """
+ vswitch_epa = dict()
+ return vswitch_epa
+
+ def _get_host_aggregate_epa_specs(self, host_aggregate):
+ """
+ Returns EPA Specs dictionary for host aggregates
+ """
+ epa_specs = dict()
+ for aggregate in host_aggregate:
+ epa_specs['aggregate_instance_extra_specs:'+aggregate.metadata_key] = aggregate.metadata_value
+
+ return epa_specs
+
+ def get_extra_specs(self, flavor):
+ """
+ Returns epa_specs dictionary based on flavor information
+ Arguments
+ flavor -- Protobuf GI object for flavor_info (RwcalYang.FlavorInfoItem())
+ Returns:
+ A dictionary of extra_specs in format understood by novaclient library
+ """
+ epa_specs = dict()
+ if flavor.has_field('guest_epa'):
+ guest_epa = self._get_guest_epa_specs(flavor.guest_epa)
+ epa_specs.update(guest_epa)
+ if flavor.has_field('host_epa'):
+ host_epa = self._get_host_epa_specs(flavor.host_epa)
+ epa_specs.update(host_epa)
+ if flavor.has_field('hypervisor_epa'):
+ hypervisor_epa = self._get_hypervisor_epa_specs(flavor.hypervisor_epa)
+ epa_specs.update(hypervisor_epa)
+ if flavor.has_field('vswitch_epa'):
+ vswitch_epa = self._get_vswitch_epa_specs(flavor.vswitch_epa)
+ epa_specs.update(vswitch_epa)
+ if flavor.has_field('host_aggregate'):
+ host_aggregate = self._get_host_aggregate_epa_specs(flavor.host_aggregate)
+ epa_specs.update(host_aggregate)
+ return epa_specs
+
+
+ def parse_vm_flavor_epa_info(self, flavor_info):
+ """
+ Parse the flavor_info dictionary (returned by python-client) for vm_flavor
+
+ Arguments:
+ flavor_info: A dictionary object return by novaclient library listing flavor attributes
+
+ Returns:
+ vm_flavor = RwcalYang.FlavorInfoItem_VmFlavor()
+ """
+ vm_flavor = RwcalYang.FlavorInfoItem_VmFlavor()
+
+ if 'vcpus' in flavor_info and flavor_info['vcpus']:
+ vm_flavor.vcpu_count = flavor_info['vcpus']
+
+ if 'ram' in flavor_info and flavor_info['ram']:
+ vm_flavor.memory_mb = flavor_info['ram']
+
+ if 'disk' in flavor_info and flavor_info['disk']:
+ vm_flavor.storage_gb = flavor_info['disk']
+
+ return vm_flavor
+
    def parse_guest_epa_info(self, flavor_info):
        """
        Parse the flavor_info dictionary (returned by python-client) for guest_epa

        Arguments:
            flavor_info: A dictionary object return by novaclient library listing flavor attributes

        Returns:
            guest_epa = RwcalYang.FlavorInfoItem_GuestEpa()
        """
        guest_epa = RwcalYang.FlavorInfoItem_GuestEpa()
        # Walk every extra_spec key; unrecognized keys are silently ignored.
        for attr in flavor_info['extra_specs']:
            if attr == 'hw:cpu_policy':
                cpu_pinning_policy = self._epa.guest.extra_spec_to_mano_cpu_pinning_policy(flavor_info['extra_specs']['hw:cpu_policy'])
                if cpu_pinning_policy is not None:
                    guest_epa.cpu_pinning_policy = cpu_pinning_policy

            elif attr == 'hw:cpu_threads_policy':
                cpu_thread_pinning_policy = self._epa.guest.extra_spec_to_mano_cpu_thread_pinning_policy(flavor_info['extra_specs']['hw:cpu_threads_policy'])
                if cpu_thread_pinning_policy is not None:
                    guest_epa.cpu_thread_pinning_policy = cpu_thread_pinning_policy

            elif attr == 'hw:mem_page_size':
                mempage_size = self._epa.guest.extra_spec_to_mano_mempage_size(flavor_info['extra_specs']['hw:mem_page_size'])
                if mempage_size is not None:
                    guest_epa.mempage_size = mempage_size

            elif attr == 'hw:numa_nodes':
                numa_node_count = self._epa.guest.extra_specs_to_mano_numa_node_count(flavor_info['extra_specs']['hw:numa_nodes'])
                if numa_node_count is not None:
                    guest_epa.numa_node_policy.node_cnt = numa_node_count

            # 'hw:numa_cpus.<id>' carries the vCPU list of one guest NUMA
            # node. Dict iteration order is arbitrary, so the node entry may
            # already exist (created by a matching 'hw:numa_mem.<id>'): reuse
            # it rather than adding a duplicate.
            elif attr.startswith('hw:numa_cpus.'):
                node_id = attr.split('.')[1]
                nodes = [ n for n in guest_epa.numa_node_policy.node if n.id == int(node_id) ]
                if nodes:
                    numa_node = nodes[0]
                else:
                    numa_node = guest_epa.numa_node_policy.node.add()
                    numa_node.id = int(node_id)

                for x in flavor_info['extra_specs'][attr].split(','):
                    numa_node_vcpu = numa_node.vcpu.add()
                    numa_node_vcpu.id = int(x)

            # 'hw:numa_mem.<id>' carries the memory (MB) of one guest NUMA
            # node; same find-or-create handling as the vcpu key above.
            elif attr.startswith('hw:numa_mem.'):
                node_id = attr.split('.')[1]
                nodes = [ n for n in guest_epa.numa_node_policy.node if n.id == int(node_id) ]
                if nodes:
                    numa_node = nodes[0]
                else:
                    numa_node = guest_epa.numa_node_policy.node.add()
                    numa_node.id = int(node_id)

                numa_node.memory_mb = int(flavor_info['extra_specs'][attr])

            elif attr == 'hw:numa_mempolicy':
                numa_memory_policy = self._epa.guest.extra_to_mano_spec_numa_memory_policy(flavor_info['extra_specs']['hw:numa_mempolicy'])
                if numa_memory_policy is not None:
                    guest_epa.numa_node_policy.mem_policy = numa_memory_policy

            elif attr == 'trust:trusted_host':
                trusted_execution = self._epa.guest.extra_spec_to_mano_trusted_execution(flavor_info['extra_specs']['trust:trusted_host'])
                if trusted_execution is not None:
                    guest_epa.trusted_execution = trusted_execution

            # Comma-separated '<alias>:<count>' pairs describing PCI
            # passthrough devices.
            elif attr == 'pci_passthrough:alias':
                device_types = flavor_info['extra_specs']['pci_passthrough:alias']
                for device in device_types.split(','):
                    dev = guest_epa.pcie_device.add()
                    dev.device_id = device.split(':')[0]
                    dev.count = int(device.split(':')[1])
        return guest_epa
+
+ def parse_host_epa_info(self, flavor_info):
+ """
+ Parse the flavor_info dictionary (returned by python-client) for host_epa
+
+ Arguments:
+ flavor_info: A dictionary object return by novaclient library listing flavor attributes
+
+ Returns:
+ host_epa = RwcalYang.FlavorInfoItem_HostEpa()
+ """
+ host_epa = RwcalYang.FlavorInfoItem_HostEpa()
+ for attr in flavor_info['extra_specs']:
+ if attr == 'capabilities:cpu_info:model':
+ cpu_model = self._epa.host.extra_specs_to_mano_cpu_model(flavor_info['extra_specs']['capabilities:cpu_info:model'])
+ if cpu_model is not None:
+ host_epa.cpu_model = cpu_model
+
+ elif attr == 'capabilities:cpu_info:arch':
+ cpu_arch = self._epa.host.extra_specs_to_mano_cpu_arch(flavor_info['extra_specs']['capabilities:cpu_info:arch'])
+ if cpu_arch is not None:
+ host_epa.cpu_arch = cpu_arch
+
+ elif attr == 'capabilities:cpu_info:vendor':
+ cpu_vendor = self._epa.host.extra_spec_to_mano_cpu_vendor(flavor_info['extra_specs']['capabilities:cpu_info:vendor'])
+ if cpu_vendor is not None:
+ host_epa.cpu_vendor = cpu_vendor
+
+ elif attr == 'capabilities:cpu_info:topology:sockets':
+ cpu_sockets = self._epa.host.extra_spec_to_mano_cpu_socket_count(flavor_info['extra_specs']['capabilities:cpu_info:topology:sockets'])
+ if cpu_sockets is not None:
+ host_epa.cpu_socket_count = cpu_sockets
+
+ elif attr == 'capabilities:cpu_info:topology:cores':
+ cpu_cores = self._epa.host.extra_spec_to_mano_cpu_core_count(flavor_info['extra_specs']['capabilities:cpu_info:topology:cores'])
+ if cpu_cores is not None:
+ host_epa.cpu_core_count = cpu_cores
+
+ elif attr == 'capabilities:cpu_info:topology:threads':
+ cpu_threads = self._epa.host.extra_spec_to_mano_cpu_core_thread_count(flavor_info['extra_specs']['capabilities:cpu_info:topology:threads'])
+ if cpu_threads is not None:
+ host_epa.cpu_core_thread_count = cpu_threads
+
+ elif attr == 'capabilities:cpu_info:features':
+ cpu_features = self._epa.host.extra_spec_to_mano_cpu_features(flavor_info['extra_specs']['capabilities:cpu_info:features'])
+ if cpu_features is not None:
+ for feature in cpu_features:
+ host_epa.cpu_feature.append(feature)
+ return host_epa
+
+ def parse_host_aggregate_epa_info(self, flavor_info):
+ """
+ Parse the flavor_info dictionary (returned by python-client) for host_aggregate
+
+ Arguments:
+ flavor_info: A dictionary object return by novaclient library listing flavor attributes
+
+ Returns:
+ A list of objects host_aggregate of type RwcalYang.FlavorInfoItem_HostAggregate()
+ """
+ host_aggregates = list()
+ for attr in flavor_info['extra_specs']:
+ if attr.startswith('aggregate_instance_extra_specs:'):
+ aggregate = RwcalYang.FlavorInfoItem_HostAggregate()
+ aggregate.metadata_key = ":".join(attr.split(':')[1::])
+ aggregate.metadata_value = flavor_info['extra_specs'][attr]
+ host_aggregates.append(aggregate)
+ return host_aggregates
+
+
+ def parse_flavor_info(self, flavor_info):
+ """
+ Parse the flavor_info dictionary and put value in RIFT GI object for flavor
+ Arguments:
+ flavor_info: A dictionary object returned by novaclient library listing flavor attributes
+
+ Returns:
+ Protobuf GI Object of type RwcalYang.FlavorInfoItem()
+
+ """
+ flavor = RwcalYang.FlavorInfoItem()
+ if 'name' in flavor_info and flavor_info['name']:
+ flavor.name = flavor_info['name']
+ if 'id' in flavor_info and flavor_info['id']:
+ flavor.id = flavor_info['id']
+
+ ### If extra_specs in flavor_info
+ if 'extra_specs' in flavor_info:
+ flavor.vm_flavor = self.parse_vm_flavor_epa_info(flavor_info)
+ flavor.guest_epa = self.parse_guest_epa_info(flavor_info)
+ flavor.host_epa = self.parse_host_epa_info(flavor_info)
+ for aggr in self.parse_host_aggregate_epa_info(flavor_info):
+ ha = flavor.host_aggregate.add()
+ ha.from_dict(aggr.as_dict())
+ return flavor
+
+ def _match_vm_flavor(self, required, available):
+ self.log.info("Matching VM Flavor attributes")
+ if available.vcpu_count != required.vcpu_count:
+ self.log.debug("VCPU requirement mismatch. Required: %d, Available: %d",
+ required.vcpu_count,
+ available.vcpu_count)
+ return False
+ if available.memory_mb != required.memory_mb:
+ self.log.debug("Memory requirement mismatch. Required: %d MB, Available: %d MB",
+ required.memory_mb,
+ available.memory_mb)
+ return False
+ if available.storage_gb != required.storage_gb:
+ self.log.debug("Storage requirement mismatch. Required: %d GB, Available: %d GB",
+ required.storage_gb,
+ available.storage_gb)
+ return False
+ self.log.debug("VM Flavor match found")
+ return True
+
+ def _match_guest_epa(self, required, available):
+ self.log.info("Matching Guest EPA attributes")
+ if required.has_field('pcie_device'):
+ self.log.debug("Matching pcie_device")
+ if available.has_field('pcie_device') == False:
+ self.log.debug("Matching pcie_device failed. Not available in flavor")
+ return False
+ else:
+ for dev in required.pcie_device:
+ if not [ d for d in available.pcie_device
+ if ((d.device_id == dev.device_id) and (d.count == dev.count)) ]:
+ self.log.debug("Matching pcie_device failed. Required: %s, Available: %s",
+ required.pcie_device, available.pcie_device)
+ return False
+ elif available.has_field('pcie_device'):
+ self.log.debug("Rejecting available flavor because pcie_device not required but available")
+ return False
+
+
+ if required.has_field('mempage_size'):
+ self.log.debug("Matching mempage_size")
+ if available.has_field('mempage_size') == False:
+ self.log.debug("Matching mempage_size failed. Not available in flavor")
+ return False
+ else:
+ if required.mempage_size != available.mempage_size:
+ self.log.debug("Matching mempage_size failed. Required: %s, Available: %s",
+ required.mempage_size, available.mempage_size)
+ return False
+ elif available.has_field('mempage_size'):
+ self.log.debug("Rejecting available flavor because mempage_size not required but available")
+ return False
+
+ if required.has_field('cpu_pinning_policy'):
+ self.log.debug("Matching cpu_pinning_policy")
+ if required.cpu_pinning_policy != 'ANY':
+ if available.has_field('cpu_pinning_policy') == False:
+ self.log.debug("Matching cpu_pinning_policy failed. Not available in flavor")
+ return False
+ else:
+ if required.cpu_pinning_policy != available.cpu_pinning_policy:
+ self.log.debug("Matching cpu_pinning_policy failed. Required: %s, Available: %s",
+ required.cpu_pinning_policy, available.cpu_pinning_policy)
+ return False
+ elif available.has_field('cpu_pinning_policy'):
+ self.log.debug("Rejecting available flavor because cpu_pinning_policy not required but available")
+ return False
+
+ if required.has_field('cpu_thread_pinning_policy'):
+ self.log.debug("Matching cpu_thread_pinning_policy")
+ if available.has_field('cpu_thread_pinning_policy') == False:
+ self.log.debug("Matching cpu_thread_pinning_policy failed. Not available in flavor")
+ return False
+ else:
+ if required.cpu_thread_pinning_policy != available.cpu_thread_pinning_policy:
+ self.log.debug("Matching cpu_thread_pinning_policy failed. Required: %s, Available: %s",
+ required.cpu_thread_pinning_policy, available.cpu_thread_pinning_policy)
+ return False
+ elif available.has_field('cpu_thread_pinning_policy'):
+ self.log.debug("Rejecting available flavor because cpu_thread_pinning_policy not required but available")
+ return False
+
+ if required.has_field('trusted_execution'):
+ self.log.debug("Matching trusted_execution")
+ if required.trusted_execution == True:
+ if available.has_field('trusted_execution') == False:
+ self.log.debug("Matching trusted_execution failed. Not available in flavor")
+ return False
+ else:
+ if required.trusted_execution != available.trusted_execution:
+ self.log.debug("Matching trusted_execution failed. Required: %s, Available: %s",
+ required.trusted_execution, available.trusted_execution)
+ return False
+ elif available.has_field('trusted_execution'):
+ self.log.debug("Rejecting available flavor because trusted_execution not required but available")
+ return False
+
+ if required.has_field('numa_node_policy'):
+ self.log.debug("Matching numa_node_policy")
+ if available.has_field('numa_node_policy') == False:
+ self.log.debug("Matching numa_node_policy failed. Not available in flavor")
+ return False
+ else:
+ if required.numa_node_policy.has_field('node_cnt'):
+ self.log.debug("Matching numa_node_policy node_cnt")
+ if available.numa_node_policy.has_field('node_cnt') == False:
+ self.log.debug("Matching numa_node_policy node_cnt failed. Not available in flavor")
+ return False
+ else:
+ if required.numa_node_policy.node_cnt != available.numa_node_policy.node_cnt:
+ self.log.debug("Matching numa_node_policy node_cnt failed. Required: %s, Available: %s",
+ required.numa_node_policy.node_cnt, available.numa_node_policy.node_cnt)
+ return False
+ elif available.numa_node_policy.has_field('node_cnt'):
+ self.log.debug("Rejecting available flavor because numa node count not required but available")
+ return False
+
+ if required.numa_node_policy.has_field('mem_policy'):
+ self.log.debug("Matching numa_node_policy mem_policy")
+ if available.numa_node_policy.has_field('mem_policy') == False:
+ self.log.debug("Matching numa_node_policy mem_policy failed. Not available in flavor")
+ return False
+ else:
+ if required.numa_node_policy.mem_policy != available.numa_node_policy.mem_policy:
+ self.log.debug("Matching numa_node_policy mem_policy failed. Required: %s, Available: %s",
+ required.numa_node_policy.mem_policy, available.numa_node_policy.mem_policy)
+ return False
+ elif available.numa_node_policy.has_field('mem_policy'):
+ self.log.debug("Rejecting available flavor because num node mem_policy not required but available")
+ return False
+
+ if required.numa_node_policy.has_field('node'):
+ self.log.debug("Matching numa_node_policy nodes configuration")
+ if available.numa_node_policy.has_field('node') == False:
+ self.log.debug("Matching numa_node_policy nodes configuration failed. Not available in flavor")
+ return False
+ for required_node in required.numa_node_policy.node:
+ self.log.debug("Matching numa_node_policy nodes configuration for node %s",
+ required_node)
+ numa_match = False
+ for available_node in available.numa_node_policy.node:
+ if required_node.id != available_node.id:
+ self.log.debug("Matching numa_node_policy nodes configuration failed. Required: %s, Available: %s",
+ required_node, available_node)
+ continue
+ if required_node.vcpu != available_node.vcpu:
+ self.log.debug("Matching numa_node_policy nodes configuration failed. Required: %s, Available: %s",
+ required_node, available_node)
+ continue
+ if required_node.memory_mb != available_node.memory_mb:
+ self.log.debug("Matching numa_node_policy nodes configuration failed. Required: %s, Available: %s",
+ required_node, available_node)
+ continue
+ numa_match = True
+ if numa_match == False:
+ return False
+ elif available.numa_node_policy.has_field('node'):
+ self.log.debug("Rejecting available flavor because numa nodes not required but available")
+ return False
+ elif available.has_field('numa_node_policy'):
+ self.log.debug("Rejecting available flavor because numa_node_policy not required but available")
+ return False
+ self.log.info("Successful match for Guest EPA attributes")
+ return True
+
    def _match_vswitch_epa(self, required, available):
        """
        Match vswitch EPA attributes between a request and a flavor.

        NOTE(review): stub — no vswitch attributes are actually compared;
        every required/available combination is reported as a match.

        Arguments:
            required:  requested vswitch EPA attributes (currently unused)
            available: vswitch EPA attributes of an existing flavor (currently unused)

        Returns:
            True unconditionally
        """
        self.log.debug("VSwitch EPA match found")
        return True
+
    def _match_hypervisor_epa(self, required, available):
        """
        Match hypervisor EPA attributes between a request and a flavor.

        NOTE(review): stub — no hypervisor attributes are actually compared;
        every required/available combination is reported as a match.

        Arguments:
            required:  requested hypervisor EPA attributes (currently unused)
            available: hypervisor EPA attributes of an existing flavor (currently unused)

        Returns:
            True unconditionally
        """
        self.log.debug("Hypervisor EPA match found")
        return True
+
+ def _match_host_epa(self, required, available):
+ self.log.info("Matching Host EPA attributes")
+ if required.has_field('cpu_model'):
+ self.log.debug("Matching CPU model")
+ if available.has_field('cpu_model') == False:
+ self.log.debug("Matching CPU model failed. Not available in flavor")
+ return False
+ else:
+ #### Convert all PREFER to REQUIRE since flavor will only have REQUIRE attributes
+ if required.cpu_model.replace('PREFER', 'REQUIRE') != available.cpu_model:
+ self.log.debug("Matching CPU model failed. Required: %s, Available: %s",
+ required.cpu_model, available.cpu_model)
+ return False
+ elif available.has_field('cpu_model'):
+ self.log.debug("Rejecting available flavor because cpu_model not required but available")
+ return False
+
+ if required.has_field('cpu_arch'):
+ self.log.debug("Matching CPU architecture")
+ if available.has_field('cpu_arch') == False:
+ self.log.debug("Matching CPU architecture failed. Not available in flavor")
+ return False
+ else:
+ #### Convert all PREFER to REQUIRE since flavor will only have REQUIRE attributes
+ if required.cpu_arch.replace('PREFER', 'REQUIRE') != available.cpu_arch:
+ self.log.debug("Matching CPU architecture failed. Required: %s, Available: %s",
+ required.cpu_arch, available.cpu_arch)
+ return False
+ elif available.has_field('cpu_arch'):
+ self.log.debug("Rejecting available flavor because cpu_arch not required but available")
+ return False
+
+ if required.has_field('cpu_vendor'):
+ self.log.debug("Matching CPU vendor")
+ if available.has_field('cpu_vendor') == False:
+ self.log.debug("Matching CPU vendor failed. Not available in flavor")
+ return False
+ else:
+ #### Convert all PREFER to REQUIRE since flavor will only have REQUIRE attributes
+ if required.cpu_vendor.replace('PREFER', 'REQUIRE') != available.cpu_vendor:
+ self.log.debug("Matching CPU vendor failed. Required: %s, Available: %s",
+ required.cpu_vendor, available.cpu_vendor)
+ return False
+ elif available.has_field('cpu_vendor'):
+ self.log.debug("Rejecting available flavor because cpu_vendor not required but available")
+ return False
+
+ if required.has_field('cpu_socket_count'):
+ self.log.debug("Matching CPU socket count")
+ if available.has_field('cpu_socket_count') == False:
+ self.log.debug("Matching CPU socket count failed. Not available in flavor")
+ return False
+ else:
+ if required.cpu_socket_count != available.cpu_socket_count:
+ self.log.debug("Matching CPU socket count failed. Required: %s, Available: %s",
+ required.cpu_socket_count, available.cpu_socket_count)
+ return False
+ elif available.has_field('cpu_socket_count'):
+ self.log.debug("Rejecting available flavor because cpu_socket_count not required but available")
+ return False
+
+ if required.has_field('cpu_core_count'):
+ self.log.debug("Matching CPU core count")
+ if available.has_field('cpu_core_count') == False:
+ self.log.debug("Matching CPU core count failed. Not available in flavor")
+ return False
+ else:
+ if required.cpu_core_count != available.cpu_core_count:
+ self.log.debug("Matching CPU core count failed. Required: %s, Available: %s",
+ required.cpu_core_count, available.cpu_core_count)
+ return False
+ elif available.has_field('cpu_core_count'):
+ self.log.debug("Rejecting available flavor because cpu_core_count not required but available")
+ return False
+
+ if required.has_field('cpu_core_thread_count'):
+ self.log.debug("Matching CPU core thread count")
+ if available.has_field('cpu_core_thread_count') == False:
+ self.log.debug("Matching CPU core thread count failed. Not available in flavor")
+ return False
+ else:
+ if required.cpu_core_thread_count != available.cpu_core_thread_count:
+ self.log.debug("Matching CPU core thread count failed. Required: %s, Available: %s",
+ required.cpu_core_thread_count, available.cpu_core_thread_count)
+ return False
+ elif available.has_field('cpu_core_thread_count'):
+ self.log.debug("Rejecting available flavor because cpu_core_thread_count not required but available")
+ return False
+
+ if required.has_field('cpu_feature'):
+ self.log.debug("Matching CPU feature list")
+ if available.has_field('cpu_feature') == False:
+ self.log.debug("Matching CPU feature list failed. Not available in flavor")
+ return False
+ else:
+ for feature in required.cpu_feature:
+ if feature not in available.cpu_feature:
+ self.log.debug("Matching CPU feature list failed. Required feature: %s is not present. Available features: %s",
+ feature, available.cpu_feature)
+ return False
+ elif available.has_field('cpu_feature'):
+ self.log.debug("Rejecting available flavor because cpu_feature not required but available")
+ return False
+ self.log.info("Successful match for Host EPA attributes")
+ return True
+
+
+ def _match_placement_group_inputs(self, required, available):
+ self.log.info("Matching Host aggregate attributes")
+
+ if not required and not available:
+ # Host aggregate not required and not available => success
+ self.log.info("Successful match for Host Aggregate attributes")
+ return True
+ if required and available:
+ # Host aggregate requested and available => Do a match and decide
+ xx = [ x.as_dict() for x in required ]
+ yy = [ y.as_dict() for y in available ]
+ for i in xx:
+ if i not in yy:
+ self.log.debug("Rejecting available flavor because host Aggregate mismatch. Required: %s, Available: %s",
+ required, available)
+ return False
+ self.log.info("Successful match for Host Aggregate attributes")
+ return True
+ else:
+ # Either of following conditions => Failure
+ # - Host aggregate required but not available
+ # - Host aggregate not required but available
+ self.log.debug("Rejecting available flavor because host Aggregate mismatch. Required: %s, Available: %s",
+ required, available)
+ return False
+
+
+ def _match_epa_params(self, resource_info, request_params):
+ """
+ Match EPA attributes
+ Arguments:
+ resource_info: Protobuf GI object RwcalYang.FlavorInfoItem()
+ Following attributes would be accessed
+ - vm_flavor
+ - guest_epa
+ - host_epa
+ - host_aggregate
+
+ request_params: Protobuf GI object RwcalYang.VDUInitParams().
+ Following attributes would be accessed
+ - vm_flavor
+ - guest_epa
+ - host_epa
+ - host_aggregate
+ Returns:
+ True -- Match between resource_info and request_params
+ False -- No match between resource_info and request_params
+ """
+ result = False
+ result = self._match_vm_flavor(getattr(request_params, 'vm_flavor'),
+ getattr(resource_info, 'vm_flavor'))
+ if result == False:
+ self.log.debug("VM Flavor mismatched")
+ return False
+
+ result = self._match_guest_epa(getattr(request_params, 'guest_epa'),
+ getattr(resource_info, 'guest_epa'))
+ if result == False:
+ self.log.debug("Guest EPA mismatched")
+ return False
+
+ result = self._match_vswitch_epa(getattr(request_params, 'vswitch_epa'),
+ getattr(resource_info, 'vswitch_epa'))
+ if result == False:
+ self.log.debug("Vswitch EPA mismatched")
+ return False
+
+ result = self._match_hypervisor_epa(getattr(request_params, 'hypervisor_epa'),
+ getattr(resource_info, 'hypervisor_epa'))
+ if result == False:
+ self.log.debug("Hypervisor EPA mismatched")
+ return False
+
+ result = self._match_host_epa(getattr(request_params, 'host_epa'),
+ getattr(resource_info, 'host_epa'))
+ if result == False:
+ self.log.debug("Host EPA mismatched")
+ return False
+
+ result = self._match_placement_group_inputs(getattr(request_params, 'host_aggregate'),
+ getattr(resource_info, 'host_aggregate'))
+
+ if result == False:
+ self.log.debug("Host Aggregate mismatched")
+ return False
+
+ return True
+
+ def match_resource_flavor(self, vdu_init, flavor_list):
+ """
+ Arguments:
+ vdu_init: Protobuf GI object RwcalYang.VDUInitParams().
+ flavor_list: List of Protobuf GI object RwcalYang.FlavorInfoItem()
+
+ Returns:
+ Flavor_ID -- If match is found between vdu_init and one of flavor_info from flavor_list
+ None -- No match between vdu_init and one of flavor_info from flavor_list
+
+ Select a existing flavor if it matches the request or create new flavor
+ """
+ for flv in flavor_list:
+ self.log.info("Attempting to match compute requirement for VDU: %s with flavor %s",
+ vdu_init.name, flv)
+ if self._match_epa_params(flv, vdu_init):
+ self.log.info("Flavor match found for compute requirements for VDU: %s with flavor name: %s, flavor-id: %s",
+ vdu_init.name, flv.name, flv.id)
+ return flv.id
+ return None
+
--- /dev/null
+#!/usr/bin/python
+
+#
+# Copyright 2017 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import os
+import gi
+
+gi.require_version('RwcalYang', '1.0')
+from gi.repository import RwcalYang
+
+
+
class ImageUtils(object):
    """
    Utility helpers for glance (image) operations.
    """
    def __init__(self, driver):
        """
        Initialize the utility with its owning driver.

        Arguments:
            driver: object of OpenstackDriver(); supplies the logger.
        """
        self._driver = driver
        self.log = driver.log

    def make_image_args(self, image):
        """
        Build the kwargs dictionary consumed by the glance_image_create API.

        Arguments:
            image: Protobuf GI object for RwcalYang.ImageInfoItem()

        Returns:
            dict with 'name' plus 'disk_format'/'container_format' when set.
        """
        kwargs = {'name': image.name}
        # Optional attributes are copied only when they carry a truthy value.
        for attr in ('disk_format', 'container_format'):
            value = getattr(image, attr)
            if value:
                kwargs[attr] = value
        return kwargs

    def create_image_handle(self, image):
        """
        Open a binary file handle for the image payload.

        When the image carries a 'fileno', a duplicated descriptor is
        wrapped; otherwise the file at image.location is opened.

        Arguments:
            image: Protobuf GI object for RwcalYang.ImageInfoItem()

        Returns:
            An object of _io.BufferedReader (file handle)

        Raises:
            Re-raises any error from os.dup/os.fdopen/open after logging it.
        """
        try:
            if image.has_field("fileno"):
                handle = os.fdopen(os.dup(image.fileno), 'rb')
            else:
                handle = open(image.location, "rb")
        except Exception as e:
            self.log.exception("Could not open file for upload. Exception received: %s", str(e))
            raise
        return handle

    def parse_cloud_image_info(self, image_info):
        """
        Convert a glanceclient image dictionary into a GI image object.

        Arguments:
            image_info: dictionary returned by the glanceclient library
                        listing image attributes

        Returns:
            Protobuf GI Object of type RwcalYang.ImageInfoItem()
        """
        image = RwcalYang.ImageInfoItem()
        # Copy over only keys that are present with a truthy value.
        for attr in ('name', 'id', 'checksum', 'disk_format', 'container_format'):
            if image_info.get(attr):
                setattr(image, attr, image_info[attr])

        # Anything other than an explicit 'active' status maps to 'inactive'.
        image.state = 'active' if image_info.get('status') == 'active' else 'inactive'

        return image
+
--- /dev/null
+#!/usr/bin/python
+
+#
+# Copyright 2017 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import gi
+gi.require_version('RwcalYang', '1.0')
+from gi.repository import RwcalYang
+import neutronclient.common.exceptions as NeutronException
+
+
class NetworkUtils(object):
    """
    Utility class for network operations
    (parsing neutron responses and building neutron request dictionaries).
    """
    def __init__(self, driver):
        """
        Constructor for class
        Arguments:
            driver: object of OpenstackDriver()
        """
        self._driver = driver
        # Share the owning driver's logger.
        self.log = driver.log

    @property
    def driver(self):
        """Owning OpenstackDriver() instance."""
        return self._driver

    def _parse_cp(self, cp_info):
        """
        Parse the port_info dictionary returned by neutronclient
        Arguments:
            cp_info: A dictionary object representing port attributes

        Returns:
            Protobuf GI object of type RwcalYang.VirtualLinkInfoParams_ConnectionPoints()
        """
        cp = RwcalYang.VirtualLinkInfoParams_ConnectionPoints()
        if 'name' in cp_info and cp_info['name']:
            cp.name = cp_info['name']

        if 'id' in cp_info and cp_info['id']:
            cp.connection_point_id = cp_info['id']

        # Only the first fixed IP of the port is reported.
        if ('fixed_ips' in cp_info) and (len(cp_info['fixed_ips']) >= 1):
            if 'ip_address' in cp_info['fixed_ips'][0]:
                cp.ip_address = cp_info['fixed_ips'][0]['ip_address']

        if 'mac_address' in cp_info and cp_info['mac_address']:
            cp.mac_addr = cp_info['mac_address']

        # NOTE(review): unguarded key access -- assumes 'status' is always
        # present in the port dictionary; confirm against neutronclient.
        if cp_info['status'] == 'ACTIVE':
            cp.state = 'active'
        else:
            cp.state = 'inactive'

        if 'network_id' in cp_info and cp_info['network_id']:
            cp.virtual_link_id = cp_info['network_id']

        if 'device_id' in cp_info and cp_info['device_id']:
            cp.vdu_id = cp_info['device_id']
        return cp

    def parse_cloud_virtual_link_info(self, vlink_info, port_list, subnet):
        """
        Parse vlink_info dictionary (returned by python-client) and put values in GI object for Virtual Link

        Arguments:
            vlink_info : A dictionary object returned by neutronclient library listing network attributes
            port_list  : list of port dictionaries associated with the network
            subnet     : subnet dictionary for the network, or None

        Returns:
            Protobuf GI Object of type RwcalYang.VirtualLinkInfoParams()
        """
        link = RwcalYang.VirtualLinkInfoParams()
        link.name = vlink_info['name']
        if 'status' in vlink_info and vlink_info['status'] == 'ACTIVE':
            link.state = 'active'
        else:
            link.state = 'inactive'

        link.virtual_link_id = vlink_info['id']
        # Only ports whose device_owner is 'compute:None' are exposed
        # as connection points of the virtual link.
        for port in port_list:
            if ('device_owner' in port) and (port['device_owner'] == 'compute:None'):
                link.connection_points.append(self._parse_cp(port))

        if subnet is not None:
            link.subnet = subnet['cidr']

        # Optional provider-network attributes (present only when the
        # neutron provider extension data is returned).
        if ('provider:network_type' in vlink_info) and (vlink_info['provider:network_type'] != None):
            link.provider_network.overlay_type = vlink_info['provider:network_type'].upper()
        if ('provider:segmentation_id' in vlink_info) and (vlink_info['provider:segmentation_id']):
            link.provider_network.segmentation_id = vlink_info['provider:segmentation_id']
        if ('provider:physical_network' in vlink_info) and (vlink_info['provider:physical_network']):
            link.provider_network.physical_network = vlink_info['provider:physical_network'].upper()

        return link

    def setup_vdu_networking(self, vdu_params):
        """
        This function validates the networking/connectivity setup.

        Arguments:
            vdu_params: object of RwcalYang.VDUInitParams()

        Returns:
            A tuple (port_ids, network_ids) for the VDU; network_ids
            contains the management network id only when no connection
            point already references the management network.
        """
        port_args = list()
        network_ids = list()
        add_mgmt_net = False
        for cp in vdu_params.connection_points:
            if cp.virtual_link_id == self.driver._mgmt_network_id:
                ### Remove mgmt_network_id from net_ids
                add_mgmt_net = True
            port_args.append(self._create_cp_args(cp))

        if not add_mgmt_net:
            # No connection point used the mgmt network, so the VDU is
            # attached to it directly via network id.
            network_ids.append(self.driver._mgmt_network_id)

        ### Create ports and collect port ids
        port_ids = self.driver.neutron_multi_port_create(port_args)
        return port_ids, network_ids


    def _create_cp_args(self, cp):
        """
        Creates a request dictionary for port create call
        Arguments:
            cp: Object of RwcalYang.VDUInitParams_ConnectionPoints()
        Returns:
            dict() of request params
        Raises:
            NotImplementedError: when cp.type_yang is not a supported port type
        """
        args = dict()
        args['name'] = cp.name
        args['network_id'] = cp.virtual_link_id
        args['admin_state_up'] = True

        # Map CAL port types onto neutron vnic types.
        if cp.type_yang == 'VIRTIO' or cp.type_yang == 'E1000':
            args['binding:vnic_type'] = 'normal'
        elif cp.type_yang == 'SR_IOV':
            args['binding:vnic_type'] = 'direct'
        else:
            raise NotImplementedError("Port Type: %s not supported" %(cp.type_yang))

        if cp.static_ip_address:
            args["fixed_ips"] = [{"ip_address" : cp.static_ip_address}]

        # NOTE(review): membership test on a GI object -- presumably
        # equivalent to cp.has_field('port_security_enabled'); confirm.
        if 'port_security_enabled' in cp:
            args['port_security_enabled'] = cp.port_security_enabled

        if cp.has_field('security_group'):
            # Only the first of the driver's neutron security groups is applied.
            if self.driver._neutron_security_groups:
                gid = self.driver._neutron_security_groups[0]['id']
                args['security_groups'] = [ gid ]
        return args

    def make_virtual_link_args(self, link_params):
        """
        Function to create kwargs required for neutron_network_create API

        Arguments:
            link_params: Protobuf GI object RwcalYang.VirtualLinkReqParams()

        Returns:
            A kwargs dictionary for network operation
        """
        kwargs = dict()
        kwargs['name'] = link_params.name
        kwargs['admin_state_up'] = True
        kwargs['external_router'] = False
        kwargs['shared'] = False

        # Optional provider-network settings from the request.
        if link_params.has_field('provider_network'):
            if link_params.provider_network.has_field('physical_network'):
                kwargs['physical_network'] = link_params.provider_network.physical_network
            if link_params.provider_network.has_field('overlay_type'):
                kwargs['network_type'] = link_params.provider_network.overlay_type.lower()
            if link_params.provider_network.has_field('segmentation_id'):
                kwargs['segmentation_id'] = link_params.provider_network.segmentation_id

        return kwargs

    def make_subnet_args(self, link_params, network_id):
        """
        Function to create kwargs required for neutron_subnet_create API

        Arguments:
            link_params: Protobuf GI object RwcalYang.VirtualLinkReqParams()
            network_id : id of the neutron network the subnet belongs to

        Returns:
            A kwargs dictionary for subnet operation

        Raises:
            NeutronException.NotFound: a named subnet prefix pool does not exist
            NeutronException.NeutronException: no IP prefix or pool name given
        """
        kwargs = {'network_id' : network_id,
                  'dhcp_params': {'enable_dhcp': True},
                  'gateway_ip' : None,}

        if link_params.ip_profile_params.has_field('ip_version'):
            kwargs['ip_version'] = 6 if link_params.ip_profile_params.ip_version == 'ipv6' else 4
        else:
            # Default to IPv4 when the profile does not say otherwise.
            kwargs['ip_version'] = 4

        # CIDR source precedence: explicit subnet_address, then a named
        # subnet prefix pool, then the link-level 'subnet' field.
        if link_params.ip_profile_params.has_field('subnet_address'):
            kwargs['cidr'] = link_params.ip_profile_params.subnet_address
        elif link_params.ip_profile_params.has_field('subnet_prefix_pool'):
            name = link_params.ip_profile_params.subnet_prefix_pool
            pools = [ p['id'] for p in self.driver._neutron_subnet_prefix_pool if p['name'] == name ]
            if not pools:
                self.log.error("Could not find subnet pool with name :%s to be used for network: %s",
                               link_params.ip_profile_params.subnet_prefix_pool,
                               link_params.name)
                raise NeutronException.NotFound("SubnetPool with name %s not found"%(link_params.ip_profile_params.subnet_prefix_pool))

            kwargs['subnetpool_id'] = pools[0]

        elif link_params.has_field('subnet'):
            kwargs['cidr'] = link_params.subnet
        else:
            raise NeutronException.NeutronException("No IP Prefix or Pool name specified")

        # Optional DHCP configuration overrides.
        if link_params.ip_profile_params.has_field('dhcp_params'):
            if link_params.ip_profile_params.dhcp_params.has_field('enabled'):
                kwargs['dhcp_params']['enable_dhcp'] = link_params.ip_profile_params.dhcp_params.enabled
            if link_params.ip_profile_params.dhcp_params.has_field('start_address'):
                kwargs['dhcp_params']['start_address'] = link_params.ip_profile_params.dhcp_params.start_address
            if link_params.ip_profile_params.dhcp_params.has_field('count'):
                kwargs['dhcp_params']['count'] = link_params.ip_profile_params.dhcp_params.count

        if link_params.ip_profile_params.has_field('dns_server'):
            kwargs['dns_server'] = []
            for server in link_params.ip_profile_params.dns_server:
                kwargs['dns_server'].append(server.address)

        if link_params.ip_profile_params.has_field('gateway_address'):
            kwargs['gateway_ip'] = link_params.ip_profile_params.gateway_address

        return kwargs
import logging
import os
import subprocess
-import uuid
import tempfile
import yaml
+import gi
+gi.require_version('RwSdn', '1.0')
+gi.require_version('RwCal', '1.0')
+gi.require_version('RwcalYang', '1.0')
+
import rift.rwcal.openstack as openstack_drv
+
+
import rw_status
import rift.cal.rwcal_status as rwcal_status
import rwlogger
import neutronclient.common.exceptions as NeutronException
import keystoneclient.exceptions as KeystoneExceptions
-import tornado
-import gi
-gi.require_version('RwSdn', '1.0')
from gi.repository import (
GObject,
RwCal,
+ RwSdn, # Vala package
+ RwsdnYang,
RwTypes,
RwcalYang)
rwcalstatus = rwcal_status.rwcalstatus_from_exc_map(rwstatus_exception_map)
-espec_utils = openstack_drv.OpenstackExtraSpecUtils()
-
class OpenstackCALOperationFailure(Exception):
pass
pass
class RwcalAccountDriver(object):
    """
    Container class per cloud account: caches one OpenstackDriver
    instance for a given cloud account.
    """
    def __init__(self, logger, **kwargs):
        """
        Arguments:
            logger: logger to use for this account's driver
            kwargs: forwarded verbatim to OpenstackDriver()
        """
        self.log = logger
        try:
            self._driver = openstack_drv.OpenstackDriver(logger=self.log, **kwargs)
        except (KeystoneExceptions.Unauthorized,
                KeystoneExceptions.AuthorizationFailure,
                NeutronException.NotFound):
            # Known credential/connectivity failures propagate untouched
            # (no extra logging for these expected error classes).
            raise
        except Exception as e:
            self.log.error("RwcalOpenstackPlugin: OpenstackDriver init failed. Exception: %s" %(str(e)))
            raise

    @property
    def driver(self):
        """The underlying OpenstackDriver for this account."""
        return self._driver
+
class RwcalOpenstackPlugin(GObject.Object, RwCal.Cloud):
"""This class implements the CAL VALA methods for openstack."""
self._driver_class = openstack_drv.OpenstackDriver
self.log = logging.getLogger('rwcal.openstack.%s' % RwcalOpenstackPlugin.instance_num)
self.log.setLevel(logging.DEBUG)
-
self._rwlog_handler = None
+ self._account_drivers = dict()
RwcalOpenstackPlugin.instance_num += 1
- @contextlib.contextmanager
def _use_driver(self, account):
if self._rwlog_handler is None:
raise UninitializedPluginError("Must call init() in CAL plugin before use.")
- with rwlogger.rwlog_root_handler(self._rwlog_handler):
- try:
- drv = self._driver_class(username = account.openstack.key,
- password = account.openstack.secret,
- auth_url = account.openstack.auth_url,
- tenant_name = account.openstack.tenant,
- mgmt_network = account.openstack.mgmt_network,
- cert_validate = account.openstack.cert_validate,
- user_domain_name = account.openstack.user_domain,
- project_domain_name = account.openstack.project_domain,
- region = account.openstack.region)
- except (KeystoneExceptions.Unauthorized, KeystoneExceptions.AuthorizationFailure,
- NeutronException.NotFound) as e:
- raise
- except Exception as e:
- self.log.error("RwcalOpenstackPlugin: OpenstackDriver init failed. Exception: %s" %(str(e)))
- raise
-
- yield drv
-
+ if account.name not in self._account_drivers:
+ self.log.debug("Creating OpenstackDriver")
+ kwargs = dict(username = account.openstack.key,
+ password = account.openstack.secret,
+ auth_url = account.openstack.auth_url,
+ project = account.openstack.tenant,
+ mgmt_network = account.openstack.mgmt_network,
+ cert_validate = account.openstack.cert_validate,
+ user_domain = account.openstack.user_domain,
+ project_domain = account.openstack.project_domain,
+ region = account.openstack.region)
+ drv = RwcalAccountDriver(self.log, **kwargs)
+ self._account_drivers[account.name] = drv
+ return drv.driver
+ else:
+ return self._account_drivers[account.name].driver
+
@rwstatus
def do_init(self, rwlog_ctx):
- self._rwlog_handler = rwlogger.RwLogger(
- category="rw-cal-log",
- subcategory="openstack",
- log_hdl=rwlog_ctx,
- )
+ self._rwlog_handler = rwlogger.RwLogger(category="rw-cal-log",
+ subcategory="openstack",
+ log_hdl=rwlog_ctx,)
self.log.addHandler(self._rwlog_handler)
self.log.propagate = False
Validation Code and Details String
"""
status = RwcalYang.CloudConnectionStatus()
+ drv = self._use_driver(account)
try:
- with self._use_driver(account) as drv:
- drv.validate_account_creds()
-
+ drv.validate_account_creds()
except KeystoneExceptions.Unauthorized as e:
- self.log.error("Invalid credentials given for VIM account %s" %account.name)
+ self.log.error("Invalid credentials given for VIM account %s", account.name)
status.status = "failure"
status.details = "Invalid Credentials: %s" % str(e)
except KeystoneExceptions.AuthorizationFailure as e:
- self.log.error("Bad authentication URL given for VIM account %s. Given auth url: %s" % (
- account.name, account.openstack.auth_url))
+ self.log.error("Bad authentication URL given for VIM account %s. Given auth url: %s",
+ account.name, account.openstack.auth_url)
status.status = "failure"
status.details = "Invalid auth url: %s" % str(e)
except NeutronException.NotFound as e:
- self.log.error("Given management network %s could not be found for VIM account %s" % (
- account.openstack.mgmt_network, account.name))
+ self.log.error("Given management network %s could not be found for VIM account %s",
+ account.openstack.mgmt_network,
+ account.name)
status.status = "failure"
status.details = "mgmt network does not exist: %s" % str(e)
Returns:
The image id
"""
+ drv = self._use_driver(account)
+ fd = drv.utils.image.create_image_handle(image)
+ kwargs = drv.utils.image.make_image_args(image)
try:
- # If the use passed in a file descriptor, use that to
- # upload the image.
- if image.has_field("fileno"):
- new_fileno = os.dup(image.fileno)
- hdl = os.fdopen(new_fileno, 'rb')
- else:
- hdl = open(image.location, "rb")
+ # Create Image
+ image_id = drv.glance_image_create(**kwargs)
+ drv.glance_image_upload(image_id, fd)
except Exception as e:
- self.log.error("Could not open file for upload. Exception received: %s", str(e))
+ self.log.exception("Exception %s occured during image create", str(e))
raise
-
- with hdl as fd:
- kwargs = {}
- kwargs['name'] = image.name
-
- if image.disk_format:
- kwargs['disk_format'] = image.disk_format
- if image.container_format:
- kwargs['container_format'] = image.container_format
-
- with self._use_driver(account) as drv:
- # Create Image
- image_id = drv.glance_image_create(**kwargs)
- # Upload the Image
- drv.glance_image_upload(image_id, fd)
-
- if image.checksum:
- stored_image = drv.glance_image_get(image_id)
- if stored_image.checksum != image.checksum:
- drv.glance_image_delete(image_id=image_id)
- raise ImageUploadError(
- "image checksum did not match (actual: %s, expected: %s). Deleting." %
- (stored_image.checksum, image.checksum)
- )
+ finally:
+ fd.close()
+
+ # Update image properties, if they are provided
+ try:
+ if image.has_field("properties") and image.properties is not None:
+ for key in image.properties:
+ drv.glance_image_update(image_id, **{key.name: key.property_value})
+ except Exception as e:
+ self.log.exception("Exception %s occured during image update", str(e))
+ raise
+
+ if image.checksum:
+ try:
+ stored_image = drv.glance_image_get(image_id)
+ if stored_image.checksum != image.checksum:
+ drv.glance_image_delete(image_id=image_id)
+ raise ImageUploadError("image checksum did not match (actual: %s, expected: %s). Deleting." %
+ (stored_image.checksum, image.checksum))
+ except Exception as e:
+ self.log.exception("Exception %s occured during image checksum verification", str(e))
+ raise
return image_id
account - a cloud account
image_id - id of the image to delete
"""
- with self._use_driver(account) as drv:
+ drv = self._use_driver(account)
+ try:
drv.glance_image_delete(image_id=image_id)
+ except Exception as e:
+ self.log.exception("Exception %s occured during image deletion", str(e))
+ raise
- @staticmethod
- def _fill_image_info(img_info):
- """Create a GI object from image info dictionary
-
- Converts image information dictionary object returned by openstack
- driver into Protobuf Gi Object
-
- Arguments:
- account - a cloud account
- img_info - image information dictionary object from openstack
-
- Returns:
- The ImageInfoItem
- """
- img = RwcalYang.ImageInfoItem()
- img.name = img_info['name']
- img.id = img_info['id']
- img.checksum = img_info['checksum']
- img.disk_format = img_info['disk_format']
- img.container_format = img_info['container_format']
- if img_info['status'] == 'active':
- img.state = 'active'
- else:
- img.state = 'inactive'
- return img
-
@rwstatus(ret_on_failure=[[]])
def do_get_image_list(self, account):
"""Return a list of the names of all available images.
The the list of images in VimResources object
"""
response = RwcalYang.VimResources()
- with self._use_driver(account) as drv:
+ drv = self._use_driver(account)
+ try:
images = drv.glance_image_list()
- for img in images:
- response.imageinfo_list.append(RwcalOpenstackPlugin._fill_image_info(img))
+ for img in images:
+ response.imageinfo_list.append(drv.utils.image.parse_cloud_image_info(img))
+ except Exception as e:
+ self.log.exception("Exception %s occured during get-image-list", str(e))
+ raise
return response
@rwstatus(ret_on_failure=[None])
Returns:
ImageInfoItem object containing image information.
"""
- with self._use_driver(account) as drv:
- image = drv.glance_image_get(image_id)
- return RwcalOpenstackPlugin._fill_image_info(image)
+ drv = self._use_driver(account)
+ try:
+ image_info = drv.glance_image_get(image_id)
+ image = drv.utils.image.parse_cloud_image_info(image_info)
+ except Exception as e:
+ self.log.exception("Exception %s occured during get-image", str(e))
+ raise
+ return image
+
# This is being deprecated. Please do not use for new SW development
@rwstatus(ret_on_failure=[""])
Returns:
The image id
"""
+ from warnings import warn
+ warn("This function is deprecated")
kwargs = {}
kwargs['name'] = vminfo.vm_name
kwargs['flavor_id'] = vminfo.flavor_id
if vminfo.has_field('image_id'):
kwargs['image_id'] = vminfo.image_id
- with self._use_driver(account) as drv:
- ### If floating_ip is required and we don't have one, better fail before any further allocation
- pool_name = None
- floating_ip = False
- if vminfo.has_field('allocate_public_address') and vminfo.allocate_public_address:
- if account.openstack.has_field('floating_ip_pool'):
- pool_name = account.openstack.floating_ip_pool
- floating_ip = True
+ ### If floating_ip is required and we don't have one, better fail before any further allocation
+ pool_name = None
+ floating_ip = False
+ if vminfo.has_field('allocate_public_address') and vminfo.allocate_public_address:
+ if account.openstack.has_field('floating_ip_pool'):
+ pool_name = account.openstack.floating_ip_pool
+ floating_ip = True
if vminfo.has_field('cloud_init') and vminfo.cloud_init.has_field('userdata'):
kwargs['userdata'] = vminfo.cloud_init.userdata
else:
kwargs['scheduler_hints'] = None
- with self._use_driver(account) as drv:
- vm_id = drv.nova_server_create(**kwargs)
- if floating_ip:
- self.prepare_vdu_on_boot(account, vm_id, floating_ip)
+ drv = self._use_driver(account)
+ vm_id = drv.nova_server_create(**kwargs)
+ if floating_ip:
+ self.prepare_vdu_on_boot(account, vm_id, floating_ip)
return vm_id
account - a cloud account
vm_id - an id of the VM
"""
- with self._use_driver(account) as drv:
- drv.nova_server_start(vm_id)
+ drv = self._use_driver(account)
+ drv.nova_server_start(vm_id)
@rwstatus
def do_stop_vm(self, account, vm_id):
account - a cloud account
vm_id - an id of the VM
"""
- with self._use_driver(account) as drv:
- drv.nova_server_stop(vm_id)
+ drv = self._use_driver(account)
+ drv.nova_server_stop(vm_id)
@rwstatus
def do_delete_vm(self, account, vm_id):
account - a cloud account
vm_id - an id of the VM
"""
- with self._use_driver(account) as drv:
- drv.nova_server_delete(vm_id)
+ drv = self._use_driver(account)
+ drv.nova_server_delete(vm_id)
@rwstatus
def do_reboot_vm(self, account, vm_id):
account - a cloud account
vm_id - an id of the VM
"""
- with self._use_driver(account) as drv:
- drv.nova_server_reboot(vm_id)
+ drv = self._use_driver(account)
+ drv.nova_server_reboot(vm_id)
@staticmethod
def _fill_vm_info(vm_info, mgmt_network):
if network_info:
if network_name == mgmt_network:
vm.public_ip = next((item['addr']
- for item in network_info
+ for item in network_info
if item['OS-EXT-IPS:type'] == 'floating'),
network_info[0]['addr'])
vm.management_ip = network_info[0]['addr']
List containing VM information
"""
response = RwcalYang.VimResources()
- with self._use_driver(account) as drv:
- vms = drv.nova_server_list()
+ drv = self._use_driver(account)
+ vms = drv.nova_server_list()
for vm in vms:
response.vminfo_list.append(RwcalOpenstackPlugin._fill_vm_info(vm, account.openstack.mgmt_network))
return response
Returns:
VM information
"""
- with self._use_driver(account) as drv:
- vm = drv.nova_server_get(id)
+ drv = self._use_driver(account)
+ vm = drv.nova_server_get(id)
return RwcalOpenstackPlugin._fill_vm_info(vm, account.openstack.mgmt_network)
- @staticmethod
- def _get_guest_epa_specs(guest_epa):
- """
- Returns EPA Specs dictionary for guest_epa attributes
- """
- epa_specs = {}
- if guest_epa.has_field('mempage_size'):
- mempage_size = espec_utils.guest.mano_to_extra_spec_mempage_size(guest_epa.mempage_size)
- if mempage_size is not None:
- epa_specs['hw:mem_page_size'] = mempage_size
-
- if guest_epa.has_field('cpu_pinning_policy'):
- cpu_pinning_policy = espec_utils.guest.mano_to_extra_spec_cpu_pinning_policy(guest_epa.cpu_pinning_policy)
- if cpu_pinning_policy is not None:
- epa_specs['hw:cpu_policy'] = cpu_pinning_policy
-
- if guest_epa.has_field('cpu_thread_pinning_policy'):
- cpu_thread_pinning_policy = espec_utils.guest.mano_to_extra_spec_cpu_thread_pinning_policy(guest_epa.cpu_thread_pinning_policy)
- if cpu_thread_pinning_policy is None:
- epa_specs['hw:cpu_threads_policy'] = cpu_thread_pinning_policy
-
- if guest_epa.has_field('trusted_execution'):
- trusted_execution = espec_utils.guest.mano_to_extra_spec_trusted_execution(guest_epa.trusted_execution)
- if trusted_execution is not None:
- epa_specs['trust:trusted_host'] = trusted_execution
-
- if guest_epa.has_field('numa_node_policy'):
- if guest_epa.numa_node_policy.has_field('node_cnt'):
- numa_node_count = espec_utils.guest.mano_to_extra_spec_numa_node_count(guest_epa.numa_node_policy.node_cnt)
- if numa_node_count is not None:
- epa_specs['hw:numa_nodes'] = numa_node_count
-
- if guest_epa.numa_node_policy.has_field('mem_policy'):
- numa_memory_policy = espec_utils.guest.mano_to_extra_spec_numa_memory_policy(guest_epa.numa_node_policy.mem_policy)
- if numa_memory_policy is not None:
- epa_specs['hw:numa_mempolicy'] = numa_memory_policy
-
- if guest_epa.numa_node_policy.has_field('node'):
- for node in guest_epa.numa_node_policy.node:
- if node.has_field('vcpu') and node.vcpu:
- epa_specs['hw:numa_cpus.'+str(node.id)] = ','.join([str(j.id) for j in node.vcpu])
- if node.memory_mb:
- epa_specs['hw:numa_mem.'+str(node.id)] = str(node.memory_mb)
-
- if guest_epa.has_field('pcie_device'):
- pci_devices = []
- for device in guest_epa.pcie_device:
- pci_devices.append(device.device_id +':'+str(device.count))
- epa_specs['pci_passthrough:alias'] = ','.join(pci_devices)
-
- return epa_specs
-
- @staticmethod
- def _get_host_epa_specs(host_epa):
- """
- Returns EPA Specs dictionary for host_epa attributes
- """
-
- epa_specs = {}
-
- if host_epa.has_field('cpu_model'):
- cpu_model = espec_utils.host.mano_to_extra_spec_cpu_model(host_epa.cpu_model)
- if cpu_model is not None:
- epa_specs['capabilities:cpu_info:model'] = cpu_model
-
- if host_epa.has_field('cpu_arch'):
- cpu_arch = espec_utils.host.mano_to_extra_spec_cpu_arch(host_epa.cpu_arch)
- if cpu_arch is not None:
- epa_specs['capabilities:cpu_info:arch'] = cpu_arch
-
- if host_epa.has_field('cpu_vendor'):
- cpu_vendor = espec_utils.host.mano_to_extra_spec_cpu_vendor(host_epa.cpu_vendor)
- if cpu_vendor is not None:
- epa_specs['capabilities:cpu_info:vendor'] = cpu_vendor
-
- if host_epa.has_field('cpu_socket_count'):
- cpu_socket_count = espec_utils.host.mano_to_extra_spec_cpu_socket_count(host_epa.cpu_socket_count)
- if cpu_socket_count is not None:
- epa_specs['capabilities:cpu_info:topology:sockets'] = cpu_socket_count
-
- if host_epa.has_field('cpu_core_count'):
- cpu_core_count = espec_utils.host.mano_to_extra_spec_cpu_core_count(host_epa.cpu_core_count)
- if cpu_core_count is not None:
- epa_specs['capabilities:cpu_info:topology:cores'] = cpu_core_count
-
- if host_epa.has_field('cpu_core_thread_count'):
- cpu_core_thread_count = espec_utils.host.mano_to_extra_spec_cpu_core_thread_count(host_epa.cpu_core_thread_count)
- if cpu_core_thread_count is not None:
- epa_specs['capabilities:cpu_info:topology:threads'] = cpu_core_thread_count
-
- if host_epa.has_field('cpu_feature'):
- cpu_features = []
- espec_cpu_features = []
- for feature in host_epa.cpu_feature:
- cpu_features.append(feature.feature)
- espec_cpu_features = espec_utils.host.mano_to_extra_spec_cpu_features(cpu_features)
- if espec_cpu_features is not None:
- epa_specs['capabilities:cpu_info:features'] = espec_cpu_features
- return epa_specs
-
- @staticmethod
- def _get_hypervisor_epa_specs(guest_epa):
- """
- Returns EPA Specs dictionary for hypervisor_epa attributes
- """
- hypervisor_epa = {}
- return hypervisor_epa
-
- @staticmethod
- def _get_vswitch_epa_specs(guest_epa):
- """
- Returns EPA Specs dictionary for vswitch_epa attributes
- """
- vswitch_epa = {}
- return vswitch_epa
-
- @staticmethod
- def _get_host_aggregate_epa_specs(host_aggregate):
- """
- Returns EPA Specs dictionary for host aggregates
- """
- epa_specs = {}
- for aggregate in host_aggregate:
- epa_specs['aggregate_instance_extra_specs:'+aggregate.metadata_key] = aggregate.metadata_value
-
- return epa_specs
-
- @staticmethod
- def _get_epa_specs(flavor):
- """
- Returns epa_specs dictionary based on flavor information
- """
- epa_specs = {}
- if flavor.has_field('guest_epa'):
- guest_epa = RwcalOpenstackPlugin._get_guest_epa_specs(flavor.guest_epa)
- epa_specs.update(guest_epa)
- if flavor.has_field('host_epa'):
- host_epa = RwcalOpenstackPlugin._get_host_epa_specs(flavor.host_epa)
- epa_specs.update(host_epa)
- if flavor.has_field('hypervisor_epa'):
- hypervisor_epa = RwcalOpenstackPlugin._get_hypervisor_epa_specs(flavor.hypervisor_epa)
- epa_specs.update(hypervisor_epa)
- if flavor.has_field('vswitch_epa'):
- vswitch_epa = RwcalOpenstackPlugin._get_vswitch_epa_specs(flavor.vswitch_epa)
- epa_specs.update(vswitch_epa)
- if flavor.has_field('host_aggregate'):
- host_aggregate = RwcalOpenstackPlugin._get_host_aggregate_epa_specs(flavor.host_aggregate)
- epa_specs.update(host_aggregate)
- return epa_specs
@rwstatus(ret_on_failure=[""])
def do_create_flavor(self, account, flavor):
Returns:
flavor id
"""
- epa_specs = RwcalOpenstackPlugin._get_epa_specs(flavor)
- with self._use_driver(account) as drv:
- return drv.nova_flavor_create(name = flavor.name,
- ram = flavor.vm_flavor.memory_mb,
- vcpus = flavor.vm_flavor.vcpu_count,
- disk = flavor.vm_flavor.storage_gb,
- epa_specs = epa_specs)
-
+ drv = self._use_driver(account)
+ return drv.nova_flavor_create(name = flavor.name,
+ ram = flavor.vm_flavor.memory_mb,
+ vcpus = flavor.vm_flavor.vcpu_count,
+ disk = flavor.vm_flavor.storage_gb,
+ epa_specs = drv.utils.flavor.get_extra_specs(flavor))
+
@rwstatus
def do_delete_flavor(self, account, flavor_id):
account - a cloud account
flavor_id - id flavor of the VM
"""
- with self._use_driver(account) as drv:
- drv.nova_flavor_delete(flavor_id)
-
- @staticmethod
- def _fill_epa_attributes(flavor, flavor_info):
- """Helper function to populate the EPA attributes
-
- Arguments:
- flavor : Object with EPA attributes
- flavor_info: A dictionary of flavor_info received from openstack
- Returns:
- None
- """
- getattr(flavor, 'vm_flavor').vcpu_count = flavor_info['vcpus']
- getattr(flavor, 'vm_flavor').memory_mb = flavor_info['ram']
- getattr(flavor, 'vm_flavor').storage_gb = flavor_info['disk']
-
- ### If extra_specs in flavor_info
- if not 'extra_specs' in flavor_info:
- return
-
- for attr in flavor_info['extra_specs']:
- if attr == 'hw:cpu_policy':
- cpu_pinning_policy = espec_utils.guest.extra_spec_to_mano_cpu_pinning_policy(flavor_info['extra_specs']['hw:cpu_policy'])
- if cpu_pinning_policy is not None:
- getattr(flavor, 'guest_epa').cpu_pinning_policy = cpu_pinning_policy
-
- elif attr == 'hw:cpu_threads_policy':
- cpu_thread_pinning_policy = espec_utils.guest.extra_spec_to_mano_cpu_thread_pinning_policy(flavor_info['extra_specs']['hw:cpu_threads_policy'])
- if cpu_thread_pinning_policy is not None:
- getattr(flavor, 'guest_epa').cpu_thread_pinning_policy = cpu_thread_pinning_policy
-
- elif attr == 'hw:mem_page_size':
- mempage_size = espec_utils.guest.extra_spec_to_mano_mempage_size(flavor_info['extra_specs']['hw:mem_page_size'])
- if mempage_size is not None:
- getattr(flavor, 'guest_epa').mempage_size = mempage_size
-
-
- elif attr == 'hw:numa_nodes':
- numa_node_count = espec_utils.guest.extra_specs_to_mano_numa_node_count(flavor_info['extra_specs']['hw:numa_nodes'])
- if numa_node_count is not None:
- getattr(flavor,'guest_epa').numa_node_policy.node_cnt = numa_node_count
-
- elif attr.startswith('hw:numa_cpus.'):
- node_id = attr.split('.')[1]
- nodes = [ n for n in flavor.guest_epa.numa_node_policy.node if n.id == int(node_id) ]
- if nodes:
- numa_node = nodes[0]
- else:
- numa_node = getattr(flavor,'guest_epa').numa_node_policy.node.add()
- numa_node.id = int(node_id)
-
- for x in flavor_info['extra_specs'][attr].split(','):
- numa_node_vcpu = numa_node.vcpu.add()
- numa_node_vcpu.id = int(x)
-
- elif attr.startswith('hw:numa_mem.'):
- node_id = attr.split('.')[1]
- nodes = [ n for n in flavor.guest_epa.numa_node_policy.node if n.id == int(node_id) ]
- if nodes:
- numa_node = nodes[0]
- else:
- numa_node = getattr(flavor,'guest_epa').numa_node_policy.node.add()
- numa_node.id = int(node_id)
-
- numa_node.memory_mb = int(flavor_info['extra_specs'][attr])
-
- elif attr == 'hw:numa_mempolicy':
- numa_memory_policy = espec_utils.guest.extra_to_mano_spec_numa_memory_policy(flavor_info['extra_specs']['hw:numa_mempolicy'])
- if numa_memory_policy is not None:
- getattr(flavor,'guest_epa').numa_node_policy.mem_policy = numa_memory_policy
-
- elif attr == 'trust:trusted_host':
- trusted_execution = espec_utils.guest.extra_spec_to_mano_trusted_execution(flavor_info['extra_specs']['trust:trusted_host'])
- if trusted_execution is not None:
- getattr(flavor,'guest_epa').trusted_execution = trusted_execution
-
- elif attr == 'pci_passthrough:alias':
- device_types = flavor_info['extra_specs']['pci_passthrough:alias']
- for device in device_types.split(','):
- dev = getattr(flavor,'guest_epa').pcie_device.add()
- dev.device_id = device.split(':')[0]
- dev.count = int(device.split(':')[1])
-
- elif attr == 'capabilities:cpu_info:model':
- cpu_model = espec_utils.host.extra_specs_to_mano_cpu_model(flavor_info['extra_specs']['capabilities:cpu_info:model'])
- if cpu_model is not None:
- getattr(flavor, 'host_epa').cpu_model = cpu_model
-
- elif attr == 'capabilities:cpu_info:arch':
- cpu_arch = espec_utils.host.extra_specs_to_mano_cpu_arch(flavor_info['extra_specs']['capabilities:cpu_info:arch'])
- if cpu_arch is not None:
- getattr(flavor, 'host_epa').cpu_arch = cpu_arch
-
- elif attr == 'capabilities:cpu_info:vendor':
- cpu_vendor = espec_utils.host.extra_spec_to_mano_cpu_vendor(flavor_info['extra_specs']['capabilities:cpu_info:vendor'])
- if cpu_vendor is not None:
- getattr(flavor, 'host_epa').cpu_vendor = cpu_vendor
-
- elif attr == 'capabilities:cpu_info:topology:sockets':
- cpu_sockets = espec_utils.host.extra_spec_to_mano_cpu_socket_count(flavor_info['extra_specs']['capabilities:cpu_info:topology:sockets'])
- if cpu_sockets is not None:
- getattr(flavor, 'host_epa').cpu_socket_count = cpu_sockets
-
- elif attr == 'capabilities:cpu_info:topology:cores':
- cpu_cores = espec_utils.host.extra_spec_to_mano_cpu_core_count(flavor_info['extra_specs']['capabilities:cpu_info:topology:cores'])
- if cpu_cores is not None:
- getattr(flavor, 'host_epa').cpu_core_count = cpu_cores
-
- elif attr == 'capabilities:cpu_info:topology:threads':
- cpu_threads = espec_utils.host.extra_spec_to_mano_cpu_core_thread_count(flavor_info['extra_specs']['capabilities:cpu_info:topology:threads'])
- if cpu_threads is not None:
- getattr(flavor, 'host_epa').cpu_core_thread_count = cpu_threads
-
- elif attr == 'capabilities:cpu_info:features':
- cpu_features = espec_utils.host.extra_spec_to_mano_cpu_features(flavor_info['extra_specs']['capabilities:cpu_info:features'])
- if cpu_features is not None:
- for feature in cpu_features:
- getattr(flavor, 'host_epa').cpu_feature.append(feature)
- elif attr.startswith('aggregate_instance_extra_specs:'):
- aggregate = getattr(flavor, 'host_aggregate').add()
- aggregate.metadata_key = ":".join(attr.split(':')[1::])
- aggregate.metadata_value = flavor_info['extra_specs'][attr]
-
- @staticmethod
- def _fill_flavor_info(flavor_info):
- """Create a GI object from flavor info dictionary
-
- Converts Flavor information dictionary object returned by openstack
- driver into Protobuf Gi Object
-
- Arguments:
- flavor_info: Flavor information from openstack
-
- Returns:
- Object of class FlavorInfoItem
- """
- flavor = RwcalYang.FlavorInfoItem()
- flavor.name = flavor_info['name']
- flavor.id = flavor_info['id']
- RwcalOpenstackPlugin._fill_epa_attributes(flavor, flavor_info)
- return flavor
+ drv = self._use_driver(account)
+ drv.nova_flavor_delete(flavor_id)
@rwstatus(ret_on_failure=[[]])
List of flavors
"""
response = RwcalYang.VimResources()
- with self._use_driver(account) as drv:
- flavors = drv.nova_flavor_list()
+ drv = self._use_driver(account)
+ flavors = drv.nova_flavor_list()
for flv in flavors:
- response.flavorinfo_list.append(RwcalOpenstackPlugin._fill_flavor_info(flv))
+ response.flavorinfo_list.append(drv.utils.flavor.parse_flavor_info(flv))
return response
@rwstatus(ret_on_failure=[None])
Returns:
Flavor info item
"""
- with self._use_driver(account) as drv:
- flavor = drv.nova_flavor_get(id)
- return RwcalOpenstackPlugin._fill_flavor_info(flavor)
+ drv = self._use_driver(account)
+ flavor = drv.nova_flavor_get(id)
+ return drv.utils.flavor.parse_flavor_info(flavor)
def _fill_network_info(self, network_info, account):
if 'subnets' in network_info and network_info['subnets']:
subnet_id = network_info['subnets'][0]
- with self._use_driver(account) as drv:
- subnet = drv.neutron_subnet_get(subnet_id)
+ drv = self._use_driver(account)
+ subnet = drv.neutron_subnet_get(subnet_id)
network.subnet = subnet['cidr']
return network
List of networks
"""
response = RwcalYang.VimResources()
- with self._use_driver(account) as drv:
- networks = drv.neutron_network_list()
+ drv = self._use_driver(account)
+ networks = drv.neutron_network_list()
for network in networks:
response.networkinfo_list.append(self._fill_network_info(network, account))
return response
Returns:
Network info item
"""
- with self._use_driver(account) as drv:
- network = drv.neutron_network_get(id)
+ drv = self._use_driver(account)
+ network = drv.neutron_network_get(id)
return self._fill_network_info(network, account)
@rwstatus(ret_on_failure=[""])
Returns:
Network id
"""
+ from warnings import warn
+ warn("This function is deprecated")
+
kwargs = {}
kwargs['name'] = network.network_name
kwargs['admin_state_up'] = True
if network.provider_network.has_field('segmentation_id'):
kwargs['segmentation_id'] = network.provider_network.segmentation_id
- with self._use_driver(account) as drv:
- network_id = drv.neutron_network_create(**kwargs)
- drv.neutron_subnet_create(network_id = network_id,
- cidr = network.subnet)
+ drv = self._use_driver(account)
+ network_id = drv.neutron_network_create(**kwargs)
+ drv.neutron_subnet_create(network_id = network_id,
+ cidr = network.subnet)
return network_id
@rwstatus
account - a cloud account
network_id - an id for the network
"""
- with self._use_driver(account) as drv:
- drv.neutron_network_delete(network_id)
+ drv = self._use_driver(account)
+ drv.neutron_network_delete(network_id)
@staticmethod
def _fill_port_info(port_info):
Returns:
Port info item
"""
- with self._use_driver(account) as drv:
- port = drv.neutron_port_get(port_id)
-
+ drv = self._use_driver(account)
+ port = drv.neutron_port_get(port_id)
return RwcalOpenstackPlugin._fill_port_info(port)
@rwstatus(ret_on_failure=[[]])
Port info list
"""
response = RwcalYang.VimResources()
- with self._use_driver(account) as drv:
- ports = drv.neutron_port_list(*{})
+ drv = self._use_driver(account)
+ ports = drv.neutron_port_list(*{})
for port in ports:
response.portinfo_list.append(RwcalOpenstackPlugin._fill_port_info(port))
return response
Returns:
Port id
"""
+ from warnings import warn
+ warn("This function is deprecated")
+
kwargs = {}
kwargs['name'] = port.port_name
kwargs['network_id'] = port.network_id
else:
kwargs['port_type'] = "normal"
- with self._use_driver(account) as drv:
- return drv.neutron_port_create(**kwargs)
+ drv = self._use_driver(account)
+ return drv.neutron_port_create(**kwargs)
@rwstatus
def do_delete_port(self, account, port_id):
account - a cloud account
port_id - an id for port
"""
- with self._use_driver(account) as drv:
- drv.neutron_port_delete(port_id)
+ drv = self._use_driver(account)
+ drv.neutron_port_delete(port_id)
@rwstatus(ret_on_failure=[""])
def do_add_host(self, account, host):
"""
raise NotImplementedError
- @staticmethod
- def _fill_connection_point_info(c_point, port_info):
- """Create a GI object for RwcalYang.VDUInfoParams_ConnectionPoints()
-
- Converts Port information dictionary object returned by openstack
- driver into Protobuf Gi Object
-
- Arguments:
- port_info - Port information from openstack
- Returns:
- Protobuf Gi object for RwcalYang.VDUInfoParams_ConnectionPoints
- """
- c_point.name = port_info['name']
- c_point.connection_point_id = port_info['id']
- if ('fixed_ips' in port_info) and (len(port_info['fixed_ips']) >= 1):
- if 'ip_address' in port_info['fixed_ips'][0]:
- c_point.ip_address = port_info['fixed_ips'][0]['ip_address']
- if 'mac_address' in port_info :
- c_point.mac_addr = port_info['mac_address']
- if port_info['status'] == 'ACTIVE':
- c_point.state = 'active'
- else:
- c_point.state = 'inactive'
- if 'network_id' in port_info:
- c_point.virtual_link_id = port_info['network_id']
- if ('device_id' in port_info) and (port_info['device_id']):
- c_point.vdu_id = port_info['device_id']
-
- @staticmethod
- def _fill_virtual_link_info(network_info, port_list, subnet):
- """Create a GI object for VirtualLinkInfoParams
-
- Converts Network and Port information dictionary object
- returned by openstack driver into Protobuf Gi Object
-
- Arguments:
- network_info - Network information from openstack
- port_list - A list of port information from openstack
- subnet: Subnet information from openstack
- Returns:
- Protobuf Gi object for VirtualLinkInfoParams
- """
- link = RwcalYang.VirtualLinkInfoParams()
- link.name = network_info['name']
- if network_info['status'] == 'ACTIVE':
- link.state = 'active'
- else:
- link.state = 'inactive'
- link.virtual_link_id = network_info['id']
- for port in port_list:
- if port['device_owner'] == 'compute:None':
- c_point = link.connection_points.add()
- RwcalOpenstackPlugin._fill_connection_point_info(c_point, port)
-
- if subnet != None:
- link.subnet = subnet['cidr']
-
- if ('provider:network_type' in network_info) and (network_info['provider:network_type'] != None):
- link.provider_network.overlay_type = network_info['provider:network_type'].upper()
- if ('provider:segmentation_id' in network_info) and (network_info['provider:segmentation_id']):
- link.provider_network.segmentation_id = network_info['provider:segmentation_id']
- if ('provider:physical_network' in network_info) and (network_info['provider:physical_network']):
- link.provider_network.physical_network = network_info['provider:physical_network'].upper()
-
- return link
-
- @staticmethod
- def _fill_vdu_info(drv, vm_info, flavor_info, mgmt_network, port_list, server_group, volume_list = None):
- """Create a GI object for VDUInfoParams
-
- Converts VM information dictionary object returned by openstack
- driver into Protobuf Gi Object
-
- Arguments:
- vm_info - VM information from openstack
- flavor_info - VM Flavor information from openstack
- mgmt_network - Management network
- port_list - A list of port information from openstack
- server_group - A list (with one element or empty list) of server group to which this VM belongs
- Returns:
- Protobuf Gi object for VDUInfoParams
- """
- vdu = RwcalYang.VDUInfoParams()
- vdu.name = vm_info['name']
- vdu.vdu_id = vm_info['id']
- for network_name, network_info in vm_info['addresses'].items():
- if network_info and network_name == mgmt_network:
- for interface in network_info:
- if 'OS-EXT-IPS:type' in interface:
- if interface['OS-EXT-IPS:type'] == 'fixed':
- vdu.management_ip = interface['addr']
- elif interface['OS-EXT-IPS:type'] == 'floating':
- vdu.public_ip = interface['addr']
-
- # Look for any metadata
-# for key, value in vm_info['metadata'].items():
-# if key == 'node_id':
-# vdu.node_id = value
-# else:
-# custommetadata = vdu.supplemental_boot_data.custom_meta_data.add()
-# custommetadata.name = key
-# custommetadata.value = str(value)
-
- # Look for config_drive
- if ('config_drive' in vm_info):
- vdu.supplemental_boot_data.boot_data_drive = vm_info['config_drive']
- if ('image' in vm_info) and ('id' in vm_info['image']):
- vdu.image_id = vm_info['image']['id']
- if ('flavor' in vm_info) and ('id' in vm_info['flavor']):
- vdu.flavor_id = vm_info['flavor']['id']
-
- if vm_info['status'] == 'ACTIVE':
- vdu.state = 'active'
- elif vm_info['status'] == 'ERROR':
- vdu.state = 'failed'
- else:
- vdu.state = 'inactive'
-
- if 'availability_zone' in vm_info:
- vdu.availability_zone = vm_info['availability_zone']
-
- if server_group:
- vdu.server_group.name = server_group[0]
-
- vdu.cloud_type = 'openstack'
- # Fill the port information
- for port in port_list:
- c_point = vdu.connection_points.add()
- RwcalOpenstackPlugin._fill_connection_point_info(c_point, port)
-
- if flavor_info is not None:
- RwcalOpenstackPlugin._fill_epa_attributes(vdu, flavor_info)
-
- # Fill the volume information
- if volume_list is not None:
- for os_volume in volume_list:
- volr = vdu.volumes.add()
- try:
- " Device name is of format /dev/vda"
- vol_name = (os_volume['device']).split('/')[2]
- except:
- continue
- volr.name = vol_name
- volr.volume_id = os_volume['volumeId']
- try:
- vol_details = drv.cinder_volume_get(volr.volume_id)
- except:
- continue
- if vol_details is None:
- continue
- for key, value in vol_details.metadata.items():
- volmd = volr.custom_meta_data.add()
- volmd.name = key
- volmd.value = value
-
- return vdu
@rwcalstatus(ret_on_failure=[""])
def do_create_virtual_link(self, account, link_params):
link_params - information that defines the type of VDU to create
Returns:
- The vdu_id
+ A kwargs dictionary for glance operation
"""
- kwargs = {}
- kwargs['name'] = link_params.name
- kwargs['admin_state_up'] = True
- kwargs['external_router'] = False
- kwargs['shared'] = False
-
- if link_params.has_field('provider_network'):
- if link_params.provider_network.has_field('physical_network'):
- kwargs['physical_network'] = link_params.provider_network.physical_network
- if link_params.provider_network.has_field('overlay_type'):
- kwargs['network_type'] = link_params.provider_network.overlay_type.lower()
- if link_params.provider_network.has_field('segmentation_id'):
- kwargs['segmentation_id'] = link_params.provider_network.segmentation_id
-
-
- with self._use_driver(account) as drv:
- try:
- network_id = drv.neutron_network_create(**kwargs)
- except Exception as e:
- self.log.error("Encountered exceptions during network creation. Exception: %s", str(e))
- raise
-
- kwargs = {'network_id' : network_id,
- 'dhcp_params': {'enable_dhcp': True},
- 'gateway_ip' : None,}
-
- if link_params.ip_profile_params.has_field('ip_version'):
- kwargs['ip_version'] = 6 if link_params.ip_profile_params.ip_version == 'ipv6' else 4
- else:
- kwargs['ip_version'] = 4
-
- if link_params.ip_profile_params.has_field('subnet_address'):
- kwargs['cidr'] = link_params.ip_profile_params.subnet_address
- elif link_params.ip_profile_params.has_field('subnet_prefix_pool'):
- subnet_pool = drv.netruon_subnetpool_by_name(link_params.ip_profile_params.subnet_prefix_pool)
- if subnet_pool is None:
- self.log.error("Could not find subnet pool with name :%s to be used for network: %s",
- link_params.ip_profile_params.subnet_prefix_pool,
- link_params.name)
- raise NeutronException.NotFound("SubnetPool with name %s not found"%(link_params.ip_profile_params.subnet_prefix_pool))
-
- kwargs['subnetpool_id'] = subnet_pool['id']
- elif link_params.has_field('subnet'):
- kwargs['cidr'] = link_params.subnet
- else:
- assert 0, "No IP Prefix or Pool name specified"
-
- if link_params.ip_profile_params.has_field('dhcp_params'):
- if link_params.ip_profile_params.dhcp_params.has_field('enabled'):
- kwargs['dhcp_params']['enable_dhcp'] = link_params.ip_profile_params.dhcp_params.enabled
- if link_params.ip_profile_params.dhcp_params.has_field('start_address'):
- kwargs['dhcp_params']['start_address'] = link_params.ip_profile_params.dhcp_params.start_address
- if link_params.ip_profile_params.dhcp_params.has_field('count'):
- kwargs['dhcp_params']['count'] = link_params.ip_profile_params.dhcp_params.count
-
- if link_params.ip_profile_params.has_field('dns_server'):
- kwargs['dns_server'] = []
- for server in link_params.ip_profile_params.dns_server:
- kwargs['dns_server'].append(server.address)
-
- if link_params.ip_profile_params.has_field('gateway_address'):
- kwargs['gateway_ip'] = link_params.ip_profile_params.gateway_address
-
- drv.neutron_subnet_create(**kwargs)
+
+ drv = self._use_driver(account)
+ try:
+ kwargs = drv.utils.network.make_virtual_link_args(link_params)
+ network_id = drv.neutron_network_create(**kwargs)
+ except Exception as e:
+ self.log.error("Encountered exceptions during network creation. Exception: %s", str(e))
+ raise
+ kwargs = drv.utils.network.make_subnet_args(link_params, network_id)
+ drv.neutron_subnet_create(**kwargs)
return network_id
Returns:
None
"""
- if not link_id:
- self.log.error("Empty link_id during the virtual link deletion")
- raise Exception("Empty link_id during the virtual link deletion")
-
- with self._use_driver(account) as drv:
+ drv = self._use_driver(account)
+ try:
port_list = drv.neutron_port_list(**{'network_id': link_id})
-
- for port in port_list:
- if ((port['device_owner'] == 'compute:None') or (port['device_owner'] == '')):
- self.do_delete_port(account, port['id'], no_rwstatus=True)
- self.do_delete_network(account, link_id, no_rwstatus=True)
+ for port in port_list:
+ if ((port['device_owner'] == 'compute:None') or (port['device_owner'] == '')):
+ self.do_delete_port(account, port['id'], no_rwstatus=True)
+ self.do_delete_network(account, link_id, no_rwstatus=True)
+ except Exception as e:
+ self.log.exception("Exception %s occured during virtual-link deletion", str(e))
+ raise
@rwstatus(ret_on_failure=[None])
def do_get_virtual_link(self, account, link_id):
Returns:
Object of type RwcalYang.VirtualLinkInfoParams
"""
- if not link_id:
- self.log.error("Empty link_id during the virtual link get request")
- raise Exception("Empty link_id during the virtual link get request")
-
- with self._use_driver(account) as drv:
+ drv = self._use_driver(account)
+ try:
network = drv.neutron_network_get(link_id)
if network:
port_list = drv.neutron_port_list(**{'network_id': network['id']})
- if 'subnets' in network:
+ if 'subnets' in network and network['subnets']:
subnet = drv.neutron_subnet_get(network['subnets'][0])
else:
subnet = None
- virtual_link = RwcalOpenstackPlugin._fill_virtual_link_info(network, port_list, subnet)
- else:
- virtual_link = None
- return virtual_link
+ virtual_link = drv.utils.network.parse_cloud_virtual_link_info(network, port_list, subnet)
+ except Exception as e:
+ self.log.exception("Exception %s occured during virtual-link-get", str(e))
+ raise
+ return virtual_link
@rwstatus(ret_on_failure=[None])
def do_get_virtual_link_list(self, account):
A list of objects of type RwcalYang.VirtualLinkInfoParams
"""
vnf_resources = RwcalYang.VNFResources()
- with self._use_driver(account) as drv:
+ drv = self._use_driver(account)
+ try:
networks = drv.neutron_network_list()
for network in networks:
port_list = drv.neutron_port_list(**{'network_id': network['id']})
- if ('subnets' in network) and (network['subnets']):
+ if 'subnets' in network and network['subnets']:
subnet = drv.neutron_subnet_get(network['subnets'][0])
else:
subnet = None
- virtual_link = RwcalOpenstackPlugin._fill_virtual_link_info(network, port_list, subnet)
+ virtual_link = drv.utils.network.parse_cloud_virtual_link_info(network, port_list, subnet)
vnf_resources.virtual_link_info_list.append(virtual_link)
- return vnf_resources
-
- def _create_connection_point(self, account, c_point):
- """
- Create a connection point
- Arguments:
- account - a cloud account
- c_point - connection_points
- """
- kwargs = {}
- kwargs['name'] = c_point.name
- kwargs['network_id'] = c_point.virtual_link_id
- kwargs['admin_state_up'] = True
-
- if c_point.type_yang == 'VIRTIO' or c_point.type_yang == 'E1000':
- kwargs['port_type'] = 'normal'
- elif c_point.type_yang == 'SR_IOV':
- kwargs['port_type'] = 'direct'
- else:
- raise NotImplementedError("Port Type: %s not supported" %(c_point.type_yang))
-
- # By default port gets created with post_security enaled as True
- if 'port_security_enabled' in c_point:
- kwargs['port_security_enabled'] = c_point.port_security_enabled
-
- with self._use_driver(account) as drv:
- if c_point.has_field('security_group'):
- group = drv.neutron_security_group_by_name(c_point.security_group)
- if group is not None:
- kwargs['security_groups'] = [group['id']]
- return drv.neutron_port_create(**kwargs)
-
- def _allocate_floating_ip(self, drv, pool_name):
- """
- Allocate a floating_ip. If unused floating_ip exists then its reused.
- Arguments:
- drv: OpenstackDriver instance
- pool_name: Floating IP pool name
-
- Returns:
- An object of floating IP nova class (novaclient.v2.floating_ips.FloatingIP)
- """
-
- # available_ip = [ ip for ip in drv.nova_floating_ip_list() if ip.instance_id == None ]
-
- # if pool_name is not None:
- # ### Filter further based on IP address
- # available_ip = [ ip for ip in available_ip if ip.pool == pool_name ]
-
- # if not available_ip:
- # floating_ip = drv.nova_floating_ip_create(pool_name)
- # else:
- # floating_ip = available_ip[0]
-
- floating_ip = drv.nova_floating_ip_create(pool_name)
- return floating_ip
-
- def _match_vm_flavor(self, required, available):
- self.log.info("Matching VM Flavor attributes")
- if available.vcpu_count != required.vcpu_count:
- self.log.debug("VCPU requirement mismatch. Required: %d, Available: %d",
- required.vcpu_count,
- available.vcpu_count)
- return False
- if available.memory_mb != required.memory_mb:
- self.log.debug("Memory requirement mismatch. Required: %d MB, Available: %d MB",
- required.memory_mb,
- available.memory_mb)
- return False
- if available.storage_gb != required.storage_gb:
- self.log.debug("Storage requirement mismatch. Required: %d GB, Available: %d GB",
- required.storage_gb,
- available.storage_gb)
- return False
- self.log.debug("VM Flavor match found")
- return True
-
- def _match_guest_epa(self, required, available):
- self.log.info("Matching Guest EPA attributes")
- if required.has_field('pcie_device'):
- self.log.debug("Matching pcie_device")
- if available.has_field('pcie_device') == False:
- self.log.debug("Matching pcie_device failed. Not available in flavor")
- return False
- else:
- for dev in required.pcie_device:
- if not [ d for d in available.pcie_device
- if ((d.device_id == dev.device_id) and (d.count == dev.count)) ]:
- self.log.debug("Matching pcie_device failed. Required: %s, Available: %s", required.pcie_device, available.pcie_device)
- return False
- elif available.has_field('pcie_device'):
- self.log.debug("Rejecting available flavor because pcie_device not required but available")
- return False
-
-
- if required.has_field('mempage_size'):
- self.log.debug("Matching mempage_size")
- if available.has_field('mempage_size') == False:
- self.log.debug("Matching mempage_size failed. Not available in flavor")
- return False
- else:
- if required.mempage_size != available.mempage_size:
- self.log.debug("Matching mempage_size failed. Required: %s, Available: %s", required.mempage_size, available.mempage_size)
- return False
- elif available.has_field('mempage_size'):
- self.log.debug("Rejecting available flavor because mempage_size not required but available")
- return False
-
- if required.has_field('cpu_pinning_policy'):
- self.log.debug("Matching cpu_pinning_policy")
- if required.cpu_pinning_policy != 'ANY':
- if available.has_field('cpu_pinning_policy') == False:
- self.log.debug("Matching cpu_pinning_policy failed. Not available in flavor")
- return False
- else:
- if required.cpu_pinning_policy != available.cpu_pinning_policy:
- self.log.debug("Matching cpu_pinning_policy failed. Required: %s, Available: %s", required.cpu_pinning_policy, available.cpu_pinning_policy)
- return False
- elif available.has_field('cpu_pinning_policy'):
- self.log.debug("Rejecting available flavor because cpu_pinning_policy not required but available")
- return False
-
- if required.has_field('cpu_thread_pinning_policy'):
- self.log.debug("Matching cpu_thread_pinning_policy")
- if available.has_field('cpu_thread_pinning_policy') == False:
- self.log.debug("Matching cpu_thread_pinning_policy failed. Not available in flavor")
- return False
- else:
- if required.cpu_thread_pinning_policy != available.cpu_thread_pinning_policy:
- self.log.debug("Matching cpu_thread_pinning_policy failed. Required: %s, Available: %s", required.cpu_thread_pinning_policy, available.cpu_thread_pinning_policy)
- return False
- elif available.has_field('cpu_thread_pinning_policy'):
- self.log.debug("Rejecting available flavor because cpu_thread_pinning_policy not required but available")
- return False
-
- if required.has_field('trusted_execution'):
- self.log.debug("Matching trusted_execution")
- if required.trusted_execution == True:
- if available.has_field('trusted_execution') == False:
- self.log.debug("Matching trusted_execution failed. Not available in flavor")
- return False
- else:
- if required.trusted_execution != available.trusted_execution:
- self.log.debug("Matching trusted_execution failed. Required: %s, Available: %s", required.trusted_execution, available.trusted_execution)
- return False
- elif available.has_field('trusted_execution'):
- self.log.debug("Rejecting available flavor because trusted_execution not required but available")
- return False
-
- if required.has_field('numa_node_policy'):
- self.log.debug("Matching numa_node_policy")
- if available.has_field('numa_node_policy') == False:
- self.log.debug("Matching numa_node_policy failed. Not available in flavor")
- return False
- else:
- if required.numa_node_policy.has_field('node_cnt'):
- self.log.debug("Matching numa_node_policy node_cnt")
- if available.numa_node_policy.has_field('node_cnt') == False:
- self.log.debug("Matching numa_node_policy node_cnt failed. Not available in flavor")
- return False
- else:
- if required.numa_node_policy.node_cnt != available.numa_node_policy.node_cnt:
- self.log.debug("Matching numa_node_policy node_cnt failed. Required: %s, Available: %s",required.numa_node_policy.node_cnt, available.numa_node_policy.node_cnt)
- return False
- elif available.numa_node_policy.has_field('node_cnt'):
- self.log.debug("Rejecting available flavor because numa node count not required but available")
- return False
-
- if required.numa_node_policy.has_field('mem_policy'):
- self.log.debug("Matching numa_node_policy mem_policy")
- if available.numa_node_policy.has_field('mem_policy') == False:
- self.log.debug("Matching numa_node_policy mem_policy failed. Not available in flavor")
- return False
- else:
- if required.numa_node_policy.mem_policy != available.numa_node_policy.mem_policy:
- self.log.debug("Matching numa_node_policy mem_policy failed. Required: %s, Available: %s", required.numa_node_policy.mem_policy, available.numa_node_policy.mem_policy)
- return False
- elif available.numa_node_policy.has_field('mem_policy'):
- self.log.debug("Rejecting available flavor because num node mem_policy not required but available")
- return False
-
- if required.numa_node_policy.has_field('node'):
- self.log.debug("Matching numa_node_policy nodes configuration")
- if available.numa_node_policy.has_field('node') == False:
- self.log.debug("Matching numa_node_policy nodes configuration failed. Not available in flavor")
- return False
- for required_node in required.numa_node_policy.node:
- self.log.debug("Matching numa_node_policy nodes configuration for node %s", required_node)
- numa_match = False
- for available_node in available.numa_node_policy.node:
- if required_node.id != available_node.id:
- self.log.debug("Matching numa_node_policy nodes configuration failed. Required: %s, Available: %s", required_node, available_node)
- continue
- if required_node.vcpu != available_node.vcpu:
- self.log.debug("Matching numa_node_policy nodes configuration failed. Required: %s, Available: %s", required_node, available_node)
- continue
- if required_node.memory_mb != available_node.memory_mb:
- self.log.debug("Matching numa_node_policy nodes configuration failed. Required: %s, Available: %s", required_node, available_node)
- continue
- numa_match = True
- if numa_match == False:
- return False
- elif available.numa_node_policy.has_field('node'):
- self.log.debug("Rejecting available flavor because numa nodes not required but available")
- return False
- elif available.has_field('numa_node_policy'):
- self.log.debug("Rejecting available flavor because numa_node_policy not required but available")
- return False
- self.log.info("Successful match for Guest EPA attributes")
- return True
-
- def _match_vswitch_epa(self, required, available):
- self.log.debug("VSwitch EPA match found")
- return True
-
- def _match_hypervisor_epa(self, required, available):
- self.log.debug("Hypervisor EPA match found")
- return True
-
- def _match_host_epa(self, required, available):
- self.log.info("Matching Host EPA attributes")
- if required.has_field('cpu_model'):
- self.log.debug("Matching CPU model")
- if available.has_field('cpu_model') == False:
- self.log.debug("Matching CPU model failed. Not available in flavor")
- return False
- else:
- #### Convert all PREFER to REQUIRE since flavor will only have REQUIRE attributes
- if required.cpu_model.replace('PREFER', 'REQUIRE') != available.cpu_model:
- self.log.debug("Matching CPU model failed. Required: %s, Available: %s", required.cpu_model, available.cpu_model)
- return False
- elif available.has_field('cpu_model'):
- self.log.debug("Rejecting available flavor because cpu_model not required but available")
- return False
-
- if required.has_field('cpu_arch'):
- self.log.debug("Matching CPU architecture")
- if available.has_field('cpu_arch') == False:
- self.log.debug("Matching CPU architecture failed. Not available in flavor")
- return False
- else:
- #### Convert all PREFER to REQUIRE since flavor will only have REQUIRE attributes
- if required.cpu_arch.replace('PREFER', 'REQUIRE') != available.cpu_arch:
- self.log.debug("Matching CPU architecture failed. Required: %s, Available: %s", required.cpu_arch, available.cpu_arch)
- return False
- elif available.has_field('cpu_arch'):
- self.log.debug("Rejecting available flavor because cpu_arch not required but available")
- return False
-
- if required.has_field('cpu_vendor'):
- self.log.debug("Matching CPU vendor")
- if available.has_field('cpu_vendor') == False:
- self.log.debug("Matching CPU vendor failed. Not available in flavor")
- return False
- else:
- #### Convert all PREFER to REQUIRE since flavor will only have REQUIRE attributes
- if required.cpu_vendor.replace('PREFER', 'REQUIRE') != available.cpu_vendor:
- self.log.debug("Matching CPU vendor failed. Required: %s, Available: %s", required.cpu_vendor, available.cpu_vendor)
- return False
- elif available.has_field('cpu_vendor'):
- self.log.debug("Rejecting available flavor because cpu_vendor not required but available")
- return False
-
- if required.has_field('cpu_socket_count'):
- self.log.debug("Matching CPU socket count")
- if available.has_field('cpu_socket_count') == False:
- self.log.debug("Matching CPU socket count failed. Not available in flavor")
- return False
- else:
- if required.cpu_socket_count != available.cpu_socket_count:
- self.log.debug("Matching CPU socket count failed. Required: %s, Available: %s", required.cpu_socket_count, available.cpu_socket_count)
- return False
- elif available.has_field('cpu_socket_count'):
- self.log.debug("Rejecting available flavor because cpu_socket_count not required but available")
- return False
-
- if required.has_field('cpu_core_count'):
- self.log.debug("Matching CPU core count")
- if available.has_field('cpu_core_count') == False:
- self.log.debug("Matching CPU core count failed. Not available in flavor")
- return False
- else:
- if required.cpu_core_count != available.cpu_core_count:
- self.log.debug("Matching CPU core count failed. Required: %s, Available: %s", required.cpu_core_count, available.cpu_core_count)
- return False
- elif available.has_field('cpu_core_count'):
- self.log.debug("Rejecting available flavor because cpu_core_count not required but available")
- return False
-
- if required.has_field('cpu_core_thread_count'):
- self.log.debug("Matching CPU core thread count")
- if available.has_field('cpu_core_thread_count') == False:
- self.log.debug("Matching CPU core thread count failed. Not available in flavor")
- return False
- else:
- if required.cpu_core_thread_count != available.cpu_core_thread_count:
- self.log.debug("Matching CPU core thread count failed. Required: %s, Available: %s", required.cpu_core_thread_count, available.cpu_core_thread_count)
- return False
- elif available.has_field('cpu_core_thread_count'):
- self.log.debug("Rejecting available flavor because cpu_core_thread_count not required but available")
- return False
-
- if required.has_field('cpu_feature'):
- self.log.debug("Matching CPU feature list")
- if available.has_field('cpu_feature') == False:
- self.log.debug("Matching CPU feature list failed. Not available in flavor")
- return False
- else:
- for feature in required.cpu_feature:
- if feature not in available.cpu_feature:
- self.log.debug("Matching CPU feature list failed. Required feature: %s is not present. Available features: %s", feature, available.cpu_feature)
- return False
- elif available.has_field('cpu_feature'):
- self.log.debug("Rejecting available flavor because cpu_feature not required but available")
- return False
- self.log.info("Successful match for Host EPA attributes")
- return True
-
-
- def _match_placement_group_inputs(self, required, available):
- self.log.info("Matching Host aggregate attributes")
-
- if not required and not available:
- # Host aggregate not required and not available => success
- self.log.info("Successful match for Host Aggregate attributes")
- return True
- if required and available:
- # Host aggregate requested and available => Do a match and decide
- xx = [ x.as_dict() for x in required ]
- yy = [ y.as_dict() for y in available ]
- for i in xx:
- if i not in yy:
- self.log.debug("Rejecting available flavor because host Aggregate mismatch. Required: %s, Available: %s ", required, available)
- return False
- self.log.info("Successful match for Host Aggregate attributes")
- return True
- else:
- # Either of following conditions => Failure
- # - Host aggregate required but not available
- # - Host aggregate not required but available
- self.log.debug("Rejecting available flavor because host Aggregate mismatch. Required: %s, Available: %s ", required, available)
- return False
-
- def match_epa_params(self, resource_info, request_params):
- result = self._match_vm_flavor(getattr(request_params, 'vm_flavor'),
- getattr(resource_info, 'vm_flavor'))
- if result == False:
- self.log.debug("VM Flavor mismatched")
- return False
-
- result = self._match_guest_epa(getattr(request_params, 'guest_epa'),
- getattr(resource_info, 'guest_epa'))
- if result == False:
- self.log.debug("Guest EPA mismatched")
- return False
-
- result = self._match_vswitch_epa(getattr(request_params, 'vswitch_epa'),
- getattr(resource_info, 'vswitch_epa'))
- if result == False:
- self.log.debug("Vswitch EPA mismatched")
- return False
-
- result = self._match_hypervisor_epa(getattr(request_params, 'hypervisor_epa'),
- getattr(resource_info, 'hypervisor_epa'))
- if result == False:
- self.log.debug("Hypervisor EPA mismatched")
- return False
-
- result = self._match_host_epa(getattr(request_params, 'host_epa'),
- getattr(resource_info, 'host_epa'))
- if result == False:
- self.log.debug("Host EPA mismatched")
- return False
-
- result = self._match_placement_group_inputs(getattr(request_params, 'host_aggregate'),
- getattr(resource_info, 'host_aggregate'))
-
- if result == False:
- self.log.debug("Host Aggregate mismatched")
- return False
-
- return True
-
- def _select_resource_flavor(self, account, vdu_init):
- """
- Select a existing flavor if it matches the request or create new flavor
- """
- flavor = RwcalYang.FlavorInfoItem()
- flavor.name = str(uuid.uuid4())
- epa_types = ['vm_flavor', 'guest_epa', 'host_epa', 'host_aggregate', 'hypervisor_epa', 'vswitch_epa']
- epa_dict = {k: v for k, v in vdu_init.as_dict().items() if k in epa_types}
- flavor.from_dict(epa_dict)
-
- rc, response = self.do_get_flavor_list(account)
- if rc != RwTypes.RwStatus.SUCCESS:
- self.log.error("Get-flavor-info-list operation failed for cloud account: %s",
- account.name)
- raise OpenstackCALOperationFailure("Get-flavor-info-list operation failed for cloud account: %s" %(account.name))
-
- flavor_id = None
- flavor_list = response.flavorinfo_list
- self.log.debug("Received %d flavor information from RW.CAL", len(flavor_list))
- for flv in flavor_list:
- self.log.info("Attempting to match compute requirement for VDU: %s with flavor %s",
- vdu_init.name, flv)
- if self.match_epa_params(flv, vdu_init):
- self.log.info("Flavor match found for compute requirements for VDU: %s with flavor name: %s, flavor-id: %s",
- vdu_init.name, flv.name, flv.id)
- return flv.id
-
- if account.openstack.dynamic_flavor_support is False:
- self.log.error("Unable to create flavor for compute requirement for VDU: %s. VDU instantiation failed", vdu_init.name)
- raise OpenstackCALOperationFailure("No resource available with matching EPA attributes")
- else:
- rc,flavor_id = self.do_create_flavor(account,flavor)
- if rc != RwTypes.RwStatus.SUCCESS:
- self.log.error("Create-flavor operation failed for cloud account: %s",
- account.name)
- raise OpenstackCALOperationFailure("Create-flavor operation failed for cloud account: %s" %(account.name))
- return flavor_id
-
- def _create_vm(self, account, vduinfo, pci_assignement=None, server_group=None, port_list=None, network_list=None, imageinfo_list=None):
- """Create a new virtual machine.
-
- Arguments:
- account - a cloud account
- vminfo - information that defines the type of VM to create
-
- Returns:
- The image id
- """
- kwargs = {}
- kwargs['name'] = vduinfo.name
- kwargs['flavor_id'] = vduinfo.flavor_id
- if vduinfo.has_field('image_id'):
- kwargs['image_id'] = vduinfo.image_id
- else:
- kwargs['image_id'] = ""
-
- with self._use_driver(account) as drv:
- ### If floating_ip is required and we don't have one, better fail before any further allocation
- floating_ip = False
- pool_name = None
- if vduinfo.has_field('allocate_public_address') and vduinfo.allocate_public_address:
- if account.openstack.has_field('floating_ip_pool'):
- pool_name = account.openstack.floating_ip_pool
- floating_ip = True
-
- if vduinfo.has_field('vdu_init') and vduinfo.vdu_init.has_field('userdata'):
- kwargs['userdata'] = vduinfo.vdu_init.userdata
- else:
- kwargs['userdata'] = ''
-
- if account.openstack.security_groups:
- kwargs['security_groups'] = account.openstack.security_groups
-
- kwargs['port_list'] = port_list
- kwargs['network_list'] = network_list
-
- metadata = {}
- files = {}
- config_drive = False
- # Add all metadata related fields
- if vduinfo.has_field('node_id'):
- metadata['node_id'] = vduinfo.node_id
- if pci_assignement is not None:
- metadata['pci_assignement'] = pci_assignement
- if vduinfo.has_field('supplemental_boot_data'):
- if vduinfo.supplemental_boot_data.has_field('custom_meta_data'):
- for custom_meta_item in vduinfo.supplemental_boot_data.custom_meta_data:
- if custom_meta_item.data_type == "STRING":
- metadata[custom_meta_item.name] = custom_meta_item.value
- elif custom_meta_item.data_type == "JSON":
- metadata[custom_meta_item.name] = tornado.escape.json_decode(custom_meta_item.value)
- else:
- raise OpenstackCALOperationFailure("Create-vdu operation failed. Unsupported data-type {} for custom-meta-data name {} ".format(custom_meta_item.data_type, custom_meta_item.name))
- if vduinfo.supplemental_boot_data.has_field('config_file'):
- for custom_config_file in vduinfo.supplemental_boot_data.config_file:
- files[custom_config_file.dest] = custom_config_file.source
-
- if vduinfo.supplemental_boot_data.has_field('boot_data_drive'):
- if vduinfo.supplemental_boot_data.boot_data_drive is True:
- config_drive = True
-
- kwargs['metadata'] = metadata
- kwargs['files'] = files
- kwargs['config_drive'] = config_drive
-
- if vduinfo.has_field('availability_zone') and vduinfo.availability_zone.has_field('name'):
- kwargs['availability_zone'] = vduinfo.availability_zone
- else:
- kwargs['availability_zone'] = None
-
- if server_group is not None:
- kwargs['scheduler_hints'] = {'group': server_group}
- else:
- kwargs['scheduler_hints'] = None
-
- kwargs['block_device_mapping_v2'] = None
- vol_metadata = False
- if vduinfo.has_field('volumes') :
- kwargs['block_device_mapping_v2'] = []
- with self._use_driver(account) as drv:
- # Only support image->volume
- for volume in vduinfo.volumes:
- block_map = dict()
- block_map['boot_index'] = volume.boot_priority
- if "image" in volume:
- # Support image->volume
- # Match retrived image info with volume based image name and checksum
- if volume.image is not None:
- matching_images = [img for img in imageinfo_list if img['name'] == volume.image]
- if volume.image_checksum is not None:
- matching_images = [img for img in matching_images if img['checksum'] == volume.image_checksum]
- img_id = matching_images[0]['id']
- if img_id is None:
- raise OpenstackCALOperationFailure("Create-vdu operation failed. Volume image not found for name {} checksum {}".format(volume.name, volume.checksum))
- block_map['uuid'] = img_id
- block_map['source_type'] = "image"
- else:
- block_map['source_type'] = "blank"
-
- block_map['device_name'] = volume.name
- block_map['destination_type'] = "volume"
- block_map['volume_size'] = volume.size
- block_map['delete_on_termination'] = True
- if volume.has_field('device_type') and volume.device_type == 'cdrom':
- block_map['device_type'] = 'cdrom'
- if volume.has_field('device_bus') and volume.device_bus == 'ide':
- block_map['disk_bus'] = 'ide'
- kwargs['block_device_mapping_v2'].append(block_map)
-
-
- with self._use_driver(account) as drv:
- vm_id = drv.nova_server_create(**kwargs)
- if floating_ip:
- self.prepare_vdu_on_boot(account, vm_id, floating_ip, pool_name, vduinfo.volumes)
+ except Exception as e:
+ self.log.exception("Exception %s occured during virtual-link-list-get", str(e))
+ raise
+ return vnf_resources
- return vm_id
- def get_openstack_image_info(self, account, image_name, image_checksum=None):
- self.log.debug("Looking up image id for image name %s and checksum %s on cloud account: %s",
- image_name, image_checksum, account.name
- )
-
- image_list = []
- with self._use_driver(account) as drv:
- image_list = drv.glance_image_list()
- matching_images = [img for img in image_list if img['name'] == image_name]
-
- # If the image checksum was filled in then further filter the images by the checksum
- if image_checksum is not None:
- matching_images = [img for img in matching_images if img['checksum'] == image_checksum]
- else:
- self.log.warning("Image checksum not provided. Lookup using image name (%s) only.",
- image_name)
-
- if len(matching_images) == 0:
- raise ResMgrCALOperationFailure("Could not find image name {} (using checksum: {}) for cloud account: {}".format(
- image_name, image_checksum, account.name
- ))
-
- elif len(matching_images) > 1:
- unique_checksums = {i.checksum for i in matching_images}
- if len(unique_checksums) > 1:
- msg = ("Too many images with different checksums matched "
- "image name of %s for cloud account: %s" % (image_name, account.name))
- raise ResMgrCALOperationFailure(msg)
-
- return matching_images[0]
@rwcalstatus(ret_on_failure=[""])
def do_create_vdu(self, account, vdu_init):
Returns:
The vdu_id
"""
- ### First create required number of ports aka connection points
- # Add the mgmt_ntwk by default.
- mgmt_network_id = None
- with self._use_driver(account) as drv:
- mgmt_network_id = drv._mgmt_network_id
-
- port_list = []
- network_list = []
- imageinfo_list = []
- is_explicit_mgmt_defined = False
- for c_point in vdu_init.connection_points:
- # if the user has specified explicit mgmt_network connection point
- # then remove the mgmt_network from the VM list
- if c_point.virtual_link_id == mgmt_network_id:
- is_explicit_mgmt_defined = True
- if c_point.virtual_link_id in network_list:
- assert False, "Only one port per network supported. Refer: http://specs.openstack.org/openstack/nova-specs/specs/juno/implemented/nfv-multiple-if-1-net.html"
- else:
- network_list.append(c_point.virtual_link_id)
- port_id = self._create_connection_point(account, c_point)
- port_list.append(port_id)
-
- if not vdu_init.has_field('flavor_id'):
- vdu_init.flavor_id = self._select_resource_flavor(account,vdu_init)
-
- ### Obtain all images for volumes and perform validations
- if vdu_init.has_field('volumes'):
- for volume in vdu_init.volumes:
- if "image" in volume:
- image_checksum = volume.image_checksum if volume.has_field("image_checksum") else None
- image_info = self.get_openstack_image_info(account, volume.image, image_checksum)
- imageinfo_list.append(image_info)
- elif vdu_init.has_field('image_id'):
- with self._use_driver(account) as drv:
- image_info = drv.glance_image_get(vdu_init.image_id)
- imageinfo_list.append(image_info)
-
- if not imageinfo_list:
- err_str = ("VDU has no image information")
- self.log.error(err_str)
- raise OpenstackCALOperationFailure("Create-vdu operation failed. Error- %s" % err_str)
-
- ### Check VDU Virtual Interface type and make sure VM with property exists
- if vdu_init.connection_points:
- ### All virtual interfaces need to be of the same type for Openstack Accounts
- if not (all(cp.type_yang == 'E1000' for cp in vdu_init.connection_points) or all(cp.type_yang != 'E1000' for cp in vdu_init.connection_points)):
- ### We have a mix of E1000 & VIRTIO/SR_IPOV virtual interface types in the VDU, abort instantiation.
- assert False, "Only one type of Virtual Intefaces supported for Openstack accounts. Found a mix of VIRTIO/SR_IOV & E1000."
-
- ## It is not clear if all the images need to checked for HW properties. In the absence of model info describing each im age's properties,
- ### we shall assume that all images need to have similar properties
- for img_info in imageinfo_list:
-
- virt_intf_type = vdu_init.connection_points[0].type_yang
- if virt_intf_type == 'E1000':
- if 'hw_vif_model' in img_info and img_info.hw_vif_model == 'e1000':
- self.log.debug("VDU has Virtual Interface E1000, found matching image with property hw_vif_model=e1000")
- else:
- err_str = ("VDU has Virtual Interface E1000, but image '%s' does not have property hw_vif_model=e1000" % img_info.name)
- self.log.error(err_str)
- raise OpenstackCALOperationFailure("Create-vdu operation failed. Error- %s" % err_str)
- elif virt_intf_type == 'VIRTIO' or virt_intf_type == 'SR_IOV':
- if 'hw_vif_model' in img_info:
- err_str = ("VDU has Virtual Interface %s, but image '%s' has hw_vif_model mismatch" % virt_intf_type,img_info.name)
- self.log.error(err_str)
- raise OpenstackCALOperationFailure("Create-vdu operation failed. Error- %s" % err_str)
- else:
- self.log.debug("VDU has Virtual Interface %s, found matching image" % virt_intf_type)
- else:
- err_str = ("VDU Virtual Interface '%s' not supported yet" % virt_intf_type)
- self.log.error(err_str)
- raise OpenstackCALOperationFailure("Create-vdu operation failed. Error- %s" % err_str)
+ drv = self._use_driver(account)
+ try:
+ kwargs = drv.utils.compute.make_vdu_create_args(vdu_init, account)
+ vm_id = drv.nova_server_create(**kwargs)
+ self.prepare_vdu_on_boot(account, vm_id, vdu_init)
+ except Exception as e:
+ self.log.exception("Exception %s occured during create-vdu", str(e))
+ raise
+ return vm_id
+
- with self._use_driver(account) as drv:
- ### Now Create VM
- vm_network_list = []
- if not is_explicit_mgmt_defined:
- vm_network_list.append(drv._mgmt_network_id)
-
- if vdu_init.has_field('volumes'):
- # Only combination supported: Image->Volume
- for volume in vdu_init.volumes:
- if "volume" in volume:
- err_str = ("VDU Volume source not supported yet")
- self.log.error(err_str)
- raise OpenstackCALOperationFailure("Create-vdu operation failed. Error- %s" % err_str)
- if not volume.has_field('device_type'):
- err_str = ("VDU Volume destination type not defined")
- self.log.error(err_str)
- raise OpenstackCALOperationFailure("Create-vdu operation failed. Error- %s" % err_str)
- if volume.device_type not in ['disk', 'cdrom'] :
- err_str = ("VDU Volume destination type '%s' not supported" % volume.device_type)
- self.log.error(err_str)
- raise OpenstackCALOperationFailure("Create-vdu operation failed. Error- %s" % err_str)
-
-
- server_group = None
- if vdu_init.has_field('server_group'):
- ### Get list of server group in openstack for name->id mapping
- openstack_group_list = drv.nova_server_group_list()
- group_id = [ i['id'] for i in openstack_group_list if i['name'] == vdu_init.server_group.name]
- if len(group_id) != 1:
- raise OpenstackServerGroupError("VM placement failed. Server Group %s not found in openstack. Available groups" %(vdu_init.server_group.name, [i['name'] for i in openstack_group_list]))
- server_group = group_id[0]
-
- pci_assignement = self.prepare_vpci_metadata(drv, vdu_init)
- if pci_assignement != '':
- vm.user_tags.pci_assignement = pci_assignement
-
- vm_id = self._create_vm(account, vdu_init, pci_assignement=pci_assignement, server_group=server_group, port_list=port_list, network_list=vm_network_list, imageinfo_list = imageinfo_list)
- return vm_id
-
- def prepare_vpci_metadata(self, drv, vdu_init):
- pci_assignement = ''
- ### TEF specific metadata creation for
- virtio_vpci = []
- sriov_vpci = []
- virtio_meta = ''
- sriov_meta = ''
- ### For MGMT interface
- if vdu_init.has_field('mgmt_vpci'):
- xx = 'u\''+ drv._mgmt_network_id + '\' :[[u\'' + vdu_init.mgmt_vpci + '\', ' + '\'\']]'
- virtio_vpci.append(xx)
-
- for c_point in vdu_init.connection_points:
- if c_point.has_field('vpci'):
- if c_point.has_field('vpci') and c_point.type_yang == 'VIRTIO':
- xx = 'u\''+c_point.virtual_link_id + '\' :[[u\'' + c_point.vpci + '\', ' + '\'\']]'
- virtio_vpci.append(xx)
- elif c_point.has_field('vpci') and c_point.type_yang == 'SR_IOV':
- xx = '[u\'' + c_point.vpci + '\', ' + '\'\']'
- sriov_vpci.append(xx)
-
- if virtio_vpci:
- virtio_meta += ','.join(virtio_vpci)
-
- if sriov_vpci:
- sriov_meta = 'u\'VF\': ['
- sriov_meta += ','.join(sriov_vpci)
- sriov_meta += ']'
-
- if virtio_meta != '':
- pci_assignement += virtio_meta
- pci_assignement += ','
-
- if sriov_meta != '':
- pci_assignement += sriov_meta
-
- if pci_assignement != '':
- pci_assignement = '{' + pci_assignement + '}'
-
- return pci_assignement
-
-
-
- def prepare_vdu_on_boot(self, account, server_id, floating_ip, pool_name, volumes=None):
+ def prepare_vdu_on_boot(self, account, server_id, vdu_params):
cmd = PREPARE_VM_CMD.format(auth_url = account.openstack.auth_url,
username = account.openstack.key,
password = account.openstack.secret,
project_domain = account.openstack.project_domain,
mgmt_network = account.openstack.mgmt_network,
server_id = server_id)
- if floating_ip:
+ vol_list = list()
+
+ if vdu_params.has_field('allocate_public_address') and vdu_params.allocate_public_address:
cmd += " --floating_ip"
- if pool_name:
- cmd += (" --pool_name " + pool_name)
-
- vol_metadata = False
- if volumes is not None:
- for volume in volumes:
- if volume.has_field('custom_meta_data'):
- vol_metadata = True
- break
+ if account.openstack.has_field('floating_ip_pool'):
+ cmd += (" --pool_name " + account.openstack.floating_ip_pool)
- if vol_metadata is True:
- tmp_file = None
- with tempfile.NamedTemporaryFile(mode='w', delete=False) as tmp_file:
- vol_list = list()
- for volume in volumes:
- vol_dict = volume.as_dict()
- vol_list.append(vol_dict)
-
- yaml.dump(vol_list, tmp_file)
- cmd += (" --vol_metadata {}").format(tmp_file.name)
+ if vdu_params.has_field('volumes'):
+ for volume in vdu_params.volumes:
+ if volume.has_field('custom_meta_data'):
+ vol_list.append(volume.as_dict())
+ if vol_list:
+ with tempfile.NamedTemporaryFile(mode='w', delete=False) as tmp_file:
+ yaml.dump(vol_list, tmp_file)
+ cmd += (" --vol_metadata {}").format(tmp_file.name)
+
exec_path = 'python3 ' + os.path.dirname(openstack_drv.__file__)
exec_cmd = exec_path+'/'+cmd
self.log.info("Running command: %s" %(exec_cmd))
account - a cloud account
vdu_modify - Information about VDU Modification (RwcalYang.VDUModifyParams)
"""
+ drv = self._use_driver(account)
### First create required number of ports aka connection points
port_list = []
network_list = []
port_id = self._create_connection_point(account, c_point)
port_list.append(port_id)
+ drv = self._use_driver(account)
### Now add the ports to VM
for port_id in port_list:
- with self._use_driver(account) as drv:
- drv.nova_server_add_port(vdu_modify.vdu_id, port_id)
+ drv.nova_server_add_port(vdu_modify.vdu_id, port_id)
### Delete the requested connection_points
for c_point in vdu_modify.connection_points_remove:
self.do_delete_port(account, c_point.connection_point_id, no_rwstatus=True)
if vdu_modify.has_field('image_id'):
- with self._use_driver(account) as drv:
- drv.nova_server_rebuild(vdu_modify.vdu_id, vdu_modify.image_id)
+ drv.nova_server_rebuild(vdu_modify.vdu_id, vdu_modify.image_id)
@rwstatus
Returns:
None
"""
- if not vdu_id:
- self.log.error("empty vdu_id during the vdu deletion")
- return
-
- with self._use_driver(account) as drv:
- ### Get list of floating_ips associated with this instance and delete them
- floating_ips = [ f for f in drv.nova_floating_ip_list() if f.instance_id == vdu_id ]
- for f in floating_ips:
- drv.nova_drv.floating_ip_delete(f)
-
- ### Get list of port on VM and delete them.
- port_list = drv.neutron_port_list(**{'device_id': vdu_id})
-
- for port in port_list:
- if ((port['device_owner'] == 'compute:None') or (port['device_owner'] == '')):
- self.do_delete_port(account, port['id'], no_rwstatus=True)
-
- self.do_delete_vm(account, vdu_id, no_rwstatus=True)
-
+ drv = self._use_driver(account)
+ try:
+ drv.utils.compute.perform_vdu_network_cleanup(vdu_id)
+ drv.nova_server_delete(vdu_id)
+ except Exception as e:
+            self.log.exception("Exception %s occurred during delete-vdu", str(e))
+ raise
+
@rwstatus(ret_on_failure=[None])
def do_get_vdu(self, account, vdu_id):
Returns:
Object of type RwcalYang.VDUInfoParams
"""
- with self._use_driver(account) as drv:
- port_list = drv.neutron_port_list(**{'device_id': vdu_id})
-
- vm = drv.nova_server_get(vdu_id)
-
- flavor_info = None
- if ('flavor' in vm) and ('id' in vm['flavor']):
- try:
- flavor_info = drv.nova_flavor_get(vm['flavor']['id'])
- except Exception as e:
- self.log.critical("Exception encountered while attempting to get flavor info for flavor_id: %s. Exception: %s" %(vm['flavor']['id'], str(e)))
-
- openstack_group_list = drv.nova_server_group_list()
- server_group = [ i['name'] for i in openstack_group_list if vm['id'] in i['members']]
- openstack_srv_volume_list = drv.nova_volume_list(vm['id'])
- vdu_info = RwcalOpenstackPlugin._fill_vdu_info(drv, vm,
- flavor_info,
- account.openstack.mgmt_network,
- port_list,
- server_group,
- volume_list = openstack_srv_volume_list)
- if vdu_info.state == 'active':
- try:
- console_info = drv.nova_server_console(vdu_info.vdu_id)
- except Exception as e:
- pass
- else:
- vdu_info.console_url = console_info['console']['url']
- pass
-
- return vdu_info
+ drv = self._use_driver(account)
+ try:
+ vm_info = drv.nova_server_get(vdu_id)
+ vdu_info = drv.utils.compute.parse_cloud_vdu_info(vm_info)
+ except Exception as e:
+            self.log.exception("Exception %s occurred during get-vdu", str(e))
+ raise
+
+ return vdu_info
@rwstatus(ret_on_failure=[None])
A list of objects of type RwcalYang.VDUInfoParams
"""
vnf_resources = RwcalYang.VNFResources()
- with self._use_driver(account) as drv:
+ drv = self._use_driver(account)
+ try:
vms = drv.nova_server_list()
for vm in vms:
- port_list = drv.neutron_port_list(**{'device_id': vm['id']})
+ vdu = drv.utils.compute.parse_cloud_vdu_info(vm)
+ vnf_resources.vdu_info_list.append(vdu)
+ except Exception as e:
+            self.log.exception("Exception %s occurred during get-vdu-list", str(e))
+ raise
+ return vnf_resources
+
+
+class SdnOpenstackPlugin(GObject.Object, RwSdn.Topology):
+ instance_num = 1
+ def __init__(self):
+ GObject.Object.__init__(self)
+ self._driver_class = openstack_drv.OpenstackDriver
+ self.log = logging.getLogger('rwsdn.openstack.%s' % SdnOpenstackPlugin.instance_num)
+ self.log.setLevel(logging.DEBUG)
+
+ self._rwlog_handler = None
+ SdnOpenstackPlugin.instance_num += 1
+
+ @contextlib.contextmanager
+ def _use_driver(self, account):
+ if self._rwlog_handler is None:
+ raise UninitializedPluginError("Must call init() in CAL plugin before use.")
+
+ with rwlogger.rwlog_root_handler(self._rwlog_handler):
+ try:
+ drv = self._driver_class(username = account.openstack.key,
+ password = account.openstack.secret,
+ auth_url = account.openstack.auth_url,
+ tenant_name = account.openstack.tenant,
+ mgmt_network = account.openstack.mgmt_network,
+ cert_validate = account.openstack.cert_validate )
+ except Exception as e:
+ self.log.error("SdnOpenstackPlugin: OpenstackDriver init failed. Exception: %s" %(str(e)))
+ raise
- flavor_info = None
+ yield drv
+
+ @rwstatus
+ def do_init(self, rwlog_ctx):
+ self._rwlog_handler = rwlogger.RwLogger(
+ category="rw-cal-log",
+ subcategory="openstack",
+ log_hdl=rwlog_ctx,
+ )
+ self.log.addHandler(self._rwlog_handler)
+ self.log.propagate = False
+
+ @rwstatus(ret_on_failure=[None])
+ def do_validate_sdn_creds(self, account):
+ """
+ Validates the sdn account credentials for the specified account.
+ Performs an access to the resources using Keystone API. If creds
+ are not valid, returns an error code & reason string
- if ('flavor' in vm) and ('id' in vm['flavor']):
- try:
- flavor_info = drv.nova_flavor_get(vm['flavor']['id'])
- except Exception as e:
- self.log.critical("Exception encountered while attempting to get flavor info for flavor_id: %s. Exception: %s" %(vm['flavor']['id'], str(e)))
+ @param account - a SDN account
+ Returns:
+ Validation Code and Details String
+ """
+ status = RwsdnYang.SdnConnectionStatus()
+ try:
+ with self._use_driver(account) as drv:
+ drv.validate_account_creds()
+
+ except openstack_drv.ValidationError as e:
+ self.log.error("SdnOpenstackPlugin: OpenstackDriver credential validation failed. Exception: %s", str(e))
+ status.status = "failure"
+ status.details = "Invalid Credentials: %s" % str(e)
+
+ except Exception as e:
+ msg = "SdnOpenstackPlugin: OpenstackDriver connection failed. Exception: %s" %(str(e))
+ self.log.error(msg)
+ status.status = "failure"
+ status.details = msg
+
+ else:
+ status.status = "success"
+ status.details = "Connection was successful"
+
+ return status
+
+ @rwstatus(ret_on_failure=[""])
+ def do_create_vnffg_chain(self, account,vnffg):
+ """
+ Creates Service Function chain in ODL
+
+ @param account - a SDN account
+
+ """
+ self.log.debug('Received Create VNFFG chain for account {}, chain {}'.format(account,vnffg))
+ with self._use_driver(account) as drv:
+ port_list = list()
+ vnf_chain_list = sorted(vnffg.vnf_chain_path, key = lambda x: x.order)
+ prev_vm_id = None
+ for path in vnf_chain_list:
+ if prev_vm_id and path.vnfr_ids[0].vdu_list[0].vm_id == prev_vm_id:
+ prev_entry = port_list.pop()
+ port_list.append((prev_entry[0],path.vnfr_ids[0].vdu_list[0].port_id))
+ prev_vm_id = None
else:
- flavor_info = None
-
- openstack_group_list = drv.nova_server_group_list()
- server_group = [ i['name'] for i in openstack_group_list if vm['id'] in i['members']]
-
- openstack_srv_volume_list = drv.nova_volume_list(vm['id'])
- vdu = RwcalOpenstackPlugin._fill_vdu_info(drv, vm,
- flavor_info,
- account.openstack.mgmt_network,
- port_list,
- server_group,
- volume_list = openstack_srv_volume_list)
- if vdu.state == 'active':
- try:
- console_info = drv.nova_server_console(vdu.vdu_id)
- except Exception as e:
- pass
- else:
- vdu.console_url = console_info['console']['url']
- pass
- vnf_resources.vdu_info_list.append(vdu)
- return vnf_resources
+ prev_vm_id = path.vnfr_ids[0].vdu_list[0].vm_id
+ port_list.append((path.vnfr_ids[0].vdu_list[0].port_id,path.vnfr_ids[0].vdu_list[0].port_id))
+ vnffg_id = drv.create_port_chain(vnffg.name,port_list)
+ return vnffg_id
+
+ @rwstatus
+ def do_terminate_vnffg_chain(self, account,vnffg_id):
+ """
+ Terminate Service Function chain in ODL
+
+ @param account - a SDN account
+ """
+ self.log.debug('Received terminate VNFFG chain for id %s ', vnffg_id)
+ with self._use_driver(account) as drv:
+ drv.delete_port_chain(vnffg_id)
+
+ @rwstatus(ret_on_failure=[None])
+ def do_create_vnffg_classifier(self, account, vnffg_classifier):
+ """
+ Add VNFFG Classifier
+
+ @param account - a SDN account
+ """
+ self.log.debug('Received Create VNFFG classifier for account {}, classifier {}'.format(account,vnffg_classifier))
+ protocol_map = {1:'ICMP',6:'TCP',17:'UDP'}
+ flow_classifier_list = list()
+ with self._use_driver(account) as drv:
+ for rule in vnffg_classifier.match_attributes:
+ classifier_name = vnffg_classifier.name + '_' + rule.name
+ flow_dict = {}
+ for field, value in rule.as_dict().items():
+ if field == 'ip_proto':
+ flow_dict['protocol'] = protocol_map.get(value,None)
+ elif field == 'source_ip_address':
+ flow_dict['source_ip_prefix'] = value
+ elif field == 'destination_ip_address':
+ flow_dict['destination_ip_prefix'] = value
+ elif field == 'source_port':
+ flow_dict['source_port_range_min'] = value
+ flow_dict['source_port_range_max'] = value
+ elif field == 'destination_port':
+ flow_dict['destination_port_range_min'] = value
+ flow_dict['destination_port_range_max'] = value
+ if vnffg_classifier.has_field('port_id'):
+ flow_dict['logical_source_port'] = vnffg_classifier.port_id
+ flow_classifier_id = drv.create_flow_classifer(classifier_name, flow_dict)
+ flow_classifier_list.append(flow_classifier_id)
+ drv.update_port_chain(vnffg_classifier.rsp_id,flow_classifier_list)
+ return flow_classifier_list
+
+ @rwstatus(ret_on_failure=[None])
+ def do_terminate_vnffg_classifier(self, account, vnffg_classifier_list):
+ """
+        Delete VNFFG Classifier
+
+ @param account - a SDN account
+ """
+ self.log.debug('Received terminate VNFFG classifier for id %s ', vnffg_classifier_list)
+ with self._use_driver(account) as drv:
+ for classifier_id in vnffg_classifier_list:
+ drv.delete_flow_classifier(classifier_id)
+
+ @rwstatus(ret_on_failure=[None])
+ def do_get_vnffg_rendered_paths(self, account):
+ """
+ Get ODL Rendered Service Path List (SFC)
+
+ @param account - a SDN account
+ """
+ self.log.debug('Received get VNFFG rendered path for account %s ', account)
+ vnffg_rsps = RwsdnYang.VNFFGRenderedPaths()
+ with self._use_driver(account) as drv:
+ port_chain_list = drv.get_port_chain_list()
+ for port_chain in port_chain_list:
+ #rsp = vnffg_rsps.vnffg_rendered_path.add()
+ #rsp.name = port_chain['name']
+ pass
+ return vnffg_rsps