1 # -*- coding: utf-8 -*-
4 # Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
5 # This file is part of openmano
8 # Licensed under the Apache License, Version 2.0 (the "License"); you may
9 # not use this file except in compliance with the License. You may obtain
10 # a copy of the License at
12 # http://www.apache.org/licenses/LICENSE-2.0
14 # Unless required by applicable law or agreed to in writing, software
15 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
16 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
17 # License for the specific language governing permissions and limitations
22 osconnector implements all the methods to interact with openstack using the python-neutronclient.
24 For the VNF forwarding graph, The OpenStack VIM connector calls the
25 networking-sfc Neutron extension methods, whose resources are mapped
26 to the VIM connector's SFC resources as follows:
27 - Classification (OSM) -> Flow Classifier (Neutron)
28 - Service Function Instance (OSM) -> Port Pair (Neutron)
29 - Service Function (OSM) -> Port Pair Group (Neutron)
30 - Service Function Path (OSM) -> Port Chain (Neutron)
34 from http
.client
import HTTPException
37 from pprint
import pformat
42 from cinderclient
import client
as cClient
43 from glanceclient
import client
as glClient
44 import glanceclient
.exc
as gl1Exceptions
45 from keystoneauth1
import session
46 from keystoneauth1
.identity
import v2
, v3
47 import keystoneclient
.exceptions
as ksExceptions
48 import keystoneclient
.v2_0
.client
as ksClient_v2
49 import keystoneclient
.v3
.client
as ksClient_v3
51 from neutronclient
.common
import exceptions
as neExceptions
52 from neutronclient
.neutron
import client
as neClient
53 from novaclient
import client
as nClient
, exceptions
as nvExceptions
54 from osm_ro_plugin
import vimconn
55 from requests
.exceptions
import ConnectionError
58 __author__
= "Alfonso Tierno, Gerardo Garcia, Pablo Montes, xFlow Research, Igor D.C., Eduardo Sousa"
59 __date__
= "$22-sep-2017 23:59:59$"
61 """contain the openstack virtual machine status to openmano status"""
62 vmStatus2manoFormat
= {
65 "SUSPENDED": "SUSPENDED",
66 "SHUTOFF": "INACTIVE",
71 netStatus2manoFormat
= {
74 "INACTIVE": "INACTIVE",
80 supportedClassificationTypes
= ["legacy_flow_classifier"]
82 # global var to have a timeout creating and deleting volumes
class SafeDumper(yaml.SafeDumper):
    """A yaml.SafeDumper variant able to serialize OpenStack API objects.

    OpenStack client libraries return custom subclasses of dict, which the
    stock safe dumper refuses to represent (reference issue 142 of pyyaml);
    this dumper downgrades such values to plain dicts before dumping.
    """

    def represent_data(self, data):
        # Normalize any dict subclass to an exact builtin dict, since the
        # safe representer only accepts exact builtin types.
        if isinstance(data, dict) and data.__class__ is not dict:
            data = dict(data.items())

        return super().represent_data(data)
98 class vimconnector(vimconn
.VimConnector
):
113 """using common constructor parameters. In this case
114 'url' is the keystone authorization url,
115 'url_admin' is not use
117 api_version
= config
.get("APIversion")
119 if api_version
and api_version
not in ("v3.3", "v2.0", "2", "3"):
120 raise vimconn
.VimConnException(
121 "Invalid value '{}' for config:APIversion. "
122 "Allowed values are 'v3.3', 'v2.0', '2' or '3'".format(api_version
)
125 vim_type
= config
.get("vim_type")
127 if vim_type
and vim_type
not in ("vio", "VIO"):
128 raise vimconn
.VimConnException(
129 "Invalid value '{}' for config:vim_type."
130 "Allowed values are 'vio' or 'VIO'".format(vim_type
)
133 if config
.get("dataplane_net_vlan_range") is not None:
134 # validate vlan ranges provided by user
135 self
._validate
_vlan
_ranges
(
136 config
.get("dataplane_net_vlan_range"), "dataplane_net_vlan_range"
139 if config
.get("multisegment_vlan_range") is not None:
140 # validate vlan ranges provided by user
141 self
._validate
_vlan
_ranges
(
142 config
.get("multisegment_vlan_range"), "multisegment_vlan_range"
145 vimconn
.VimConnector
.__init
__(
159 if self
.config
.get("insecure") and self
.config
.get("ca_cert"):
160 raise vimconn
.VimConnException(
161 "options insecure and ca_cert are mutually exclusive"
166 if self
.config
.get("insecure"):
169 if self
.config
.get("ca_cert"):
170 self
.verify
= self
.config
.get("ca_cert")
173 raise TypeError("url param can not be NoneType")
175 self
.persistent_info
= persistent_info
176 self
.availability_zone
= persistent_info
.get("availability_zone", None)
177 self
.session
= persistent_info
.get("session", {"reload_client": True})
178 self
.my_tenant_id
= self
.session
.get("my_tenant_id")
179 self
.nova
= self
.session
.get("nova")
180 self
.neutron
= self
.session
.get("neutron")
181 self
.cinder
= self
.session
.get("cinder")
182 self
.glance
= self
.session
.get("glance")
183 # self.glancev1 = self.session.get("glancev1")
184 self
.keystone
= self
.session
.get("keystone")
185 self
.api_version3
= self
.session
.get("api_version3")
186 self
.vim_type
= self
.config
.get("vim_type")
189 self
.vim_type
= self
.vim_type
.upper()
191 if self
.config
.get("use_internal_endpoint"):
192 self
.endpoint_type
= "internalURL"
194 self
.endpoint_type
= None
196 logging
.getLogger("urllib3").setLevel(logging
.WARNING
)
197 logging
.getLogger("keystoneauth").setLevel(logging
.WARNING
)
198 logging
.getLogger("novaclient").setLevel(logging
.WARNING
)
199 self
.logger
= logging
.getLogger("ro.vim.openstack")
201 # allow security_groups to be a list or a single string
202 if isinstance(self
.config
.get("security_groups"), str):
203 self
.config
["security_groups"] = [self
.config
["security_groups"]]
205 self
.security_groups_id
= None
207 # ###### VIO Specific Changes #########
208 if self
.vim_type
== "VIO":
209 self
.logger
= logging
.getLogger("ro.vim.vio")
212 self
.logger
.setLevel(getattr(logging
, log_level
))
def __getitem__(self, index):
    """Get individuals parameters.

    The two domain-id parameters are read from self.config; any other key
    is resolved by the parent VimConnector implementation.
    """
    if index in ("project_domain_id", "user_domain_id"):
        return self.config.get(index)

    return vimconn.VimConnector.__getitem__(self, index)
def __setitem__(self, index, value):
    """Set individuals parameters and it is marked as dirty so to force connection reload."""
    if index in ("project_domain_id", "user_domain_id"):
        self.config[index] = value
    else:
        vimconn.VimConnector.__setitem__(self, index, value)

    # Flag the cached session as stale so the clients are rebuilt on the
    # next operation (see _reload_connection).
    self.session["reload_client"] = True
236 def serialize(self
, value
):
237 """Serialization of python basic types.
239 In the case value is not serializable a message will be logged and a
240 simple representation of the data that cannot be converted back to
243 if isinstance(value
, str):
248 value
, Dumper
=SafeDumper
, default_flow_style
=True, width
=256
250 except yaml
.representer
.RepresenterError
:
252 "The following entity cannot be serialized in YAML:\n\n%s\n\n",
259 def _reload_connection(self
):
260 """Called before any operation, it check if credentials has changed
261 Throw keystoneclient.apiclient.exceptions.AuthorizationFailure
263 # TODO control the timing and possible token timeout, but it seams that python client does this task for us :-)
264 if self
.session
["reload_client"]:
265 if self
.config
.get("APIversion"):
266 self
.api_version3
= (
267 self
.config
["APIversion"] == "v3.3"
268 or self
.config
["APIversion"] == "3"
270 else: # get from ending auth_url that end with v3 or with v2.0
271 self
.api_version3
= self
.url
.endswith("/v3") or self
.url
.endswith(
275 self
.session
["api_version3"] = self
.api_version3
277 if self
.api_version3
:
278 if self
.config
.get("project_domain_id") or self
.config
.get(
279 "project_domain_name"
281 project_domain_id_default
= None
283 project_domain_id_default
= "default"
285 if self
.config
.get("user_domain_id") or self
.config
.get(
288 user_domain_id_default
= None
290 user_domain_id_default
= "default"
294 password
=self
.passwd
,
295 project_name
=self
.tenant_name
,
296 project_id
=self
.tenant_id
,
297 project_domain_id
=self
.config
.get(
298 "project_domain_id", project_domain_id_default
300 user_domain_id
=self
.config
.get(
301 "user_domain_id", user_domain_id_default
303 project_domain_name
=self
.config
.get("project_domain_name"),
304 user_domain_name
=self
.config
.get("user_domain_name"),
310 password
=self
.passwd
,
311 tenant_name
=self
.tenant_name
,
312 tenant_id
=self
.tenant_id
,
315 sess
= session
.Session(auth
=auth
, verify
=self
.verify
)
316 # addedd region_name to keystone, nova, neutron and cinder to support distributed cloud for Wind River
317 # Titanium cloud and StarlingX
318 region_name
= self
.config
.get("region_name")
320 if self
.api_version3
:
321 self
.keystone
= ksClient_v3
.Client(
323 endpoint_type
=self
.endpoint_type
,
324 region_name
=region_name
,
327 self
.keystone
= ksClient_v2
.Client(
328 session
=sess
, endpoint_type
=self
.endpoint_type
331 self
.session
["keystone"] = self
.keystone
332 # In order to enable microversion functionality an explicit microversion must be specified in "config".
333 # This implementation approach is due to the warning message in
334 # https://developer.openstack.org/api-guide/compute/microversions.html
335 # where it is stated that microversion backwards compatibility is not guaranteed and clients should
336 # always require an specific microversion.
337 # To be able to use "device role tagging" functionality define "microversion: 2.32" in datacenter config
338 version
= self
.config
.get("microversion")
343 # addedd region_name to keystone, nova, neutron and cinder to support distributed cloud for Wind River
344 # Titanium cloud and StarlingX
345 self
.nova
= self
.session
["nova"] = nClient
.Client(
348 endpoint_type
=self
.endpoint_type
,
349 region_name
=region_name
,
351 self
.neutron
= self
.session
["neutron"] = neClient
.Client(
354 endpoint_type
=self
.endpoint_type
,
355 region_name
=region_name
,
357 self
.cinder
= self
.session
["cinder"] = cClient
.Client(
360 endpoint_type
=self
.endpoint_type
,
361 region_name
=region_name
,
365 self
.my_tenant_id
= self
.session
["my_tenant_id"] = sess
.get_project_id()
367 self
.logger
.error("Cannot get project_id from session", exc_info
=True)
369 if self
.endpoint_type
== "internalURL":
370 glance_service_id
= self
.keystone
.services
.list(name
="glance")[0].id
371 glance_endpoint
= self
.keystone
.endpoints
.list(
372 glance_service_id
, interface
="internal"
375 glance_endpoint
= None
377 self
.glance
= self
.session
["glance"] = glClient
.Client(
378 2, session
=sess
, endpoint
=glance_endpoint
380 # using version 1 of glance client in new_image()
381 # self.glancev1 = self.session["glancev1"] = glClient.Client("1", session=sess,
382 # endpoint=glance_endpoint)
383 self
.session
["reload_client"] = False
384 self
.persistent_info
["session"] = self
.session
385 # add availablity zone info inside self.persistent_info
386 self
._set
_availablity
_zones
()
387 self
.persistent_info
["availability_zone"] = self
.availability_zone
388 # force to get again security_groups_ids next time they are needed
389 self
.security_groups_id
= None
391 def __net_os2mano(self
, net_list_dict
):
392 """Transform the net openstack format to mano format
393 net_list_dict can be a list of dict or a single dict"""
394 if type(net_list_dict
) is dict:
395 net_list_
= (net_list_dict
,)
396 elif type(net_list_dict
) is list:
397 net_list_
= net_list_dict
399 raise TypeError("param net_list_dict must be a list or a dictionary")
400 for net
in net_list_
:
401 if net
.get("provider:network_type") == "vlan":
404 net
["type"] = "bridge"
def __classification_os2mano(self, class_list_dict):
    """Transform the openstack format (Flow Classifier) to mano format
    (Classification). class_list_dict can be a list of dict or a single dict.

    Each dict is rewritten *in place*: the identifying fields (id, name,
    description, project_id, tenant_id) are kept at top level, while every
    remaining Flow Classifier attribute is moved under "definition", and
    "ctype" is set to the only supported type, "legacy_flow_classifier".

    :raises TypeError: if class_list_dict is neither a dict nor a list
    """
    if isinstance(class_list_dict, dict):
        class_list_ = [class_list_dict]
    elif isinstance(class_list_dict, list):
        class_list_ = class_list_dict
    else:
        raise TypeError("param class_list_dict must be a list or a dictionary")

    for classification in class_list_:
        # Renamed from "id" to avoid shadowing the builtin id().
        class_id = classification.pop("id")
        name = classification.pop("name")
        description = classification.pop("description")
        project_id = classification.pop("project_id")
        tenant_id = classification.pop("tenant_id")
        # Deep-copy the leftovers before clearing, so "definition" does not
        # alias the dict being rebuilt.
        original_classification = copy.deepcopy(classification)
        classification.clear()
        classification["ctype"] = "legacy_flow_classifier"
        classification["definition"] = original_classification
        classification["id"] = class_id
        classification["name"] = name
        classification["description"] = description
        classification["project_id"] = project_id
        classification["tenant_id"] = tenant_id
432 def __sfi_os2mano(self
, sfi_list_dict
):
433 """Transform the openstack format (Port Pair) to mano format (SFI)
434 sfi_list_dict can be a list of dict or a single dict
436 if isinstance(sfi_list_dict
, dict):
437 sfi_list_
= [sfi_list_dict
]
438 elif isinstance(sfi_list_dict
, list):
439 sfi_list_
= sfi_list_dict
441 raise TypeError("param sfi_list_dict must be a list or a dictionary")
443 for sfi
in sfi_list_
:
444 sfi
["ingress_ports"] = []
445 sfi
["egress_ports"] = []
447 if sfi
.get("ingress"):
448 sfi
["ingress_ports"].append(sfi
["ingress"])
450 if sfi
.get("egress"):
451 sfi
["egress_ports"].append(sfi
["egress"])
455 params
= sfi
.get("service_function_parameters")
459 correlation
= params
.get("correlation")
464 sfi
["sfc_encap"] = sfc_encap
465 del sfi
["service_function_parameters"]
467 def __sf_os2mano(self
, sf_list_dict
):
468 """Transform the openstack format (Port Pair Group) to mano format (SF)
469 sf_list_dict can be a list of dict or a single dict
471 if isinstance(sf_list_dict
, dict):
472 sf_list_
= [sf_list_dict
]
473 elif isinstance(sf_list_dict
, list):
474 sf_list_
= sf_list_dict
476 raise TypeError("param sf_list_dict must be a list or a dictionary")
479 del sf
["port_pair_group_parameters"]
480 sf
["sfis"] = sf
["port_pairs"]
483 def __sfp_os2mano(self
, sfp_list_dict
):
484 """Transform the openstack format (Port Chain) to mano format (SFP)
485 sfp_list_dict can be a list of dict or a single dict
487 if isinstance(sfp_list_dict
, dict):
488 sfp_list_
= [sfp_list_dict
]
489 elif isinstance(sfp_list_dict
, list):
490 sfp_list_
= sfp_list_dict
492 raise TypeError("param sfp_list_dict must be a list or a dictionary")
494 for sfp
in sfp_list_
:
495 params
= sfp
.pop("chain_parameters")
499 correlation
= params
.get("correlation")
504 sfp
["sfc_encap"] = sfc_encap
505 sfp
["spi"] = sfp
.pop("chain_id")
506 sfp
["classifications"] = sfp
.pop("flow_classifiers")
507 sfp
["service_functions"] = sfp
.pop("port_pair_groups")
509 # placeholder for now; read TODO note below
510 def _validate_classification(self
, type, definition
):
511 # only legacy_flow_classifier Type is supported at this point
513 # TODO(igordcard): this method should be an abstract method of an
514 # abstract Classification class to be implemented by the specific
515 # Types. Also, abstract vimconnector should call the validation
516 # method before the implemented VIM connectors are called.
518 def _format_exception(self
, exception
):
519 """Transform a keystone, nova, neutron exception into a vimconn exception discovering the cause"""
520 message_error
= str(exception
)
526 neExceptions
.NetworkNotFoundClient
,
527 nvExceptions
.NotFound
,
528 ksExceptions
.NotFound
,
529 gl1Exceptions
.HTTPNotFound
,
532 raise vimconn
.VimConnNotFoundException(
533 type(exception
).__name
__ + ": " + message_error
539 gl1Exceptions
.HTTPException
,
540 gl1Exceptions
.CommunicationError
,
542 ksExceptions
.ConnectionError
,
543 neExceptions
.ConnectionFailed
,
546 if type(exception
).__name
__ == "SSLError":
547 tip
= " (maybe option 'insecure' must be added to the VIM)"
549 raise vimconn
.VimConnConnectionException(
550 "Invalid URL or credentials{}: {}".format(tip
, message_error
)
556 nvExceptions
.BadRequest
,
557 ksExceptions
.BadRequest
,
560 raise vimconn
.VimConnException(
561 type(exception
).__name
__ + ": " + message_error
566 nvExceptions
.ClientException
,
567 ksExceptions
.ClientException
,
568 neExceptions
.NeutronException
,
571 raise vimconn
.VimConnUnexpectedResponse(
572 type(exception
).__name
__ + ": " + message_error
574 elif isinstance(exception
, nvExceptions
.Conflict
):
575 raise vimconn
.VimConnConflictException(
576 type(exception
).__name
__ + ": " + message_error
578 elif isinstance(exception
, vimconn
.VimConnException
):
581 self
.logger
.error("General Exception " + message_error
, exc_info
=True)
583 raise vimconn
.VimConnConnectionException(
584 type(exception
).__name
__ + ": " + message_error
587 def _get_ids_from_name(self
):
589 Obtain ids from name of tenant and security_groups. Store at self .security_groups_id"
592 # get tenant_id if only tenant_name is supplied
593 self
._reload
_connection
()
595 if not self
.my_tenant_id
:
596 raise vimconn
.VimConnConnectionException(
597 "Error getting tenant information from name={} id={}".format(
598 self
.tenant_name
, self
.tenant_id
602 if self
.config
.get("security_groups") and not self
.security_groups_id
:
603 # convert from name to id
604 neutron_sg_list
= self
.neutron
.list_security_groups(
605 tenant_id
=self
.my_tenant_id
608 self
.security_groups_id
= []
609 for sg
in self
.config
.get("security_groups"):
610 for neutron_sg
in neutron_sg_list
:
611 if sg
in (neutron_sg
["id"], neutron_sg
["name"]):
612 self
.security_groups_id
.append(neutron_sg
["id"])
615 self
.security_groups_id
= None
617 raise vimconn
.VimConnConnectionException(
618 "Not found security group {} for this tenant".format(sg
)
def check_vim_connectivity(self):
    """Verify VIM reachability and credentials with a harmless read operation.

    Any failure surfaces as the exception raised by get_network_list
    (presumably a vimconn exception — see _format_exception usage there).
    """
    # just get network list to check connectivity and credentials
    self.get_network_list(filter_dict={})
625 def get_tenant_list(self
, filter_dict
={}):
626 """Obtain tenants of VIM
627 filter_dict can contain the following keys:
628 name: filter by tenant name
629 id: filter by tenant uuid/id
631 Returns the tenant list of dictionaries: [{'name':'<name>, 'id':'<id>, ...}, ...]
633 self
.logger
.debug("Getting tenants from VIM filter: '%s'", str(filter_dict
))
636 self
._reload
_connection
()
638 if self
.api_version3
:
639 project_class_list
= self
.keystone
.projects
.list(
640 name
=filter_dict
.get("name")
643 project_class_list
= self
.keystone
.tenants
.findall(**filter_dict
)
647 for project
in project_class_list
:
648 if filter_dict
.get("id") and filter_dict
["id"] != project
.id:
651 project_list
.append(project
.to_dict())
655 ksExceptions
.ConnectionError
,
656 ksExceptions
.ClientException
,
659 self
._format
_exception
(e
)
661 def new_tenant(self
, tenant_name
, tenant_description
):
662 """Adds a new tenant to openstack VIM. Returns the tenant identifier"""
663 self
.logger
.debug("Adding a new tenant name: %s", tenant_name
)
666 self
._reload
_connection
()
668 if self
.api_version3
:
669 project
= self
.keystone
.projects
.create(
671 self
.config
.get("project_domain_id", "default"),
672 description
=tenant_description
,
676 project
= self
.keystone
.tenants
.create(tenant_name
, tenant_description
)
680 ksExceptions
.ConnectionError
,
681 ksExceptions
.ClientException
,
682 ksExceptions
.BadRequest
,
685 self
._format
_exception
(e
)
687 def delete_tenant(self
, tenant_id
):
688 """Delete a tenant from openstack VIM. Returns the old tenant identifier"""
689 self
.logger
.debug("Deleting tenant %s from VIM", tenant_id
)
692 self
._reload
_connection
()
694 if self
.api_version3
:
695 self
.keystone
.projects
.delete(tenant_id
)
697 self
.keystone
.tenants
.delete(tenant_id
)
701 ksExceptions
.ConnectionError
,
702 ksExceptions
.ClientException
,
703 ksExceptions
.NotFound
,
706 self
._format
_exception
(e
)
714 provider_network_profile
=None,
716 """Adds a tenant network to VIM
718 'net_name': name of the network
720 'bridge': overlay isolated network
721 'data': underlay E-LAN network for Passthrough and SRIOV interfaces
722 'ptp': underlay E-LINE network for Passthrough and SRIOV interfaces.
723 'ip_profile': is a dict containing the IP parameters of the network
724 'ip_version': can be "IPv4" or "IPv6" (Currently only IPv4 is implemented)
725 'subnet_address': ip_prefix_schema, that is X.X.X.X/Y
726 'gateway_address': (Optional) ip_schema, that is X.X.X.X
727 'dns_address': (Optional) comma separated list of ip_schema, e.g. X.X.X.X[,X,X,X,X]
728 'dhcp_enabled': True or False
729 'dhcp_start_address': ip_schema, first IP to grant
730 'dhcp_count': number of IPs to grant.
731 'shared': if this network can be seen/use by other tenants/organization
732 'provider_network_profile': (optional) contains {segmentation-id: vlan, network-type: vlan|vxlan,
733 physical-network: physnet-label}
734 Returns a tuple with the network identifier and created_items, or raises an exception on error
735 created_items can be None or a dictionary where this method can include key-values that will be passed to
736 the method delete_network. Can be used to store created segments, created l2gw connections, etc.
737 Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
741 "Adding a new network to VIM name '%s', type '%s'", net_name
, net_type
743 # self.logger.debug(">>>>>>>>>>>>>>>>>> IP profile %s", str(ip_profile))
748 if provider_network_profile
:
749 vlan
= provider_network_profile
.get("segmentation-id")
753 self
._reload
_connection
()
754 network_dict
= {"name": net_name
, "admin_state_up": True}
756 if net_type
in ("data", "ptp"):
757 provider_physical_network
= None
759 if provider_network_profile
and provider_network_profile
.get(
762 provider_physical_network
= provider_network_profile
.get(
766 # provider-network must be one of the dataplane_physcial_netowrk if this is a list. If it is string
767 # or not declared, just ignore the checking
770 self
.config
.get("dataplane_physical_net"), (tuple, list)
772 and provider_physical_network
773 not in self
.config
["dataplane_physical_net"]
775 raise vimconn
.VimConnConflictException(
776 "Invalid parameter 'provider-network:physical-network' "
777 "for network creation. '{}' is not one of the declared "
778 "list at VIM_config:dataplane_physical_net".format(
779 provider_physical_network
783 # use the default dataplane_physical_net
784 if not provider_physical_network
:
785 provider_physical_network
= self
.config
.get(
786 "dataplane_physical_net"
789 # if it is non empty list, use the first value. If it is a string use the value directly
791 isinstance(provider_physical_network
, (tuple, list))
792 and provider_physical_network
794 provider_physical_network
= provider_physical_network
[0]
796 if not provider_physical_network
:
797 raise vimconn
.VimConnConflictException(
798 "missing information needed for underlay networks. Provide "
799 "'dataplane_physical_net' configuration at VIM or use the NS "
800 "instantiation parameter 'provider-network.physical-network'"
804 if not self
.config
.get("multisegment_support"):
806 "provider:physical_network"
807 ] = provider_physical_network
810 provider_network_profile
811 and "network-type" in provider_network_profile
814 "provider:network_type"
815 ] = provider_network_profile
["network-type"]
817 network_dict
["provider:network_type"] = self
.config
.get(
818 "dataplane_network_type", "vlan"
822 network_dict
["provider:segmentation_id"] = vlan
827 "provider:physical_network": "",
828 "provider:network_type": "vxlan",
830 segment_list
.append(segment1_dict
)
832 "provider:physical_network": provider_physical_network
,
833 "provider:network_type": "vlan",
837 segment2_dict
["provider:segmentation_id"] = vlan
838 elif self
.config
.get("multisegment_vlan_range"):
839 vlanID
= self
._generate
_multisegment
_vlanID
()
840 segment2_dict
["provider:segmentation_id"] = vlanID
843 # raise vimconn.VimConnConflictException(
844 # "You must provide "multisegment_vlan_range" at config dict before creating a multisegment
846 segment_list
.append(segment2_dict
)
847 network_dict
["segments"] = segment_list
849 # VIO Specific Changes. It needs a concrete VLAN
850 if self
.vim_type
== "VIO" and vlan
is None:
851 if self
.config
.get("dataplane_net_vlan_range") is None:
852 raise vimconn
.VimConnConflictException(
853 "You must provide 'dataplane_net_vlan_range' in format "
854 "[start_ID - end_ID] at VIM_config for creating underlay "
858 network_dict
["provider:segmentation_id"] = self
._generate
_vlanID
()
860 network_dict
["shared"] = shared
862 if self
.config
.get("disable_network_port_security"):
863 network_dict
["port_security_enabled"] = False
865 if self
.config
.get("neutron_availability_zone_hints"):
866 hints
= self
.config
.get("neutron_availability_zone_hints")
868 if isinstance(hints
, str):
871 network_dict
["availability_zone_hints"] = hints
873 new_net
= self
.neutron
.create_network({"network": network_dict
})
875 # create subnetwork, even if there is no profile
880 if not ip_profile
.get("subnet_address"):
881 # Fake subnet is required
882 subnet_rand
= random
.randint(0, 255)
883 ip_profile
["subnet_address"] = "192.168.{}.0/24".format(subnet_rand
)
885 if "ip_version" not in ip_profile
:
886 ip_profile
["ip_version"] = "IPv4"
889 "name": net_name
+ "-subnet",
890 "network_id": new_net
["network"]["id"],
891 "ip_version": 4 if ip_profile
["ip_version"] == "IPv4" else 6,
892 "cidr": ip_profile
["subnet_address"],
895 # Gateway should be set to None if not needed. Otherwise openstack assigns one by default
896 if ip_profile
.get("gateway_address"):
897 subnet
["gateway_ip"] = ip_profile
["gateway_address"]
899 subnet
["gateway_ip"] = None
901 if ip_profile
.get("dns_address"):
902 subnet
["dns_nameservers"] = ip_profile
["dns_address"].split(";")
904 if "dhcp_enabled" in ip_profile
:
905 subnet
["enable_dhcp"] = (
907 if ip_profile
["dhcp_enabled"] == "false"
908 or ip_profile
["dhcp_enabled"] is False
912 if ip_profile
.get("dhcp_start_address"):
913 subnet
["allocation_pools"] = []
914 subnet
["allocation_pools"].append(dict())
915 subnet
["allocation_pools"][0]["start"] = ip_profile
[
919 if ip_profile
.get("dhcp_count"):
920 # parts = ip_profile["dhcp_start_address"].split(".")
921 # ip_int = (int(parts[0]) << 24) + (int(parts[1]) << 16) + (int(parts[2]) << 8) + int(parts[3])
922 ip_int
= int(netaddr
.IPAddress(ip_profile
["dhcp_start_address"]))
923 ip_int
+= ip_profile
["dhcp_count"] - 1
924 ip_str
= str(netaddr
.IPAddress(ip_int
))
925 subnet
["allocation_pools"][0]["end"] = ip_str
927 # self.logger.debug(">>>>>>>>>>>>>>>>>> Subnet: %s", str(subnet))
928 self
.neutron
.create_subnet({"subnet": subnet
})
930 if net_type
== "data" and self
.config
.get("multisegment_support"):
931 if self
.config
.get("l2gw_support"):
932 l2gw_list
= self
.neutron
.list_l2_gateways().get("l2_gateways", ())
933 for l2gw
in l2gw_list
:
935 "l2_gateway_id": l2gw
["id"],
936 "network_id": new_net
["network"]["id"],
937 "segmentation_id": str(vlanID
),
939 new_l2gw_conn
= self
.neutron
.create_l2_gateway_connection(
940 {"l2_gateway_connection": l2gw_conn
}
944 + str(new_l2gw_conn
["l2_gateway_connection"]["id"])
947 return new_net
["network"]["id"], created_items
948 except Exception as e
:
949 # delete l2gw connections (if any) before deleting the network
950 for k
, v
in created_items
.items():
951 if not v
: # skip already deleted
955 k_item
, _
, k_id
= k
.partition(":")
957 if k_item
== "l2gwconn":
958 self
.neutron
.delete_l2_gateway_connection(k_id
)
959 except Exception as e2
:
961 "Error deleting l2 gateway connection: {}: {}".format(
962 type(e2
).__name
__, e2
967 self
.neutron
.delete_network(new_net
["network"]["id"])
969 self
._format
_exception
(e
)
971 def get_network_list(self
, filter_dict
={}):
972 """Obtain tenant networks of VIM
978 admin_state_up: boolean
980 Returns the network list of dictionaries
982 self
.logger
.debug("Getting network from VIM filter: '%s'", str(filter_dict
))
985 self
._reload
_connection
()
986 filter_dict_os
= filter_dict
.copy()
988 if self
.api_version3
and "tenant_id" in filter_dict_os
:
990 filter_dict_os
["project_id"] = filter_dict_os
.pop("tenant_id")
992 net_dict
= self
.neutron
.list_networks(**filter_dict_os
)
993 net_list
= net_dict
["networks"]
994 self
.__net
_os
2mano
(net_list
)
998 neExceptions
.ConnectionFailed
,
999 ksExceptions
.ClientException
,
1000 neExceptions
.NeutronException
,
1003 self
._format
_exception
(e
)
1005 def get_network(self
, net_id
):
1006 """Obtain details of network from VIM
1007 Returns the network information from a network id"""
1008 self
.logger
.debug(" Getting tenant network %s from VIM", net_id
)
1009 filter_dict
= {"id": net_id
}
1010 net_list
= self
.get_network_list(filter_dict
)
1012 if len(net_list
) == 0:
1013 raise vimconn
.VimConnNotFoundException(
1014 "Network '{}' not found".format(net_id
)
1016 elif len(net_list
) > 1:
1017 raise vimconn
.VimConnConflictException(
1018 "Found more than one network with this criteria"
1023 for subnet_id
in net
.get("subnets", ()):
1025 subnet
= self
.neutron
.show_subnet(subnet_id
)
1026 except Exception as e
:
1028 "osconnector.get_network(): Error getting subnet %s %s"
1031 subnet
= {"id": subnet_id
, "fault": str(e
)}
1033 subnets
.append(subnet
)
1035 net
["subnets"] = subnets
1036 net
["encapsulation"] = net
.get("provider:network_type")
1037 net
["encapsulation_type"] = net
.get("provider:network_type")
1038 net
["segmentation_id"] = net
.get("provider:segmentation_id")
1039 net
["encapsulation_id"] = net
.get("provider:segmentation_id")
1043 def delete_network(self
, net_id
, created_items
=None):
1045 Removes a tenant network from VIM and its associated elements
1046 :param net_id: VIM identifier of the network, provided by method new_network
1047 :param created_items: dictionary with extra items to be deleted. provided by method new_network
1048 Returns the network identifier or raises an exception upon error or when network is not found
1050 self
.logger
.debug("Deleting network '%s' from VIM", net_id
)
1052 if created_items
is None:
1056 self
._reload
_connection
()
1057 # delete l2gw connections (if any) before deleting the network
1058 for k
, v
in created_items
.items():
1059 if not v
: # skip already deleted
1063 k_item
, _
, k_id
= k
.partition(":")
1064 if k_item
== "l2gwconn":
1065 self
.neutron
.delete_l2_gateway_connection(k_id
)
1066 except Exception as e
:
1068 "Error deleting l2 gateway connection: {}: {}".format(
1073 # delete VM ports attached to this networks before the network
1074 ports
= self
.neutron
.list_ports(network_id
=net_id
)
1075 for p
in ports
["ports"]:
1077 self
.neutron
.delete_port(p
["id"])
1078 except Exception as e
:
1079 self
.logger
.error("Error deleting port %s: %s", p
["id"], str(e
))
1081 self
.neutron
.delete_network(net_id
)
1085 neExceptions
.ConnectionFailed
,
1086 neExceptions
.NetworkNotFoundClient
,
1087 neExceptions
.NeutronException
,
1088 ksExceptions
.ClientException
,
1089 neExceptions
.NeutronException
,
1092 self
._format
_exception
(e
)
def refresh_nets_status(self, net_list):
    """Get the status of the networks.

    Params: the list of network identifiers
    Returns a dictionary with:
        net_id:         #VIM id of this network
            status:     #Mandatory. Text with one of:
                        #  DELETED (not found at vim)
                        #  VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
                        #  OTHER (Vim reported other status not understood)
                        #  ERROR (VIM indicates an ERROR status)
                        #  ACTIVE, INACTIVE, DOWN (admin down),
                        #  BUILD (on building process)
            error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR
            vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)
    """
    net_dict = {}

    for net_id in net_list:
        net = {}

        try:
            net_vim = self.get_network(net_id)

            # map the VIM status into the MANO vocabulary; anything unknown is OTHER
            if net_vim["status"] in netStatus2manoFormat:
                net["status"] = netStatus2manoFormat[net_vim["status"]]
            else:
                net["status"] = "OTHER"
                net["error_msg"] = "VIM status reported " + net_vim["status"]

            # an administratively disabled network is reported as DOWN
            if net["status"] == "ACTIVE" and not net_vim["admin_state_up"]:
                net["status"] = "DOWN"

            net["vim_info"] = self.serialize(net_vim)

            if net_vim.get("fault"):  # TODO
                net["error_msg"] = str(net_vim["fault"])
        except vimconn.VimConnNotFoundException as e:
            self.logger.error("Exception getting net status: %s", str(e))
            net["status"] = "DELETED"
            net["error_msg"] = str(e)
        except vimconn.VimConnException as e:
            self.logger.error("Exception getting net status: %s", str(e))
            net["status"] = "VIM_ERROR"
            net["error_msg"] = str(e)

        net_dict[net_id] = net

    return net_dict
def get_flavor(self, flavor_id):
    """Obtain flavor details from the VIM. Returns the flavor dict details."""
    self.logger.debug("Getting flavor '%s'", flavor_id)

    try:
        self._reload_connection()
        found = self.nova.flavors.find(id=flavor_id)

        # TODO parse input and translate to VIM format (openmano_schemas.new_vminstance_response_schema)
        return found.to_dict()
    except (
        nvExceptions.NotFound,
        nvExceptions.ClientException,
        ksExceptions.ClientException,
        ConnectionError,
    ) as e:
        self._format_exception(e)
def get_flavor_id_from_data(self, flavor_dict):
    """Obtain flavor id that match the flavor description.

    Returns the flavor_id or raises a vimconnNotFoundException
    flavor_dict: contains the required ram, vcpus, disk
    If 'use_existing_flavors' is set to True at config, the closer flavor that provides same or more ram, vcpus
    and disk is returned. Otherwise a flavor with exactly same ram, vcpus and disk is returned or a
    vimconnNotFoundException is raised
    """
    # exact matching is required unless the config allows reusing bigger flavors
    exact_match = not self.config.get("use_existing_flavors")

    try:
        self._reload_connection()
        best_id = None
        best_data = (10000, 10000, 10000)
        target = (
            flavor_dict["ram"],
            flavor_dict["vcpus"],
            flavor_dict["disk"],
            flavor_dict.get("ephemeral", 0),
            flavor_dict.get("swap", 0),
        )
        extended = flavor_dict.get("extended", {})

        if extended:
            raise vimconn.VimConnNotFoundException(
                "Flavor with EPA still not implemented"
            )
            # if len(numas) > 1:
            #     raise vimconn.VimConnNotFoundException("Cannot find any flavor with more than one numa")
            # numas = extended.get("numas")

        for flavor in self.nova.flavors.list():
            epa = flavor.get_keys()

            # flavors carrying EPA extra-specs are not comparable here; skip them
            if epa:
                continue

            flavor_data = (
                flavor.ram,
                flavor.vcpus,
                flavor.disk,
                flavor.ephemeral,
                flavor.swap if isinstance(flavor.swap, int) else 0,
            )

            if flavor_data == target:
                return flavor.id
            elif not exact_match and target < flavor_data < best_data:
                # keep the smallest flavor that still covers the target
                best_id = flavor.id
                best_data = flavor_data

        if not exact_match and best_id:
            return best_id

        raise vimconn.VimConnNotFoundException(
            "Cannot find any flavor matching '{}'".format(flavor_dict)
        )
    except (
        nvExceptions.NotFound,
        nvExceptions.ClientException,
        ksExceptions.ClientException,
        ConnectionError,
    ) as e:
        self._format_exception(e)
def process_resource_quota(self, quota, prefix, extra_specs):
    """Translate an OSM resource quota into nova 'quota:*' extra-specs.

    :param quota: dict that may contain 'limit', 'reserve' and/or 'shares'
    :param prefix: resource prefix, e.g. "cpu", "memory", "vif", "disk_io"
    :param extra_specs: dict updated in place with the resulting keys
    """
    if "limit" in quota:
        extra_specs["quota:" + prefix + "_limit"] = quota["limit"]

    if "reserve" in quota:
        extra_specs["quota:" + prefix + "_reservation"] = quota["reserve"]

    if "shares" in quota:
        # a custom shares level is required before a share value can be set
        extra_specs["quota:" + prefix + "_shares_level"] = "custom"
        extra_specs["quota:" + prefix + "_shares_share"] = quota["shares"]
def new_flavor(self, flavor_data, change_name_if_used=True):
    """Adds a tenant flavor to openstack VIM.

    if change_name_if_used is True, it will change name in case of conflict, because it is not supported name
    repetition.
    Returns the flavor identifier
    """
    self.logger.debug("Adding flavor '%s'", str(flavor_data))
    retry = 0
    max_retries = 3
    name_suffix = 0

    try:
        name = flavor_data["name"]

        while retry < max_retries:
            retry += 1

            try:
                self._reload_connection()

                if change_name_if_used:
                    # collect names already taken and derive a unique one
                    fl_names = [f.name for f in self.nova.flavors.list()]

                    while name in fl_names:
                        name_suffix += 1
                        name = flavor_data["name"] + "-" + str(name_suffix)

                ram = flavor_data.get("ram", 64)
                vcpus = flavor_data.get("vcpus", 1)
                extra_specs = {}
                extended = flavor_data.get("extended")

                if extended:
                    numas = extended.get("numas")

                    if numas:
                        numa_nodes = len(numas)
                        extra_specs["hw:numa_nodes"] = str(numa_nodes)

                        if self.vim_type == "VIO":
                            extra_specs[
                                "vmware:extra_config"
                            ] = '{"numa.nodeAffinity":"0"}'
                            extra_specs["vmware:latency_sensitivity_level"] = "high"

                        for numa in numas:
                            if "id" in numa:
                                node_id = numa["id"]

                                if "memory" in numa:
                                    memory_mb = numa["memory"] * 1024
                                    memory = "hw:numa_mem.{}".format(node_id)
                                    extra_specs[memory] = int(memory_mb)

                                if "vcpu" in numa:
                                    vcpu = numa["vcpu"]
                                    cpu = "hw:numa_cpus.{}".format(node_id)
                                    vcpu = ",".join(map(str, vcpu))
                                    extra_specs[cpu] = vcpu

                            # overwrite ram and vcpus
                            # check if key "memory" is present in numa else use ram value at flavor
                            # See for reference: https://specs.openstack.org/openstack/nova-specs/specs/mitaka/
                            # implemented/virt-driver-cpu-thread-pinning.html
                            extra_specs["hw:cpu_sockets"] = str(numa_nodes)

                            if "paired-threads" in numa:
                                vcpus = numa["paired-threads"] * 2
                                # cpu_thread_policy "require" implies that the compute node must have an
                                # SMT architecture
                                extra_specs["hw:cpu_thread_policy"] = "require"
                                extra_specs["hw:cpu_policy"] = "dedicated"
                            elif "cores" in numa:
                                vcpus = numa["cores"]
                                # cpu_thread_policy "prefer" implies that the host must not have an SMT
                                # architecture, or a non-SMT architecture will be emulated
                                extra_specs["hw:cpu_thread_policy"] = "isolate"
                                extra_specs["hw:cpu_policy"] = "dedicated"
                            elif "threads" in numa:
                                vcpus = numa["threads"]
                                # cpu_thread_policy "prefer" implies that the host may or may not have an SMT
                                # architecture
                                extra_specs["hw:cpu_thread_policy"] = "prefer"
                                extra_specs["hw:cpu_policy"] = "dedicated"
                            # for interface in numa.get("interfaces",() ):
                            #     if interface["dedicated"]=="yes":
                            #         raise vimconn.VimConnException("Passthrough interfaces are not supported
                            #             for the openstack connector", http_code=vimconn.HTTP_Service_Unavailable)
                            #     #TODO, add the key 'pci_passthrough:alias"="<label at config>:<number ifaces>"'
                            #     when a way to connect it is available
                    elif extended.get("cpu-quota"):
                        self.process_resource_quota(
                            extended.get("cpu-quota"), "cpu", extra_specs
                        )

                    if extended.get("mem-quota"):
                        self.process_resource_quota(
                            extended.get("mem-quota"), "memory", extra_specs
                        )

                    if extended.get("vif-quota"):
                        self.process_resource_quota(
                            extended.get("vif-quota"), "vif", extra_specs
                        )

                    if extended.get("disk-io-quota"):
                        self.process_resource_quota(
                            extended.get("disk-io-quota"), "disk_io", extra_specs
                        )

                    # Set the mempage size as specified in the descriptor
                    mempage_size = extended.get("mempage-size")
                    if mempage_size:
                        page_size_map = {
                            "LARGE": "large",
                            "SMALL": "small",
                            "SIZE_2MB": "2MB",
                            "SIZE_1GB": "1GB",
                            "PREFER_LARGE": "any",
                        }

                        if mempage_size in page_size_map:
                            extra_specs["hw:mem_page_size"] = page_size_map[
                                mempage_size
                            ]
                        else:
                            # The validations in NBI should make reaching here not possible.
                            # If this message is shown, check validations
                            self.logger.debug(
                                "Invalid mempage-size %s. Will be ignored",
                                extended.get("mempage-size"),
                            )

                    # Set the cpu pinning policy as specified in the descriptor
                    if extended.get("cpu-pinning-policy"):
                        extra_specs["hw:cpu_policy"] = extended.get(
                            "cpu-pinning-policy"
                        ).lower()

                    # Set the cpu thread pinning policy as specified in the descriptor
                    if extended.get("cpu-thread-pinning-policy"):
                        extra_specs["hw:cpu_thread_policy"] = extended.get(
                            "cpu-thread-pinning-policy"
                        ).lower()

                    # Set the mem policy as specified in the descriptor
                    if extended.get("mem-policy"):
                        extra_specs["hw:numa_mempolicy"] = extended.get(
                            "mem-policy"
                        ).lower()

                # create flavor
                new_flavor = self.nova.flavors.create(
                    name=name,
                    ram=ram,
                    vcpus=vcpus,
                    disk=flavor_data.get("disk", 0),
                    ephemeral=flavor_data.get("ephemeral", 0),
                    swap=flavor_data.get("swap", 0),
                    is_public=flavor_data.get("is_public", True),
                )

                # add metadata
                if extra_specs:
                    new_flavor.set_keys(extra_specs)

                return new_flavor.id
            except nvExceptions.Conflict as e:
                # name clash: retry with a new derived name while retries remain
                if change_name_if_used and retry < max_retries:
                    continue

                self._format_exception(e)
    # except nvExceptions.BadRequest as e:
    except (
        ksExceptions.ClientException,
        nvExceptions.ClientException,
        ConnectionError,
        KeyError,
    ) as e:
        self._format_exception(e)
def delete_flavor(self, flavor_id):
    """Deletes a tenant flavor from openstack VIM. Returns the old flavor_id."""
    try:
        self._reload_connection()
        self.nova.flavors.delete(flavor_id)

        return flavor_id
    # except nvExceptions.BadRequest as e:
    except (
        nvExceptions.NotFound,
        ksExceptions.ClientException,
        nvExceptions.ClientException,
        ConnectionError,
    ) as e:
        self._format_exception(e)
def new_image(self, image_dict):
    """Adds a tenant image to VIM.

    image_dict is a dictionary with:
        name: name
        disk_format: qcow2, vhd, vmdk, raw (by default), ...
        location: path or URI
        public: "yes" or "no"
        metadata: metadata of the image
    Returns the image_id, or raises a vimconn exception upon error.
    """
    retry = 0
    max_retries = 3

    while retry < max_retries:
        retry += 1

        try:
            self._reload_connection()

            # determine format http://docs.openstack.org/developer/glance/formats.html
            if "disk_format" in image_dict:
                disk_format = image_dict["disk_format"]
            else:  # autodiscover based on extension
                if image_dict["location"].endswith(".qcow2"):
                    disk_format = "qcow2"
                elif image_dict["location"].endswith(".vhd"):
                    disk_format = "vhd"
                elif image_dict["location"].endswith(".vmdk"):
                    disk_format = "vmdk"
                elif image_dict["location"].endswith(".vdi"):
                    disk_format = "vdi"
                elif image_dict["location"].endswith(".iso"):
                    disk_format = "iso"
                elif image_dict["location"].endswith(".aki"):
                    disk_format = "aki"
                elif image_dict["location"].endswith(".ari"):
                    disk_format = "ari"
                elif image_dict["location"].endswith(".ami"):
                    disk_format = "ami"
                else:
                    disk_format = "raw"

            self.logger.debug(
                "new_image: '%s' loading from '%s'",
                image_dict["name"],
                image_dict["location"],
            )
            if self.vim_type == "VIO":
                container_format = "bare"
                if "container_format" in image_dict:
                    container_format = image_dict["container_format"]

                new_image = self.glance.images.create(
                    name=image_dict["name"],
                    container_format=container_format,
                    disk_format=disk_format,
                )
            else:
                new_image = self.glance.images.create(name=image_dict["name"])

            if image_dict["location"].startswith("http"):
                # TODO there is not a method to direct download. It must be downloaded locally with requests
                raise vimconn.VimConnNotImplemented("Cannot create image from URL")
            else:  # local path
                # BUGFIX: image files are binary; open in "rb" so the upload is not
                # corrupted by text decoding / newline translation
                with open(image_dict["location"], "rb") as fimage:
                    self.glance.images.upload(new_image.id, fimage)
                    # new_image = self.glancev1.images.create(name=image_dict["name"], is_public=
                    #  image_dict.get("public","yes")=="yes",
                    #  container_format="bare", data=fimage, disk_format=disk_format)

            # BUGFIX: "metadata" may be absent/None; default to {} instead of
            # crashing on item assignment below
            metadata_to_load = image_dict.get("metadata") or {}

            # TODO location is a reserved word for current openstack versions. fixed for VIO please check
            # for openstack
            if self.vim_type == "VIO":
                metadata_to_load["upload_location"] = image_dict["location"]
            else:
                metadata_to_load["location"] = image_dict["location"]

            self.glance.images.update(new_image.id, **metadata_to_load)

            return new_image.id
        except (
            nvExceptions.Conflict,
            ksExceptions.ClientException,
            nvExceptions.ClientException,
        ) as e:
            self._format_exception(e)
        except (
            HTTPException,
            gl1Exceptions.HTTPException,
            gl1Exceptions.CommunicationError,
            ConnectionError,
        ) as e:
            # BUGFIX: retry transient HTTP/connection errors while attempts remain,
            # and raise once retries are exhausted. The original condition was
            # inverted (it raised on the first transient error and silently fell
            # out of the loop -- returning None -- after the last attempt).
            if retry < max_retries:
                continue

            self._format_exception(e)
        except IOError as e:  # can not open the file
            raise vimconn.VimConnConnectionException(
                "{}: {} for {}".format(type(e).__name__, e, image_dict["location"]),
                http_code=vimconn.HTTP_Bad_Request,
            )
def delete_image(self, image_id):
    """Deletes a tenant image from openstack VIM. Returns the old id."""
    try:
        self._reload_connection()
        self.glance.images.delete(image_id)

        return image_id
    except (
        nvExceptions.NotFound,
        ksExceptions.ClientException,
        nvExceptions.ClientException,
        gl1Exceptions.CommunicationError,
        gl1Exceptions.HTTPNotFound,
        ConnectionError,
    ) as e:  # TODO remove
        self._format_exception(e)
def get_image_id_from_path(self, path):
    """Get the image id from image path in the VIM database. Returns the image_id."""
    try:
        self._reload_connection()

        # scan all glance images for one whose metadata records this location
        for candidate in self.glance.images.list():
            if candidate.metadata.get("location") == path:
                return candidate.id

        raise vimconn.VimConnNotFoundException(
            "image with location '{}' not found".format(path)
        )
    except (
        ksExceptions.ClientException,
        nvExceptions.ClientException,
        gl1Exceptions.CommunicationError,
        ConnectionError,
    ) as e:
        self._format_exception(e)
def get_image_list(self, filter_dict=None):
    """Obtain tenant images from VIM.

    filter_dict can contain:
        id: image id
        name: image name
        checksum: image checksum
    Returns the image list of dictionaries:
        [{<the fields at Filter_dict plus some VIM specific>}, ...]
        List can be empty
    """
    # BUGFIX: the default was the mutable literal {}, shared across all calls;
    # use a None sentinel instead (backward-compatible for every caller)
    if filter_dict is None:
        filter_dict = {}

    self.logger.debug("Getting image list from VIM filter: '%s'", str(filter_dict))

    try:
        self._reload_connection()
        # filter_dict_os = filter_dict.copy()
        # First we filter by the available filter fields: name, id. The others are removed.
        image_list = self.glance.images.list()
        filtered_list = []

        for image in image_list:
            try:
                if filter_dict.get("name") and image["name"] != filter_dict["name"]:
                    continue

                if filter_dict.get("id") and image["id"] != filter_dict["id"]:
                    continue

                if (
                    filter_dict.get("checksum")
                    and image["checksum"] != filter_dict["checksum"]
                ):
                    continue

                filtered_list.append(image.copy())
            except gl1Exceptions.HTTPNotFound:
                # image disappeared while listing; skip it
                pass

        return filtered_list
    except (
        ksExceptions.ClientException,
        nvExceptions.ClientException,
        gl1Exceptions.CommunicationError,
        ConnectionError,
    ) as e:
        self._format_exception(e)
def __wait_for_vm(self, vm_id, status):
    """Wait until the VM is in the desired status and return True.

    Return False if the VM gets into ERROR status.
    Raise a VimConnException if server_timeout is exceeded.
    """
    waited = 0

    # poll the server status every 5 seconds until it settles or we time out
    while waited < server_timeout:
        current = self.nova.servers.get(vm_id).status

        if current == status:
            return True

        if current == "ERROR":
            return False

        time.sleep(5)
        waited += 5

    # if we exceeded the timeout rollback
    if waited >= server_timeout:
        raise vimconn.VimConnException(
            "Timeout waiting for instance " + vm_id + " to get " + status,
            http_code=vimconn.HTTP_Request_Timeout,
        )
def _get_openstack_availablity_zones(self):
    """Get the availability zones available from openstack.

    :return: list of zone names (excluding "internal"), or None upon error
    """
    try:
        zones = self.nova.availability_zones.list()

        # keep every zone name except nova's internal one
        return [str(z.zoneName) for z in zones if z.zoneName != "internal"]
    except Exception:
        return None
def _set_availablity_zones(self):
    """Set vim availability zone.

    Uses the "availability_zone" entry from config when present (string or
    list); otherwise queries openstack for the available zones.
    """
    if "availability_zone" in self.config:
        configured = self.config.get("availability_zone")

        # normalize a single zone name into a one-element list
        if isinstance(configured, str):
            self.availability_zone = [configured]
        elif isinstance(configured, list):
            self.availability_zone = configured
    else:
        self.availability_zone = self._get_openstack_availablity_zones()
def _get_vm_availability_zone(
    self, availability_zone_index, availability_zone_list
):
    """Return the availability zone to be used by the created VM.

    :return: The VIM availability zone to be used or None
    """
    # no index requested: fall back to the configured default (if any)
    if availability_zone_index is None:
        if not self.config.get("availability_zone"):
            return None
        elif isinstance(self.config.get("availability_zone"), str):
            return self.config["availability_zone"]
        else:
            # TODO consider using a different parameter at config for default AV and AV list match
            return self.config["availability_zone"][0]

    vim_azs = self.availability_zone

    # check if VIM offer enough availability zones describe in the VNFD
    if vim_azs and len(availability_zone_list) <= len(vim_azs):
        # check if all the names of NFV AV match VIM AV names
        use_vim_index = any(
            av not in vim_azs for av in availability_zone_list
        )

        if use_vim_index:
            return vim_azs[availability_zone_index]
        else:
            return availability_zone_list[availability_zone_index]
    else:
        raise vimconn.VimConnConflictException(
            "No enough availability zones at VIM for this deployment"
        )
1727 affinity_group_list
,
1731 availability_zone_index
=None,
1732 availability_zone_list
=None,
1734 """Adds a VM instance to VIM
1736 start: indicates if VM must start or boot in pause mode. Ignored
1737 image_id,flavor_id: image and flavor uuid
1738 affinity_group_list: list of affinity groups, each one is a dictionary.
1740 net_list: list of interfaces, each one is a dictionary with:
1742 net_id: network uuid to connect
1743 vpci: virtual vcpi to assign, ignored because openstack lack #TODO
1744 model: interface model, ignored #TODO
1745 mac_address: used for SR-IOV ifaces #TODO for other types
1746 use: 'data', 'bridge', 'mgmt'
1747 type: 'virtual', 'PCI-PASSTHROUGH'('PF'), 'SR-IOV'('VF'), 'VFnotShared'
1748 vim_id: filled/added by this function
1749 floating_ip: True/False (or it can be None)
1750 port_security: True/False
1751 'cloud_config': (optional) dictionary with:
1752 'key-pairs': (optional) list of strings with the public key to be inserted to the default user
1753 'users': (optional) list of users to be inserted, each item is a dict with:
1754 'name': (mandatory) user name,
1755 'key-pairs': (optional) list of strings with the public key to be inserted to the user
1756 'user-data': (optional) string is a text script to be passed directly to cloud-init
1757 'config-files': (optional). List of files to be transferred. Each item is a dict with:
1758 'dest': (mandatory) string with the destination absolute path
1759 'encoding': (optional, by default text). Can be one of:
1760 'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
1761 'content' (mandatory): string with the content of the file
1762 'permissions': (optional) string with file permissions, typically octal notation '0644'
1763 'owner': (optional) file owner, string with the format 'owner:group'
1764 'boot-data-drive': boolean to indicate if user-data must be passed using a boot drive (hard disk)
1765 'disk_list': (optional) list with additional disks to the VM. Each item is a dict with:
1766 'image_id': (optional). VIM id of an existing image. If not provided an empty disk must be mounted
1767 'size': (mandatory) string with the size of the disk in GB
1768 'vim_id' (optional) should use this existing volume id
1769 availability_zone_index: Index of availability_zone_list to use for this this VM. None if not AV required
1770 availability_zone_list: list of availability zones given by user in the VNFD descriptor. Ignore if
1771 availability_zone_index is None
1772 #TODO ip, security groups
1773 Returns a tuple with the instance identifier and created_items or raises an exception on error
1774 created_items can be None or a dictionary where this method can include key-values that will be passed to
1775 the method delete_vminstance and action_vminstance. Can be used to store created ports, volumes, etc.
1776 Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
1780 "new_vminstance input: image='%s' flavor='%s' nics='%s'",
1791 external_network
= []
1792 # ^list of external networks to be connected to instance, later on used to create floating_ip
1793 no_secured_ports
= [] # List of port-is with port-security disabled
1794 self
._reload
_connection
()
1795 # metadata_vpci = {} # For a specific neutron plugin
1796 block_device_mapping
= None
1798 for net
in net_list
:
1799 if not net
.get("net_id"): # skip non connected iface
1803 "network_id": net
["net_id"],
1804 "name": net
.get("name"),
1805 "admin_state_up": True,
1809 self
.config
.get("security_groups")
1810 and net
.get("port_security") is not False
1811 and not self
.config
.get("no_port_security_extension")
1813 if not self
.security_groups_id
:
1814 self
._get
_ids
_from
_name
()
1816 port_dict
["security_groups"] = self
.security_groups_id
1818 if net
["type"] == "virtual":
1821 # metadata_vpci[ net["net_id"] ] = [[ net["vpci"], "" ]]
1822 elif net
["type"] == "VF" or net
["type"] == "SR-IOV": # for VF
1824 # if "VF" not in metadata_vpci:
1825 # metadata_vpci["VF"]=[]
1826 # metadata_vpci["VF"].append([ net["vpci"], "" ])
1827 port_dict
["binding:vnic_type"] = "direct"
1829 # VIO specific Changes
1830 if self
.vim_type
== "VIO":
1831 # Need to create port with port_security_enabled = False and no-security-groups
1832 port_dict
["port_security_enabled"] = False
1833 port_dict
["provider_security_groups"] = []
1834 port_dict
["security_groups"] = []
1835 else: # For PT PCI-PASSTHROUGH
1837 # if "PF" not in metadata_vpci:
1838 # metadata_vpci["PF"]=[]
1839 # metadata_vpci["PF"].append([ net["vpci"], "" ])
1840 port_dict
["binding:vnic_type"] = "direct-physical"
1842 if not port_dict
["name"]:
1843 port_dict
["name"] = name
1845 if net
.get("mac_address"):
1846 port_dict
["mac_address"] = net
["mac_address"]
1848 if net
.get("ip_address"):
1849 port_dict
["fixed_ips"] = [{"ip_address": net
["ip_address"]}]
1850 # TODO add "subnet_id": <subnet_id>
1852 new_port
= self
.neutron
.create_port({"port": port_dict
})
1853 created_items
["port:" + str(new_port
["port"]["id"])] = True
1854 net
["mac_adress"] = new_port
["port"]["mac_address"]
1855 net
["vim_id"] = new_port
["port"]["id"]
1856 # if try to use a network without subnetwork, it will return a emtpy list
1857 fixed_ips
= new_port
["port"].get("fixed_ips")
1860 net
["ip"] = fixed_ips
[0].get("ip_address")
1864 port
= {"port-id": new_port
["port"]["id"]}
1865 if float(self
.nova
.api_version
.get_string()) >= 2.32:
1866 port
["tag"] = new_port
["port"]["name"]
1868 net_list_vim
.append(port
)
1870 if net
.get("floating_ip", False):
1871 net
["exit_on_floating_ip_error"] = True
1872 external_network
.append(net
)
1873 elif net
["use"] == "mgmt" and self
.config
.get("use_floating_ip"):
1874 net
["exit_on_floating_ip_error"] = False
1875 external_network
.append(net
)
1876 net
["floating_ip"] = self
.config
.get("use_floating_ip")
1878 # If port security is disabled when the port has not yet been attached to the VM, then all vm traffic
1880 # As a workaround we wait until the VM is active and then disable the port-security
1881 if net
.get("port_security") is False and not self
.config
.get(
1882 "no_port_security_extension"
1884 no_secured_ports
.append(
1886 new_port
["port"]["id"],
1887 net
.get("port_security_disable_strategy"),
1892 # metadata = {"pci_assignement": json.dumps(metadata_vpci)}
1893 # if len(metadata["pci_assignement"]) >255:
1894 # #limit the metadata size
1895 # #metadata["pci_assignement"] = metadata["pci_assignement"][0:255]
1896 # self.logger.warn("Metadata deleted since it exceeds the expected length (255) ")
1900 "name '%s' image_id '%s'flavor_id '%s' net_list_vim '%s' description '%s'",
1909 config_drive
, userdata
= self
._create
_user
_data
(cloud_config
)
1911 # get availability Zone
1912 vm_av_zone
= self
._get
_vm
_availability
_zone
(
1913 availability_zone_index
, availability_zone_list
1916 # Create additional volumes in case these are present in disk_list
1917 existing_vim_volumes
= []
1918 base_disk_index
= ord("b")
1919 boot_volume_id
= None
1921 block_device_mapping
= {}
1922 for disk
in disk_list
:
1923 if "image_id" in disk
:
1924 # persistent root volume
1925 base_disk_index
= ord("a")
1927 # use existing persistent root volume
1928 if disk
.get("vim_volume_id"):
1929 block_device_mapping
["vd" + chr(base_disk_index
)] = disk
[
1932 existing_vim_volumes
.append({"id": disk
["vim_volume_id"]})
1933 # use existing persistent root volume
1934 elif disk
.get("vim_id"):
1935 block_device_mapping
["vd" + chr(base_disk_index
)] = disk
[
1938 existing_vim_volumes
.append({"id": disk
["vim_id"]})
1940 # create persistent root volume
1941 volume
= self
.cinder
.volumes
.create(
1943 name
=name
+ "vd" + chr(base_disk_index
),
1944 imageRef
=disk
["image_id"],
1945 # Make sure volume is in the same AZ as the VM to be attached to
1946 availability_zone
=vm_av_zone
,
1948 boot_volume_id
= volume
.id
1949 created_items
["volume:" + str(volume
.id)] = True
1950 block_device_mapping
[
1951 "vd" + chr(base_disk_index
)
1954 # non-root persistent volume
1957 if "vim_volume_id" in disk
.keys()
1960 if disk
.get(key_id
):
1961 # use existing persistent volume
1962 block_device_mapping
["vd" + chr(base_disk_index
)] = disk
[
1965 existing_vim_volumes
.append({"id": disk
[key_id
]})
1967 # create persistent volume
1968 volume
= self
.cinder
.volumes
.create(
1970 name
=name
+ "vd" + chr(base_disk_index
),
1971 # Make sure volume is in the same AZ as the VM to be attached to
1972 availability_zone
=vm_av_zone
,
1974 created_items
["volume:" + str(volume
.id)] = True
1975 block_device_mapping
[
1976 "vd" + chr(base_disk_index
)
1979 base_disk_index
+= 1
1981 # Wait until created volumes are with status available
1983 while elapsed_time
< volume_timeout
:
1984 for created_item
in created_items
:
1985 v
, _
, volume_id
= created_item
.partition(":")
1987 if self
.cinder
.volumes
.get(volume_id
).status
!= "available":
1989 else: # all ready: break from while
1995 # Wait until existing volumes in vim are with status available
1996 while elapsed_time
< volume_timeout
:
1997 for volume
in existing_vim_volumes
:
1998 if self
.cinder
.volumes
.get(volume
["id"]).status
!= "available":
2000 else: # all ready: break from while
2006 # If we exceeded the timeout rollback
2007 if elapsed_time
>= volume_timeout
:
2008 raise vimconn
.VimConnException(
2009 "Timeout creating volumes for instance " + name
,
2010 http_code
=vimconn
.HTTP_Request_Timeout
,
2013 self
.cinder
.volumes
.set_bootable(boot_volume_id
, True)
2015 # Manage affinity groups/server groups
2016 server_group_id
= None
2017 scheduller_hints
= {}
2019 if affinity_group_list
:
2020 # Only first id on the list will be used. Openstack restriction
2021 server_group_id
= affinity_group_list
[0]["affinity_group_id"]
2022 scheduller_hints
["group"] = server_group_id
2025 "nova.servers.create({}, {}, {}, nics={}, security_groups={}, "
2026 "availability_zone={}, key_name={}, userdata={}, config_drive={}, "
2027 "block_device_mapping={}, server_group={})".format(
2032 self
.config
.get("security_groups"),
2034 self
.config
.get("keypair"),
2037 block_device_mapping
,
2041 server
= self
.nova
.servers
.create(
2046 security_groups
=self
.config
.get("security_groups"),
2047 # TODO remove security_groups in future versions. Already at neutron port
2048 availability_zone
=vm_av_zone
,
2049 key_name
=self
.config
.get("keypair"),
2051 config_drive
=config_drive
,
2052 block_device_mapping
=block_device_mapping
,
2053 scheduler_hints
=scheduller_hints
,
2054 ) # , description=description)
2056 vm_start_time
= time
.time()
2057 # Previously mentioned workaround to wait until the VM is active and then disable the port-security
2058 if no_secured_ports
:
2059 self
.__wait
_for
_vm
(server
.id, "ACTIVE")
2061 for port
in no_secured_ports
:
2063 "port": {"port_security_enabled": False, "security_groups": None}
2066 if port
[1] == "allow-address-pairs":
2068 "port": {"allowed_address_pairs": [{"ip_address": "0.0.0.0/0"}]}
2072 self
.neutron
.update_port(port
[0], port_update
)
2074 raise vimconn
.VimConnException(
2075 "It was not possible to disable port security for port {}".format(
2080 # print "DONE :-)", server
2083 for floating_network
in external_network
:
2086 floating_ip_retries
= 3
2087 # In case of RO in HA there can be conflicts, two RO trying to assign same floating IP, so retry
2090 floating_ips
= self
.neutron
.list_floatingips().get(
2093 random
.shuffle(floating_ips
) # randomize
2094 for fip
in floating_ips
:
2097 or fip
.get("tenant_id") != server
.tenant_id
2101 if isinstance(floating_network
["floating_ip"], str):
2103 fip
.get("floating_network_id")
2104 != floating_network
["floating_ip"]
2108 free_floating_ip
= fip
["id"]
2112 isinstance(floating_network
["floating_ip"], str)
2113 and floating_network
["floating_ip"].lower() != "true"
2115 pool_id
= floating_network
["floating_ip"]
2117 # Find the external network
2118 external_nets
= list()
2120 for net
in self
.neutron
.list_networks()["networks"]:
2121 if net
["router:external"]:
2122 external_nets
.append(net
)
2124 if len(external_nets
) == 0:
2125 raise vimconn
.VimConnException(
2126 "Cannot create floating_ip automatically since "
2127 "no external network is present",
2128 http_code
=vimconn
.HTTP_Conflict
,
2131 if len(external_nets
) > 1:
2132 raise vimconn
.VimConnException(
2133 "Cannot create floating_ip automatically since "
2134 "multiple external networks are present",
2135 http_code
=vimconn
.HTTP_Conflict
,
2138 pool_id
= external_nets
[0].get("id")
2142 "floating_network_id": pool_id
,
2143 "tenant_id": server
.tenant_id
,
2148 # self.logger.debug("Creating floating IP")
2149 new_floating_ip
= self
.neutron
.create_floatingip(param
)
2150 free_floating_ip
= new_floating_ip
["floatingip"]["id"]
2152 "floating_ip:" + str(free_floating_ip
)
2154 except Exception as e
:
2155 raise vimconn
.VimConnException(
2157 + ": Cannot create new floating_ip "
2159 http_code
=vimconn
.HTTP_Conflict
,
2163 # for race condition ensure not already assigned
2164 fip
= self
.neutron
.show_floatingip(free_floating_ip
)
2166 if fip
["floatingip"]["port_id"]:
2169 # the vim_id key contains the neutron.port_id
2170 self
.neutron
.update_floatingip(
2172 {"floatingip": {"port_id": floating_network
["vim_id"]}},
2174 # for race condition ensure not re-assigned to other VM after 5 seconds
2176 fip
= self
.neutron
.show_floatingip(free_floating_ip
)
2179 fip
["floatingip"]["port_id"]
2180 != floating_network
["vim_id"]
2183 "floating_ip {} re-assigned to other port".format(
2190 "Assigned floating_ip {} to VM {}".format(
2191 free_floating_ip
, server
.id
2195 except Exception as e
:
2196 # openstack need some time after VM creation to assign an IP. So retry if fails
2197 vm_status
= self
.nova
.servers
.get(server
.id).status
2199 if vm_status
not in ("ACTIVE", "ERROR"):
2200 if time
.time() - vm_start_time
< server_timeout
:
2203 elif floating_ip_retries
> 0:
2204 floating_ip_retries
-= 1
2207 raise vimconn
.VimConnException(
2208 "Cannot create floating_ip: {} {}".format(
2211 http_code
=vimconn
.HTTP_Conflict
,
2214 except Exception as e
:
2215 if not floating_network
["exit_on_floating_ip_error"]:
2216 self
.logger
.error("Cannot create floating_ip. %s", str(e
))
2221 return server
.id, created_items
2222 # except nvExceptions.NotFound as e:
2223 # error_value=-vimconn.HTTP_Not_Found
2224 # error_text= "vm instance %s not found" % vm_id
2225 # except TypeError as e:
2226 # raise vimconn.VimConnException(type(e).__name__ + ": "+ str(e), http_code=vimconn.HTTP_Bad_Request)
2228 except Exception as e
:
2231 server_id
= server
.id
2234 self
.delete_vminstance(server_id
, created_items
)
2235 except Exception as e2
:
2236 self
.logger
.error("new_vminstance rollback fail {}".format(e2
))
2238 self
._format
_exception
(e
)
def get_vminstance(self, vm_id):
    """Returns the VM instance information from VIM."""
    # self.logger.debug("Getting VM from VIM")
    try:
        self._reload_connection()
        server = self.nova.servers.find(id=vm_id)

        # TODO parse input and translate to VIM format (openmano_schemas.new_vminstance_response_schema)
        return server.to_dict()
    except (
        ksExceptions.ClientException,
        nvExceptions.ClientException,
        nvExceptions.NotFound,
        ConnectionError,
    ) as e:
        self._format_exception(e)
def get_vminstance_console(self, vm_id, console_type="vnc"):
    """
    Get a console for the virtual machine
    Params:
        vm_id: uuid of the VM
        console_type, can be:
            "novnc" (by default), "xvpvnc" for VNC types,
            "rdp-html5" for RDP types, "spice-html5" for SPICE types
    Returns dict with the console parameters:
            protocol: ssh, ftp, http, https, ...
            server: usually ip address
            port: the http, ssh, ... port
            suffix: extra text, e.g. the http path and query string
    """
    self.logger.debug("Getting VM CONSOLE from VIM")

    try:
        self._reload_connection()
        server = self.nova.servers.find(id=vm_id)

        # pick the nova console call matching the requested type
        if console_type is None or console_type == "novnc":
            console_dict = server.get_vnc_console("novnc")
        elif console_type == "xvpvnc":
            console_dict = server.get_vnc_console(console_type)
        elif console_type == "rdp-html5":
            console_dict = server.get_rdp_console(console_type)
        elif console_type == "spice-html5":
            console_dict = server.get_spice_console(console_type)
        else:
            raise vimconn.VimConnException(
                "console type '{}' not allowed".format(console_type),
                http_code=vimconn.HTTP_Bad_Request,
            )

        console_dict1 = console_dict.get("console")

        if console_dict1:
            console_url = console_dict1.get("url")

            if console_url:
                # parse "protocol://server:port/suffix" out of the URL by hand
                protocol_index = console_url.find("//")
                suffix_index = (
                    console_url[protocol_index + 2 :].find("/") + protocol_index + 2
                )
                port_index = (
                    console_url[protocol_index + 2 : suffix_index].find(":")
                    + protocol_index
                    + 2
                )

                if protocol_index < 0 or port_index < 0 or suffix_index < 0:
                    return (
                        -vimconn.HTTP_Internal_Server_Error,
                        "Unexpected response from VIM",
                    )

                console_dict = {
                    "protocol": console_url[0:protocol_index],
                    "server": console_url[protocol_index + 2 : port_index],
                    "port": console_url[port_index:suffix_index],
                    "suffix": console_url[suffix_index + 1 :],
                }

                return console_dict
        raise vimconn.VimConnUnexpectedResponse("Unexpected response from VIM")
    except (
        nvExceptions.NotFound,
        ksExceptions.ClientException,
        nvExceptions.ClientException,
        nvExceptions.BadRequest,
        ConnectionError,
    ) as e:
        self._format_exception(e)
def delete_vminstance(self, vm_id, created_items=None, volumes_to_hold=None):
    """Removes a VM instance from VIM. Returns the old identifier

    :param vm_id: VIM id of the server to delete
    :param created_items: dict of extra VIM items created at instantiation
        time, keyed "port:<id>", "volume:<id>", "floating_ip:<id>"; entries
        already deleted carry a false value
    :param volumes_to_hold: optional list of volume ids that must NOT be
        deleted together with the VM
    """
    # print "osconnector: Getting VM from VIM"
    if created_items is None:
        created_items = {}

    # FIX: "k_id not in volumes_to_hold" below raised TypeError when the
    # caller left volumes_to_hold at its default None
    if volumes_to_hold is None:
        volumes_to_hold = []

    try:
        self._reload_connection()

        # delete VM ports attached to this networks before the virtual machine
        for k, v in created_items.items():
            if not v:  # skip already deleted
                continue

            try:
                k_item, _, k_id = k.partition(":")

                if k_item == "port":
                    port_dict = self.neutron.list_ports()
                    # (the original filtered on the always-truthy port_dict;
                    # condition dropped as vacuous)
                    existing_ports = [port["id"] for port in port_dict["ports"]]

                    if k_id in existing_ports:
                        self.neutron.delete_port(k_id)
            except Exception as e:
                # best effort: a port that fails to delete must not block VM deletion
                self.logger.error(
                    "Error deleting port: {}: {}".format(type(e).__name__, e)
                )

        # #commented because detaching the volumes makes the servers.delete not work properly ?!?
        # #dettach volumes attached
        # server = self.nova.servers.get(vm_id)
        # volumes_attached_dict = server._info["os-extended-volumes:volumes_attached"]  #volume["id"]
        # #for volume in volumes_attached_dict:
        # #    self.cinder.volumes.detach(volume["id"])

        self.nova.servers.delete(vm_id)

        # delete volumes. Although having detached, they should have in active status before deleting
        # we ensure in this loop
        keep_waiting = True
        elapsed_time = 0

        while keep_waiting and elapsed_time < volume_timeout:
            keep_waiting = False

            for k, v in created_items.items():
                if not v:  # skip already deleted
                    continue

                try:
                    k_item, _, k_id = k.partition(":")

                    if k_item == "volume":
                        # wait until cinder reports the volume detached
                        if self.cinder.volumes.get(k_id).status != "available":
                            keep_waiting = True
                        else:
                            if k_id not in volumes_to_hold:
                                self.cinder.volumes.delete(k_id)
                                created_items[k] = None
                    elif k_item == "floating_ip":  # floating ip
                        self.neutron.delete_floatingip(k_id)
                        created_items[k] = None
                except Exception as e:
                    self.logger.error("Error deleting {}: {}".format(k, e))

            if keep_waiting:
                time.sleep(1)
                elapsed_time += 1

        return None
    except (
        nvExceptions.NotFound,
        ksExceptions.ClientException,
        nvExceptions.ClientException,
        ConnectionError,
    ) as e:
        self._format_exception(e)
def refresh_vms_status(self, vm_list):
    """Get the status of the virtual machines and their interfaces/ports
    Params: the list of VM identifiers
    Returns a dictionary with:
        vm_id:          #VIM id of this Virtual Machine
            status:     #Mandatory. Text with one of:
                        #  DELETED (not found at vim)
                        #  VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
                        #  OTHER (Vim reported other status not understood)
                        #  ERROR (VIM indicates an ERROR status)
                        #  ACTIVE, PAUSED, SUSPENDED, INACTIVE (not running),
                        #  CREATING (on building process), ERROR
                        #  ACTIVE:NoMgmtIP (Active but any of its interface has an IP address
            error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR
            vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)
            interfaces:
             -  vim_info:         #Text with plain information obtained from vim (yaml.safe_dump)
                mac_address:      #Text format XX:XX:XX:XX:XX:XX
                vim_net_id:       #network id where this interface is connected
                vim_interface_id: #interface/port VIM id
                ip_address:       #null, or text with IPv4, IPv6 address
                compute_node:     #identification of compute node where PF,VF interface is allocated
                pci:              #PCI address of the NIC that hosts the PF,VF
                vlan:             #physical VLAN used for VF
    """
    vm_dict = {}
    self.logger.debug(
        "refresh_vms status: Getting tenant VM instance information from VIM"
    )

    for vm_id in vm_list:
        vm = {}

        try:
            vm_vim = self.get_vminstance(vm_id)

            if vm_vim["status"] in vmStatus2manoFormat:
                vm["status"] = vmStatus2manoFormat[vm_vim["status"]]
            else:
                vm["status"] = "OTHER"
                vm["error_msg"] = "VIM status reported " + vm_vim["status"]

            # user_data may be large and sensitive; strip it before serializing
            vm_vim.pop("OS-EXT-SRV-ATTR:user_data", None)
            vm_vim.pop("user_data", None)
            vm["vim_info"] = self.serialize(vm_vim)

            vm["interfaces"] = []
            if vm_vim.get("fault"):
                vm["error_msg"] = str(vm_vim["fault"])

            # get interfaces
            try:
                self._reload_connection()
                port_dict = self.neutron.list_ports(device_id=vm_id)

                for port in port_dict["ports"]:
                    interface = {}
                    interface["vim_info"] = self.serialize(port)
                    interface["mac_address"] = port.get("mac_address")
                    interface["vim_net_id"] = port["network_id"]
                    interface["vim_interface_id"] = port["id"]
                    # check if OS-EXT-SRV-ATTR:host is there,
                    # in case of non-admin credentials, it will be missing
                    if vm_vim.get("OS-EXT-SRV-ATTR:host"):
                        interface["compute_node"] = vm_vim["OS-EXT-SRV-ATTR:host"]

                    interface["pci"] = None

                    # check if binding:profile is there,
                    # in case of non-admin credentials, it will be missing
                    if port.get("binding:profile"):
                        if port["binding:profile"].get("pci_slot"):
                            # TODO: At the moment sr-iov pci addresses are converted to PF pci addresses by setting
                            # the slot to 0x00
                            # TODO: This is just a workaround valid for niantinc. Find a better way to do so
                            #   CHANGE DDDD:BB:SS.F to DDDD:BB:00.(F%2) assuming there are 2 ports per nic
                            pci = port["binding:profile"]["pci_slot"]
                            # interface["pci"] = pci[:-4] + "00." + str(int(pci[-1]) % 2)
                            interface["pci"] = pci

                    interface["vlan"] = None

                    if port.get("binding:vif_details"):
                        interface["vlan"] = port["binding:vif_details"].get("vlan")

                    # Get vlan from network in case not present in port for those old openstacks and cases where
                    # it is needed vlan at PT
                    if not interface["vlan"]:
                        # if network is of type vlan and port is of type direct (sr-iov) then set vlan id
                        network = self.neutron.show_network(port["network_id"])

                        if (
                            network["network"].get("provider:network_type")
                            == "vlan"
                        ):
                            # and port.get("binding:vnic_type") in ("direct", "direct-physical"):
                            interface["vlan"] = network["network"].get(
                                "provider:segmentation_id"
                            )

                    ips = []
                    # look for floating ip address
                    try:
                        floating_ip_dict = self.neutron.list_floatingips(
                            port_id=port["id"]
                        )

                        if floating_ip_dict.get("floatingips"):
                            ips.append(
                                floating_ip_dict["floatingips"][0].get(
                                    "floating_ip_address"
                                )
                            )
                    except Exception:
                        # floating ip is optional; ignore lookup failures
                        pass

                    for subnet in port["fixed_ips"]:
                        ips.append(subnet["ip_address"])

                    interface["ip_address"] = ";".join(ips)
                    vm["interfaces"].append(interface)
            except Exception as e:
                self.logger.error(
                    "Error getting vm interface information {}: {}".format(
                        type(e).__name__, e
                    )
                )
        except vimconn.VimConnNotFoundException as e:
            self.logger.error("Exception getting vm status: %s", str(e))
            vm["status"] = "DELETED"
            vm["error_msg"] = str(e)
        except vimconn.VimConnException as e:
            self.logger.error("Exception getting vm status: %s", str(e))
            vm["status"] = "VIM_ERROR"
            vm["error_msg"] = str(e)

        vm_dict[vm_id] = vm

    return vm_dict
def action_vminstance(self, vm_id, action_dict, created_items=None):
    """Send and action over a VM instance from VIM
    Returns None or the console dict if the action was successfully sent to the VIM

    :param vm_id: VIM id of the server
    :param action_dict: single-key dict selecting the action (start, pause,
        resume, shutoff/shutdown, forceOff, terminate, createImage, rebuild,
        reboot, console)
    :param created_items: kept for interface compatibility, not used here.
        FIX: previously a mutable default argument ({}), shared across calls.
    """
    if created_items is None:
        created_items = {}

    self.logger.debug("Action over VM '%s': %s", vm_id, str(action_dict))

    try:
        self._reload_connection()
        server = self.nova.servers.find(id=vm_id)

        if "start" in action_dict:
            if action_dict["start"] == "rebuild":
                server.rebuild(server.image["id"])
            else:
                if server.status == "PAUSED":
                    server.unpause()
                elif server.status == "SUSPENDED":
                    server.resume()
                elif server.status == "SHUTOFF":
                    server.start()
                else:
                    self.logger.debug(
                        "ERROR : Instance is not in SHUTOFF/PAUSE/SUSPEND state"
                    )
                    raise vimconn.VimConnException(
                        "Cannot 'start' instance while it is in active state",
                        http_code=vimconn.HTTP_Bad_Request,
                    )
        elif "pause" in action_dict:
            server.pause()
        elif "resume" in action_dict:
            server.resume()
        elif "shutoff" in action_dict or "shutdown" in action_dict:
            self.logger.debug("server status %s", server.status)
            if server.status == "ACTIVE":
                server.stop()
            else:
                self.logger.debug("ERROR: VM is not in Active state")
                raise vimconn.VimConnException(
                    "VM is not in active state, stop operation is not allowed",
                    http_code=vimconn.HTTP_Bad_Request,
                )
        elif "forceOff" in action_dict:
            server.stop()  # TODO
        elif "terminate" in action_dict:
            server.delete()
        elif "createImage" in action_dict:
            server.create_image()
            # "path":path_schema,
            # "description":description_schema,
            # "name":name_schema,
            # "metadata":metadata_schema,
            # "imageRef": id_schema,
            # "disk": {"oneOf":[{"type": "null"}, {"type":"string"}] },
        elif "rebuild" in action_dict:
            server.rebuild(server.image["id"])
        elif "reboot" in action_dict:
            server.reboot()  # reboot_type="SOFT"
        elif "console" in action_dict:
            console_type = action_dict["console"]

            if console_type is None or console_type == "novnc":
                console_dict = server.get_vnc_console("novnc")
            elif console_type == "xvpvnc":
                console_dict = server.get_vnc_console(console_type)
            elif console_type == "rdp-html5":
                console_dict = server.get_rdp_console(console_type)
            elif console_type == "spice-html5":
                console_dict = server.get_spice_console(console_type)
            else:
                raise vimconn.VimConnException(
                    "console type '{}' not allowed".format(console_type),
                    http_code=vimconn.HTTP_Bad_Request,
                )

            try:
                # parse the console URL into its components
                console_url = console_dict["console"]["url"]
                protocol_index = console_url.find("//")
                suffix_index = (
                    console_url[protocol_index + 2 :].find("/")
                    + protocol_index
                    + 2
                )
                port_index = (
                    console_url[protocol_index + 2 : suffix_index].find(":")
                    + protocol_index
                    + 2
                )

                if protocol_index < 0 or port_index < 0 or suffix_index < 0:
                    raise vimconn.VimConnException(
                        "Unexpected response from VIM " + str(console_dict)
                    )

                console_dict2 = {
                    "protocol": console_url[0:protocol_index],
                    "server": console_url[protocol_index + 2 : port_index],
                    "port": int(console_url[port_index + 1 : suffix_index]),
                    "suffix": console_url[suffix_index + 1 :],
                }

                return console_dict2
            except Exception:
                raise vimconn.VimConnException(
                    "Unexpected response from VIM " + str(console_dict)
                )

        return None
    except (
        ksExceptions.ClientException,
        nvExceptions.ClientException,
        nvExceptions.NotFound,
        ConnectionError,
    ) as e:
        self._format_exception(e)
    # TODO insert exception vimconn.HTTP_Unauthorized
2670 # ###### VIO Specific Changes #########
def _generate_vlanID(self):
    """
    Method to get unused vlanID
        Args:
            None
        Returns:
            vlanID
    """
    # collect every segmentation id already consumed by existing networks
    usedVlanIDs = []
    networks = self.get_network_list()

    for net in networks:
        if net.get("provider:segmentation_id"):
            usedVlanIDs.append(net.get("provider:segmentation_id"))

    used_vlanIDs = set(usedVlanIDs)

    # find unused VLAN ID
    for vlanID_range in self.config.get("dataplane_net_vlan_range"):
        try:
            start_vlanid, end_vlanid = map(
                int, vlanID_range.replace(" ", "").split("-")
            )

            for vlanID in range(start_vlanid, end_vlanid + 1):
                if vlanID not in used_vlanIDs:
                    return vlanID
        except Exception as exp:
            raise vimconn.VimConnException(
                "Exception {} occurred while generating VLAN ID.".format(exp)
            )

    # every id in every configured range is already taken
    raise vimconn.VimConnConflictException(
        "Unable to create the SRIOV VLAN network. All given Vlan IDs {} are in use.".format(
            self.config.get("dataplane_net_vlan_range")
        )
    )
def _generate_multisegment_vlanID(self):
    """
    Method to get unused vlanID
        Args:
            None
        Returns:
            vlanID
    """
    # collect segmentation ids from both single-segment and multi-segment networks
    usedVlanIDs = []
    networks = self.get_network_list()

    for net in networks:
        if net.get("provider:network_type") == "vlan" and net.get(
            "provider:segmentation_id"
        ):
            usedVlanIDs.append(net.get("provider:segmentation_id"))
        elif net.get("segments"):
            for segment in net.get("segments"):
                if segment.get("provider:network_type") == "vlan" and segment.get(
                    "provider:segmentation_id"
                ):
                    usedVlanIDs.append(segment.get("provider:segmentation_id"))

    used_vlanIDs = set(usedVlanIDs)

    # find unused VLAN ID
    for vlanID_range in self.config.get("multisegment_vlan_range"):
        try:
            start_vlanid, end_vlanid = map(
                int, vlanID_range.replace(" ", "").split("-")
            )

            for vlanID in range(start_vlanid, end_vlanid + 1):
                if vlanID not in used_vlanIDs:
                    return vlanID
        except Exception as exp:
            raise vimconn.VimConnException(
                "Exception {} occurred while generating VLAN ID.".format(exp)
            )

    raise vimconn.VimConnConflictException(
        "Unable to create the VLAN segment. All VLAN IDs {} are in use.".format(
            self.config.get("multisegment_vlan_range")
        )
    )
def _validate_vlan_ranges(self, input_vlan_range, text_vlan_range):
    """
    Method to validate user given vlanID ranges

    :param input_vlan_range: iterable of "start-end" strings to validate
    :param text_vlan_range: config key name, used only in error messages
    :raises vimconn.VimConnConflictException: on any malformed or
        out-of-bounds range
    Returns None when every range is valid.
    """
    for vlanID_range in input_vlan_range:
        vlan_range = vlanID_range.replace(" ", "")
        # validate format
        # FIX: the previous pattern r"(\d)*-(\d)*$" allowed an empty side
        # ("-5", "5-"), so int("") below raised an uncaught ValueError
        # instead of the intended conflict exception
        vlanID_pattern = r"\d+-\d+$"
        match_obj = re.match(vlanID_pattern, vlan_range)

        if not match_obj:
            raise vimconn.VimConnConflictException(
                "Invalid VLAN range for {}: {}.You must provide "
                "'{}' in format [start_ID - end_ID].".format(
                    text_vlan_range, vlanID_range, text_vlan_range
                )
            )

        start_vlanid, end_vlanid = map(int, vlan_range.split("-"))

        if start_vlanid <= 0:
            raise vimconn.VimConnConflictException(
                "Invalid VLAN range for {}: {}. Start ID can not be zero. For VLAN "
                "networks valid IDs are 1 to 4094 ".format(
                    text_vlan_range, vlanID_range
                )
            )

        if end_vlanid > 4094:
            raise vimconn.VimConnConflictException(
                "Invalid VLAN range for {}: {}. End VLAN ID can not be "
                "greater than 4094. For VLAN networks valid IDs are 1 to 4094 ".format(
                    text_vlan_range, vlanID_range
                )
            )

        if start_vlanid > end_vlanid:
            raise vimconn.VimConnConflictException(
                "Invalid VLAN range for {}: {}. You must provide '{}'"
                " in format start_ID - end_ID and start_ID < end_ID ".format(
                    text_vlan_range, vlanID_range, text_vlan_range
                )
            )
2800 # NOT USED FUNCTIONS
def new_external_port(self, port_data):
    """Adds a external port to VIM
    Returns the port identifier"""
    # TODO openstack if needed
    # not implemented for this connector; report an internal-error tuple
    return (
        -vimconn.HTTP_Internal_Server_Error,
        "osconnector.new_external_port() not implemented",
    )
def connect_port_network(self, port_id, network_id, admin=False):
    """Connects a external port to a network
    Returns status code of the VIM response"""
    # TODO openstack if needed
    # not implemented for this connector; report an internal-error tuple
    return (
        -vimconn.HTTP_Internal_Server_Error,
        "osconnector.connect_port_network() not implemented",
    )
def new_user(self, user_name, user_passwd, tenant_id=None):
    """Adds a new user to openstack VIM
    Returns the user identifier"""
    self.logger.debug("osconnector: Adding a new user to VIM")

    try:
        self._reload_connection()
        user = self.keystone.users.create(
            user_name, password=user_passwd, default_project=tenant_id
        )
        # self.keystone.tenants.add_user(self.k_creds["username"], #role)
        return user.id
    except ksExceptions.ConnectionError as e:
        error_value = -vimconn.HTTP_Bad_Request
        error_text = (
            type(e).__name__
            + ": "
            + (str(e) if len(e.args) == 0 else str(e.args[0]))
        )
    except ksExceptions.ClientException as e:  # TODO remove
        error_value = -vimconn.HTTP_Bad_Request
        error_text = (
            type(e).__name__
            + ": "
            + (str(e) if len(e.args) == 0 else str(e.args[0]))
        )

    # TODO insert exception vimconn.HTTP_Unauthorized
    # if reaching here is because an exception
    self.logger.debug("new_user " + error_text)

    return error_value, error_text
def delete_user(self, user_id):
    """Delete a user from openstack VIM
    Returns the user identifier"""
    if self.debug:
        print("osconnector: Deleting a user from VIM")

    try:
        self._reload_connection()
        self.keystone.users.delete(user_id)

        return 1, user_id
    except ksExceptions.ConnectionError as e:
        error_value = -vimconn.HTTP_Bad_Request
        error_text = (
            type(e).__name__
            + ": "
            + (str(e) if len(e.args) == 0 else str(e.args[0]))
        )
    except ksExceptions.NotFound as e:
        error_value = -vimconn.HTTP_Not_Found
        error_text = (
            type(e).__name__
            + ": "
            + (str(e) if len(e.args) == 0 else str(e.args[0]))
        )
    except ksExceptions.ClientException as e:  # TODO remove
        error_value = -vimconn.HTTP_Bad_Request
        error_text = (
            type(e).__name__
            + ": "
            + (str(e) if len(e.args) == 0 else str(e.args[0]))
        )

    # TODO insert exception vimconn.HTTP_Unauthorized
    # if reaching here is because an exception
    self.logger.debug("delete_tenant " + error_text)

    return error_value, error_text
def get_hosts_info(self):
    """Get the information of deployed hosts
    Returns the hosts content"""
    if self.debug:
        print("osconnector: Getting Host info from VIM")

    try:
        h_list = []
        self._reload_connection()
        hypervisors = self.nova.hypervisors.list()

        for hype in hypervisors:
            h_list.append(hype.to_dict())

        return 1, {"hosts": h_list}
    except nvExceptions.NotFound as e:
        error_value = -vimconn.HTTP_Not_Found
        error_text = str(e) if len(e.args) == 0 else str(e.args[0])
    except (ksExceptions.ClientException, nvExceptions.ClientException) as e:
        error_value = -vimconn.HTTP_Bad_Request
        error_text = (
            type(e).__name__
            + ": "
            + (str(e) if len(e.args) == 0 else str(e.args[0]))
        )

    # TODO insert exception vimconn.HTTP_Unauthorized
    # if reaching here is because an exception
    self.logger.debug("get_hosts_info " + error_text)

    return error_value, error_text
def get_hosts(self, vim_tenant):
    """Get the hosts and deployed instances
    Returns the hosts content"""
    r, hype_dict = self.get_hosts_info()

    if r < 0:
        # propagate the error tuple from get_hosts_info
        return r, hype_dict

    hypervisors = hype_dict["hosts"]

    try:
        servers = self.nova.servers.list()

        # attach to each hypervisor the list of server ids it hosts
        for hype in hypervisors:
            for server in servers:
                if (
                    server.to_dict()["OS-EXT-SRV-ATTR:hypervisor_hostname"]
                    == hype["hypervisor_hostname"]
                ):
                    if "vm" in hype:
                        hype["vm"].append(server.id)
                    else:
                        hype["vm"] = [server.id]

        return 1, hype_dict
    except nvExceptions.NotFound as e:
        error_value = -vimconn.HTTP_Not_Found
        error_text = str(e) if len(e.args) == 0 else str(e.args[0])
    except (ksExceptions.ClientException, nvExceptions.ClientException) as e:
        error_value = -vimconn.HTTP_Bad_Request
        error_text = (
            type(e).__name__
            + ": "
            + (str(e) if len(e.args) == 0 else str(e.args[0]))
        )

    # TODO insert exception vimconn.HTTP_Unauthorized
    # if reaching here is because an exception
    self.logger.debug("get_hosts " + error_text)

    return error_value, error_text
def new_classification(self, name, ctype, definition):
    """Create a Flow Classifier in Neutron from an OSM Classification.

    Returns the VIM id of the created flow classifier.
    """
    self.logger.debug(
        "Adding a new (Traffic) Classification to VIM, named %s", name
    )

    try:
        new_class = None
        self._reload_connection()

        if ctype not in supportedClassificationTypes:
            raise vimconn.VimConnNotSupportedException(
                "OpenStack VIM connector does not support provided "
                "Classification Type {}, supported ones are: {}".format(
                    ctype, supportedClassificationTypes
                )
            )

        if not self._validate_classification(ctype, definition):
            raise vimconn.VimConnException(
                "Incorrect Classification definition for the type specified."
            )

        classification_dict = definition
        classification_dict["name"] = name
        new_class = self.neutron.create_sfc_flow_classifier(
            {"flow_classifier": classification_dict}
        )

        return new_class["flow_classifier"]["id"]
    except (
        neExceptions.ConnectionFailed,
        ksExceptions.ClientException,
        neExceptions.NeutronException,
        ConnectionError,
    ) as e:
        self.logger.error("Creation of Classification failed.")
        self._format_exception(e)
def get_classification(self, class_id):
    """Return the single Classification matching class_id, or raise."""
    self.logger.debug(" Getting Classification %s from VIM", class_id)
    filter_dict = {"id": class_id}
    class_list = self.get_classification_list(filter_dict)

    if len(class_list) == 0:
        raise vimconn.VimConnNotFoundException(
            "Classification '{}' not found".format(class_id)
        )
    elif len(class_list) > 1:
        raise vimconn.VimConnConflictException(
            "Found more than one Classification with this criteria"
        )

    classification = class_list[0]

    return classification
def get_classification_list(self, filter_dict=None):
    """List Flow Classifiers matching filter_dict (mapped to OSM format).

    :param filter_dict: optional VIM filter; "tenant_id" is translated to
        "project_id" for keystone v3.
        FIX: previously a mutable default argument ({}).
    """
    if filter_dict is None:
        filter_dict = {}

    self.logger.debug(
        "Getting Classifications from VIM filter: '%s'", str(filter_dict)
    )

    try:
        filter_dict_os = filter_dict.copy()
        self._reload_connection()

        if self.api_version3 and "tenant_id" in filter_dict_os:
            filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")

        classification_dict = self.neutron.list_sfc_flow_classifiers(
            **filter_dict_os
        )
        classification_list = classification_dict["flow_classifiers"]
        # translate neutron fields to the OSM naming in place
        self.__classification_os2mano(classification_list)

        return classification_list
    except (
        neExceptions.ConnectionFailed,
        ksExceptions.ClientException,
        neExceptions.NeutronException,
        ConnectionError,
    ) as e:
        self._format_exception(e)
def delete_classification(self, class_id):
    """Delete a Flow Classifier from Neutron; returns its id.

    FIX: the except tuple listed neExceptions.NeutronException twice;
    duplicate removed (behavior identical).
    """
    self.logger.debug("Deleting Classification '%s' from VIM", class_id)

    try:
        self._reload_connection()
        self.neutron.delete_sfc_flow_classifier(class_id)

        return class_id
    except (
        neExceptions.ConnectionFailed,
        neExceptions.NeutronException,
        ksExceptions.ClientException,
        ConnectionError,
    ) as e:
        self._format_exception(e)
def new_sfi(self, name, ingress_ports, egress_ports, sfc_encap=True):
    """Create a Port Pair in Neutron from an OSM Service Function Instance.

    Returns the VIM id of the created port pair.
    """
    self.logger.debug(
        "Adding a new Service Function Instance to VIM, named '%s'", name
    )

    try:
        new_sfi = None
        self._reload_connection()
        correlation = None

        if sfc_encap:
            correlation = "nsh"

        # networking-sfc port pairs hold exactly one ingress and one egress port
        if len(ingress_ports) != 1:
            raise vimconn.VimConnNotSupportedException(
                "OpenStack VIM connector can only have 1 ingress port per SFI"
            )

        if len(egress_ports) != 1:
            raise vimconn.VimConnNotSupportedException(
                "OpenStack VIM connector can only have 1 egress port per SFI"
            )

        sfi_dict = {
            "name": name,
            "ingress": ingress_ports[0],
            "egress": egress_ports[0],
            "service_function_parameters": {"correlation": correlation},
        }
        new_sfi = self.neutron.create_sfc_port_pair({"port_pair": sfi_dict})

        return new_sfi["port_pair"]["id"]
    except (
        neExceptions.ConnectionFailed,
        ksExceptions.ClientException,
        neExceptions.NeutronException,
        ConnectionError,
    ) as e:
        # roll back the port pair if it was created before the failure
        if new_sfi:
            try:
                self.neutron.delete_sfc_port_pair(new_sfi["port_pair"]["id"])
            except Exception:
                self.logger.error(
                    "Creation of Service Function Instance failed, with "
                    "subsequent deletion failure as well."
                )

        self._format_exception(e)
def get_sfi(self, sfi_id):
    """Return the single Service Function Instance matching sfi_id, or raise."""
    self.logger.debug("Getting Service Function Instance %s from VIM", sfi_id)
    filter_dict = {"id": sfi_id}
    sfi_list = self.get_sfi_list(filter_dict)

    if len(sfi_list) == 0:
        raise vimconn.VimConnNotFoundException(
            "Service Function Instance '{}' not found".format(sfi_id)
        )
    elif len(sfi_list) > 1:
        raise vimconn.VimConnConflictException(
            "Found more than one Service Function Instance with this criteria"
        )

    sfi = sfi_list[0]

    return sfi
def get_sfi_list(self, filter_dict=None):
    """List Port Pairs matching filter_dict (mapped to OSM format).

    :param filter_dict: optional VIM filter; "tenant_id" is translated to
        "project_id" for keystone v3.
        FIX: previously a mutable default argument ({}).
    """
    if filter_dict is None:
        filter_dict = {}

    self.logger.debug(
        "Getting Service Function Instances from VIM filter: '%s'", str(filter_dict)
    )

    try:
        self._reload_connection()
        filter_dict_os = filter_dict.copy()

        if self.api_version3 and "tenant_id" in filter_dict_os:
            filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")

        sfi_dict = self.neutron.list_sfc_port_pairs(**filter_dict_os)
        sfi_list = sfi_dict["port_pairs"]
        # translate neutron fields to the OSM naming in place
        self.__sfi_os2mano(sfi_list)

        return sfi_list
    except (
        neExceptions.ConnectionFailed,
        ksExceptions.ClientException,
        neExceptions.NeutronException,
        ConnectionError,
    ) as e:
        self._format_exception(e)
def delete_sfi(self, sfi_id):
    """Delete a Port Pair from Neutron; returns its id.

    FIX: the except tuple listed neExceptions.NeutronException twice;
    duplicate removed (behavior identical).
    """
    self.logger.debug("Deleting Service Function Instance '%s' from VIM", sfi_id)

    try:
        self._reload_connection()
        self.neutron.delete_sfc_port_pair(sfi_id)

        return sfi_id
    except (
        neExceptions.ConnectionFailed,
        neExceptions.NeutronException,
        ksExceptions.ClientException,
        ConnectionError,
    ) as e:
        self._format_exception(e)
def new_sf(self, name, sfis, sfc_encap=True):
    """Create a Port Pair Group in Neutron from an OSM Service Function.

    Returns the VIM id of the created port pair group.
    """
    self.logger.debug("Adding a new Service Function to VIM, named '%s'", name)

    try:
        new_sf = None
        self._reload_connection()
        # correlation = None
        # if sfc_encap:
        #     correlation = "nsh"

        # every member SFI must use the same SFC encapsulation as requested
        for instance in sfis:
            sfi = self.get_sfi(instance)

            if sfi.get("sfc_encap") != sfc_encap:
                raise vimconn.VimConnNotSupportedException(
                    "OpenStack VIM connector requires all SFIs of the "
                    "same SF to share the same SFC Encapsulation"
                )

        sf_dict = {"name": name, "port_pairs": sfis}
        new_sf = self.neutron.create_sfc_port_pair_group(
            {"port_pair_group": sf_dict}
        )

        return new_sf["port_pair_group"]["id"]
    except (
        neExceptions.ConnectionFailed,
        ksExceptions.ClientException,
        neExceptions.NeutronException,
        ConnectionError,
    ) as e:
        # roll back the port pair group if it was created before the failure
        if new_sf:
            try:
                self.neutron.delete_sfc_port_pair_group(
                    new_sf["port_pair_group"]["id"]
                )
            except Exception:
                self.logger.error(
                    "Creation of Service Function failed, with "
                    "subsequent deletion failure as well."
                )

        self._format_exception(e)
def get_sf(self, sf_id):
    """Return the single Service Function matching sf_id, or raise."""
    self.logger.debug("Getting Service Function %s from VIM", sf_id)
    filter_dict = {"id": sf_id}
    sf_list = self.get_sf_list(filter_dict)

    if len(sf_list) == 0:
        raise vimconn.VimConnNotFoundException(
            "Service Function '{}' not found".format(sf_id)
        )
    elif len(sf_list) > 1:
        raise vimconn.VimConnConflictException(
            "Found more than one Service Function with this criteria"
        )

    sf = sf_list[0]

    return sf
def get_sf_list(self, filter_dict=None):
    """List Port Pair Groups matching filter_dict (mapped to OSM format).

    :param filter_dict: optional VIM filter; "tenant_id" is translated to
        "project_id" for keystone v3.
        FIX: previously a mutable default argument ({}).
    """
    if filter_dict is None:
        filter_dict = {}

    self.logger.debug(
        "Getting Service Function from VIM filter: '%s'", str(filter_dict)
    )

    try:
        self._reload_connection()
        filter_dict_os = filter_dict.copy()

        if self.api_version3 and "tenant_id" in filter_dict_os:
            filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")

        sf_dict = self.neutron.list_sfc_port_pair_groups(**filter_dict_os)
        sf_list = sf_dict["port_pair_groups"]
        # translate neutron fields to the OSM naming in place
        self.__sf_os2mano(sf_list)

        return sf_list
    except (
        neExceptions.ConnectionFailed,
        ksExceptions.ClientException,
        neExceptions.NeutronException,
        ConnectionError,
    ) as e:
        self._format_exception(e)
def delete_sf(self, sf_id):
    """Delete a Port Pair Group from Neutron; returns its id.

    FIX: the except tuple listed neExceptions.NeutronException twice;
    duplicate removed (behavior identical).
    """
    self.logger.debug("Deleting Service Function '%s' from VIM", sf_id)

    try:
        self._reload_connection()
        self.neutron.delete_sfc_port_pair_group(sf_id)

        return sf_id
    except (
        neExceptions.ConnectionFailed,
        neExceptions.NeutronException,
        ksExceptions.ClientException,
        ConnectionError,
    ) as e:
        self._format_exception(e)
def new_sfp(self, name, classifications, sfs, sfc_encap=True, spi=None):
    """Create a Port Chain in Neutron from an OSM Service Function Path.

    Returns the VIM id of the created port chain.
    """
    self.logger.debug("Adding a new Service Function Path to VIM, named '%s'", name)

    try:
        new_sfp = None
        self._reload_connection()
        # In networking-sfc the MPLS encapsulation is legacy
        # should be used when no full SFC Encapsulation is intended
        correlation = "mpls"

        if sfc_encap:
            correlation = "nsh"

        sfp_dict = {
            "name": name,
            "flow_classifiers": classifications,
            "port_pair_groups": sfs,
            "chain_parameters": {"correlation": correlation},
        }

        if spi:
            sfp_dict["chain_id"] = spi

        new_sfp = self.neutron.create_sfc_port_chain({"port_chain": sfp_dict})

        return new_sfp["port_chain"]["id"]
    except (
        neExceptions.ConnectionFailed,
        ksExceptions.ClientException,
        neExceptions.NeutronException,
        ConnectionError,
    ) as e:
        # roll back the port chain if it was created before the failure
        if new_sfp:
            try:
                self.neutron.delete_sfc_port_chain(new_sfp["port_chain"]["id"])
            except Exception:
                self.logger.error(
                    "Creation of Service Function Path failed, with "
                    "subsequent deletion failure as well."
                )

        self._format_exception(e)
def get_sfp(self, sfp_id):
    """Return the single Service Function Path matching sfp_id, or raise."""
    self.logger.debug(" Getting Service Function Path %s from VIM", sfp_id)

    filter_dict = {"id": sfp_id}
    sfp_list = self.get_sfp_list(filter_dict)

    if len(sfp_list) == 0:
        raise vimconn.VimConnNotFoundException(
            "Service Function Path '{}' not found".format(sfp_id)
        )
    elif len(sfp_list) > 1:
        raise vimconn.VimConnConflictException(
            "Found more than one Service Function Path with this criteria"
        )

    sfp = sfp_list[0]

    return sfp
def get_sfp_list(self, filter_dict=None):
    """List Port Chains matching filter_dict (mapped to OSM format).

    :param filter_dict: optional VIM filter; "tenant_id" is translated to
        "project_id" for keystone v3.
        FIX: previously a mutable default argument ({}).
    """
    if filter_dict is None:
        filter_dict = {}

    self.logger.debug(
        "Getting Service Function Paths from VIM filter: '%s'", str(filter_dict)
    )

    try:
        self._reload_connection()
        filter_dict_os = filter_dict.copy()

        if self.api_version3 and "tenant_id" in filter_dict_os:
            filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")

        sfp_dict = self.neutron.list_sfc_port_chains(**filter_dict_os)
        sfp_list = sfp_dict["port_chains"]
        # translate neutron fields to the OSM naming in place
        self.__sfp_os2mano(sfp_list)

        return sfp_list
    except (
        neExceptions.ConnectionFailed,
        ksExceptions.ClientException,
        neExceptions.NeutronException,
        ConnectionError,
    ) as e:
        self._format_exception(e)
def delete_sfp(self, sfp_id):
    """Delete a Port Chain from Neutron; returns its id.

    FIX: the except tuple listed neExceptions.NeutronException twice;
    duplicate removed (behavior identical).
    """
    self.logger.debug("Deleting Service Function Path '%s' from VIM", sfp_id)

    try:
        self._reload_connection()
        self.neutron.delete_sfc_port_chain(sfp_id)

        return sfp_id
    except (
        neExceptions.ConnectionFailed,
        neExceptions.NeutronException,
        ksExceptions.ClientException,
        ConnectionError,
    ) as e:
        self._format_exception(e)
def refresh_sfps_status(self, sfp_list):
    """Get the status of the service function path
    Params: the list of sfp identifiers
    Returns a dictionary with:
        vm_id:          #VIM id of this service function path
            status:     #Mandatory. Text with one of:
                        #  DELETED (not found at vim)
                        #  VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
                        #  OTHER (Vim reported other status not understood)
                        #  ERROR (VIM indicates an ERROR status)
                        #  ACTIVE,
                        #  CREATING (on building process)
            error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR
            vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)F
    """
    sfp_dict = {}
    self.logger.debug(
        "refresh_sfps status: Getting tenant SFP information from VIM"
    )

    for sfp_id in sfp_list:
        sfp = {}

        try:
            sfp_vim = self.get_sfp(sfp_id)

            # NOTE(review): the condition deciding ACTIVE vs OTHER was lost in
            # this copy; reconstructed as a check on the chain's spi — confirm
            if sfp_vim["spi"]:
                sfp["status"] = vmStatus2manoFormat["ACTIVE"]
            else:
                sfp["status"] = "OTHER"
                sfp["error_msg"] = "VIM status reported " + sfp["status"]

            sfp["vim_info"] = self.serialize(sfp_vim)

            if sfp_vim.get("fault"):
                sfp["error_msg"] = str(sfp_vim["fault"])
        except vimconn.VimConnNotFoundException as e:
            self.logger.error("Exception getting sfp status: %s", str(e))
            sfp["status"] = "DELETED"
            sfp["error_msg"] = str(e)
        except vimconn.VimConnException as e:
            self.logger.error("Exception getting sfp status: %s", str(e))
            sfp["status"] = "VIM_ERROR"
            sfp["error_msg"] = str(e)

        sfp_dict[sfp_id] = sfp

    return sfp_dict
def refresh_sfis_status(self, sfi_list):
    """Get the status of the service function instances
    Params: the list of sfi identifiers
    Returns a dictionary with:
        vm_id:          #VIM id of this service function instance
            status:     #Mandatory. Text with one of:
                        #  DELETED (not found at vim)
                        #  VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
                        #  OTHER (Vim reported other status not understood)
                        #  ERROR (VIM indicates an ERROR status)
                        #  ACTIVE,
                        #  CREATING (on building process)
            error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR
            vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)
    """
    sfi_dict = {}
    self.logger.debug(
        "refresh_sfis status: Getting tenant sfi information from VIM"
    )

    for sfi_id in sfi_list:
        sfi = {}

        try:
            sfi_vim = self.get_sfi(sfi_id)

            # NOTE(review): the condition deciding ACTIVE vs OTHER was lost in
            # this copy; reconstructed as a presence check on the record — confirm
            if sfi_vim:
                sfi["status"] = vmStatus2manoFormat["ACTIVE"]
            else:
                sfi["status"] = "OTHER"
                sfi["error_msg"] = "VIM status reported " + sfi["status"]

            sfi["vim_info"] = self.serialize(sfi_vim)

            if sfi_vim.get("fault"):
                sfi["error_msg"] = str(sfi_vim["fault"])
        except vimconn.VimConnNotFoundException as e:
            self.logger.error("Exception getting sfi status: %s", str(e))
            sfi["status"] = "DELETED"
            sfi["error_msg"] = str(e)
        except vimconn.VimConnException as e:
            self.logger.error("Exception getting sfi status: %s", str(e))
            sfi["status"] = "VIM_ERROR"
            sfi["error_msg"] = str(e)

        sfi_dict[sfi_id] = sfi

    return sfi_dict
3481 def refresh_sfs_status(self
, sf_list
):
3482 """Get the status of the service functions
3483 Params: the list of sf identifiers
3484 Returns a dictionary with:
3485 vm_id: #VIM id of this service function
3486 status: #Mandatory. Text with one of:
3487 # DELETED (not found at vim)
3488 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
3489 # OTHER (Vim reported other status not understood)
3490 # ERROR (VIM indicates an ERROR status)
3492 # CREATING (on building process)
3493 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
3494 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
3497 self
.logger
.debug("refresh_sfs status: Getting tenant sf information from VIM")
3499 for sf_id
in sf_list
:
3503 sf_vim
= self
.get_sf(sf_id
)
3506 sf
["status"] = vmStatus2manoFormat
["ACTIVE"]
3508 sf
["status"] = "OTHER"
3509 sf
["error_msg"] = "VIM status reported " + sf_vim
["status"]
3511 sf
["vim_info"] = self
.serialize(sf_vim
)
3513 if sf_vim
.get("fault"):
3514 sf
["error_msg"] = str(sf_vim
["fault"])
3515 except vimconn
.VimConnNotFoundException
as e
:
3516 self
.logger
.error("Exception getting sf status: %s", str(e
))
3517 sf
["status"] = "DELETED"
3518 sf
["error_msg"] = str(e
)
3519 except vimconn
.VimConnException
as e
:
3520 self
.logger
.error("Exception getting sf status: %s", str(e
))
3521 sf
["status"] = "VIM_ERROR"
3522 sf
["error_msg"] = str(e
)
    def refresh_classifications_status(self, classification_list):
        """Get the status of the classifications
        Params: the list of classification identifiers
        Returns a dictionary with:
            vm_id:          #VIM id of this classifier
            status:         #Mandatory. Text with one of:
                            # DELETED (not found at vim)
                            # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
                            # OTHER (Vim reported other status not understood)
                            # ERROR (VIM indicates an ERROR status)
                            # CREATING (on building process)
            error_msg:      #Text with VIM error message, if any. Or the VIM connection ERROR
            vim_info:       #Text with plain information obtained from vim (yaml.safe_dump)
        """
        classification_dict = {}
        self.logger.debug(
            "refresh_classifications status: Getting tenant classification information from VIM"
        )

        for classification_id in classification_list:
            classification = {}

            try:
                # Classification maps to a networking-sfc flow classifier on the Neutron side
                classification_vim = self.get_classification(classification_id)

                if classification_vim:
                    classification["status"] = vmStatus2manoFormat["ACTIVE"]
                else:
                    classification["status"] = "OTHER"
                    classification["error_msg"] = (
                        "VIM status reported " + classification["status"]
                    )

                # Keep the raw VIM view for troubleshooting
                classification["vim_info"] = self.serialize(classification_vim)

                if classification_vim.get("fault"):
                    classification["error_msg"] = str(classification_vim["fault"])
            except vimconn.VimConnNotFoundException as e:
                # Not found at VIM: mark as DELETED rather than failing the whole refresh
                self.logger.error("Exception getting classification status: %s", str(e))
                classification["status"] = "DELETED"
                classification["error_msg"] = str(e)
            except vimconn.VimConnException as e:
                # Any other VIM problem: record it and continue with the next id
                self.logger.error("Exception getting classification status: %s", str(e))
                classification["status"] = "VIM_ERROR"
                classification["error_msg"] = str(e)

            classification_dict[classification_id] = classification

        return classification_dict
    def new_affinity_group(self, affinity_group_data):
        """Adds a server group to VIM
            affinity_group_data contains a dictionary with information, keys:
                name: name in VIM for the server group
                type: affinity or anti-affinity
                scope: Only nfvi-node allowed
        Returns the server group identifier"""
        self.logger.debug("Adding Server Group '%s'", str(affinity_group_data))

        try:
            # "type" is passed straight through as the Nova server-group policy
            # (affinity / anti-affinity)
            name = affinity_group_data["name"]
            policy = affinity_group_data["type"]

            self._reload_connection()
            new_server_group = self.nova.server_groups.create(name, policy)

            return new_server_group.id
        except (
            ksExceptions.ClientException,
            nvExceptions.ClientException,
            ConnectionError,
            KeyError,
        ) as e:
            # Delegate mapping of client/connection errors to vimconn exceptions
            self._format_exception(e)
3604 def get_affinity_group(self
, affinity_group_id
):
3605 """Obtain server group details from the VIM. Returns the server group detais as a dict"""
3606 self
.logger
.debug("Getting flavor '%s'", affinity_group_id
)
3608 self
._reload
_connection
()
3609 server_group
= self
.nova
.server_groups
.find(id=affinity_group_id
)
3611 return server_group
.to_dict()
3613 nvExceptions
.NotFound
,
3614 nvExceptions
.ClientException
,
3615 ksExceptions
.ClientException
,
3618 self
._format
_exception
(e
)
3620 def delete_affinity_group(self
, affinity_group_id
):
3621 """Deletes a server group from the VIM. Returns the old affinity_group_id"""
3622 self
.logger
.debug("Getting server group '%s'", affinity_group_id
)
3624 self
._reload
_connection
()
3625 self
.nova
.server_groups
.delete(affinity_group_id
)
3627 return affinity_group_id
3629 nvExceptions
.NotFound
,
3630 ksExceptions
.ClientException
,
3631 nvExceptions
.ClientException
,
3634 self
._format
_exception
(e
)
    def get_vdu_state(self, vm_id):
        """
        Getting the state of a vdu

        param:
            vm_id: ID of an instance
        Returns a list: [status, flavor id, hypervisor hostname, availability zone]
        """
        self.logger.debug("Getting the status of VM")
        self.logger.debug("VIM VM ID %s", vm_id)
        self._reload_connection()
        server = self.nova.servers.find(id=vm_id)
        server_dict = server.to_dict()
        # Positional list consumed by migrate_instance/resize_instance:
        # indexes 0..3 = status, flavor id, compute host, availability zone
        vdu_data = [
            server_dict["status"],
            server_dict["flavor"]["id"],
            server_dict["OS-EXT-SRV-ATTR:host"],
            server_dict["OS-EXT-AZ:availability_zone"],
        ]
        self.logger.debug("vdu_data %s", vdu_data)

        return vdu_data
3656 def check_compute_availability(self
, host
, server_flavor_details
):
3657 self
._reload
_connection
()
3658 hypervisor_search
= self
.nova
.hypervisors
.search(
3659 hypervisor_match
=host
, servers
=True
3661 for hypervisor
in hypervisor_search
:
3662 hypervisor_id
= hypervisor
.to_dict()["id"]
3663 hypervisor_details
= self
.nova
.hypervisors
.get(hypervisor
=hypervisor_id
)
3664 hypervisor_dict
= hypervisor_details
.to_dict()
3665 hypervisor_temp
= json
.dumps(hypervisor_dict
)
3666 hypervisor_json
= json
.loads(hypervisor_temp
)
3667 resources_available
= [
3668 hypervisor_json
["free_ram_mb"],
3669 hypervisor_json
["disk_available_least"],
3670 hypervisor_json
["vcpus"] - hypervisor_json
["vcpus_used"],
3672 compute_available
= all(
3673 x
> y
for x
, y
in zip(resources_available
, server_flavor_details
)
3675 if compute_available
:
    def check_availability_zone(
        self, old_az, server_flavor_details, old_host, host=None
    ):
        """Check that a migration target exists inside the original availability zone.

        param:
            old_az: availability zone the instance currently runs in
            server_flavor_details: list [ram, disk, vcpus] required by the flavor
            old_host: hypervisor currently hosting the instance
            host: optional explicit target host; when None, any other host in
                  the zone is considered
        Returns a dict: {"zone_check": bool, "compute_availability": host or None}
        """
        self._reload_connection()
        az_check = {"zone_check": False, "compute_availability": None}
        aggregates_list = self.nova.aggregates.list()

        for aggregate in aggregates_list:
            aggregate_details = aggregate.to_dict()
            # NOTE(review): json round-trip of an already-plain dict; appears
            # redundant — candidate for cleanup once confirmed harmless
            aggregate_temp = json.dumps(aggregate_details)
            aggregate_json = json.loads(aggregate_temp)

            if aggregate_json["availability_zone"] == old_az:
                hosts_list = aggregate_json["hosts"]

                if host is not None:
                    # Explicit target requested: it must belong to the same zone
                    if host in hosts_list:
                        az_check["zone_check"] = True
                        available_compute_id = self.check_compute_availability(
                            host, server_flavor_details
                        )

                        if available_compute_id is not None:
                            az_check["compute_availability"] = available_compute_id
                else:
                    # No target given: pick the first other host in the zone
                    # with enough free resources
                    for check_host in hosts_list:
                        if check_host != old_host:
                            available_compute_id = self.check_compute_availability(
                                check_host, server_flavor_details
                            )

                            if available_compute_id is not None:
                                az_check["zone_check"] = True
                                az_check["compute_availability"] = available_compute_id
                                break
                    else:
                        # Zone matched even though no candidate host had capacity
                        az_check["zone_check"] = True

        return az_check
    def migrate_instance(self, vm_id, compute_host=None):
        """
        Migrate a vdu
        param:
            vm_id: ID of an instance
            compute_host: Host to migrate the vdu to
        Returns (state, target_host) on success; raises VimConnException otherwise.
        """
        self._reload_connection()
        # get_vdu_state returns [status, flavor id, compute host, availability zone]
        instance_state = self.get_vdu_state(vm_id)
        server_flavor_id = instance_state[1]
        server_hypervisor_name = instance_state[2]
        server_availability_zone = instance_state[3]

        try:
            server_flavor = self.nova.flavors.find(id=server_flavor_id).to_dict()
            # Order matters: check_compute_availability compares [ram, disk, vcpus]
            server_flavor_details = [
                server_flavor["ram"],
                server_flavor["disk"],
                server_flavor["vcpus"],
            ]

            if compute_host == server_hypervisor_name:
                raise vimconn.VimConnException(
                    "Unable to migrate instance '{}' to the same host '{}'".format(
                        vm_id, compute_host
                    ),
                    http_code=vimconn.HTTP_Bad_Request,
                )

            # Live migration is only allowed within the instance's current AZ
            az_status = self.check_availability_zone(
                server_availability_zone,
                server_flavor_details,
                server_hypervisor_name,
                compute_host,
            )
            availability_zone_check = az_status["zone_check"]
            available_compute_id = az_status.get("compute_availability")

            if availability_zone_check is False:
                raise vimconn.VimConnException(
                    "Unable to migrate instance '{}' to a different availability zone".format(
                        vm_id
                    ),
                    http_code=vimconn.HTTP_Bad_Request,
                )

            if available_compute_id is not None:
                self.nova.servers.live_migrate(
                    server=vm_id,
                    host=available_compute_id,
                    block_migration=True,
                    disk_over_commit=False,
                )

                state = "MIGRATING"
                changed_compute_host = ""

                # NOTE(review): `state` was just set to "MIGRATING", so this
                # condition is always true — looks like a leftover guard
                if state == "MIGRATING":
                    # Block until the instance settles back to ACTIVE
                    vm_state = self.__wait_for_vm(vm_id, "ACTIVE")
                    changed_compute_host = self.get_vdu_state(vm_id)[2]

                if vm_state and changed_compute_host == available_compute_id:
                    self.logger.debug(
                        "Instance '{}' migrated to the new compute host '{}'".format(
                            vm_id, changed_compute_host
                        )
                    )

                    return state, available_compute_id
                else:
                    raise vimconn.VimConnException(
                        "Migration Failed. Instance '{}' not moved to the new host {}".format(
                            vm_id, available_compute_id
                        ),
                        http_code=vimconn.HTTP_Bad_Request,
                    )
            else:
                raise vimconn.VimConnException(
                    "Compute '{}' not available or does not have enough resources to migrate the instance".format(
                        available_compute_id
                    ),
                    http_code=vimconn.HTTP_Bad_Request,
                )
        except (
            nvExceptions.BadRequest,
            nvExceptions.ClientException,
            nvExceptions.NotFound,
        ) as e:
            # Delegate mapping of nova errors to vimconn exceptions
            self._format_exception(e)
    def resize_instance(self, vm_id, new_flavor_id):
        """
        For resizing the vm based on the given
        flavor details
        param:
            vm_id : ID of an instance
            new_flavor_id : Flavor id to be resized
        Return the status of a resized instance
        """
        self._reload_connection()
        self.logger.debug("resize the flavor of an instance")
        instance_status, old_flavor_id, compute_host, az = self.get_vdu_state(vm_id)
        old_flavor_disk = self.nova.flavors.find(id=old_flavor_id).to_dict()["disk"]
        new_flavor_disk = self.nova.flavors.find(id=new_flavor_id).to_dict()["disk"]

        try:
            # Nova only accepts resize from ACTIVE or SHUTOFF
            if instance_status == "ACTIVE" or instance_status == "SHUTOFF":
                if old_flavor_disk > new_flavor_disk:
                    # Shrinking the root disk is rejected up front
                    raise nvExceptions.BadRequest(
                        400,
                        message="Server disk resize failed. Resize to lower disk flavor is not allowed",
                    )
                else:
                    self.nova.servers.resize(server=vm_id, flavor=new_flavor_id)
                    # Resize leaves the server in VERIFY_RESIZE until confirmed
                    vm_state = self.__wait_for_vm(vm_id, "VERIFY_RESIZE")

                    if vm_state:
                        instance_resized_status = self.confirm_resize(vm_id)

                        return instance_resized_status
                    else:
                        raise nvExceptions.BadRequest(
                            409,
                            message="Cannot 'resize' vm_state is in ERROR",
                        )
            else:
                self.logger.debug("ERROR : Instance is not in ACTIVE or SHUTOFF state")
                raise nvExceptions.BadRequest(
                    409,
                    message="Cannot 'resize' instance while it is in vm_state resized",
                )
        except (
            nvExceptions.BadRequest,
            nvExceptions.ClientException,
            nvExceptions.NotFound,
        ) as e:
            # Delegate mapping of nova errors to vimconn exceptions
            self._format_exception(e)
    def confirm_resize(self, vm_id):
        """
        Confirm the resize of an instance
        param:
            vm_id: ID of an instance
        Returns the instance status after confirmation.
        """
        self._reload_connection()
        self.nova.servers.confirm_resize(server=vm_id)

        # Confirmation is asynchronous: wait for the server to leave
        # VERIFY_RESIZE before reporting its final status
        if self.get_vdu_state(vm_id)[0] == "VERIFY_RESIZE":
            self.__wait_for_vm(vm_id, "ACTIVE")

        instance_status = self.get_vdu_state(vm_id)[0]

        return instance_status