1 # -*- coding: utf-8 -*-
4 # Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
5 # This file is part of openmano
8 # Licensed under the Apache License, Version 2.0 (the "License"); you may
9 # not use this file except in compliance with the License. You may obtain
10 # a copy of the License at
12 # http://www.apache.org/licenses/LICENSE-2.0
14 # Unless required by applicable law or agreed to in writing, software
15 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
16 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
17 # License for the specific language governing permissions and limitations
22 osconnector implements all the methods to interact with openstack using the python-neutronclient.
24 For the VNF forwarding graph, The OpenStack VIM connector calls the
25 networking-sfc Neutron extension methods, whose resources are mapped
26 to the VIM connector's SFC resources as follows:
27 - Classification (OSM) -> Flow Classifier (Neutron)
28 - Service Function Instance (OSM) -> Port Pair (Neutron)
29 - Service Function (OSM) -> Port Pair Group (Neutron)
30 - Service Function Path (OSM) -> Port Chain (Neutron)
import copy
from http.client import HTTPException
import logging
from pprint import pformat
import random
from typing import Dict, List, Optional, Tuple

from cinderclient import client as cClient
import cinderclient.exceptions as cExceptions
from glanceclient import client as glClient
import glanceclient.exc as gl1Exceptions
from keystoneauth1 import session
from keystoneauth1.identity import v2, v3
import keystoneclient.exceptions as ksExceptions
import keystoneclient.v2_0.client as ksClient_v2
import keystoneclient.v3.client as ksClient_v3
import netaddr
from neutronclient.common import exceptions as neExceptions
from neutronclient.neutron import client as neClient
from novaclient import client as nClient, exceptions as nvExceptions
from osm_ro_plugin import vimconn
from requests.exceptions import ConnectionError
import yaml
__author__ = "Alfonso Tierno, Gerardo Garcia, Pablo Montes, xFlow Research, Igor D.C., Eduardo Sousa"
__date__ = "$22-sep-2017 23:59:59$"

"""contain the openstack virtual machine status to openmano status"""
# NOTE(review): only the SUSPENDED/SHUTOFF entries are visible in this copy of
# the file; the remaining entries were restored from the upstream OSM RO
# OpenStack connector — verify against the original file.
vmStatus2manoFormat = {
    "ACTIVE": "ACTIVE",
    "PAUSED": "PAUSED",
    "SUSPENDED": "SUSPENDED",
    "SHUTOFF": "INACTIVE",
    "BUILD": "BUILD",
    "ERROR": "ERROR",
    "DELETED": "DELETED",
}

# NOTE(review): only the INACTIVE entry is visible here; the rest restored
# from upstream — verify.
netStatus2manoFormat = {
    "ACTIVE": "ACTIVE",
    "PAUSED": "PAUSED",
    "INACTIVE": "INACTIVE",
    "BUILD": "BUILD",
    "ERROR": "ERROR",
    "DELETED": "DELETED",
    "DOWN": "DOWN",
}

supportedClassificationTypes = ["legacy_flow_classifier"]

# global var to have a timeout creating and deleting volumes
# NOTE(review): values restored from upstream (1800 s) — verify.
volume_timeout = 1800
server_timeout = 1800
def catch_any_exception(func):
    """Decorator that funnels any exception raised by *func* through
    vimconnector._format_exception(), which re-raises it as the matching
    vimconn exception type.

    :param func: the callable to wrap; called with the original
        positional and keyword arguments.
    :return: the wrapping function.
    """

    def format_exception(*args, **kwargs):
        try:
            # Bug fix: the original called func(*args, *kwargs), which
            # unpacks the kwargs dict as extra positional arguments (its
            # keys) and silently drops every keyword value.
            return func(*args, **kwargs)
        except Exception as e:
            # _format_exception always raises a vimconn exception
            vimconnector._format_exception(e)

    return format_exception
class SafeDumper(yaml.SafeDumper):
    """yaml.SafeDumper variant that accepts OpenStack's dict subclasses."""

    def represent_data(self, data):
        # pyyaml's safe dumper deliberately rejects custom subclasses of
        # dict (pyyaml issue 142), but the OpenStack APIs return plenty of
        # them, so fold any dict subclass back into a plain dict first.
        if isinstance(data, dict) and data.__class__ != dict:
            data = dict(data.items())
        return super(SafeDumper, self).represent_data(data)
110 class vimconnector(vimconn
.VimConnector
):
125 """using common constructor parameters. In this case
126 'url' is the keystone authorization url,
127 'url_admin' is not use
129 api_version
= config
.get("APIversion")
131 if api_version
and api_version
not in ("v3.3", "v2.0", "2", "3"):
132 raise vimconn
.VimConnException(
133 "Invalid value '{}' for config:APIversion. "
134 "Allowed values are 'v3.3', 'v2.0', '2' or '3'".format(api_version
)
137 vim_type
= config
.get("vim_type")
139 if vim_type
and vim_type
not in ("vio", "VIO"):
140 raise vimconn
.VimConnException(
141 "Invalid value '{}' for config:vim_type."
142 "Allowed values are 'vio' or 'VIO'".format(vim_type
)
145 if config
.get("dataplane_net_vlan_range") is not None:
146 # validate vlan ranges provided by user
147 self
._validate
_vlan
_ranges
(
148 config
.get("dataplane_net_vlan_range"), "dataplane_net_vlan_range"
151 if config
.get("multisegment_vlan_range") is not None:
152 # validate vlan ranges provided by user
153 self
._validate
_vlan
_ranges
(
154 config
.get("multisegment_vlan_range"), "multisegment_vlan_range"
157 vimconn
.VimConnector
.__init
__(
171 if self
.config
.get("insecure") and self
.config
.get("ca_cert"):
172 raise vimconn
.VimConnException(
173 "options insecure and ca_cert are mutually exclusive"
178 if self
.config
.get("insecure"):
181 if self
.config
.get("ca_cert"):
182 self
.verify
= self
.config
.get("ca_cert")
185 raise TypeError("url param can not be NoneType")
187 self
.persistent_info
= persistent_info
188 self
.availability_zone
= persistent_info
.get("availability_zone", None)
189 self
.session
= persistent_info
.get("session", {"reload_client": True})
190 self
.my_tenant_id
= self
.session
.get("my_tenant_id")
191 self
.nova
= self
.session
.get("nova")
192 self
.neutron
= self
.session
.get("neutron")
193 self
.cinder
= self
.session
.get("cinder")
194 self
.glance
= self
.session
.get("glance")
195 # self.glancev1 = self.session.get("glancev1")
196 self
.keystone
= self
.session
.get("keystone")
197 self
.api_version3
= self
.session
.get("api_version3")
198 self
.vim_type
= self
.config
.get("vim_type")
201 self
.vim_type
= self
.vim_type
.upper()
203 if self
.config
.get("use_internal_endpoint"):
204 self
.endpoint_type
= "internalURL"
206 self
.endpoint_type
= None
208 logging
.getLogger("urllib3").setLevel(logging
.WARNING
)
209 logging
.getLogger("keystoneauth").setLevel(logging
.WARNING
)
210 logging
.getLogger("novaclient").setLevel(logging
.WARNING
)
211 self
.logger
= logging
.getLogger("ro.vim.openstack")
213 # allow security_groups to be a list or a single string
214 if isinstance(self
.config
.get("security_groups"), str):
215 self
.config
["security_groups"] = [self
.config
["security_groups"]]
217 self
.security_groups_id
= None
219 # ###### VIO Specific Changes #########
220 if self
.vim_type
== "VIO":
221 self
.logger
= logging
.getLogger("ro.vim.vio")
224 self
.logger
.setLevel(getattr(logging
, log_level
))
226 def __getitem__(self
, index
):
227 """Get individuals parameters.
229 if index
== "project_domain_id":
230 return self
.config
.get("project_domain_id")
231 elif index
== "user_domain_id":
232 return self
.config
.get("user_domain_id")
234 return vimconn
.VimConnector
.__getitem
__(self
, index
)
236 def __setitem__(self
, index
, value
):
237 """Set individuals parameters and it is marked as dirty so to force connection reload.
239 if index
== "project_domain_id":
240 self
.config
["project_domain_id"] = value
241 elif index
== "user_domain_id":
242 self
.config
["user_domain_id"] = value
244 vimconn
.VimConnector
.__setitem
__(self
, index
, value
)
246 self
.session
["reload_client"] = True
248 def serialize(self
, value
):
249 """Serialization of python basic types.
251 In the case value is not serializable a message will be logged and a
252 simple representation of the data that cannot be converted back to
255 if isinstance(value
, str):
260 value
, Dumper
=SafeDumper
, default_flow_style
=True, width
=256
262 except yaml
.representer
.RepresenterError
:
264 "The following entity cannot be serialized in YAML:\n\n%s\n\n",
271 def _reload_connection(self
):
272 """Called before any operation, it check if credentials has changed
273 Throw keystoneclient.apiclient.exceptions.AuthorizationFailure
275 # TODO control the timing and possible token timeout, but it seams that python client does this task for us :-)
276 if self
.session
["reload_client"]:
277 if self
.config
.get("APIversion"):
278 self
.api_version3
= (
279 self
.config
["APIversion"] == "v3.3"
280 or self
.config
["APIversion"] == "3"
282 else: # get from ending auth_url that end with v3 or with v2.0
283 self
.api_version3
= self
.url
.endswith("/v3") or self
.url
.endswith(
287 self
.session
["api_version3"] = self
.api_version3
289 if self
.api_version3
:
290 if self
.config
.get("project_domain_id") or self
.config
.get(
291 "project_domain_name"
293 project_domain_id_default
= None
295 project_domain_id_default
= "default"
297 if self
.config
.get("user_domain_id") or self
.config
.get(
300 user_domain_id_default
= None
302 user_domain_id_default
= "default"
306 password
=self
.passwd
,
307 project_name
=self
.tenant_name
,
308 project_id
=self
.tenant_id
,
309 project_domain_id
=self
.config
.get(
310 "project_domain_id", project_domain_id_default
312 user_domain_id
=self
.config
.get(
313 "user_domain_id", user_domain_id_default
315 project_domain_name
=self
.config
.get("project_domain_name"),
316 user_domain_name
=self
.config
.get("user_domain_name"),
322 password
=self
.passwd
,
323 tenant_name
=self
.tenant_name
,
324 tenant_id
=self
.tenant_id
,
327 sess
= session
.Session(auth
=auth
, verify
=self
.verify
)
328 # addedd region_name to keystone, nova, neutron and cinder to support distributed cloud for Wind River
329 # Titanium cloud and StarlingX
330 region_name
= self
.config
.get("region_name")
332 if self
.api_version3
:
333 self
.keystone
= ksClient_v3
.Client(
335 endpoint_type
=self
.endpoint_type
,
336 region_name
=region_name
,
339 self
.keystone
= ksClient_v2
.Client(
340 session
=sess
, endpoint_type
=self
.endpoint_type
343 self
.session
["keystone"] = self
.keystone
344 # In order to enable microversion functionality an explicit microversion must be specified in "config".
345 # This implementation approach is due to the warning message in
346 # https://developer.openstack.org/api-guide/compute/microversions.html
347 # where it is stated that microversion backwards compatibility is not guaranteed and clients should
348 # always require an specific microversion.
349 # To be able to use "device role tagging" functionality define "microversion: 2.32" in datacenter config
350 version
= self
.config
.get("microversion")
355 # addedd region_name to keystone, nova, neutron and cinder to support distributed cloud for Wind River
356 # Titanium cloud and StarlingX
357 self
.nova
= self
.session
["nova"] = nClient
.Client(
360 endpoint_type
=self
.endpoint_type
,
361 region_name
=region_name
,
363 self
.neutron
= self
.session
["neutron"] = neClient
.Client(
366 endpoint_type
=self
.endpoint_type
,
367 region_name
=region_name
,
370 if sess
.get_all_version_data(service_type
="volumev2"):
371 self
.cinder
= self
.session
["cinder"] = cClient
.Client(
374 endpoint_type
=self
.endpoint_type
,
375 region_name
=region_name
,
378 self
.cinder
= self
.session
["cinder"] = cClient
.Client(
381 endpoint_type
=self
.endpoint_type
,
382 region_name
=region_name
,
386 self
.my_tenant_id
= self
.session
["my_tenant_id"] = sess
.get_project_id()
388 self
.logger
.error("Cannot get project_id from session", exc_info
=True)
390 if self
.endpoint_type
== "internalURL":
391 glance_service_id
= self
.keystone
.services
.list(name
="glance")[0].id
392 glance_endpoint
= self
.keystone
.endpoints
.list(
393 glance_service_id
, interface
="internal"
396 glance_endpoint
= None
398 self
.glance
= self
.session
["glance"] = glClient
.Client(
399 2, session
=sess
, endpoint
=glance_endpoint
401 # using version 1 of glance client in new_image()
402 # self.glancev1 = self.session["glancev1"] = glClient.Client("1", session=sess,
403 # endpoint=glance_endpoint)
404 self
.session
["reload_client"] = False
405 self
.persistent_info
["session"] = self
.session
406 # add availablity zone info inside self.persistent_info
407 self
._set
_availablity
_zones
()
408 self
.persistent_info
["availability_zone"] = self
.availability_zone
409 # force to get again security_groups_ids next time they are needed
410 self
.security_groups_id
= None
412 def __net_os2mano(self
, net_list_dict
):
413 """Transform the net openstack format to mano format
414 net_list_dict can be a list of dict or a single dict"""
415 if type(net_list_dict
) is dict:
416 net_list_
= (net_list_dict
,)
417 elif type(net_list_dict
) is list:
418 net_list_
= net_list_dict
420 raise TypeError("param net_list_dict must be a list or a dictionary")
421 for net
in net_list_
:
422 if net
.get("provider:network_type") == "vlan":
425 net
["type"] = "bridge"
427 def __classification_os2mano(self
, class_list_dict
):
428 """Transform the openstack format (Flow Classifier) to mano format
429 (Classification) class_list_dict can be a list of dict or a single dict
431 if isinstance(class_list_dict
, dict):
432 class_list_
= [class_list_dict
]
433 elif isinstance(class_list_dict
, list):
434 class_list_
= class_list_dict
436 raise TypeError("param class_list_dict must be a list or a dictionary")
437 for classification
in class_list_
:
438 id = classification
.pop("id")
439 name
= classification
.pop("name")
440 description
= classification
.pop("description")
441 project_id
= classification
.pop("project_id")
442 tenant_id
= classification
.pop("tenant_id")
443 original_classification
= copy
.deepcopy(classification
)
444 classification
.clear()
445 classification
["ctype"] = "legacy_flow_classifier"
446 classification
["definition"] = original_classification
447 classification
["id"] = id
448 classification
["name"] = name
449 classification
["description"] = description
450 classification
["project_id"] = project_id
451 classification
["tenant_id"] = tenant_id
453 def __sfi_os2mano(self
, sfi_list_dict
):
454 """Transform the openstack format (Port Pair) to mano format (SFI)
455 sfi_list_dict can be a list of dict or a single dict
457 if isinstance(sfi_list_dict
, dict):
458 sfi_list_
= [sfi_list_dict
]
459 elif isinstance(sfi_list_dict
, list):
460 sfi_list_
= sfi_list_dict
462 raise TypeError("param sfi_list_dict must be a list or a dictionary")
464 for sfi
in sfi_list_
:
465 sfi
["ingress_ports"] = []
466 sfi
["egress_ports"] = []
468 if sfi
.get("ingress"):
469 sfi
["ingress_ports"].append(sfi
["ingress"])
471 if sfi
.get("egress"):
472 sfi
["egress_ports"].append(sfi
["egress"])
476 params
= sfi
.get("service_function_parameters")
480 correlation
= params
.get("correlation")
485 sfi
["sfc_encap"] = sfc_encap
486 del sfi
["service_function_parameters"]
488 def __sf_os2mano(self
, sf_list_dict
):
489 """Transform the openstack format (Port Pair Group) to mano format (SF)
490 sf_list_dict can be a list of dict or a single dict
492 if isinstance(sf_list_dict
, dict):
493 sf_list_
= [sf_list_dict
]
494 elif isinstance(sf_list_dict
, list):
495 sf_list_
= sf_list_dict
497 raise TypeError("param sf_list_dict must be a list or a dictionary")
500 del sf
["port_pair_group_parameters"]
501 sf
["sfis"] = sf
["port_pairs"]
504 def __sfp_os2mano(self
, sfp_list_dict
):
505 """Transform the openstack format (Port Chain) to mano format (SFP)
506 sfp_list_dict can be a list of dict or a single dict
508 if isinstance(sfp_list_dict
, dict):
509 sfp_list_
= [sfp_list_dict
]
510 elif isinstance(sfp_list_dict
, list):
511 sfp_list_
= sfp_list_dict
513 raise TypeError("param sfp_list_dict must be a list or a dictionary")
515 for sfp
in sfp_list_
:
516 params
= sfp
.pop("chain_parameters")
520 correlation
= params
.get("correlation")
525 sfp
["sfc_encap"] = sfc_encap
526 sfp
["spi"] = sfp
.pop("chain_id")
527 sfp
["classifications"] = sfp
.pop("flow_classifiers")
528 sfp
["service_functions"] = sfp
.pop("port_pair_groups")
530 # placeholder for now; read TODO note below
531 def _validate_classification(self
, type, definition
):
532 # only legacy_flow_classifier Type is supported at this point
534 # TODO(igordcard): this method should be an abstract method of an
535 # abstract Classification class to be implemented by the specific
536 # Types. Also, abstract vimconnector should call the validation
537 # method before the implemented VIM connectors are called.
540 def _format_exception(exception
):
541 """Transform a keystone, nova, neutron exception into a vimconn exception discovering the cause"""
542 message_error
= str(exception
)
548 neExceptions
.NetworkNotFoundClient
,
549 nvExceptions
.NotFound
,
550 nvExceptions
.ResourceNotFound
,
551 ksExceptions
.NotFound
,
552 gl1Exceptions
.HTTPNotFound
,
553 cExceptions
.NotFound
,
556 raise vimconn
.VimConnNotFoundException(
557 type(exception
).__name
__ + ": " + message_error
563 gl1Exceptions
.HTTPException
,
564 gl1Exceptions
.CommunicationError
,
566 ksExceptions
.ConnectionError
,
567 neExceptions
.ConnectionFailed
,
568 cExceptions
.ConnectionError
,
571 if type(exception
).__name
__ == "SSLError":
572 tip
= " (maybe option 'insecure' must be added to the VIM)"
574 raise vimconn
.VimConnConnectionException(
575 "Invalid URL or credentials{}: {}".format(tip
, message_error
)
581 nvExceptions
.BadRequest
,
582 ksExceptions
.BadRequest
,
583 gl1Exceptions
.BadRequest
,
584 cExceptions
.BadRequest
,
587 if message_error
== "OS-EXT-SRV-ATTR:host":
588 tip
= " (If the user does not have non-admin credentials, this attribute will be missing)"
589 raise vimconn
.VimConnInsufficientCredentials(
590 type(exception
).__name
__ + ": " + message_error
+ tip
592 raise vimconn
.VimConnException(
593 type(exception
).__name
__ + ": " + message_error
599 nvExceptions
.ClientException
,
600 ksExceptions
.ClientException
,
601 neExceptions
.NeutronException
,
602 cExceptions
.ClientException
,
605 raise vimconn
.VimConnUnexpectedResponse(
606 type(exception
).__name
__ + ": " + message_error
608 elif isinstance(exception
, nvExceptions
.Conflict
):
609 raise vimconn
.VimConnConflictException(
610 type(exception
).__name
__ + ": " + message_error
612 elif isinstance(exception
, vimconn
.VimConnException
):
615 logger
= logging
.getLogger("ro.vim.openstack")
616 logger
.error("General Exception " + message_error
, exc_info
=True)
618 raise vimconn
.VimConnException(
619 type(exception
).__name
__ + ": " + message_error
622 def _get_ids_from_name(self
):
624 Obtain ids from name of tenant and security_groups. Store at self .security_groups_id"
627 # get tenant_id if only tenant_name is supplied
628 self
._reload
_connection
()
630 if not self
.my_tenant_id
:
631 raise vimconn
.VimConnConnectionException(
632 "Error getting tenant information from name={} id={}".format(
633 self
.tenant_name
, self
.tenant_id
637 if self
.config
.get("security_groups") and not self
.security_groups_id
:
638 # convert from name to id
639 neutron_sg_list
= self
.neutron
.list_security_groups(
640 tenant_id
=self
.my_tenant_id
643 self
.security_groups_id
= []
644 for sg
in self
.config
.get("security_groups"):
645 for neutron_sg
in neutron_sg_list
:
646 if sg
in (neutron_sg
["id"], neutron_sg
["name"]):
647 self
.security_groups_id
.append(neutron_sg
["id"])
650 self
.security_groups_id
= None
652 raise vimconn
.VimConnConnectionException(
653 "Not found security group {} for this tenant".format(sg
)
656 def _find_nova_server(self
, vm_id
):
658 Returns the VM instance from Openstack and completes it with flavor ID
659 Do not call nova.servers.find directly, as it does not return flavor ID with microversion>=2.47
662 self
._reload
_connection
()
663 server
= self
.nova
.servers
.find(id=vm_id
)
664 # TODO parse input and translate to VIM format (openmano_schemas.new_vminstance_response_schema)
665 server_dict
= server
.to_dict()
667 if server_dict
["flavor"].get("original_name"):
668 server_dict
["flavor"]["id"] = self
.nova
.flavors
.find(
669 name
=server_dict
["flavor"]["original_name"]
671 except nClient
.exceptions
.NotFound
as e
:
672 self
.logger
.warning(str(e
.message
))
675 ksExceptions
.ClientException
,
676 nvExceptions
.ClientException
,
677 nvExceptions
.NotFound
,
680 self
._format
_exception
(e
)
    def check_vim_connectivity(self):
        """Verify the VIM is reachable with the configured credentials.

        Delegates to get_network_list, which raises a vimconn exception
        (via _format_exception) when the VIM cannot be contacted or the
        credentials are rejected; returns None on success.
        """
        # just get network list to check connectivity and credentials
        self.get_network_list(filter_dict={})
686 def get_tenant_list(self
, filter_dict
={}):
687 """Obtain tenants of VIM
688 filter_dict can contain the following keys:
689 name: filter by tenant name
690 id: filter by tenant uuid/id
692 Returns the tenant list of dictionaries: [{'name':'<name>, 'id':'<id>, ...}, ...]
694 self
.logger
.debug("Getting tenants from VIM filter: '%s'", str(filter_dict
))
696 self
._reload
_connection
()
698 if self
.api_version3
:
699 project_class_list
= self
.keystone
.projects
.list(
700 name
=filter_dict
.get("name")
703 project_class_list
= self
.keystone
.tenants
.findall(**filter_dict
)
707 for project
in project_class_list
:
708 if filter_dict
.get("id") and filter_dict
["id"] != project
.id:
711 project_list
.append(project
.to_dict())
715 ksExceptions
.ConnectionError
,
716 ksExceptions
.ClientException
,
719 self
._format
_exception
(e
)
721 def new_tenant(self
, tenant_name
, tenant_description
):
722 """Adds a new tenant to openstack VIM. Returns the tenant identifier"""
723 self
.logger
.debug("Adding a new tenant name: %s", tenant_name
)
725 self
._reload
_connection
()
727 if self
.api_version3
:
728 project
= self
.keystone
.projects
.create(
730 self
.config
.get("project_domain_id", "default"),
731 description
=tenant_description
,
735 project
= self
.keystone
.tenants
.create(tenant_name
, tenant_description
)
739 ksExceptions
.ConnectionError
,
740 ksExceptions
.ClientException
,
741 ksExceptions
.BadRequest
,
744 self
._format
_exception
(e
)
746 def delete_tenant(self
, tenant_id
):
747 """Delete a tenant from openstack VIM. Returns the old tenant identifier"""
748 self
.logger
.debug("Deleting tenant %s from VIM", tenant_id
)
750 self
._reload
_connection
()
752 if self
.api_version3
:
753 self
.keystone
.projects
.delete(tenant_id
)
755 self
.keystone
.tenants
.delete(tenant_id
)
760 ksExceptions
.ConnectionError
,
761 ksExceptions
.ClientException
,
762 ksExceptions
.NotFound
,
765 self
._format
_exception
(e
)
773 provider_network_profile
=None,
775 """Adds a tenant network to VIM
777 'net_name': name of the network
779 'bridge': overlay isolated network
780 'data': underlay E-LAN network for Passthrough and SRIOV interfaces
781 'ptp': underlay E-LINE network for Passthrough and SRIOV interfaces.
782 'ip_profile': is a dict containing the IP parameters of the network
783 'ip_version': can be "IPv4" or "IPv6" (Currently only IPv4 is implemented)
784 'subnet_address': ip_prefix_schema, that is X.X.X.X/Y
785 'gateway_address': (Optional) ip_schema, that is X.X.X.X
786 'dns_address': (Optional) comma separated list of ip_schema, e.g. X.X.X.X[,X,X,X,X]
787 'dhcp_enabled': True or False
788 'dhcp_start_address': ip_schema, first IP to grant
789 'dhcp_count': number of IPs to grant.
790 'shared': if this network can be seen/use by other tenants/organization
791 'provider_network_profile': (optional) contains {segmentation-id: vlan, network-type: vlan|vxlan,
792 physical-network: physnet-label}
793 Returns a tuple with the network identifier and created_items, or raises an exception on error
794 created_items can be None or a dictionary where this method can include key-values that will be passed to
795 the method delete_network. Can be used to store created segments, created l2gw connections, etc.
796 Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
800 "Adding a new network to VIM name '%s', type '%s'", net_name
, net_type
802 # self.logger.debug(">>>>>>>>>>>>>>>>>> IP profile %s", str(ip_profile))
807 if provider_network_profile
:
808 vlan
= provider_network_profile
.get("segmentation-id")
812 self
._reload
_connection
()
813 network_dict
= {"name": net_name
, "admin_state_up": True}
815 if net_type
in ("data", "ptp") or provider_network_profile
:
816 provider_physical_network
= None
818 if provider_network_profile
and provider_network_profile
.get(
821 provider_physical_network
= provider_network_profile
.get(
825 # provider-network must be one of the dataplane_physcial_netowrk if this is a list. If it is string
826 # or not declared, just ignore the checking
829 self
.config
.get("dataplane_physical_net"), (tuple, list)
831 and provider_physical_network
832 not in self
.config
["dataplane_physical_net"]
834 raise vimconn
.VimConnConflictException(
835 "Invalid parameter 'provider-network:physical-network' "
836 "for network creation. '{}' is not one of the declared "
837 "list at VIM_config:dataplane_physical_net".format(
838 provider_physical_network
842 # use the default dataplane_physical_net
843 if not provider_physical_network
:
844 provider_physical_network
= self
.config
.get(
845 "dataplane_physical_net"
848 # if it is non-empty list, use the first value. If it is a string use the value directly
850 isinstance(provider_physical_network
, (tuple, list))
851 and provider_physical_network
853 provider_physical_network
= provider_physical_network
[0]
855 if not provider_physical_network
:
856 raise vimconn
.VimConnConflictException(
857 "missing information needed for underlay networks. Provide "
858 "'dataplane_physical_net' configuration at VIM or use the NS "
859 "instantiation parameter 'provider-network.physical-network'"
863 if not self
.config
.get("multisegment_support"):
865 "provider:physical_network"
866 ] = provider_physical_network
869 provider_network_profile
870 and "network-type" in provider_network_profile
873 "provider:network_type"
874 ] = provider_network_profile
["network-type"]
876 network_dict
["provider:network_type"] = self
.config
.get(
877 "dataplane_network_type", "vlan"
881 network_dict
["provider:segmentation_id"] = vlan
886 "provider:physical_network": "",
887 "provider:network_type": "vxlan",
889 segment_list
.append(segment1_dict
)
891 "provider:physical_network": provider_physical_network
,
892 "provider:network_type": "vlan",
896 segment2_dict
["provider:segmentation_id"] = vlan
897 elif self
.config
.get("multisegment_vlan_range"):
898 vlanID
= self
._generate
_multisegment
_vlanID
()
899 segment2_dict
["provider:segmentation_id"] = vlanID
902 # raise vimconn.VimConnConflictException(
903 # "You must provide "multisegment_vlan_range" at config dict before creating a multisegment
905 segment_list
.append(segment2_dict
)
906 network_dict
["segments"] = segment_list
908 # VIO Specific Changes. It needs a concrete VLAN
909 if self
.vim_type
== "VIO" and vlan
is None:
910 if self
.config
.get("dataplane_net_vlan_range") is None:
911 raise vimconn
.VimConnConflictException(
912 "You must provide 'dataplane_net_vlan_range' in format "
913 "[start_ID - end_ID] at VIM_config for creating underlay "
917 network_dict
["provider:segmentation_id"] = self
._generate
_vlanID
()
919 network_dict
["shared"] = shared
921 if self
.config
.get("disable_network_port_security"):
922 network_dict
["port_security_enabled"] = False
924 if self
.config
.get("neutron_availability_zone_hints"):
925 hints
= self
.config
.get("neutron_availability_zone_hints")
927 if isinstance(hints
, str):
930 network_dict
["availability_zone_hints"] = hints
932 new_net
= self
.neutron
.create_network({"network": network_dict
})
934 # create subnetwork, even if there is no profile
939 if not ip_profile
.get("subnet_address"):
940 # Fake subnet is required
941 subnet_rand
= random
.SystemRandom().randint(0, 255)
942 ip_profile
["subnet_address"] = "192.168.{}.0/24".format(subnet_rand
)
944 if "ip_version" not in ip_profile
:
945 ip_profile
["ip_version"] = "IPv4"
948 "name": net_name
+ "-subnet",
949 "network_id": new_net
["network"]["id"],
950 "ip_version": 4 if ip_profile
["ip_version"] == "IPv4" else 6,
951 "cidr": ip_profile
["subnet_address"],
954 # Gateway should be set to None if not needed. Otherwise openstack assigns one by default
955 if ip_profile
.get("gateway_address"):
956 subnet
["gateway_ip"] = ip_profile
["gateway_address"]
958 subnet
["gateway_ip"] = None
960 if ip_profile
.get("dns_address"):
961 subnet
["dns_nameservers"] = ip_profile
["dns_address"].split(";")
963 if "dhcp_enabled" in ip_profile
:
964 subnet
["enable_dhcp"] = (
966 if ip_profile
["dhcp_enabled"] == "false"
967 or ip_profile
["dhcp_enabled"] is False
971 if ip_profile
.get("dhcp_start_address"):
972 subnet
["allocation_pools"] = []
973 subnet
["allocation_pools"].append(dict())
974 subnet
["allocation_pools"][0]["start"] = ip_profile
[
978 if ip_profile
.get("dhcp_count"):
979 # parts = ip_profile["dhcp_start_address"].split(".")
980 # ip_int = (int(parts[0]) << 24) + (int(parts[1]) << 16) + (int(parts[2]) << 8) + int(parts[3])
981 ip_int
= int(netaddr
.IPAddress(ip_profile
["dhcp_start_address"]))
982 ip_int
+= ip_profile
["dhcp_count"] - 1
983 ip_str
= str(netaddr
.IPAddress(ip_int
))
984 subnet
["allocation_pools"][0]["end"] = ip_str
987 ip_profile
.get("ipv6_address_mode")
988 and ip_profile
["ip_version"] != "IPv4"
990 subnet
["ipv6_address_mode"] = ip_profile
["ipv6_address_mode"]
991 # ipv6_ra_mode can be set to the same value for most use cases, see documentation:
992 # https://docs.openstack.org/neutron/latest/admin/config-ipv6.html#ipv6-ra-mode-and-ipv6-address-mode-combinations
993 subnet
["ipv6_ra_mode"] = ip_profile
["ipv6_address_mode"]
995 # self.logger.debug(">>>>>>>>>>>>>>>>>> Subnet: %s", str(subnet))
996 self
.neutron
.create_subnet({"subnet": subnet
})
998 if net_type
== "data" and self
.config
.get("multisegment_support"):
999 if self
.config
.get("l2gw_support"):
1000 l2gw_list
= self
.neutron
.list_l2_gateways().get("l2_gateways", ())
1001 for l2gw
in l2gw_list
:
1003 "l2_gateway_id": l2gw
["id"],
1004 "network_id": new_net
["network"]["id"],
1005 "segmentation_id": str(vlanID
),
1007 new_l2gw_conn
= self
.neutron
.create_l2_gateway_connection(
1008 {"l2_gateway_connection": l2gw_conn
}
1012 + str(new_l2gw_conn
["l2_gateway_connection"]["id"])
1015 return new_net
["network"]["id"], created_items
1016 except Exception as e
:
1017 # delete l2gw connections (if any) before deleting the network
1018 for k
, v
in created_items
.items():
1019 if not v
: # skip already deleted
1023 k_item
, _
, k_id
= k
.partition(":")
1025 if k_item
== "l2gwconn":
1026 self
.neutron
.delete_l2_gateway_connection(k_id
)
1028 except (neExceptions
.ConnectionFailed
, ConnectionError
) as e2
:
1030 "Error deleting l2 gateway connection: {}: {}".format(
1031 type(e2
).__name
__, e2
1034 self
._format
_exception
(e2
)
1035 except Exception as e2
:
1037 "Error deleting l2 gateway connection: {}: {}".format(
1038 type(e2
).__name
__, e2
1043 self
.neutron
.delete_network(new_net
["network"]["id"])
1045 self
._format
_exception
(e
)
1047 def get_network_list(self
, filter_dict
={}):
1048 """Obtain tenant networks of VIM
1054 admin_state_up: boolean
1056 Returns the network list of dictionaries
1058 self
.logger
.debug("Getting network from VIM filter: '%s'", str(filter_dict
))
1060 self
._reload
_connection
()
1061 filter_dict_os
= filter_dict
.copy()
1063 if self
.api_version3
and "tenant_id" in filter_dict_os
:
1065 filter_dict_os
["project_id"] = filter_dict_os
.pop("tenant_id")
1067 net_dict
= self
.neutron
.list_networks(**filter_dict_os
)
1068 net_list
= net_dict
["networks"]
1069 self
.__net
_os
2mano
(net_list
)
1073 neExceptions
.ConnectionFailed
,
1074 ksExceptions
.ClientException
,
1075 neExceptions
.NeutronException
,
1078 self
._format
_exception
(e
)
def get_network(self, net_id):
    """Obtain details of a single network from the VIM.

    Returns the network dictionary for the given network id, with its
    subnets expanded and the provider attributes exposed under both the
    legacy and current key names.
    """
    self.logger.debug(" Getting tenant network %s from VIM", net_id)
    matches = self.get_network_list({"id": net_id})
    if not matches:
        raise vimconn.VimConnNotFoundException(
            "Network '{}' not found".format(net_id)
        )
    if len(matches) > 1:
        raise vimconn.VimConnConflictException(
            "Found more than one network with this criteria"
        )
    net = matches[0]
    subnet_details = []
    for subnet_id in net.get("subnets", ()):
        try:
            subnet_details.append(self.neutron.show_subnet(subnet_id))
        except Exception as e:
            # A broken subnet must not hide the rest of the network info;
            # record the fault and keep going.
            self.logger.error(
                "osconnector.get_network(): Error getting subnet %s %s"
                % (subnet_id, str(e))
            )
            subnet_details.append({"id": subnet_id, "fault": str(e)})
    net["subnets"] = subnet_details
    # Same provider value published under old and new key names.
    net["encapsulation"] = net.get("provider:network_type")
    net["encapsulation_type"] = net.get("provider:network_type")
    net["segmentation_id"] = net.get("provider:segmentation_id")
    net["encapsulation_id"] = net.get("provider:segmentation_id")
    return net
1118 @catch_any_exception
1119 def delete_network(self
, net_id
, created_items
=None):
1121 Removes a tenant network from VIM and its associated elements
1122 :param net_id: VIM identifier of the network, provided by method new_network
1123 :param created_items: dictionary with extra items to be deleted. provided by method new_network
1124 Returns the network identifier or raises an exception upon error or when network is not found
1126 self
.logger
.debug("Deleting network '%s' from VIM", net_id
)
1128 if created_items
is None:
1132 self
._reload
_connection
()
1133 # delete l2gw connections (if any) before deleting the network
1134 for k
, v
in created_items
.items():
1135 if not v
: # skip already deleted
1139 k_item
, _
, k_id
= k
.partition(":")
1140 if k_item
== "l2gwconn":
1141 self
.neutron
.delete_l2_gateway_connection(k_id
)
1143 except (neExceptions
.ConnectionFailed
, ConnectionError
) as e
:
1145 "Error deleting l2 gateway connection: {}: {}".format(
1149 self
._format
_exception
(e
)
1150 except Exception as e
:
1152 "Error deleting l2 gateway connection: {}: {}".format(
1157 # delete VM ports attached to this networks before the network
1158 ports
= self
.neutron
.list_ports(network_id
=net_id
)
1159 for p
in ports
["ports"]:
1161 self
.neutron
.delete_port(p
["id"])
1163 except (neExceptions
.ConnectionFailed
, ConnectionError
) as e
:
1164 self
.logger
.error("Error deleting port %s: %s", p
["id"], str(e
))
1165 # If there is connection error, it raises.
1166 self
._format
_exception
(e
)
1167 except Exception as e
:
1168 self
.logger
.error("Error deleting port %s: %s", p
["id"], str(e
))
1170 self
.neutron
.delete_network(net_id
)
1173 except (neExceptions
.NetworkNotFoundClient
, neExceptions
.NotFound
) as e
:
1174 # If network to be deleted is not found, it does not raise.
1175 self
.logger
.warning(
1176 f
"Error deleting network: {net_id} is not found, {str(e)}"
def refresh_nets_status(self, net_list):
    """Get the status of the networks.

    Params: the list of network identifiers
    Returns a dictionary with:
        net_id:         #VIM id of this network
            status:     #Mandatory. Text with one of:
                        #  DELETED (not found at vim)
                        #  VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
                        #  OTHER (Vim reported other status not understood)
                        #  ERROR (VIM indicates an ERROR status)
                        #  ACTIVE, INACTIVE, DOWN (admin down),
                        #  BUILD (on building process)
            error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR
            vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)
    """
    status_by_id = {}
    for net_id in net_list:
        entry = {}
        try:
            vim_net = self.get_network(net_id)
            # Map the VIM status to the mano vocabulary, falling back to OTHER.
            if vim_net["status"] in netStatus2manoFormat:
                entry["status"] = netStatus2manoFormat[vim_net["status"]]
            else:
                entry["status"] = "OTHER"
                entry["error_msg"] = "VIM status reported " + vim_net["status"]
            # An administratively-down network is reported as DOWN even if ACTIVE.
            if entry["status"] == "ACTIVE" and not vim_net["admin_state_up"]:
                entry["status"] = "DOWN"
            entry["vim_info"] = self.serialize(vim_net)
            if vim_net.get("fault"):  # TODO
                entry["error_msg"] = str(vim_net["fault"])
        except vimconn.VimConnNotFoundException as e:
            self.logger.error("Exception getting net status: %s", str(e))
            entry["status"] = "DELETED"
            entry["error_msg"] = str(e)
        except vimconn.VimConnException as e:
            self.logger.error("Exception getting net status: %s", str(e))
            entry["status"] = "VIM_ERROR"
            entry["error_msg"] = str(e)
        status_by_id[net_id] = entry
    return status_by_id
def get_flavor(self, flavor_id):
    """Obtain flavor details from the VIM. Returns the flavor dict details."""
    self.logger.debug("Getting flavor '%s'", flavor_id)
    try:
        self._reload_connection()
        found = self.nova.flavors.find(id=flavor_id)
        return found.to_dict()
    except (
        nvExceptions.NotFound,
        nvExceptions.ClientException,
        ksExceptions.ClientException,
        ConnectionError,
    ) as e:
        # Translate client/connection errors into vimconn exceptions.
        self._format_exception(e)
1243 def get_flavor_id_from_data(self
, flavor_dict
):
1244 """Obtain flavor id that match the flavor description
1245 Returns the flavor_id or raises a vimconnNotFoundException
1246 flavor_dict: contains the required ram, vcpus, disk
1247 If 'use_existing_flavors' is set to True at config, the closer flavor that provides same or more ram, vcpus
1248 and disk is returned. Otherwise a flavor with exactly same ram, vcpus and disk is returned or a
1249 vimconnNotFoundException is raised
1251 exact_match
= False if self
.config
.get("use_existing_flavors") else True
1254 self
._reload
_connection
()
1255 flavor_candidate_id
= None
1256 flavor_candidate_data
= (10000, 10000, 10000)
1259 flavor_dict
["vcpus"],
1260 flavor_dict
["disk"],
1261 flavor_dict
.get("ephemeral", 0),
1262 flavor_dict
.get("swap", 0),
1265 extended
= flavor_dict
.get("extended", {})
1268 raise vimconn
.VimConnNotFoundException(
1269 "Flavor with EPA still not implemented"
1271 # if len(numas) > 1:
1272 # raise vimconn.VimConnNotFoundException("Cannot find any flavor with more than one numa")
1274 # numas = extended.get("numas")
1275 for flavor
in self
.nova
.flavors
.list():
1276 epa
= flavor
.get_keys()
1287 flavor
.swap
if isinstance(flavor
.swap
, int) else 0,
1289 if flavor_data
== flavor_target
:
1293 and flavor_target
< flavor_data
< flavor_candidate_data
1295 flavor_candidate_id
= flavor
.id
1296 flavor_candidate_data
= flavor_data
1298 if not exact_match
and flavor_candidate_id
:
1299 return flavor_candidate_id
1301 raise vimconn
.VimConnNotFoundException(
1302 "Cannot find any flavor matching '{}'".format(flavor_dict
)
1305 nvExceptions
.NotFound
,
1306 nvExceptions
.BadRequest
,
1307 nvExceptions
.ClientException
,
1308 ksExceptions
.ClientException
,
1311 self
._format
_exception
(e
)
def process_resource_quota(quota: dict, prefix: str, extra_specs: dict) -> None:
    """Translate a resource quota description into flavor extra_specs entries.

    Args:
        quota (dict): Quota description; may carry "limit", "reserve"
            and/or "shares" keys.
        prefix (str): Resource prefix (e.g. "cpu", "memory", "disk_io").
        extra_specs (dict): Destination dict, filled in place for flavor creation.
    """
    key_base = "quota:" + prefix
    if "limit" in quota:
        extra_specs[key_base + "_limit"] = quota["limit"]
    if "reserve" in quota:
        extra_specs[key_base + "_reservation"] = quota["reserve"]
    if "shares" in quota:
        extra_specs[key_base + "_shares_level"] = "custom"
        extra_specs[key_base + "_shares_share"] = quota["shares"]
def process_numa_memory(
    numa: dict, node_id: Optional[int], extra_specs: dict
) -> None:
    """Set the per-NUMA-node memory in extra_specs.

    Args:
        numa (dict): A dictionary which includes numa information
        node_id (int): ID of numa node
        extra_specs (dict): To be filled in place.
    """
    node_memory = numa.get("memory")
    if not node_memory:
        return
    # Descriptor memory is expressed in GB; Nova expects MB.
    extra_specs["hw:numa_mem.{}".format(node_id)] = int(node_memory * 1024)
def process_numa_vcpu(numa: dict, node_id: int, extra_specs: dict) -> None:
    """Set the per-NUMA-node vCPU list in extra_specs.

    Args:
        numa (dict): A dictionary which includes numa information
        node_id (int): ID of numa node
        extra_specs (dict): To be filled in place.
    """
    vcpu_list = numa.get("vcpu")
    if not vcpu_list:
        return
    # Nova wants the vCPU ids as a comma separated string.
    extra_specs["hw:numa_cpus.{}".format(node_id)] = ",".join(map(str, vcpu_list))
def process_numa_paired_threads(numa: dict, extra_specs: dict) -> Optional[int]:
    """Fill up extra_specs if numa has paired-threads.

    Args:
        numa (dict): A dictionary which includes numa information
        extra_specs (dict): To be filled in place.

    Returns:
        threads (int): Number of virtual cpus, or None when not applicable.
    """
    if not numa.get("paired-threads"):
        return None
    # cpu_thread_policy "require" implies that compute node must have an
    # SMT architecture.
    extra_specs["hw:cpu_thread_policy"] = "require"
    extra_specs["hw:cpu_policy"] = "dedicated"
    # Two hardware threads per requested pair.
    return numa["paired-threads"] * 2
def process_numa_cores(numa: dict, extra_specs: dict) -> Optional[int]:
    """Fill up extra_specs if numa has cores.

    Args:
        numa (dict): A dictionary which includes numa information
        extra_specs (dict): To be filled in place.

    Returns:
        cores (int): Number of virtual cpus, or None when not applicable.
    """
    # cpu_thread_policy "isolate" implies that the host must not have an SMT
    # architecture, or a non-SMT architecture will be emulated.
    if not numa.get("cores"):
        return None
    extra_specs["hw:cpu_thread_policy"] = "isolate"
    extra_specs["hw:cpu_policy"] = "dedicated"
    return numa["cores"]
def process_numa_threads(numa: dict, extra_specs: dict) -> Optional[int]:
    """Fill up extra_specs if numa has threads.

    Args:
        numa (dict): A dictionary which includes numa information
        extra_specs (dict): To be filled in place.

    Returns:
        threads (int): Number of virtual cpus, or None when not applicable.
    """
    # cpu_thread_policy "prefer" implies that the host may or may not have
    # an SMT architecture.
    if not numa.get("threads"):
        return None
    extra_specs["hw:cpu_thread_policy"] = "prefer"
    extra_specs["hw:cpu_policy"] = "dedicated"
    return numa["threads"]
1424 def _process_numa_parameters_of_flavor(
1425 self
, numas
: List
, extra_specs
: Dict
1427 """Process numa parameters and fill up extra_specs.
1430 numas (list): List of dictionary which includes numa information
1431 extra_specs (dict): To be filled.
1434 numa_nodes
= len(numas
)
1435 extra_specs
["hw:numa_nodes"] = str(numa_nodes
)
1436 cpu_cores
, cpu_threads
= 0, 0
1438 if self
.vim_type
== "VIO":
1439 self
.process_vio_numa_nodes(numa_nodes
, extra_specs
)
1443 node_id
= numa
["id"]
1444 # overwrite ram and vcpus
1445 # check if key "memory" is present in numa else use ram value at flavor
1446 self
.process_numa_memory(numa
, node_id
, extra_specs
)
1447 self
.process_numa_vcpu(numa
, node_id
, extra_specs
)
1449 # See for reference: https://specs.openstack.org/openstack/nova-specs/specs/mitaka/implemented/virt-driver-cpu-thread-pinning.html
1450 extra_specs
["hw:cpu_sockets"] = str(numa_nodes
)
1452 if "paired-threads" in numa
:
1453 threads
= self
.process_numa_paired_threads(numa
, extra_specs
)
1454 cpu_threads
+= threads
1456 elif "cores" in numa
:
1457 cores
= self
.process_numa_cores(numa
, extra_specs
)
1460 elif "threads" in numa
:
1461 threads
= self
.process_numa_threads(numa
, extra_specs
)
1462 cpu_threads
+= threads
1465 extra_specs
["hw:cpu_cores"] = str(cpu_cores
)
1467 extra_specs
["hw:cpu_threads"] = str(cpu_threads
)
def process_vio_numa_nodes(numa_nodes: int, extra_specs: Dict) -> None:
    """Update extra_specs for a VIO deployment according to the numa node count.

    Args:
        numa_nodes (int): Number of numa nodes
        extra_specs (dict): Extra specs dict, updated in place
    """
    # If there are several numas, we do not define specific affinity.
    extra_specs.update({"vmware:latency_sensitivity_level": "high"})
def _change_flavor_name(
    self, name: str, name_suffix: int, flavor_data: dict
) -> str:
    """Derive a non-conflicting flavor name when the name already exists.

    Args:
        name (str): Flavor name to be checked
        name_suffix (int): Suffix to be appended to name
        flavor_data (dict): Flavor dict

    Returns:
        name (str): New flavor name to be used
    """
    # Snapshot the names currently known to Nova; a set gives O(1) membership.
    existing_names = {flavor.name for flavor in self.nova.flavors.list()}
    while name in existing_names:
        name_suffix += 1
        name = flavor_data["name"] + "-" + str(name_suffix)
    return name
1506 def _process_extended_config_of_flavor(
1507 self
, extended
: dict, extra_specs
: dict
1509 """Process the extended dict to fill up extra_specs.
1512 extended (dict): Keeping the extra specification of flavor
1513 extra_specs (dict) Dict to be filled to be used during flavor creation
1518 "mem-quota": "memory",
1520 "disk-io-quota": "disk_io",
1528 "PREFER_LARGE": "any",
1532 "cpu-pinning-policy": "hw:cpu_policy",
1533 "cpu-thread-pinning-policy": "hw:cpu_thread_policy",
1534 "mem-policy": "hw:numa_mempolicy",
1537 numas
= extended
.get("numas")
1539 self
._process
_numa
_parameters
_of
_flavor
(numas
, extra_specs
)
1541 for quota
, item
in quotas
.items():
1542 if quota
in extended
.keys():
1543 self
.process_resource_quota(extended
.get(quota
), item
, extra_specs
)
1545 # Set the mempage size as specified in the descriptor
1546 if extended
.get("mempage-size"):
1547 if extended
["mempage-size"] in page_sizes
.keys():
1548 extra_specs
["hw:mem_page_size"] = page_sizes
[extended
["mempage-size"]]
1550 # Normally, validations in NBI should not allow to this condition.
1552 "Invalid mempage-size %s. Will be ignored",
1553 extended
.get("mempage-size"),
1556 for policy
, hw_policy
in policies
.items():
1557 if extended
.get(policy
):
1558 extra_specs
[hw_policy
] = extended
[policy
].lower()
def _get_flavor_details(flavor_data: dict) -> Tuple:
    """Return the main items needed to create a flavor.

    Args:
        flavor_data (dict): Dictionary that includes required flavor details

    Returns:
        ram, vcpus, extra_specs, extended (tuple): Main items of required flavor
    """
    ram = flavor_data.get("ram", 64)
    vcpus = flavor_data.get("vcpus", 1)
    extra_specs = {}  # filled later from the extended config
    extended = flavor_data.get("extended")
    return ram, vcpus, extra_specs, extended
1577 @catch_any_exception
1578 def new_flavor(self
, flavor_data
: dict, change_name_if_used
: bool = True) -> str:
1579 """Adds a tenant flavor to openstack VIM.
1580 if change_name_if_used is True, it will change name in case of conflict,
1581 because it is not supported name repetition.
1584 flavor_data (dict): Flavor details to be processed
1585 change_name_if_used (bool): Change name in case of conflict
1588 flavor_id (str): flavor identifier
1591 self
.logger
.debug("Adding flavor '%s'", str(flavor_data
))
1595 name
= flavor_data
["name"]
1596 while retry
< max_retries
:
1599 self
._reload
_connection
()
1601 if change_name_if_used
:
1602 name
= self
._change
_flavor
_name
(name
, name_suffix
, flavor_data
)
1604 ram
, vcpus
, extra_specs
, extended
= self
._get
_flavor
_details
(
1608 self
._process
_extended
_config
_of
_flavor
(extended
, extra_specs
)
1612 new_flavor
= self
.nova
.flavors
.create(
1616 disk
=flavor_data
.get("disk", 0),
1617 ephemeral
=flavor_data
.get("ephemeral", 0),
1618 swap
=flavor_data
.get("swap", 0),
1619 is_public
=flavor_data
.get("is_public", True),
1624 new_flavor
.set_keys(extra_specs
)
1626 return new_flavor
.id
1628 except nvExceptions
.Conflict
as e
:
1629 if change_name_if_used
and retry
< max_retries
:
1632 self
._format
_exception
(e
)
@catch_any_exception
def delete_flavor(self, flavor_id):
    """Deletes a tenant flavor from openstack VIM. Returns the old flavor_id."""
    try:
        self._reload_connection()
        self.nova.flavors.delete(flavor_id)
        return flavor_id
    except (nvExceptions.NotFound, nvExceptions.ResourceNotFound) as e:
        # If flavor is not found, it does not raise.
        self.logger.warning(
            f"Error deleting flavor: {flavor_id} is not found, {str(e.message)}"
        )
1648 def new_image(self
, image_dict
):
1650 Adds a tenant image to VIM. imge_dict is a dictionary with:
1652 disk_format: qcow2, vhd, vmdk, raw (by default), ...
1653 location: path or URI
1654 public: "yes" or "no"
1655 metadata: metadata of the image
1656 Returns the image_id
1661 while retry
< max_retries
:
1664 self
._reload
_connection
()
1666 # determine format http://docs.openstack.org/developer/glance/formats.html
1667 if "disk_format" in image_dict
:
1668 disk_format
= image_dict
["disk_format"]
1669 else: # autodiscover based on extension
1670 if image_dict
["location"].endswith(".qcow2"):
1671 disk_format
= "qcow2"
1672 elif image_dict
["location"].endswith(".vhd"):
1674 elif image_dict
["location"].endswith(".vmdk"):
1675 disk_format
= "vmdk"
1676 elif image_dict
["location"].endswith(".vdi"):
1678 elif image_dict
["location"].endswith(".iso"):
1680 elif image_dict
["location"].endswith(".aki"):
1682 elif image_dict
["location"].endswith(".ari"):
1684 elif image_dict
["location"].endswith(".ami"):
1690 "new_image: '%s' loading from '%s'",
1692 image_dict
["location"],
1694 if self
.vim_type
== "VIO":
1695 container_format
= "bare"
1696 if "container_format" in image_dict
:
1697 container_format
= image_dict
["container_format"]
1699 new_image
= self
.glance
.images
.create(
1700 name
=image_dict
["name"],
1701 container_format
=container_format
,
1702 disk_format
=disk_format
,
1705 new_image
= self
.glance
.images
.create(name
=image_dict
["name"])
1707 if image_dict
["location"].startswith("http"):
1708 # TODO there is not a method to direct download. It must be downloaded locally with requests
1709 raise vimconn
.VimConnNotImplemented("Cannot create image from URL")
1711 with
open(image_dict
["location"]) as fimage
:
1712 self
.glance
.images
.upload(new_image
.id, fimage
)
1713 # new_image = self.glancev1.images.create(name=image_dict["name"], is_public=
1714 # image_dict.get("public","yes")=="yes",
1715 # container_format="bare", data=fimage, disk_format=disk_format)
1717 metadata_to_load
= image_dict
.get("metadata")
1719 # TODO location is a reserved word for current openstack versions. fixed for VIO please check
1721 if self
.vim_type
== "VIO":
1722 metadata_to_load
["upload_location"] = image_dict
["location"]
1724 metadata_to_load
["location"] = image_dict
["location"]
1726 self
.glance
.images
.update(new_image
.id, **metadata_to_load
)
1731 gl1Exceptions
.HTTPException
,
1732 gl1Exceptions
.CommunicationError
,
1735 if retry
== max_retries
:
1738 self
._format
_exception
(e
)
1739 except IOError as e
: # can not open the file
1740 raise vimconn
.VimConnConnectionException(
1741 "{}: {} for {}".format(type(e
).__name
__, e
, image_dict
["location"]),
1742 http_code
=vimconn
.HTTP_Bad_Request
,
1744 except Exception as e
:
1745 self
._format
_exception
(e
)
@catch_any_exception
def delete_image(self, image_id):
    """Deletes a tenant image from openstack VIM. Returns the old id."""
    try:
        self._reload_connection()
        self.glance.images.delete(image_id)
        return image_id
    except gl1Exceptions.NotFound as e:
        # If image is not found, it does not raise.
        self.logger.warning(
            f"Error deleting image: {image_id} is not found, {str(e)}"
        )
@catch_any_exception
def get_image_id_from_path(self, path):
    """Get the image id from image path in the VIM database. Returns the image_id."""
    self._reload_connection()
    # Scan all glance images looking for a matching "location" metadata entry.
    match = next(
        (
            image
            for image in self.glance.images.list()
            if image.metadata.get("location") == path
        ),
        None,
    )
    if match is not None:
        return match.id
    raise vimconn.VimConnNotFoundException(
        "image with location '{}' not found".format(path)
    )
1775 def get_image_list(self
, filter_dict
={}):
1776 """Obtain tenant images from VIM
1780 checksum: image checksum
1781 Returns the image list of dictionaries:
1782 [{<the fields at Filter_dict plus some VIM specific>}, ...]
1785 self
.logger
.debug("Getting image list from VIM filter: '%s'", str(filter_dict
))
1787 self
._reload
_connection
()
1788 # filter_dict_os = filter_dict.copy()
1789 # First we filter by the available filter fields: name, id. The others are removed.
1790 image_list
= self
.glance
.images
.list()
1793 for image
in image_list
:
1795 if filter_dict
.get("name") and image
["name"] != filter_dict
["name"]:
1798 if filter_dict
.get("id") and image
["id"] != filter_dict
["id"]:
1802 filter_dict
.get("checksum")
1803 and image
["checksum"] != filter_dict
["checksum"]
1807 filtered_list
.append(image
.copy())
1808 except gl1Exceptions
.HTTPNotFound
:
1811 return filtered_list
1814 ksExceptions
.ClientException
,
1815 nvExceptions
.ClientException
,
1816 gl1Exceptions
.CommunicationError
,
1819 self
._format
_exception
(e
)
1821 def __wait_for_vm(self
, vm_id
, status
):
1822 """wait until vm is in the desired status and return True.
1823 If the VM gets in ERROR status, return false.
1824 If the timeout is reached generate an exception"""
1826 while elapsed_time
< server_timeout
:
1827 vm_status
= self
.nova
.servers
.get(vm_id
).status
1829 if vm_status
== status
:
1832 if vm_status
== "ERROR":
1838 # if we exceeded the timeout rollback
1839 if elapsed_time
>= server_timeout
:
1840 raise vimconn
.VimConnException(
1841 "Timeout waiting for instance " + vm_id
+ " to get " + status
,
1842 http_code
=vimconn
.HTTP_Request_Timeout
,
def _get_openstack_availablity_zones(self):
    """Return the availability zone names offered by openstack.

    The "internal" zone is filtered out. Returns None when the query fails.
    """
    try:
        zones = self.nova.availability_zones.list()
        return [
            str(zone.zoneName) for zone in zones if zone.zoneName != "internal"
        ]
    except Exception:
        # Best effort: callers treat None as "no zone information".
        return None
def _set_availablity_zones(self):
    """Set the VIM availability zone list on self.availability_zone.

    Uses the "availability_zone" entry of the connector config when present
    (string or list); otherwise asks openstack for its zones.
    """
    if "availability_zone" in self.config:
        configured = self.config.get("availability_zone")
        if isinstance(configured, str):
            # Normalize a single zone name to a one-element list.
            self.availability_zone = [configured]
        elif isinstance(configured, list):
            self.availability_zone = configured
    else:
        self.availability_zone = self._get_openstack_availablity_zones()
def _get_vm_availability_zone(
    self, availability_zone_index, availability_zone_list
):
    """Return the availability zone to be used by the created VM.

    :return: The VIM availability zone to be used or None
    """
    if availability_zone_index is None:
        # No zone requested: fall back to the configured default, if any.
        if not self.config.get("availability_zone"):
            return None
        elif isinstance(self.config.get("availability_zone"), str):
            return self.config["availability_zone"]
        else:
            # TODO consider using a different parameter at config for default AV and AV list match
            return self.config["availability_zone"][0]

    vim_availability_zones = self.availability_zone
    # check if VIM offers enough availability zones described in the VNFD
    if vim_availability_zones and len(availability_zone_list) <= len(
        vim_availability_zones
    ):
        # If any NFV AV name is unknown to the VIM, select by index instead.
        match_by_index = any(
            av not in vim_availability_zones for av in availability_zone_list
        )
        if match_by_index:
            return vim_availability_zones[availability_zone_index]
        return availability_zone_list[availability_zone_index]
    raise vimconn.VimConnConflictException(
        "No enough availability zones at VIM for this deployment"
    )
def _prepare_port_dict_security_groups(self, net: dict, port_dict: dict) -> None:
    """Fill up the security_groups in the port_dict.

    Args:
        net (dict): Network details
        port_dict (dict): Port details, updated in place
    """
    # Guard clauses: only attach security groups when they are configured,
    # port security is not explicitly disabled for the net, and the
    # no_port_security_extension workaround is not active.
    if not self.config.get("security_groups"):
        return
    if net.get("port_security") is False:
        return
    if self.config.get("no_port_security_extension"):
        return
    if not self.security_groups_id:
        # Lazily resolve configured group names to ids.
        self._get_ids_from_name()
    port_dict["security_groups"] = self.security_groups_id
def _prepare_port_dict_binding(self, net: dict, port_dict: dict) -> None:
    """Fill up the network binding depending on network type in the port_dict.

    Args:
        net (dict): Network details
        port_dict (dict): Port details, updated in place

    Raises:
        VimConnException: when the net dict carries no "type" key.
    """
    net_type = net.get("type")
    if not net_type:
        raise vimconn.VimConnException("Type is missing in the network details.")

    if net_type == "virtual":
        # Plain virtual port: default vnic binding, nothing to add.
        pass
    elif net_type in ("VF", "SR-IOV"):
        port_dict["binding:vnic_type"] = "direct"
        # VIO specific Changes
        if self.vim_type == "VIO":
            # Need to create port with port_security_enabled = False and no-security-groups
            port_dict["port_security_enabled"] = False
            port_dict["provider_security_groups"] = []
            port_dict["security_groups"] = []
    else:
        # For PT PCI-PASSTHROUGH
        port_dict["binding:vnic_type"] = "direct-physical"
def _set_fixed_ip(new_port: dict, net: dict) -> None:
    """Set the "ip" parameter in net dictionary from the created port.

    Args:
        new_port (dict): New created port
        net (dict): Network details, updated in place
    """
    assigned = new_port["port"].get("fixed_ips")
    # Use the first assigned address, or None when the port has none.
    net["ip"] = assigned[0].get("ip_address") if assigned else None
def _prepare_port_dict_mac_ip_addr(net: dict, port_dict: dict) -> None:
    """Fill up the mac_address and fixed_ips in port_dict.

    Args:
        net (dict): Network details
        port_dict (dict): Port details, updated in place
    """
    if net.get("mac_address"):
        port_dict["mac_address"] = net["mac_address"]

    if addresses := net.get("ip_address"):
        # Accept either a single address or a list of addresses.
        if not isinstance(addresses, list):
            addresses = [addresses]
        port_dict["fixed_ips"] = [{"ip_address": ip} for ip in addresses]
        # TODO add "subnet_id": <subnet_id>
def _create_new_port(self, port_dict: dict, created_items: dict, net: dict) -> Dict:
    """Create a new port using neutron and record it.

    Args:
        port_dict (dict): Port details
        created_items (dict): All created items, updated in place
        net (dict): Network details, updated in place

    Returns:
        new_port (dict): Newly created port as returned by neutron
    """
    new_port = self.neutron.create_port({"port": port_dict})
    port_info = new_port["port"]
    # Track the port so delete_vminstance can clean it up later.
    created_items["port:" + str(port_info["id"])] = True
    net["mac_address"] = port_info["mac_address"]
    net["vim_id"] = port_info["id"]
    return new_port
2019 self
, net
: dict, name
: str, created_items
: dict
2020 ) -> Tuple
[dict, dict]:
2021 """Create port using net details.
2024 net (dict): Network details
2025 name (str): Name to be used as network name if net dict does not include name
2026 created_items (dict): All created items
2029 new_port, port New created port, port dictionary
2034 "network_id": net
["net_id"],
2035 "name": net
.get("name"),
2036 "admin_state_up": True,
2039 if not port_dict
["name"]:
2040 port_dict
["name"] = name
2042 self
._prepare
_port
_dict
_security
_groups
(net
, port_dict
)
2044 self
._prepare
_port
_dict
_binding
(net
, port_dict
)
2046 vimconnector
._prepare
_port
_dict
_mac
_ip
_addr
(net
, port_dict
)
2048 new_port
= self
._create
_new
_port
(port_dict
, created_items
, net
)
2050 vimconnector
._set
_fixed
_ip
(new_port
, net
)
2052 port
= {"port-id": new_port
["port"]["id"]}
2054 if float(self
.nova
.api_version
.get_string()) >= 2.32:
2055 port
["tag"] = new_port
["port"]["name"]
2057 return new_port
, port
2059 def _prepare_network_for_vminstance(
2063 created_items
: dict,
2065 external_network
: list,
2066 no_secured_ports
: list,
2068 """Create port and fill up net dictionary for new VM instance creation.
2071 name (str): Name of network
2072 net_list (list): List of networks
2073 created_items (dict): All created items belongs to a VM
2074 net_list_vim (list): List of ports
2075 external_network (list): List of external-networks
2076 no_secured_ports (list): Port security disabled ports
2079 self
._reload
_connection
()
2081 for net
in net_list
:
2082 # Skip non-connected iface
2083 if not net
.get("net_id"):
2086 new_port
, port
= self
._create
_port
(net
, name
, created_items
)
2088 net_list_vim
.append(port
)
2090 if net
.get("floating_ip", False):
2091 net
["exit_on_floating_ip_error"] = True
2092 external_network
.append(net
)
2094 elif net
["use"] == "mgmt" and self
.config
.get("use_floating_ip"):
2095 net
["exit_on_floating_ip_error"] = False
2096 external_network
.append(net
)
2097 net
["floating_ip"] = self
.config
.get("use_floating_ip")
2099 # If port security is disabled when the port has not yet been attached to the VM, then all vm traffic
2100 # is dropped. As a workaround we wait until the VM is active and then disable the port-security
2101 if net
.get("port_security") is False and not self
.config
.get(
2102 "no_port_security_extension"
2104 no_secured_ports
.append(
2106 new_port
["port"]["id"],
2107 net
.get("port_security_disable_strategy"),
2111 def _prepare_persistent_root_volumes(
2116 base_disk_index
: int,
2117 block_device_mapping
: dict,
2118 existing_vim_volumes
: list,
2119 created_items
: dict,
2121 """Prepare persistent root volumes for new VM instance.
2124 name (str): Name of VM instance
2125 vm_av_zone (list): List of availability zones
2126 disk (dict): Disk details
2127 base_disk_index (int): Disk index
2128 block_device_mapping (dict): Block device details
2129 existing_vim_volumes (list): Existing disk details
2130 created_items (dict): All created items belongs to VM
2133 boot_volume_id (str): ID of boot volume
2136 # Disk may include only vim_volume_id or only vim_id."
2137 # Use existing persistent root volume finding with volume_id or vim_id
2138 key_id
= "vim_volume_id" if "vim_volume_id" in disk
.keys() else "vim_id"
2140 if disk
.get(key_id
):
2141 block_device_mapping
["vd" + chr(base_disk_index
)] = disk
[key_id
]
2142 existing_vim_volumes
.append({"id": disk
[key_id
]})
2145 # Create persistent root volume
2146 volume
= self
.cinder
.volumes
.create(
2148 name
=name
+ "vd" + chr(base_disk_index
),
2149 imageRef
=disk
["image_id"],
2150 # Make sure volume is in the same AZ as the VM to be attached to
2151 availability_zone
=vm_av_zone
,
2153 boot_volume_id
= volume
.id
2154 self
.update_block_device_mapping(
2156 block_device_mapping
=block_device_mapping
,
2157 base_disk_index
=base_disk_index
,
2159 created_items
=created_items
,
2162 return boot_volume_id
def update_block_device_mapping(
    self, volume, block_device_mapping, base_disk_index, disk, created_items
) -> None:
    """Add volume information to the block device mapping dict.

    Args:
        volume (object): Created volume object
        block_device_mapping (dict): Block device details, updated in place
        base_disk_index (int): Disk index (character code of the device letter)
        disk (dict): Disk details
        created_items (dict): All created items belonging to the VM

    Raises:
        VimConnException: when the volume is empty or carries no id.
    """
    if not volume:
        raise vimconn.VimConnException("Volume is empty.")

    if not hasattr(volume, "id"):
        raise vimconn.VimConnException(
            "Created volume is not valid, does not have id attribute."
        )

    device_name = "vd" + chr(base_disk_index)
    block_device_mapping[device_name] = volume.id
    if disk.get("multiattach"):  # multiattach volumes do not belong to VDUs
        return

    volume_key = "volume:" + str(volume.id)
    if disk.get("keep"):
        # ":keep" marks volumes that must survive VM deletion.
        volume_key += ":keep"
    created_items[volume_key] = True
@catch_any_exception
def new_shared_volumes(self, shared_volume_data) -> (str, str):
    """Create a multiattach (shared) cinder volume.

    Args:
        shared_volume_data: dict with "size" and "name" of the volume.

    Returns:
        (name, id) of the created volume.
    """
    volume = self.cinder.volumes.create(
        size=shared_volume_data["size"],
        name=shared_volume_data["name"],
        # "multiattach" volume type allows the volume to be attached to
        # several VMs at once.
        volume_type="multiattach",
    )
    return volume.name, volume.id
2205 def _prepare_shared_volumes(
2209 base_disk_index
: int,
2210 block_device_mapping
: dict,
2211 existing_vim_volumes
: list,
2212 created_items
: dict,
2214 volumes
= {volume
.name
: volume
.id for volume
in self
.cinder
.volumes
.list()}
2215 if volumes
.get(disk
["name"]):
2216 sv_id
= volumes
[disk
["name"]]
2219 # If this is not the first VM to attach the volume, volume status may be "reserved" for a short time
2222 volume
= self
.cinder
.volumes
.get(sv_id
)
2223 vol_status
= volume
.status
2224 if volume
.status
not in ("in-use", "available"):
2227 self
.update_block_device_mapping(
2229 block_device_mapping
=block_device_mapping
,
2230 base_disk_index
=base_disk_index
,
2232 created_items
=created_items
,
2235 raise vimconn
.VimConnException(
2236 "Shared volume is not prepared, status is: {}".format(vol_status
),
2237 http_code
=vimconn
.HTTP_Internal_Server_Error
,
2240 def _prepare_non_root_persistent_volumes(
2245 block_device_mapping
: dict,
2246 base_disk_index
: int,
2247 existing_vim_volumes
: list,
2248 created_items
: dict,
2250 """Prepare persistent volumes for new VM instance.
2253 name (str): Name of VM instance
2254 disk (dict): Disk details
2255 vm_av_zone (list): List of availability zones
2256 block_device_mapping (dict): Block device details
2257 base_disk_index (int): Disk index
2258 existing_vim_volumes (list): Existing disk details
2259 created_items (dict): All created items belongs to VM
2261 # Non-root persistent volumes
2262 # Disk may include only vim_volume_id or only vim_id."
2263 key_id
= "vim_volume_id" if "vim_volume_id" in disk
.keys() else "vim_id"
2264 if disk
.get(key_id
):
2265 # Use existing persistent volume
2266 block_device_mapping
["vd" + chr(base_disk_index
)] = disk
[key_id
]
2267 existing_vim_volumes
.append({"id": disk
[key_id
]})
2269 volume_name
= f
"{name}vd{chr(base_disk_index)}"
2270 volume
= self
.cinder
.volumes
.create(
2273 # Make sure volume is in the same AZ as the VM to be attached to
2274 availability_zone
=vm_av_zone
,
2276 self
.update_block_device_mapping(
2278 block_device_mapping
=block_device_mapping
,
2279 base_disk_index
=base_disk_index
,
2281 created_items
=created_items
,
def _wait_for_created_volumes_availability(
    self, elapsed_time: int, created_items: dict
) -> Optional[int]:
    """Wait till created volumes become available.

    Args:
        elapsed_time (int): Passed time while waiting
        created_items (dict): All created items belongs to VM

    Returns:
        elapsed_time (int): Time spent while waiting
    """
    while elapsed_time < volume_timeout:
        all_ready = True

        for created_item in created_items:
            v, volume_id = (
                created_item.split(":")[0],
                created_item.split(":")[1],
            )

            if v == "volume":
                volume = self.cinder.volumes.get(volume_id)

                # A shared (multiattach) volume already attached elsewhere is usable
                if (
                    volume.volume_type == "multiattach"
                    and volume.status == "in-use"
                ):
                    return elapsed_time
                elif volume.status != "available":
                    all_ready = False
                    break

        if all_ready:
            # All ready: break from while
            break

        time.sleep(5)
        elapsed_time += 5

    return elapsed_time
def _wait_for_existing_volumes_availability(
    self, elapsed_time: int, existing_vim_volumes: list
) -> Optional[int]:
    """Wait till existing volumes become available.

    Args:
        elapsed_time (int): Passed time while waiting
        existing_vim_volumes (list): Existing volume details

    Returns:
        elapsed_time (int): Time spent while waiting
    """
    while elapsed_time < volume_timeout:
        all_ready = True

        for volume in existing_vim_volumes:
            v = self.cinder.volumes.get(volume["id"])

            # A shared (multiattach) volume already attached elsewhere is usable
            if v.volume_type == "multiattach" and v.status == "in-use":
                return elapsed_time
            elif v.status != "available":
                all_ready = False
                break

        if all_ready:  # all ready: break from while
            break

        time.sleep(5)
        elapsed_time += 5

    return elapsed_time
def _prepare_disk_for_vminstance(
    self,
    name: str,
    existing_vim_volumes: list,
    created_items: dict,
    vm_av_zone: list,
    block_device_mapping: dict,
    disk_list: list = None,
) -> None:
    """Prepare all volumes for new VM instance.

    Args:
        name (str): Name of Instance
        existing_vim_volumes (list): List of existing volumes
        created_items (dict): All created items belongs to VM
        vm_av_zone (list): VM availability zone
        block_device_mapping (dict): Block devices to be attached to VM
        disk_list (list): List of disks

    Raises:
        VimConnException: if the volumes do not become available in time.
    """
    # Create additional volumes in case these are present in disk_list
    base_disk_index = ord("b")
    boot_volume_id = None
    elapsed_time = 0

    for disk in disk_list:
        if "image_id" in disk:
            # Root persistent volume
            base_disk_index = ord("a")
            boot_volume_id = self._prepare_persistent_root_volumes(
                name=name,
                vm_av_zone=vm_av_zone,
                disk=disk,
                base_disk_index=base_disk_index,
                block_device_mapping=block_device_mapping,
                existing_vim_volumes=existing_vim_volumes,
                created_items=created_items,
            )
        elif disk.get("multiattach"):
            self._prepare_shared_volumes(
                name=name,
                disk=disk,
                base_disk_index=base_disk_index,
                block_device_mapping=block_device_mapping,
                existing_vim_volumes=existing_vim_volumes,
                created_items=created_items,
            )
        else:
            # Non-root persistent volume
            self._prepare_non_root_persistent_volumes(
                name=name,
                disk=disk,
                vm_av_zone=vm_av_zone,
                block_device_mapping=block_device_mapping,
                base_disk_index=base_disk_index,
                existing_vim_volumes=existing_vim_volumes,
                created_items=created_items,
            )
        base_disk_index += 1

    # Wait until created volumes are with status available
    elapsed_time = self._wait_for_created_volumes_availability(
        elapsed_time, created_items
    )
    # Wait until existing volumes in vim are with status available
    elapsed_time = self._wait_for_existing_volumes_availability(
        elapsed_time, existing_vim_volumes
    )
    # If we exceeded the timeout rollback
    if elapsed_time >= volume_timeout:
        raise vimconn.VimConnException(
            "Timeout creating volumes for instance " + name,
            http_code=vimconn.HTTP_Request_Timeout,
        )
    if boot_volume_id:
        self.cinder.volumes.set_bootable(boot_volume_id, True)
2426 def _find_the_external_network_for_floating_ip(self
):
2427 """Get the external network ip in order to create floating IP.
2430 pool_id (str): External network pool ID
2434 # Find the external network
2435 external_nets
= list()
2437 for net
in self
.neutron
.list_networks()["networks"]:
2438 if net
["router:external"]:
2439 external_nets
.append(net
)
2441 if len(external_nets
) == 0:
2442 raise vimconn
.VimConnException(
2443 "Cannot create floating_ip automatically since "
2444 "no external network is present",
2445 http_code
=vimconn
.HTTP_Conflict
,
2448 if len(external_nets
) > 1:
2449 raise vimconn
.VimConnException(
2450 "Cannot create floating_ip automatically since "
2451 "multiple external networks are present",
2452 http_code
=vimconn
.HTTP_Conflict
,
2456 return external_nets
[0].get("id")
2458 def _neutron_create_float_ip(self
, param
: dict, created_items
: dict) -> None:
2459 """Trigger neutron to create a new floating IP using external network ID.
2462 param (dict): Input parameters to create a floating IP
2463 created_items (dict): All created items belongs to new VM instance
2470 self
.logger
.debug("Creating floating IP")
2471 new_floating_ip
= self
.neutron
.create_floatingip(param
)
2472 free_floating_ip
= new_floating_ip
["floatingip"]["id"]
2473 created_items
["floating_ip:" + str(free_floating_ip
)] = True
2475 except Exception as e
:
2476 raise vimconn
.VimConnException(
2477 type(e
).__name
__ + ": Cannot create new floating_ip " + str(e
),
2478 http_code
=vimconn
.HTTP_Conflict
,
2481 def _create_floating_ip(
2482 self
, floating_network
: dict, server
: object, created_items
: dict
2484 """Get the available Pool ID and create a new floating IP.
2487 floating_network (dict): Dict including external network ID
2488 server (object): Server object
2489 created_items (dict): All created items belongs to new VM instance
2493 # Pool_id is available
2495 isinstance(floating_network
["floating_ip"], str)
2496 and floating_network
["floating_ip"].lower() != "true"
2498 pool_id
= floating_network
["floating_ip"]
2502 pool_id
= self
._find
_the
_external
_network
_for
_floating
_ip
()
2506 "floating_network_id": pool_id
,
2507 "tenant_id": server
.tenant_id
,
2511 self
._neutron
_create
_float
_ip
(param
, created_items
)
2513 def _find_floating_ip(
2517 floating_network
: dict,
2519 """Find the available free floating IPs if there are.
2522 server (object): Server object
2523 floating_ips (list): List of floating IPs
2524 floating_network (dict): Details of floating network such as ID
2527 free_floating_ip (str): Free floating ip address
2530 for fip
in floating_ips
:
2531 if fip
.get("port_id") or fip
.get("tenant_id") != server
.tenant_id
:
2534 if isinstance(floating_network
["floating_ip"], str):
2535 if fip
.get("floating_network_id") != floating_network
["floating_ip"]:
2540 def _assign_floating_ip(
2541 self
, free_floating_ip
: str, floating_network
: dict
2543 """Assign the free floating ip address to port.
2546 free_floating_ip (str): Floating IP to be assigned
2547 floating_network (dict): ID of floating network
2550 fip (dict) (dict): Floating ip details
2553 # The vim_id key contains the neutron.port_id
2554 self
.neutron
.update_floatingip(
2556 {"floatingip": {"port_id": floating_network
["vim_id"]}},
2558 # For race condition ensure not re-assigned to other VM after 5 seconds
2561 return self
.neutron
.show_floatingip(free_floating_ip
)
2563 def _get_free_floating_ip(
2564 self
, server
: object, floating_network
: dict
2566 """Get the free floating IP address.
2569 server (object): Server Object
2570 floating_network (dict): Floating network details
2573 free_floating_ip (str): Free floating ip addr
2577 floating_ips
= self
.neutron
.list_floatingips().get("floatingips", ())
2580 random
.shuffle(floating_ips
)
2582 return self
._find
_floating
_ip
(server
, floating_ips
, floating_network
)
def _prepare_external_network_for_vminstance(
    self,
    external_network: list,
    server: object,
    created_items: dict,
    vm_start_time: float,
) -> None:
    """Assign floating IP address for VM instance.

    Args:
        external_network (list): ID of External network
        server (object): Server Object
        created_items (dict): All created items belongs to new VM instance
        vm_start_time (float): Time as a floating point number expressed in seconds since the epoch, in UTC

    Raises:
        VimConnException: when the floating IP cannot be obtained/assigned and
            the network is flagged to fail on floating-IP errors.
    """
    for floating_network in external_network:
        try:
            assigned = False
            floating_ip_retries = 3
            # In case of RO in HA there can be conflicts, two RO trying to assign same floating IP, so retry
            # several times
            while not assigned:
                free_floating_ip = self._get_free_floating_ip(
                    server, floating_network
                )

                if not free_floating_ip:
                    self._create_floating_ip(
                        floating_network, server, created_items
                    )
                    continue

                try:
                    # For race condition ensure not already assigned
                    fip = self.neutron.show_floatingip(free_floating_ip)

                    if fip["floatingip"].get("port_id"):
                        continue

                    # Assign floating ip
                    fip = self._assign_floating_ip(
                        free_floating_ip, floating_network
                    )

                    if fip["floatingip"]["port_id"] != floating_network["vim_id"]:
                        self.logger.warning(
                            "floating_ip {} re-assigned to other port".format(
                                free_floating_ip
                            )
                        )
                        continue

                    self.logger.debug(
                        "Assigned floating_ip {} to VM {}".format(
                            free_floating_ip, server.id
                        )
                    )

                    assigned = True

                except Exception as e:
                    # Openstack need some time after VM creation to assign an IP. So retry if fails
                    vm_status = self.nova.servers.get(server.id).status

                    if vm_status not in ("ACTIVE", "ERROR"):
                        if time.time() - vm_start_time < server_timeout:
                            time.sleep(5)
                            continue
                    elif floating_ip_retries > 0:
                        floating_ip_retries -= 1
                        continue

                    raise vimconn.VimConnException(
                        "Cannot create floating_ip: {} {}".format(
                            type(e).__name__, e
                        ),
                        http_code=vimconn.HTTP_Conflict,
                    )

        except Exception as e:
            # Best-effort unless the descriptor demands a floating IP
            if not floating_network["exit_on_floating_ip_error"]:
                self.logger.error("Cannot create floating_ip. %s", str(e))
                continue

            raise
2673 def _update_port_security_for_vminstance(
2675 no_secured_ports
: list,
2678 """Updates the port security according to no_secured_ports list.
2681 no_secured_ports (list): List of ports that security will be disabled
2682 server (object): Server Object
2688 # Wait until the VM is active and then disable the port-security
2689 if no_secured_ports
:
2690 self
.__wait
_for
_vm
(server
.id, "ACTIVE")
2692 for port
in no_secured_ports
:
2694 "port": {"port_security_enabled": False, "security_groups": None}
2697 if port
[1] == "allow-address-pairs":
2699 "port": {"allowed_address_pairs": [{"ip_address": "0.0.0.0/0"}]}
2703 self
.neutron
.update_port(port
[0], port_update
)
2706 raise vimconn
.VimConnException(
2707 "It was not possible to disable port security for port {}".format(
def new_vminstance(
    self,
    name: str,
    description: str,
    start: bool,
    image_id: str,
    flavor_id: str,
    affinity_group_list: list,
    net_list: list,
    cloud_config=None,
    disk_list=None,
    availability_zone_index=None,
    availability_zone_list=None,
) -> tuple:
    """Adds a VM instance to VIM.

    Args:
        name (str): name of VM
        description (str): description
        start (bool): indicates if VM must start or boot in pause mode. Ignored
        image_id (str) image uuid
        flavor_id (str) flavor uuid
        affinity_group_list (list): list of affinity groups, each one is a dictionary. Ignore if empty.
        net_list (list): list of interfaces, each one is a dictionary with:
            name, net_id, vpci (ignored), model (ignored), mac_address (SR-IOV),
            use ('data'|'bridge'|'mgmt'), type ('virtual'|'PCI-PASSTHROUGH'('PF')|'SR-IOV'('VF')|'VFnotShared'),
            vim_id (filled/added by this function), floating_ip (bool/None), port_security (bool)
        cloud_config (dict): (optional) cloud-init style configuration: key-pairs,
            users, user-data, config-files, boot-data-drive
        disk_list: (optional) list with additional disks to the VM. Each item is a dict with:
            image_id (optional), size (GB), vim_id (optional existing volume)
        availability_zone_index: Index of availability_zone_list to use for this VM. None if not AV required
        availability_zone_list: list of availability zones given by user in the VNFD descriptor. Ignore if
            availability_zone_index is None

    Returns:
        A tuple with the instance identifier and created_items or raises an exception on error.
        created_items is a dictionary passed later to delete_vminstance and action_vminstance
        (created ports, volumes, floating IPs, ...).
    """
    self.logger.debug(
        "new_vminstance input: image='%s' flavor='%s' nics='%s'",
        image_id,
        flavor_id,
        str(net_list),
    )
    server = None
    created_items = {}
    net_list_vim = []
    # list of external networks to be connected to instance, later on used to create floating_ip
    external_network = []
    # List of ports with port-security disabled
    no_secured_ports = []
    block_device_mapping = {}
    existing_vim_volumes = []
    server_group_id = None
    scheduller_hints = {}

    try:
        # Check the Openstack Connection
        self._reload_connection()

        # Prepare network list
        self._prepare_network_for_vminstance(
            name=name,
            net_list=net_list,
            created_items=created_items,
            net_list_vim=net_list_vim,
            external_network=external_network,
            no_secured_ports=no_secured_ports,
        )

        # Cloud config
        config_drive, userdata = self._create_user_data(cloud_config)

        # Get availability Zone
        vm_av_zone = self._get_vm_availability_zone(
            availability_zone_index, availability_zone_list
        )

        if disk_list:
            # Prepare disks
            self._prepare_disk_for_vminstance(
                name=name,
                existing_vim_volumes=existing_vim_volumes,
                created_items=created_items,
                vm_av_zone=vm_av_zone,
                block_device_mapping=block_device_mapping,
                disk_list=disk_list,
            )

        if affinity_group_list:
            # Only first id on the list will be used. Openstack restriction
            server_group_id = affinity_group_list[0]["affinity_group_id"]
            scheduller_hints["group"] = server_group_id

        self.logger.debug(
            "nova.servers.create({}, {}, {}, nics={}, security_groups={}, "
            "availability_zone={}, key_name={}, userdata={}, config_drive={}, "
            "block_device_mapping={}, server_group={})".format(
                name,
                image_id,
                flavor_id,
                net_list_vim,
                self.config.get("security_groups"),
                vm_av_zone,
                self.config.get("keypair"),
                userdata,
                config_drive,
                block_device_mapping,
                server_group_id,
            )
        )
        # Create VM
        server = self.nova.servers.create(
            name=name,
            image=image_id,
            flavor=flavor_id,
            nics=net_list_vim,
            security_groups=self.config.get("security_groups"),
            # TODO remove security_groups in future versions. Already at neutron port
            availability_zone=vm_av_zone,
            key_name=self.config.get("keypair"),
            userdata=userdata,
            config_drive=config_drive,
            block_device_mapping=block_device_mapping,
            scheduler_hints=scheduller_hints,
        )

        vm_start_time = time.time()

        self._update_port_security_for_vminstance(no_secured_ports, server)

        self._prepare_external_network_for_vminstance(
            external_network=external_network,
            server=server,
            created_items=created_items,
            vm_start_time=vm_start_time,
        )

        return server.id, created_items

    except Exception as e:
        server_id = None
        if server:
            server_id = server.id

        try:
            # Rollback: drop keep flags so volumes can be removed too
            created_items = self.remove_keep_tag_from_persistent_volumes(
                created_items
            )

            self.delete_vminstance(server_id, created_items)

        except Exception as e2:
            self.logger.error("new_vminstance rollback fail {}".format(e2))

        self._format_exception(e)
def remove_keep_tag_from_persistent_volumes(created_items: Dict) -> Dict:
    """Removes the keep flag from persistent volumes. So, those volumes could be removed.

    Args:
        created_items (dict): All created items belongs to VM

    Returns:
        updated_created_items (dict): Dict which does not include keep flag for volumes.
    """
    return {
        key.replace(":keep", ""): value
        for (key, value) in created_items.items()
    }
def get_vminstance(self, vm_id):
    """Returns the VM instance information from VIM"""
    # Thin wrapper around the nova server lookup helper
    return self._find_nova_server(vm_id)
@catch_any_exception
def get_vminstance_console(self, vm_id, console_type="vnc"):
    """
    Get a console for the virtual machine
    Params:
        vm_id: uuid of the VM
        console_type, can be:
            "novnc" (by default), "xvpvnc" for VNC types,
            "rdp-html5" for RDP types, "spice-html5" for SPICE types
    Returns dict with the console parameters:
        protocol: ssh, ftp, http, https, ...
        server: usually ip address
        port: the http, ssh, ... port
        suffix: extra text, e.g. the http path and query string
    """
    self.logger.debug("Getting VM CONSOLE from VIM")
    self._reload_connection()
    server = self.nova.servers.find(id=vm_id)

    if console_type is None or console_type == "novnc":
        console_dict = server.get_vnc_console("novnc")
    elif console_type == "xvpvnc":
        console_dict = server.get_vnc_console(console_type)
    elif console_type == "rdp-html5":
        console_dict = server.get_rdp_console(console_type)
    elif console_type == "spice-html5":
        console_dict = server.get_spice_console(console_type)
    else:
        raise vimconn.VimConnException(
            "console type '{}' not allowed".format(console_type),
            http_code=vimconn.HTTP_Bad_Request,
        )

    console_dict1 = console_dict.get("console")

    if console_dict1:
        console_url = console_dict1.get("url")

        if console_url:
            # parse console_url; e.g. protocol://server:port/suffix
            protocol_index = console_url.find("//")
            suffix_index = (
                console_url[protocol_index + 2 :].find("/") + protocol_index + 2
            )
            port_index = (
                console_url[protocol_index + 2 : suffix_index].find(":")
                + protocol_index
                + 2
            )

            if protocol_index < 0 or port_index < 0 or suffix_index < 0:
                # FIX: message must be the first positional argument and the HTTP
                # code a keyword, matching vimconn.VimConnException's signature;
                # previously a negative code was passed as the message.
                raise vimconn.VimConnException(
                    "Unexpected response from VIM",
                    http_code=vimconn.HTTP_Internal_Server_Error,
                )

            # NOTE(review): "port" keeps the leading ':' and stays a string here,
            # while action_vminstance returns int(...) without it — looks
            # inconsistent, but callers may depend on it; left unchanged.
            console_dict = {
                "protocol": console_url[0:protocol_index],
                "server": console_url[protocol_index + 2 : port_index],
                "port": console_url[port_index:suffix_index],
                "suffix": console_url[suffix_index + 1 :],
            }

            return console_dict

    raise vimconn.VimConnUnexpectedResponse("Unexpected response from VIM")
2982 def _delete_ports_by_id_wth_neutron(self
, k_id
: str) -> None:
2983 """Neutron delete ports by id.
2985 k_id (str): Port id in the VIM
2988 self
.neutron
.delete_port(k_id
)
2990 except (neExceptions
.ConnectionFailed
, ConnectionError
) as e
:
2991 self
.logger
.error("Error deleting port: {}: {}".format(type(e
).__name
__, e
))
2992 # If there is connection error, raise.
2993 self
._format
_exception
(e
)
2994 except Exception as e
:
2995 self
.logger
.error("Error deleting port: {}: {}".format(type(e
).__name
__, e
))
def delete_shared_volumes(self, shared_volume_vim_id: str) -> bool:
    """Cinder delete volume by id.

    Args:
        shared_volume_vim_id (str): ID of shared volume in VIM

    Raises:
        VimConnException: if the volume never becomes "available" to delete.
    """
    elapsed_time = 0
    try:
        while elapsed_time < server_timeout:
            vol_status = self.cinder.volumes.get(shared_volume_vim_id).status
            if vol_status == "available":
                self.cinder.volumes.delete(shared_volume_vim_id)
                return True

            time.sleep(5)
            elapsed_time += 5

        if elapsed_time >= server_timeout:
            raise vimconn.VimConnException(
                "Timeout waiting for volume "
                + shared_volume_vim_id
                + " to be available",
                http_code=vimconn.HTTP_Request_Timeout,
            )

    except Exception as e:
        self.logger.error(
            "Error deleting volume: {}: {}".format(type(e).__name__, e)
        )
        self._format_exception(e)
3027 def _delete_volumes_by_id_wth_cinder(
3028 self
, k
: str, k_id
: str, volumes_to_hold
: list, created_items
: dict
3030 """Cinder delete volume by id.
3032 k (str): Full item name in created_items
3033 k_id (str): ID of floating ip in VIM
3034 volumes_to_hold (list): Volumes not to delete
3035 created_items (dict): All created items belongs to VM
3038 if k_id
in volumes_to_hold
:
3041 if self
.cinder
.volumes
.get(k_id
).status
!= "available":
3045 self
.cinder
.volumes
.delete(k_id
)
3046 created_items
[k
] = None
3048 except (cExceptions
.ConnectionError
, ConnectionError
) as e
:
3050 "Error deleting volume: {}: {}".format(type(e
).__name
__, e
)
3052 self
._format
_exception
(e
)
3053 except Exception as e
:
3055 "Error deleting volume: {}: {}".format(type(e
).__name
__, e
)
3058 def _delete_floating_ip_by_id(self
, k
: str, k_id
: str, created_items
: dict) -> None:
3059 """Neutron delete floating ip by id.
3061 k (str): Full item name in created_items
3062 k_id (str): ID of floating ip in VIM
3063 created_items (dict): All created items belongs to VM
3066 self
.neutron
.delete_floatingip(k_id
)
3067 created_items
[k
] = None
3069 except (neExceptions
.ConnectionFailed
, ConnectionError
) as e
:
3071 "Error deleting floating ip: {}: {}".format(type(e
).__name
__, e
)
3073 self
._format
_exception
(e
)
3074 except Exception as e
:
3076 "Error deleting floating ip: {}: {}".format(type(e
).__name
__, e
)
3080 def _get_item_name_id(k
: str) -> Tuple
[str, str]:
3081 k_item
, _
, k_id
= k
.partition(":")
3084 def _delete_vm_ports_attached_to_network(self
, created_items
: dict) -> None:
3085 """Delete VM ports attached to the networks before deleting virtual machine.
3087 created_items (dict): All created items belongs to VM
3090 for k
, v
in created_items
.items():
3091 if not v
: # skip already deleted
3095 k_item
, k_id
= self
._get
_item
_name
_id
(k
)
3096 if k_item
== "port":
3097 self
._delete
_ports
_by
_id
_wth
_neutron
(k_id
)
3099 except (neExceptions
.ConnectionFailed
, ConnectionError
) as e
:
3101 "Error deleting port: {}: {}".format(type(e
).__name
__, e
)
3103 self
._format
_exception
(e
)
3104 except Exception as e
:
3106 "Error deleting port: {}: {}".format(type(e
).__name
__, e
)
3109 def _delete_created_items(
3110 self
, created_items
: dict, volumes_to_hold
: list, keep_waiting
: bool
3112 """Delete Volumes and floating ip if they exist in created_items."""
3113 for k
, v
in created_items
.items():
3114 if not v
: # skip already deleted
3118 k_item
, k_id
= self
._get
_item
_name
_id
(k
)
3119 if k_item
== "volume":
3120 unavailable_vol
= self
._delete
_volumes
_by
_id
_wth
_cinder
(
3121 k
, k_id
, volumes_to_hold
, created_items
3127 elif k_item
== "floating_ip":
3128 self
._delete
_floating
_ip
_by
_id
(k
, k_id
, created_items
)
3131 cExceptions
.ConnectionError
,
3132 neExceptions
.ConnectionFailed
,
3137 self
.logger
.error("Error deleting {}: {}".format(k
, e
))
3138 self
._format
_exception
(e
)
3140 except Exception as e
:
3141 self
.logger
.error("Error deleting {}: {}".format(k
, e
))
3146 def _extract_items_wth_keep_flag_from_created_items(created_items
: dict) -> dict:
3147 """Remove the volumes which has key flag from created_items
3150 created_items (dict): All created items belongs to VM
3153 created_items (dict): Persistent volumes eliminated created_items
3157 for (key
, value
) in created_items
.items()
3158 if len(key
.split(":")) == 2
@catch_any_exception
def delete_vminstance(
    self, vm_id: str, created_items: dict = None, volumes_to_hold: list = None
) -> None:
    """Removes a VM instance from VIM. Returns the old identifier.

    Args:
        vm_id (str): Identifier of VM instance
        created_items (dict): All created items belongs to VM
        volumes_to_hold (list): Volumes_to_hold
    """
    if created_items is None:
        created_items = {}
    if volumes_to_hold is None:
        volumes_to_hold = []

    try:
        # Drop ":keep" entries; those volumes must survive the VM
        created_items = self._extract_items_wth_keep_flag_from_created_items(
            created_items
        )

        self._reload_connection()

        # Delete VM ports attached to the networks before the virtual machine
        if created_items:
            self._delete_vm_ports_attached_to_network(created_items)

        if vm_id:
            self.nova.servers.delete(vm_id)

        # Although having detached, volumes should have in active status before deleting.
        # We ensure in this loop
        keep_waiting = True
        elapsed_time = 0

        while keep_waiting and elapsed_time < volume_timeout:
            keep_waiting = False

            # Delete volumes and floating IP.
            keep_waiting = self._delete_created_items(
                created_items, volumes_to_hold, keep_waiting
            )

            if keep_waiting:
                time.sleep(1)
                elapsed_time += 1

    except (nvExceptions.NotFound, nvExceptions.ResourceNotFound) as e:
        # If VM does not exist, it does not raise
        self.logger.warning(f"Error deleting VM: {vm_id} is not found, {str(e)}")
def refresh_vms_status(self, vm_list):
    """Get the status of the virtual machines and their interfaces/ports
    Params: the list of VM identifiers
    Returns a dictionary with:
        vm_id:          #VIM id of this Virtual Machine
            status:     #Mandatory. Text with one of:
                        # DELETED (not found at vim)
                        # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
                        # OTHER (Vim reported other status not understood)
                        # ERROR (VIM indicates an ERROR status)
                        # ACTIVE, PAUSED, SUSPENDED, INACTIVE (not running),
                        # CREATING (on building process), ERROR
                        # ACTIVE:NoMgmtIP (Active but any of its interface has an IP address
            error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR
            vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)
            interfaces: list with
                - vim_info:         #Text with plain information obtained from vim (yaml.safe_dump)
                  mac_address:      #Text format XX:XX:XX:XX:XX:XX
                  vim_net_id:       #network id where this interface is connected
                  vim_interface_id: #interface/port VIM id
                  ip_address:       #null, or text with IPv4, IPv6 address
                  compute_node:     #identification of compute node where PF,VF interface is allocated
                  pci:              #PCI address of the NIC that hosts the PF,VF
                  vlan:             #physical VLAN used for VF
    """
    vm_dict = {}
    self.logger.debug(
        "refresh_vms status: Getting tenant VM instance information from VIM"
    )

    for vm_id in vm_list:
        vm = {}

        try:
            vm_vim = self.get_vminstance(vm_id)

            if vm_vim["status"] in vmStatus2manoFormat:
                vm["status"] = vmStatus2manoFormat[vm_vim["status"]]
            else:
                vm["status"] = "OTHER"
                vm["error_msg"] = "VIM status reported " + vm_vim["status"]

            # user_data may be huge/sensitive; do not serialize it
            vm_vim.pop("OS-EXT-SRV-ATTR:user_data", None)
            vm_vim.pop("user_data", None)
            vm["vim_info"] = self.serialize(vm_vim)

            vm["interfaces"] = []
            if vm_vim.get("fault"):
                vm["error_msg"] = str(vm_vim["fault"])

            # get interfaces
            try:
                self._reload_connection()
                port_dict = self.neutron.list_ports(device_id=vm_id)

                for port in port_dict["ports"]:
                    interface = {}
                    interface["vim_info"] = self.serialize(port)
                    interface["mac_address"] = port.get("mac_address")
                    interface["vim_net_id"] = port["network_id"]
                    interface["vim_interface_id"] = port["id"]
                    # check if OS-EXT-SRV-ATTR:host is there,
                    # in case of non-admin credentials, it will be missing

                    if vm_vim.get("OS-EXT-SRV-ATTR:host"):
                        interface["compute_node"] = vm_vim["OS-EXT-SRV-ATTR:host"]

                    interface["pci"] = None

                    # check if binding:profile is there,
                    # in case of non-admin credentials, it will be missing
                    if port.get("binding:profile"):
                        if port["binding:profile"].get("pci_slot"):
                            # TODO: At the moment sr-iov pci addresses are converted to PF pci addresses by setting
                            #  the slot to 0x00
                            # TODO: This is just a workaround valid for niantinc. Find a better way to do so
                            #   CHANGE DDDD:BB:SS.F to DDDD:BB:00.(F%2)   assuming there are 2 ports per nic
                            pci = port["binding:profile"]["pci_slot"]
                            # interface["pci"] = pci[:-4] + "00." + str(int(pci[-1]) % 2)
                            interface["pci"] = pci

                    interface["vlan"] = None

                    if port.get("binding:vif_details"):
                        interface["vlan"] = port["binding:vif_details"].get("vlan")

                    # Get vlan from network in case not present in port for those old openstacks and cases where
                    # it is needed vlan at PT
                    if not interface["vlan"]:
                        # if network is of type vlan and port is of type direct (sr-iov) then set vlan id
                        network = self.neutron.show_network(port["network_id"])

                        if (
                            network["network"].get("provider:network_type")
                            == "vlan"
                        ):
                            # and port.get("binding:vnic_type") in ("direct", "direct-physical"):
                            interface["vlan"] = network["network"].get(
                                "provider:segmentation_id"
                            )

                    ips = []
                    # look for floating ip address
                    try:
                        floating_ip_dict = self.neutron.list_floatingips(
                            port_id=port["id"]
                        )

                        if floating_ip_dict.get("floatingips"):
                            ips.append(
                                floating_ip_dict["floatingips"][0].get(
                                    "floating_ip_address"
                                )
                            )
                    except Exception:
                        pass

                    for subnet in port["fixed_ips"]:
                        ips.append(subnet["ip_address"])

                    interface["ip_address"] = ";".join(ips)
                    vm["interfaces"].append(interface)
            except Exception as e:
                self.logger.error(
                    "Error getting vm interface information {}: {}".format(
                        type(e).__name__, e
                    )
                )
        except vimconn.VimConnNotFoundException as e:
            self.logger.error("Exception getting vm status: %s", str(e))
            vm["status"] = "DELETED"
            vm["error_msg"] = str(e)
        except vimconn.VimConnException as e:
            self.logger.error("Exception getting vm status: %s", str(e))
            vm["status"] = "VIM_ERROR"
            vm["error_msg"] = str(e)

        vm_dict[vm_id] = vm

    return vm_dict
@catch_any_exception
def action_vminstance(self, vm_id, action_dict, created_items=None):
    """Send and action over a VM instance from VIM
    Returns None or the console dict if the action was successfully sent to the VIM

    Args:
        vm_id: VIM identifier of the server
        action_dict: single-key dict selecting the action (start, pause,
            resume, shutoff/shutdown, forceOff, terminate, createImage,
            rebuild, reboot, console)
        created_items: kept for interface compatibility; not used here.
    """
    # FIX: mutable default argument ({}) replaced by None sentinel; the
    # parameter is not used in this method, so behavior is unchanged.
    if created_items is None:
        created_items = {}

    self.logger.debug("Action over VM '%s': %s", vm_id, str(action_dict))
    self._reload_connection()
    server = self.nova.servers.find(id=vm_id)

    if "start" in action_dict:
        if action_dict["start"] == "rebuild":
            server.rebuild()
        else:
            if server.status == "PAUSED":
                server.unpause()
            elif server.status == "SUSPENDED":
                server.resume()
            elif server.status == "SHUTOFF":
                server.start()
            else:
                self.logger.debug(
                    "ERROR : Instance is not in SHUTOFF/PAUSE/SUSPEND state"
                )
                raise vimconn.VimConnException(
                    "Cannot 'start' instance while it is in active state",
                    http_code=vimconn.HTTP_Bad_Request,
                )
    elif "pause" in action_dict:
        server.pause()
    elif "resume" in action_dict:
        server.resume()
    elif "shutoff" in action_dict or "shutdown" in action_dict:
        self.logger.debug("server status %s", server.status)
        if server.status == "ACTIVE":
            server.stop()
        else:
            self.logger.debug("ERROR: VM is not in Active state")
            raise vimconn.VimConnException(
                "VM is not in active state, stop operation is not allowed",
                http_code=vimconn.HTTP_Bad_Request,
            )
    elif "forceOff" in action_dict:
        server.stop()  # TODO
    elif "terminate" in action_dict:
        server.delete()
    elif "createImage" in action_dict:
        server.create_image()
        # "path":path_schema,
        # "description":description_schema,
        # "name":name_schema,
        # "metadata":metadata_schema,
        # "imageRef": id_schema,
        # "disk": {"oneOf":[{"type": "null"}, {"type":"string"}] },
    elif "rebuild" in action_dict:
        server.rebuild(server.image["id"])
    elif "reboot" in action_dict:
        server.reboot()  # reboot_type="SOFT"
    elif "console" in action_dict:
        console_type = action_dict["console"]

        if console_type is None or console_type == "novnc":
            console_dict = server.get_vnc_console("novnc")
        elif console_type == "xvpvnc":
            console_dict = server.get_vnc_console(console_type)
        elif console_type == "rdp-html5":
            console_dict = server.get_rdp_console(console_type)
        elif console_type == "spice-html5":
            console_dict = server.get_spice_console(console_type)
        else:
            raise vimconn.VimConnException(
                "console type '{}' not allowed".format(console_type),
                http_code=vimconn.HTTP_Bad_Request,
            )

        try:
            # parse console_url; e.g. protocol://server:port/suffix
            console_url = console_dict["console"]["url"]
            protocol_index = console_url.find("//")
            suffix_index = (
                console_url[protocol_index + 2 :].find("/") + protocol_index + 2
            )
            port_index = (
                console_url[protocol_index + 2 : suffix_index].find(":")
                + protocol_index
                + 2
            )

            if protocol_index < 0 or port_index < 0 or suffix_index < 0:
                raise vimconn.VimConnException(
                    "Unexpected response from VIM " + str(console_dict)
                )

            console_dict2 = {
                "protocol": console_url[0:protocol_index],
                "server": console_url[protocol_index + 2 : port_index],
                "port": int(console_url[port_index + 1 : suffix_index]),
                "suffix": console_url[suffix_index + 1 :],
            }

            return console_dict2
        except Exception:
            raise vimconn.VimConnException(
                "Unexpected response from VIM " + str(console_dict)
            )

    return None
3458 # ###### VIO Specific Changes #########
3459 def _generate_vlanID(self
):
3461 Method to get unused vlanID
3469 networks
= self
.get_network_list()
3471 for net
in networks
:
3472 if net
.get("provider:segmentation_id"):
3473 usedVlanIDs
.append(net
.get("provider:segmentation_id"))
3475 used_vlanIDs
= set(usedVlanIDs
)
3477 # find unused VLAN ID
3478 for vlanID_range
in self
.config
.get("dataplane_net_vlan_range"):
3480 start_vlanid
, end_vlanid
= map(
3481 int, vlanID_range
.replace(" ", "").split("-")
3484 for vlanID
in range(start_vlanid
, end_vlanid
+ 1):
3485 if vlanID
not in used_vlanIDs
:
3487 except Exception as exp
:
3488 raise vimconn
.VimConnException(
3489 "Exception {} occurred while generating VLAN ID.".format(exp
)
3492 raise vimconn
.VimConnConflictException(
3493 "Unable to create the SRIOV VLAN network. All given Vlan IDs {} are in use.".format(
3494 self
.config
.get("dataplane_net_vlan_range")
3498 def _generate_multisegment_vlanID(self
):
3500 Method to get unused vlanID
3508 networks
= self
.get_network_list()
3509 for net
in networks
:
3510 if net
.get("provider:network_type") == "vlan" and net
.get(
3511 "provider:segmentation_id"
3513 usedVlanIDs
.append(net
.get("provider:segmentation_id"))
3514 elif net
.get("segments"):
3515 for segment
in net
.get("segments"):
3516 if segment
.get("provider:network_type") == "vlan" and segment
.get(
3517 "provider:segmentation_id"
3519 usedVlanIDs
.append(segment
.get("provider:segmentation_id"))
3521 used_vlanIDs
= set(usedVlanIDs
)
3523 # find unused VLAN ID
3524 for vlanID_range
in self
.config
.get("multisegment_vlan_range"):
3526 start_vlanid
, end_vlanid
= map(
3527 int, vlanID_range
.replace(" ", "").split("-")
3530 for vlanID
in range(start_vlanid
, end_vlanid
+ 1):
3531 if vlanID
not in used_vlanIDs
:
3533 except Exception as exp
:
3534 raise vimconn
.VimConnException(
3535 "Exception {} occurred while generating VLAN ID.".format(exp
)
3538 raise vimconn
.VimConnConflictException(
3539 "Unable to create the VLAN segment. All VLAN IDs {} are in use.".format(
3540 self
.config
.get("multisegment_vlan_range")
3544 def _validate_vlan_ranges(self
, input_vlan_range
, text_vlan_range
):
3546 Method to validate user given vlanID ranges
3550 for vlanID_range
in input_vlan_range
:
3551 vlan_range
= vlanID_range
.replace(" ", "")
3553 vlanID_pattern
= r
"(\d)*-(\d)*$"
3554 match_obj
= re
.match(vlanID_pattern
, vlan_range
)
3556 raise vimconn
.VimConnConflictException(
3557 "Invalid VLAN range for {}: {}.You must provide "
3558 "'{}' in format [start_ID - end_ID].".format(
3559 text_vlan_range
, vlanID_range
, text_vlan_range
3563 start_vlanid
, end_vlanid
= map(int, vlan_range
.split("-"))
3564 if start_vlanid
<= 0:
3565 raise vimconn
.VimConnConflictException(
3566 "Invalid VLAN range for {}: {}. Start ID can not be zero. For VLAN "
3567 "networks valid IDs are 1 to 4094 ".format(
3568 text_vlan_range
, vlanID_range
3572 if end_vlanid
> 4094:
3573 raise vimconn
.VimConnConflictException(
3574 "Invalid VLAN range for {}: {}. End VLAN ID can not be "
3575 "greater than 4094. For VLAN networks valid IDs are 1 to 4094 ".format(
3576 text_vlan_range
, vlanID_range
3580 if start_vlanid
> end_vlanid
:
3581 raise vimconn
.VimConnConflictException(
3582 "Invalid VLAN range for {}: {}. You must provide '{}'"
3583 " in format start_ID - end_ID and start_ID < end_ID ".format(
3584 text_vlan_range
, vlanID_range
, text_vlan_range
3588 def get_hosts_info(self
):
3589 """Get the information of deployed hosts
3590 Returns the hosts content"""
3592 print("osconnector: Getting Host info from VIM")
3596 self
._reload
_connection
()
3597 hypervisors
= self
.nova
.hypervisors
.list()
3599 for hype
in hypervisors
:
3600 h_list
.append(hype
.to_dict())
3602 return 1, {"hosts": h_list
}
3603 except nvExceptions
.NotFound
as e
:
3604 error_value
= -vimconn
.HTTP_Not_Found
3605 error_text
= str(e
) if len(e
.args
) == 0 else str(e
.args
[0])
3606 except (ksExceptions
.ClientException
, nvExceptions
.ClientException
) as e
:
3607 error_value
= -vimconn
.HTTP_Bad_Request
3611 + (str(e
) if len(e
.args
) == 0 else str(e
.args
[0]))
3614 # TODO insert exception vimconn.HTTP_Unauthorized
3615 # if reaching here is because an exception
3616 self
.logger
.debug("get_hosts_info " + error_text
)
3618 return error_value
, error_text
3620 def get_hosts(self
, vim_tenant
):
3621 """Get the hosts and deployed instances
3622 Returns the hosts content"""
3623 r
, hype_dict
= self
.get_hosts_info()
3628 hypervisors
= hype_dict
["hosts"]
3631 servers
= self
.nova
.servers
.list()
3632 for hype
in hypervisors
:
3633 for server
in servers
:
3635 server
.to_dict()["OS-EXT-SRV-ATTR:hypervisor_hostname"]
3636 == hype
["hypervisor_hostname"]
3639 hype
["vm"].append(server
.id)
3641 hype
["vm"] = [server
.id]
3644 except nvExceptions
.NotFound
as e
:
3645 error_value
= -vimconn
.HTTP_Not_Found
3646 error_text
= str(e
) if len(e
.args
) == 0 else str(e
.args
[0])
3647 except (ksExceptions
.ClientException
, nvExceptions
.ClientException
) as e
:
3648 error_value
= -vimconn
.HTTP_Bad_Request
3652 + (str(e
) if len(e
.args
) == 0 else str(e
.args
[0]))
3655 # TODO insert exception vimconn.HTTP_Unauthorized
3656 # if reaching here is because an exception
3657 self
.logger
.debug("get_hosts " + error_text
)
3659 return error_value
, error_text
3661 @catch_any_exception
3662 def new_affinity_group(self
, affinity_group_data
):
3663 """Adds a server group to VIM
3664 affinity_group_data contains a dictionary with information, keys:
3665 name: name in VIM for the server group
3666 type: affinity or anti-affinity
3667 scope: Only nfvi-node allowed
3668 Returns the server group identifier"""
3669 self
.logger
.debug("Adding Server Group '%s'", str(affinity_group_data
))
3670 name
= affinity_group_data
["name"]
3671 policy
= affinity_group_data
["type"]
3672 self
._reload
_connection
()
3673 new_server_group
= self
.nova
.server_groups
.create(name
, policy
)
3674 return new_server_group
.id
3676 @catch_any_exception
3677 def get_affinity_group(self
, affinity_group_id
):
3678 """Obtain server group details from the VIM. Returns the server group detais as a dict"""
3679 self
.logger
.debug("Getting flavor '%s'", affinity_group_id
)
3680 self
._reload
_connection
()
3681 server_group
= self
.nova
.server_groups
.find(id=affinity_group_id
)
3682 return server_group
.to_dict()
3684 @catch_any_exception
3685 def delete_affinity_group(self
, affinity_group_id
):
3686 """Deletes a server group from the VIM. Returns the old affinity_group_id"""
3687 self
.logger
.debug("Getting server group '%s'", affinity_group_id
)
3688 self
._reload
_connection
()
3689 self
.nova
.server_groups
.delete(affinity_group_id
)
3690 return affinity_group_id
3692 @catch_any_exception
3693 def get_vdu_state(self
, vm_id
, host_is_required
=False) -> list:
3694 """Getting the state of a VDU.
3696 vm_id (str): ID of an instance
3697 host_is_required (Boolean): If the VIM account is non-admin, host info does not appear in server_dict
3698 and if this is set to True, it raises KeyError.
3700 vdu_data (list): VDU details including state, flavor, host_info, AZ
3702 self
.logger
.debug("Getting the status of VM")
3703 self
.logger
.debug("VIM VM ID %s", vm_id
)
3704 self
._reload
_connection
()
3705 server_dict
= self
._find
_nova
_server
(vm_id
)
3706 srv_attr
= "OS-EXT-SRV-ATTR:host"
3708 server_dict
[srv_attr
] if host_is_required
else server_dict
.get(srv_attr
)
3711 server_dict
["status"],
3712 server_dict
["flavor"]["id"],
3714 server_dict
["OS-EXT-AZ:availability_zone"],
3716 self
.logger
.debug("vdu_data %s", vdu_data
)
3719 def check_compute_availability(self
, host
, server_flavor_details
):
3720 self
._reload
_connection
()
3721 hypervisor_search
= self
.nova
.hypervisors
.search(
3722 hypervisor_match
=host
, servers
=True
3724 for hypervisor
in hypervisor_search
:
3725 hypervisor_id
= hypervisor
.to_dict()["id"]
3726 hypervisor_details
= self
.nova
.hypervisors
.get(hypervisor
=hypervisor_id
)
3727 hypervisor_dict
= hypervisor_details
.to_dict()
3728 hypervisor_temp
= json
.dumps(hypervisor_dict
)
3729 hypervisor_json
= json
.loads(hypervisor_temp
)
3730 resources_available
= [
3731 hypervisor_json
["free_ram_mb"],
3732 hypervisor_json
["disk_available_least"],
3733 hypervisor_json
["vcpus"] - hypervisor_json
["vcpus_used"],
3735 compute_available
= all(
3736 x
> y
for x
, y
in zip(resources_available
, server_flavor_details
)
3738 if compute_available
:
3741 def check_availability_zone(
3742 self
, old_az
, server_flavor_details
, old_host
, host
=None
3744 self
._reload
_connection
()
3745 az_check
= {"zone_check": False, "compute_availability": None}
3746 aggregates_list
= self
.nova
.aggregates
.list()
3747 for aggregate
in aggregates_list
:
3748 aggregate_details
= aggregate
.to_dict()
3749 aggregate_temp
= json
.dumps(aggregate_details
)
3750 aggregate_json
= json
.loads(aggregate_temp
)
3751 if aggregate_json
["availability_zone"] == old_az
:
3752 hosts_list
= aggregate_json
["hosts"]
3753 if host
is not None:
3754 if host
in hosts_list
:
3755 az_check
["zone_check"] = True
3756 available_compute_id
= self
.check_compute_availability(
3757 host
, server_flavor_details
3759 if available_compute_id
is not None:
3760 az_check
["compute_availability"] = available_compute_id
3762 for check_host
in hosts_list
:
3763 if check_host
!= old_host
:
3764 available_compute_id
= self
.check_compute_availability(
3765 check_host
, server_flavor_details
3767 if available_compute_id
is not None:
3768 az_check
["zone_check"] = True
3769 az_check
["compute_availability"] = available_compute_id
3772 az_check
["zone_check"] = True
3775 @catch_any_exception
3776 def migrate_instance(self
, vm_id
, compute_host
=None):
3780 vm_id: ID of an instance
3781 compute_host: Host to migrate the vdu to
3783 self
._reload
_connection
()
3785 instance_state
= self
.get_vdu_state(vm_id
, host_is_required
=True)
3786 server_flavor_id
= instance_state
[1]
3787 server_hypervisor_name
= instance_state
[2]
3788 server_availability_zone
= instance_state
[3]
3789 server_flavor
= self
.nova
.flavors
.find(id=server_flavor_id
).to_dict()
3790 server_flavor_details
= [
3791 server_flavor
["ram"],
3792 server_flavor
["disk"],
3793 server_flavor
["vcpus"],
3795 if compute_host
== server_hypervisor_name
:
3796 raise vimconn
.VimConnException(
3797 "Unable to migrate instance '{}' to the same host '{}'".format(
3800 http_code
=vimconn
.HTTP_Bad_Request
,
3802 az_status
= self
.check_availability_zone(
3803 server_availability_zone
,
3804 server_flavor_details
,
3805 server_hypervisor_name
,
3808 availability_zone_check
= az_status
["zone_check"]
3809 available_compute_id
= az_status
.get("compute_availability")
3811 if availability_zone_check
is False:
3812 raise vimconn
.VimConnException(
3813 "Unable to migrate instance '{}' to a different availability zone".format(
3816 http_code
=vimconn
.HTTP_Bad_Request
,
3818 if available_compute_id
is not None:
3819 # disk_over_commit parameter for live_migrate method is not valid for Nova API version >= 2.25
3820 self
.nova
.servers
.live_migrate(
3822 host
=available_compute_id
,
3823 block_migration
=True,
3826 changed_compute_host
= ""
3827 if state
== "MIGRATING":
3828 vm_state
= self
.__wait
_for
_vm
(vm_id
, "ACTIVE")
3829 changed_compute_host
= self
.get_vdu_state(vm_id
, host_is_required
=True)[
3832 if vm_state
and changed_compute_host
== available_compute_id
:
3834 "Instance '{}' migrated to the new compute host '{}'".format(
3835 vm_id
, changed_compute_host
3838 return state
, available_compute_id
3840 raise vimconn
.VimConnException(
3841 "Migration Failed. Instance '{}' not moved to the new host {}".format(
3842 vm_id
, available_compute_id
3844 http_code
=vimconn
.HTTP_Bad_Request
,
3847 raise vimconn
.VimConnException(
3848 "Compute '{}' not available or does not have enough resources to migrate the instance".format(
3849 available_compute_id
3851 http_code
=vimconn
.HTTP_Bad_Request
,
3854 @catch_any_exception
3855 def resize_instance(self
, vm_id
, new_flavor_id
):
3857 For resizing the vm based on the given
3860 vm_id : ID of an instance
3861 new_flavor_id : Flavor id to be resized
3862 Return the status of a resized instance
3864 self
._reload
_connection
()
3865 self
.logger
.debug("resize the flavor of an instance")
3866 instance_status
, old_flavor_id
, compute_host
, az
= self
.get_vdu_state(vm_id
)
3867 old_flavor_disk
= self
.nova
.flavors
.find(id=old_flavor_id
).to_dict()["disk"]
3868 new_flavor_disk
= self
.nova
.flavors
.find(id=new_flavor_id
).to_dict()["disk"]
3869 if instance_status
== "ACTIVE" or instance_status
== "SHUTOFF":
3870 if old_flavor_disk
> new_flavor_disk
:
3871 raise nvExceptions
.BadRequest(
3873 message
="Server disk resize failed. Resize to lower disk flavor is not allowed",
3876 self
.nova
.servers
.resize(server
=vm_id
, flavor
=new_flavor_id
)
3877 vm_state
= self
.__wait
_for
_vm
(vm_id
, "VERIFY_RESIZE")
3879 instance_resized_status
= self
.confirm_resize(vm_id
)
3880 return instance_resized_status
3882 raise nvExceptions
.BadRequest(
3884 message
="Cannot 'resize' vm_state is in ERROR",
3888 self
.logger
.debug("ERROR : Instance is not in ACTIVE or SHUTOFF state")
3889 raise nvExceptions
.BadRequest(
3891 message
="Cannot 'resize' instance while it is in vm_state resized",
3894 def confirm_resize(self
, vm_id
):
3896 Confirm the resize of an instance
3898 vm_id: ID of an instance
3900 self
._reload
_connection
()
3901 self
.nova
.servers
.confirm_resize(server
=vm_id
)
3902 if self
.get_vdu_state(vm_id
)[0] == "VERIFY_RESIZE":
3903 self
.__wait
_for
_vm
(vm_id
, "ACTIVE")
3904 instance_status
= self
.get_vdu_state(vm_id
)[0]
3905 return instance_status
3907 def get_monitoring_data(self
):
3909 self
.logger
.debug("Getting servers and ports data from Openstack VIMs.")
3910 self
._reload
_connection
()
3911 all_servers
= self
.nova
.servers
.list(detailed
=True)
3913 for server
in all_servers
:
3914 if server
.flavor
.get("original_name"):
3915 server
.flavor
["id"] = self
.nova
.flavors
.find(
3916 name
=server
.flavor
["original_name"]
3918 except nClient
.exceptions
.NotFound
as e
:
3919 self
.logger
.warning(str(e
.message
))
3920 all_ports
= self
.neutron
.list_ports()
3921 return all_servers
, all_ports
3922 except Exception as e
:
3923 raise vimconn
.VimConnException(
3924 f
"Exception in monitoring while getting VMs and ports status: {str(e)}"