1 # -*- coding: utf-8 -*-
4 # Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
5 # This file is part of openmano
8 # Licensed under the Apache License, Version 2.0 (the "License"); you may
9 # not use this file except in compliance with the License. You may obtain
10 # a copy of the License at
12 # http://www.apache.org/licenses/LICENSE-2.0
14 # Unless required by applicable law or agreed to in writing, software
15 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
16 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
17 # License for the specific language governing permissions and limitations
22 osconnector implements all the methods to interact with openstack using the python-neutronclient.
24 For the VNF forwarding graph, The OpenStack VIM connector calls the
25 networking-sfc Neutron extension methods, whose resources are mapped
26 to the VIM connector's SFC resources as follows:
27 - Classification (OSM) -> Flow Classifier (Neutron)
28 - Service Function Instance (OSM) -> Port Pair (Neutron)
29 - Service Function (OSM) -> Port Pair Group (Neutron)
30 - Service Function Path (OSM) -> Port Chain (Neutron)
34 from http
.client
import HTTPException
37 from pprint
import pformat
41 from typing
import Dict
, List
, Optional
, Tuple
43 from cinderclient
import client
as cClient
44 import cinderclient
.exceptions
as cExceptions
45 from glanceclient
import client
as glClient
46 import glanceclient
.exc
as gl1Exceptions
47 from keystoneauth1
import session
48 from keystoneauth1
.identity
import v2
, v3
49 import keystoneclient
.exceptions
as ksExceptions
50 import keystoneclient
.v2_0
.client
as ksClient_v2
51 import keystoneclient
.v3
.client
as ksClient_v3
53 from neutronclient
.common
import exceptions
as neExceptions
54 from neutronclient
.neutron
import client
as neClient
55 from novaclient
import client
as nClient
, exceptions
as nvExceptions
56 from osm_ro_plugin
import vimconn
57 from requests
.exceptions
import ConnectionError
60 __author__
= "Alfonso Tierno, Gerardo Garcia, Pablo Montes, xFlow Research, Igor D.C., Eduardo Sousa"
61 __date__
= "$22-sep-2017 23:59:59$"
63 """contain the openstack virtual machine status to openmano status"""
64 vmStatus2manoFormat
= {
67 "SUSPENDED": "SUSPENDED",
68 "SHUTOFF": "INACTIVE",
73 netStatus2manoFormat
= {
76 "INACTIVE": "INACTIVE",
# Classification types accepted by the SFC-related methods of this connector;
# only the Neutron "legacy" flow classifier is implemented.
supportedClassificationTypes = ["legacy_flow_classifier"]
84 # global var to have a timeout creating and deleting volumes
def catch_any_exception(func):
    """Decorator converting any exception raised by *func* into a vimconn one.

    Every visible branch of vimconnector._format_exception() raises, so the
    wrapped call either returns func's result or raises a vimconn.VimConn*
    exception.
    """

    def format_exception(*args, **kwargs):
        try:
            # Bug fix: the original forwarded keyword arguments with a single
            # star ("*kwargs"), which unpacks only the dict KEYS as extra
            # positional arguments; "**kwargs" forwards name/value pairs.
            return func(*args, **kwargs)
        except Exception as e:
            # vimconnector is defined later in this module; resolved at call
            # time, so the forward reference is fine.
            vimconnector._format_exception(e)

    return format_exception
class SafeDumper(yaml.SafeDumper):
    """YAML SafeDumper tolerating the dict subclasses returned by OpenStack
    client APIs (pyyaml issue 142: the safe representer rejects unknown
    dict subclasses)."""

    def represent_data(self, data):
        # Downcast any dict subclass to a plain dict before delegating, so
        # the safe representer recognizes it.
        is_dict_subclass = isinstance(data, dict) and type(data) is not dict

        if is_dict_subclass:
            data = dict(data.items())

        return super().represent_data(data)
110 class vimconnector(vimconn
.VimConnector
):
125 """using common constructor parameters. In this case
126 'url' is the keystone authorization url,
127 'url_admin' is not use
129 api_version
= config
.get("APIversion")
131 if api_version
and api_version
not in ("v3.3", "v2.0", "2", "3"):
132 raise vimconn
.VimConnException(
133 "Invalid value '{}' for config:APIversion. "
134 "Allowed values are 'v3.3', 'v2.0', '2' or '3'".format(api_version
)
137 vim_type
= config
.get("vim_type")
139 if vim_type
and vim_type
not in ("vio", "VIO"):
140 raise vimconn
.VimConnException(
141 "Invalid value '{}' for config:vim_type."
142 "Allowed values are 'vio' or 'VIO'".format(vim_type
)
145 if config
.get("dataplane_net_vlan_range") is not None:
146 # validate vlan ranges provided by user
147 self
._validate
_vlan
_ranges
(
148 config
.get("dataplane_net_vlan_range"), "dataplane_net_vlan_range"
151 if config
.get("multisegment_vlan_range") is not None:
152 # validate vlan ranges provided by user
153 self
._validate
_vlan
_ranges
(
154 config
.get("multisegment_vlan_range"), "multisegment_vlan_range"
157 vimconn
.VimConnector
.__init
__(
171 if self
.config
.get("insecure") and self
.config
.get("ca_cert"):
172 raise vimconn
.VimConnException(
173 "options insecure and ca_cert are mutually exclusive"
178 if self
.config
.get("insecure"):
181 if self
.config
.get("ca_cert"):
182 self
.verify
= self
.config
.get("ca_cert")
185 raise TypeError("url param can not be NoneType")
187 self
.persistent_info
= persistent_info
188 self
.availability_zone
= persistent_info
.get("availability_zone", None)
189 self
.storage_availability_zone
= None
190 self
.vm_av_zone
= None
191 self
.session
= persistent_info
.get("session", {"reload_client": True})
192 self
.my_tenant_id
= self
.session
.get("my_tenant_id")
193 self
.nova
= self
.session
.get("nova")
194 self
.neutron
= self
.session
.get("neutron")
195 self
.cinder
= self
.session
.get("cinder")
196 self
.glance
= self
.session
.get("glance")
197 # self.glancev1 = self.session.get("glancev1")
198 self
.keystone
= self
.session
.get("keystone")
199 self
.api_version3
= self
.session
.get("api_version3")
200 self
.vim_type
= self
.config
.get("vim_type")
203 self
.vim_type
= self
.vim_type
.upper()
205 if self
.config
.get("use_internal_endpoint"):
206 self
.endpoint_type
= "internalURL"
208 self
.endpoint_type
= None
210 logging
.getLogger("urllib3").setLevel(logging
.WARNING
)
211 logging
.getLogger("keystoneauth").setLevel(logging
.WARNING
)
212 logging
.getLogger("novaclient").setLevel(logging
.WARNING
)
213 self
.logger
= logging
.getLogger("ro.vim.openstack")
215 # allow security_groups to be a list or a single string
216 if isinstance(self
.config
.get("security_groups"), str):
217 self
.config
["security_groups"] = [self
.config
["security_groups"]]
219 self
.security_groups_id
= None
221 # ###### VIO Specific Changes #########
222 if self
.vim_type
== "VIO":
223 self
.logger
= logging
.getLogger("ro.vim.vio")
226 self
.logger
.setLevel(getattr(logging
, log_level
))
228 def __getitem__(self
, index
):
229 """Get individuals parameters.
231 if index
== "project_domain_id":
232 return self
.config
.get("project_domain_id")
233 elif index
== "user_domain_id":
234 return self
.config
.get("user_domain_id")
236 return vimconn
.VimConnector
.__getitem
__(self
, index
)
238 def __setitem__(self
, index
, value
):
239 """Set individuals parameters and it is marked as dirty so to force connection reload.
241 if index
== "project_domain_id":
242 self
.config
["project_domain_id"] = value
243 elif index
== "user_domain_id":
244 self
.config
["user_domain_id"] = value
246 vimconn
.VimConnector
.__setitem
__(self
, index
, value
)
248 self
.session
["reload_client"] = True
250 def serialize(self
, value
):
251 """Serialization of python basic types.
253 In the case value is not serializable a message will be logged and a
254 simple representation of the data that cannot be converted back to
257 if isinstance(value
, str):
262 value
, Dumper
=SafeDumper
, default_flow_style
=True, width
=256
264 except yaml
.representer
.RepresenterError
:
266 "The following entity cannot be serialized in YAML:\n\n%s\n\n",
273 def _reload_connection(self
):
274 """Called before any operation, it check if credentials has changed
275 Throw keystoneclient.apiclient.exceptions.AuthorizationFailure
277 # TODO control the timing and possible token timeout, but it seams that python client does this task for us :-)
278 if self
.session
["reload_client"]:
279 if self
.config
.get("APIversion"):
280 self
.api_version3
= (
281 self
.config
["APIversion"] == "v3.3"
282 or self
.config
["APIversion"] == "3"
284 else: # get from ending auth_url that end with v3 or with v2.0
285 self
.api_version3
= self
.url
.endswith("/v3") or self
.url
.endswith(
289 self
.session
["api_version3"] = self
.api_version3
291 if self
.api_version3
:
292 if self
.config
.get("project_domain_id") or self
.config
.get(
293 "project_domain_name"
295 project_domain_id_default
= None
297 project_domain_id_default
= "default"
299 if self
.config
.get("user_domain_id") or self
.config
.get(
302 user_domain_id_default
= None
304 user_domain_id_default
= "default"
308 password
=self
.passwd
,
309 project_name
=self
.tenant_name
,
310 project_id
=self
.tenant_id
,
311 project_domain_id
=self
.config
.get(
312 "project_domain_id", project_domain_id_default
314 user_domain_id
=self
.config
.get(
315 "user_domain_id", user_domain_id_default
317 project_domain_name
=self
.config
.get("project_domain_name"),
318 user_domain_name
=self
.config
.get("user_domain_name"),
324 password
=self
.passwd
,
325 tenant_name
=self
.tenant_name
,
326 tenant_id
=self
.tenant_id
,
329 sess
= session
.Session(auth
=auth
, verify
=self
.verify
)
330 # addedd region_name to keystone, nova, neutron and cinder to support distributed cloud for Wind River
331 # Titanium cloud and StarlingX
332 region_name
= self
.config
.get("region_name")
334 if self
.api_version3
:
335 self
.keystone
= ksClient_v3
.Client(
337 endpoint_type
=self
.endpoint_type
,
338 region_name
=region_name
,
341 self
.keystone
= ksClient_v2
.Client(
342 session
=sess
, endpoint_type
=self
.endpoint_type
345 self
.session
["keystone"] = self
.keystone
346 # In order to enable microversion functionality an explicit microversion must be specified in "config".
347 # This implementation approach is due to the warning message in
348 # https://developer.openstack.org/api-guide/compute/microversions.html
349 # where it is stated that microversion backwards compatibility is not guaranteed and clients should
350 # always require an specific microversion.
351 # To be able to use "device role tagging" functionality define "microversion: 2.32" in datacenter config
352 version
= self
.config
.get("microversion")
357 # addedd region_name to keystone, nova, neutron and cinder to support distributed cloud for Wind River
358 # Titanium cloud and StarlingX
359 self
.nova
= self
.session
["nova"] = nClient
.Client(
362 endpoint_type
=self
.endpoint_type
,
363 region_name
=region_name
,
365 self
.neutron
= self
.session
["neutron"] = neClient
.Client(
368 endpoint_type
=self
.endpoint_type
,
369 region_name
=region_name
,
372 if sess
.get_all_version_data(service_type
="volumev2"):
373 self
.cinder
= self
.session
["cinder"] = cClient
.Client(
376 endpoint_type
=self
.endpoint_type
,
377 region_name
=region_name
,
380 self
.cinder
= self
.session
["cinder"] = cClient
.Client(
383 endpoint_type
=self
.endpoint_type
,
384 region_name
=region_name
,
388 self
.my_tenant_id
= self
.session
["my_tenant_id"] = sess
.get_project_id()
390 self
.logger
.error("Cannot get project_id from session", exc_info
=True)
392 if self
.endpoint_type
== "internalURL":
393 glance_service_id
= self
.keystone
.services
.list(name
="glance")[0].id
394 glance_endpoint
= self
.keystone
.endpoints
.list(
395 glance_service_id
, interface
="internal"
398 glance_endpoint
= None
400 self
.glance
= self
.session
["glance"] = glClient
.Client(
401 2, session
=sess
, endpoint
=glance_endpoint
403 # using version 1 of glance client in new_image()
404 # self.glancev1 = self.session["glancev1"] = glClient.Client("1", session=sess,
405 # endpoint=glance_endpoint)
406 self
.session
["reload_client"] = False
407 self
.persistent_info
["session"] = self
.session
408 # add availablity zone info inside self.persistent_info
409 self
._set
_availablity
_zones
()
410 self
.persistent_info
["availability_zone"] = self
.availability_zone
411 # force to get again security_groups_ids next time they are needed
412 self
.security_groups_id
= None
414 def __net_os2mano(self
, net_list_dict
):
415 """Transform the net openstack format to mano format
416 net_list_dict can be a list of dict or a single dict"""
417 if type(net_list_dict
) is dict:
418 net_list_
= (net_list_dict
,)
419 elif type(net_list_dict
) is list:
420 net_list_
= net_list_dict
422 raise TypeError("param net_list_dict must be a list or a dictionary")
423 for net
in net_list_
:
424 if net
.get("provider:network_type") == "vlan":
427 net
["type"] = "bridge"
429 def __classification_os2mano(self
, class_list_dict
):
430 """Transform the openstack format (Flow Classifier) to mano format
431 (Classification) class_list_dict can be a list of dict or a single dict
433 if isinstance(class_list_dict
, dict):
434 class_list_
= [class_list_dict
]
435 elif isinstance(class_list_dict
, list):
436 class_list_
= class_list_dict
438 raise TypeError("param class_list_dict must be a list or a dictionary")
439 for classification
in class_list_
:
440 id = classification
.pop("id")
441 name
= classification
.pop("name")
442 description
= classification
.pop("description")
443 project_id
= classification
.pop("project_id")
444 tenant_id
= classification
.pop("tenant_id")
445 original_classification
= copy
.deepcopy(classification
)
446 classification
.clear()
447 classification
["ctype"] = "legacy_flow_classifier"
448 classification
["definition"] = original_classification
449 classification
["id"] = id
450 classification
["name"] = name
451 classification
["description"] = description
452 classification
["project_id"] = project_id
453 classification
["tenant_id"] = tenant_id
455 def __sfi_os2mano(self
, sfi_list_dict
):
456 """Transform the openstack format (Port Pair) to mano format (SFI)
457 sfi_list_dict can be a list of dict or a single dict
459 if isinstance(sfi_list_dict
, dict):
460 sfi_list_
= [sfi_list_dict
]
461 elif isinstance(sfi_list_dict
, list):
462 sfi_list_
= sfi_list_dict
464 raise TypeError("param sfi_list_dict must be a list or a dictionary")
466 for sfi
in sfi_list_
:
467 sfi
["ingress_ports"] = []
468 sfi
["egress_ports"] = []
470 if sfi
.get("ingress"):
471 sfi
["ingress_ports"].append(sfi
["ingress"])
473 if sfi
.get("egress"):
474 sfi
["egress_ports"].append(sfi
["egress"])
478 params
= sfi
.get("service_function_parameters")
482 correlation
= params
.get("correlation")
487 sfi
["sfc_encap"] = sfc_encap
488 del sfi
["service_function_parameters"]
490 def __sf_os2mano(self
, sf_list_dict
):
491 """Transform the openstack format (Port Pair Group) to mano format (SF)
492 sf_list_dict can be a list of dict or a single dict
494 if isinstance(sf_list_dict
, dict):
495 sf_list_
= [sf_list_dict
]
496 elif isinstance(sf_list_dict
, list):
497 sf_list_
= sf_list_dict
499 raise TypeError("param sf_list_dict must be a list or a dictionary")
502 del sf
["port_pair_group_parameters"]
503 sf
["sfis"] = sf
["port_pairs"]
506 def __sfp_os2mano(self
, sfp_list_dict
):
507 """Transform the openstack format (Port Chain) to mano format (SFP)
508 sfp_list_dict can be a list of dict or a single dict
510 if isinstance(sfp_list_dict
, dict):
511 sfp_list_
= [sfp_list_dict
]
512 elif isinstance(sfp_list_dict
, list):
513 sfp_list_
= sfp_list_dict
515 raise TypeError("param sfp_list_dict must be a list or a dictionary")
517 for sfp
in sfp_list_
:
518 params
= sfp
.pop("chain_parameters")
522 correlation
= params
.get("correlation")
527 sfp
["sfc_encap"] = sfc_encap
528 sfp
["spi"] = sfp
.pop("chain_id")
529 sfp
["classifications"] = sfp
.pop("flow_classifiers")
530 sfp
["service_functions"] = sfp
.pop("port_pair_groups")
532 # placeholder for now; read TODO note below
533 def _validate_classification(self
, type, definition
):
534 # only legacy_flow_classifier Type is supported at this point
536 # TODO(igordcard): this method should be an abstract method of an
537 # abstract Classification class to be implemented by the specific
538 # Types. Also, abstract vimconnector should call the validation
539 # method before the implemented VIM connectors are called.
542 def _format_exception(exception
):
543 """Transform a keystone, nova, neutron exception into a vimconn exception discovering the cause"""
544 message_error
= str(exception
)
550 neExceptions
.NetworkNotFoundClient
,
551 nvExceptions
.NotFound
,
552 nvExceptions
.ResourceNotFound
,
553 ksExceptions
.NotFound
,
554 gl1Exceptions
.HTTPNotFound
,
555 cExceptions
.NotFound
,
558 raise vimconn
.VimConnNotFoundException(
559 type(exception
).__name
__ + ": " + message_error
565 gl1Exceptions
.HTTPException
,
566 gl1Exceptions
.CommunicationError
,
568 ksExceptions
.ConnectionError
,
569 neExceptions
.ConnectionFailed
,
570 cExceptions
.ConnectionError
,
573 if type(exception
).__name
__ == "SSLError":
574 tip
= " (maybe option 'insecure' must be added to the VIM)"
576 raise vimconn
.VimConnConnectionException(
577 "Invalid URL or credentials{}: {}".format(tip
, message_error
)
583 nvExceptions
.BadRequest
,
584 ksExceptions
.BadRequest
,
585 gl1Exceptions
.BadRequest
,
586 cExceptions
.BadRequest
,
589 if message_error
== "OS-EXT-SRV-ATTR:host":
590 tip
= " (If the user does not have non-admin credentials, this attribute will be missing)"
591 raise vimconn
.VimConnInsufficientCredentials(
592 type(exception
).__name
__ + ": " + message_error
+ tip
594 raise vimconn
.VimConnException(
595 type(exception
).__name
__ + ": " + message_error
601 nvExceptions
.ClientException
,
602 ksExceptions
.ClientException
,
603 neExceptions
.NeutronException
,
604 cExceptions
.ClientException
,
607 raise vimconn
.VimConnUnexpectedResponse(
608 type(exception
).__name
__ + ": " + message_error
610 elif isinstance(exception
, nvExceptions
.Conflict
):
611 raise vimconn
.VimConnConflictException(
612 type(exception
).__name
__ + ": " + message_error
614 elif isinstance(exception
, vimconn
.VimConnException
):
617 logger
= logging
.getLogger("ro.vim.openstack")
618 logger
.error("General Exception " + message_error
, exc_info
=True)
620 raise vimconn
.VimConnException(
621 type(exception
).__name
__ + ": " + message_error
624 def _get_ids_from_name(self
):
626 Obtain ids from name of tenant and security_groups. Store at self .security_groups_id"
629 # get tenant_id if only tenant_name is supplied
630 self
._reload
_connection
()
632 if not self
.my_tenant_id
:
633 raise vimconn
.VimConnConnectionException(
634 "Error getting tenant information from name={} id={}".format(
635 self
.tenant_name
, self
.tenant_id
639 if self
.config
.get("security_groups") and not self
.security_groups_id
:
640 # convert from name to id
641 neutron_sg_list
= self
.neutron
.list_security_groups(
642 tenant_id
=self
.my_tenant_id
645 self
.security_groups_id
= []
646 for sg
in self
.config
.get("security_groups"):
647 for neutron_sg
in neutron_sg_list
:
648 if sg
in (neutron_sg
["id"], neutron_sg
["name"]):
649 self
.security_groups_id
.append(neutron_sg
["id"])
652 self
.security_groups_id
= None
654 raise vimconn
.VimConnConnectionException(
655 "Not found security group {} for this tenant".format(sg
)
658 def _find_nova_server(self
, vm_id
):
660 Returns the VM instance from Openstack and completes it with flavor ID
661 Do not call nova.servers.find directly, as it does not return flavor ID with microversion>=2.47
664 self
._reload
_connection
()
665 server
= self
.nova
.servers
.find(id=vm_id
)
666 # TODO parse input and translate to VIM format (openmano_schemas.new_vminstance_response_schema)
667 server_dict
= server
.to_dict()
669 if server_dict
["flavor"].get("original_name"):
670 server_dict
["flavor"]["id"] = self
.nova
.flavors
.find(
671 name
=server_dict
["flavor"]["original_name"]
673 except nClient
.exceptions
.NotFound
as e
:
674 self
.logger
.warning(str(e
.message
))
677 ksExceptions
.ClientException
,
678 nvExceptions
.ClientException
,
679 nvExceptions
.NotFound
,
682 self
._format
_exception
(e
)
684 def check_vim_connectivity(self
):
685 # just get network list to check connectivity and credentials
686 self
.get_network_list(filter_dict
={})
688 def get_tenant_list(self
, filter_dict
={}):
689 """Obtain tenants of VIM
690 filter_dict can contain the following keys:
691 name: filter by tenant name
692 id: filter by tenant uuid/id
694 Returns the tenant list of dictionaries: [{'name':'<name>, 'id':'<id>, ...}, ...]
696 self
.logger
.debug("Getting tenants from VIM filter: '%s'", str(filter_dict
))
698 self
._reload
_connection
()
700 if self
.api_version3
:
701 project_class_list
= self
.keystone
.projects
.list(
702 name
=filter_dict
.get("name")
705 project_class_list
= self
.keystone
.tenants
.findall(**filter_dict
)
709 for project
in project_class_list
:
710 if filter_dict
.get("id") and filter_dict
["id"] != project
.id:
713 project_list
.append(project
.to_dict())
717 ksExceptions
.ConnectionError
,
718 ksExceptions
.ClientException
,
721 self
._format
_exception
(e
)
723 def new_tenant(self
, tenant_name
, tenant_description
):
724 """Adds a new tenant to openstack VIM. Returns the tenant identifier"""
725 self
.logger
.debug("Adding a new tenant name: %s", tenant_name
)
727 self
._reload
_connection
()
729 if self
.api_version3
:
730 project
= self
.keystone
.projects
.create(
732 self
.config
.get("project_domain_id", "default"),
733 description
=tenant_description
,
737 project
= self
.keystone
.tenants
.create(tenant_name
, tenant_description
)
741 ksExceptions
.ConnectionError
,
742 ksExceptions
.ClientException
,
743 ksExceptions
.BadRequest
,
746 self
._format
_exception
(e
)
748 def delete_tenant(self
, tenant_id
):
749 """Delete a tenant from openstack VIM. Returns the old tenant identifier"""
750 self
.logger
.debug("Deleting tenant %s from VIM", tenant_id
)
752 self
._reload
_connection
()
754 if self
.api_version3
:
755 self
.keystone
.projects
.delete(tenant_id
)
757 self
.keystone
.tenants
.delete(tenant_id
)
762 ksExceptions
.ConnectionError
,
763 ksExceptions
.ClientException
,
764 ksExceptions
.NotFound
,
767 self
._format
_exception
(e
)
775 provider_network_profile
=None,
777 """Adds a tenant network to VIM
779 'net_name': name of the network
781 'bridge': overlay isolated network
782 'data': underlay E-LAN network for Passthrough and SRIOV interfaces
783 'ptp': underlay E-LINE network for Passthrough and SRIOV interfaces.
784 'ip_profile': is a dict containing the IP parameters of the network
785 'ip_version': can be "IPv4" or "IPv6" (Currently only IPv4 is implemented)
786 'subnet_address': ip_prefix_schema, that is X.X.X.X/Y
787 'gateway_address': (Optional) ip_schema, that is X.X.X.X
788 'dns_address': (Optional) comma separated list of ip_schema, e.g. X.X.X.X[,X,X,X,X]
789 'dhcp_enabled': True or False
790 'dhcp_start_address': ip_schema, first IP to grant
791 'dhcp_count': number of IPs to grant.
792 'shared': if this network can be seen/use by other tenants/organization
793 'provider_network_profile': (optional) contains {segmentation-id: vlan, network-type: vlan|vxlan,
794 physical-network: physnet-label}
795 Returns a tuple with the network identifier and created_items, or raises an exception on error
796 created_items can be None or a dictionary where this method can include key-values that will be passed to
797 the method delete_network. Can be used to store created segments, created l2gw connections, etc.
798 Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
802 "Adding a new network to VIM name '%s', type '%s'", net_name
, net_type
804 # self.logger.debug(">>>>>>>>>>>>>>>>>> IP profile %s", str(ip_profile))
809 if provider_network_profile
:
810 vlan
= provider_network_profile
.get("segmentation-id")
814 self
._reload
_connection
()
815 network_dict
= {"name": net_name
, "admin_state_up": True}
817 if net_type
in ("data", "ptp") or provider_network_profile
:
818 provider_physical_network
= None
820 if provider_network_profile
and provider_network_profile
.get(
823 provider_physical_network
= provider_network_profile
.get(
827 # provider-network must be one of the dataplane_physcial_netowrk if this is a list. If it is string
828 # or not declared, just ignore the checking
831 self
.config
.get("dataplane_physical_net"), (tuple, list)
833 and provider_physical_network
834 not in self
.config
["dataplane_physical_net"]
836 raise vimconn
.VimConnConflictException(
837 "Invalid parameter 'provider-network:physical-network' "
838 "for network creation. '{}' is not one of the declared "
839 "list at VIM_config:dataplane_physical_net".format(
840 provider_physical_network
844 # use the default dataplane_physical_net
845 if not provider_physical_network
:
846 provider_physical_network
= self
.config
.get(
847 "dataplane_physical_net"
850 # if it is non-empty list, use the first value. If it is a string use the value directly
852 isinstance(provider_physical_network
, (tuple, list))
853 and provider_physical_network
855 provider_physical_network
= provider_physical_network
[0]
857 if not provider_physical_network
:
858 raise vimconn
.VimConnConflictException(
859 "missing information needed for underlay networks. Provide "
860 "'dataplane_physical_net' configuration at VIM or use the NS "
861 "instantiation parameter 'provider-network.physical-network'"
865 if not self
.config
.get("multisegment_support"):
866 network_dict
["provider:physical_network"] = (
867 provider_physical_network
871 provider_network_profile
872 and "network-type" in provider_network_profile
874 network_dict
["provider:network_type"] = (
875 provider_network_profile
["network-type"]
878 network_dict
["provider:network_type"] = self
.config
.get(
879 "dataplane_network_type", "vlan"
883 network_dict
["provider:segmentation_id"] = vlan
888 "provider:physical_network": "",
889 "provider:network_type": "vxlan",
891 segment_list
.append(segment1_dict
)
893 "provider:physical_network": provider_physical_network
,
894 "provider:network_type": "vlan",
898 segment2_dict
["provider:segmentation_id"] = vlan
899 elif self
.config
.get("multisegment_vlan_range"):
900 vlanID
= self
._generate
_multisegment
_vlanID
()
901 segment2_dict
["provider:segmentation_id"] = vlanID
904 # raise vimconn.VimConnConflictException(
905 # "You must provide "multisegment_vlan_range" at config dict before creating a multisegment
907 segment_list
.append(segment2_dict
)
908 network_dict
["segments"] = segment_list
910 # VIO Specific Changes. It needs a concrete VLAN
911 if self
.vim_type
== "VIO" and vlan
is None:
912 if self
.config
.get("dataplane_net_vlan_range") is None:
913 raise vimconn
.VimConnConflictException(
914 "You must provide 'dataplane_net_vlan_range' in format "
915 "[start_ID - end_ID] at VIM_config for creating underlay "
919 network_dict
["provider:segmentation_id"] = self
._generate
_vlanID
()
921 network_dict
["shared"] = shared
923 if self
.config
.get("disable_network_port_security"):
924 network_dict
["port_security_enabled"] = False
926 if self
.config
.get("neutron_availability_zone_hints"):
927 hints
= self
.config
.get("neutron_availability_zone_hints")
929 if isinstance(hints
, str):
932 network_dict
["availability_zone_hints"] = hints
934 new_net
= self
.neutron
.create_network({"network": network_dict
})
936 # create subnetwork, even if there is no profile
941 if not ip_profile
.get("subnet_address"):
942 # Fake subnet is required
943 subnet_rand
= random
.SystemRandom().randint(0, 255)
944 ip_profile
["subnet_address"] = "192.168.{}.0/24".format(subnet_rand
)
946 if "ip_version" not in ip_profile
:
947 ip_profile
["ip_version"] = "IPv4"
950 "name": net_name
+ "-subnet",
951 "network_id": new_net
["network"]["id"],
952 "ip_version": 4 if ip_profile
["ip_version"] == "IPv4" else 6,
953 "cidr": ip_profile
["subnet_address"],
956 # Gateway should be set to None if not needed. Otherwise openstack assigns one by default
957 if ip_profile
.get("gateway_address"):
958 subnet
["gateway_ip"] = ip_profile
["gateway_address"]
960 subnet
["gateway_ip"] = None
962 if ip_profile
.get("dns_address"):
963 subnet
["dns_nameservers"] = ip_profile
["dns_address"].split(";")
965 if "dhcp_enabled" in ip_profile
:
966 subnet
["enable_dhcp"] = (
968 if ip_profile
["dhcp_enabled"] == "false"
969 or ip_profile
["dhcp_enabled"] is False
973 if ip_profile
.get("dhcp_start_address"):
974 subnet
["allocation_pools"] = []
975 subnet
["allocation_pools"].append(dict())
976 subnet
["allocation_pools"][0]["start"] = ip_profile
[
980 if ip_profile
.get("dhcp_count"):
981 # parts = ip_profile["dhcp_start_address"].split(".")
982 # ip_int = (int(parts[0]) << 24) + (int(parts[1]) << 16) + (int(parts[2]) << 8) + int(parts[3])
983 ip_int
= int(netaddr
.IPAddress(ip_profile
["dhcp_start_address"]))
984 ip_int
+= ip_profile
["dhcp_count"] - 1
985 ip_str
= str(netaddr
.IPAddress(ip_int
))
986 subnet
["allocation_pools"][0]["end"] = ip_str
989 ip_profile
.get("ipv6_address_mode")
990 and ip_profile
["ip_version"] != "IPv4"
992 subnet
["ipv6_address_mode"] = ip_profile
["ipv6_address_mode"]
993 # ipv6_ra_mode can be set to the same value for most use cases, see documentation:
994 # https://docs.openstack.org/neutron/latest/admin/config-ipv6.html#ipv6-ra-mode-and-ipv6-address-mode-combinations
995 subnet
["ipv6_ra_mode"] = ip_profile
["ipv6_address_mode"]
997 # self.logger.debug(">>>>>>>>>>>>>>>>>> Subnet: %s", str(subnet))
998 self
.neutron
.create_subnet({"subnet": subnet
})
1000 if net_type
== "data" and self
.config
.get("multisegment_support"):
1001 if self
.config
.get("l2gw_support"):
1002 l2gw_list
= self
.neutron
.list_l2_gateways().get("l2_gateways", ())
1003 for l2gw
in l2gw_list
:
1005 "l2_gateway_id": l2gw
["id"],
1006 "network_id": new_net
["network"]["id"],
1007 "segmentation_id": str(vlanID
),
1009 new_l2gw_conn
= self
.neutron
.create_l2_gateway_connection(
1010 {"l2_gateway_connection": l2gw_conn
}
1014 + str(new_l2gw_conn
["l2_gateway_connection"]["id"])
1017 return new_net
["network"]["id"], created_items
1018 except Exception as e
:
1019 # delete l2gw connections (if any) before deleting the network
1020 for k
, v
in created_items
.items():
1021 if not v
: # skip already deleted
1025 k_item
, _
, k_id
= k
.partition(":")
1027 if k_item
== "l2gwconn":
1028 self
.neutron
.delete_l2_gateway_connection(k_id
)
1030 except (neExceptions
.ConnectionFailed
, ConnectionError
) as e2
:
1032 "Error deleting l2 gateway connection: {}: {}".format(
1033 type(e2
).__name
__, e2
1036 self
._format
_exception
(e2
)
1037 except Exception as e2
:
1039 "Error deleting l2 gateway connection: {}: {}".format(
1040 type(e2
).__name
__, e2
1045 self
.neutron
.delete_network(new_net
["network"]["id"])
1047 self
._format
_exception
(e
)
1049 def get_network_list(self
, filter_dict
={}):
1050 """Obtain tenant networks of VIM
1056 admin_state_up: boolean
1058 Returns the network list of dictionaries
1060 self
.logger
.debug("Getting network from VIM filter: '%s'", str(filter_dict
))
1062 self
._reload
_connection
()
1063 filter_dict_os
= filter_dict
.copy()
1065 if self
.api_version3
and "tenant_id" in filter_dict_os
:
1067 filter_dict_os
["project_id"] = filter_dict_os
.pop("tenant_id")
1069 net_dict
= self
.neutron
.list_networks(**filter_dict_os
)
1070 net_list
= net_dict
["networks"]
1071 self
.__net
_os
2mano
(net_list
)
1075 neExceptions
.ConnectionFailed
,
1076 ksExceptions
.ClientException
,
1077 neExceptions
.NeutronException
,
1080 self
._format
_exception
(e
)
def get_network(self, net_id):
    """Obtain details of network from VIM
    Returns the network information from a network id"""
    self.logger.debug(" Getting tenant network %s from VIM", net_id)
    filter_dict = {"id": net_id}
    net_list = self.get_network_list(filter_dict)

    if len(net_list) == 0:
        raise vimconn.VimConnNotFoundException(
            "Network '{}' not found".format(net_id)
        )
    elif len(net_list) > 1:
        raise vimconn.VimConnConflictException(
            "Found more than one network with this criteria"
        )

    net = net_list[0]
    subnets = []
    # Resolve each subnet id into its full description; a failing subnet is
    # recorded as a fault entry instead of aborting the whole lookup.
    for subnet_id in net.get("subnets", ()):
        try:
            subnet = self.neutron.show_subnet(subnet_id)
        except Exception as e:
            self.logger.error(
                "osconnector.get_network(): Error getting subnet %s %s"
                % (subnet_id, str(e))
            )
            subnet = {"id": subnet_id, "fault": str(e)}

        subnets.append(subnet)

    net["subnets"] = subnets
    # Expose Neutron provider attributes under OSM-friendly keys.
    net["encapsulation"] = net.get("provider:network_type")
    net["encapsulation_type"] = net.get("provider:network_type")
    net["segmentation_id"] = net.get("provider:segmentation_id")
    net["encapsulation_id"] = net.get("provider:segmentation_id")

    return net
@catch_any_exception
def delete_network(self, net_id, created_items=None):
    """
    Removes a tenant network from VIM and its associated elements
    :param net_id: VIM identifier of the network, provided by method new_network
    :param created_items: dictionary with extra items to be deleted. provided by method new_network
    Returns the network identifier or raises an exception upon error or when network is not found
    """
    self.logger.debug("Deleting network '%s' from VIM", net_id)

    if created_items is None:
        created_items = {}

    try:
        self._reload_connection()
        # delete l2gw connections (if any) before deleting the network
        for k, v in created_items.items():
            if not v:  # skip already deleted
                continue

            try:
                k_item, _, k_id = k.partition(":")
                if k_item == "l2gwconn":
                    self.neutron.delete_l2_gateway_connection(k_id)
            except (neExceptions.ConnectionFailed, ConnectionError) as e:
                self.logger.error(
                    "Error deleting l2 gateway connection: {}: {}".format(
                        type(e).__name__, e
                    )
                )
                # If there is connection error, it raises.
                self._format_exception(e)
            except Exception as e:
                # Best-effort cleanup: other errors are logged and skipped.
                self.logger.error(
                    "Error deleting l2 gateway connection: {}: {}".format(
                        type(e).__name__, e
                    )
                )

        # delete VM ports attached to this networks before the network
        ports = self.neutron.list_ports(network_id=net_id)
        for p in ports["ports"]:
            try:
                self.neutron.delete_port(p["id"])
            except (neExceptions.ConnectionFailed, ConnectionError) as e:
                self.logger.error("Error deleting port %s: %s", p["id"], str(e))
                # If there is connection error, it raises.
                self._format_exception(e)
            except Exception as e:
                self.logger.error("Error deleting port %s: %s", p["id"], str(e))

        self.neutron.delete_network(net_id)

        return net_id
    except (neExceptions.NetworkNotFoundClient, neExceptions.NotFound) as e:
        # If network to be deleted is not found, it does not raise.
        self.logger.warning(
            f"Error deleting network: {net_id} is not found, {str(e)}"
        )
def refresh_nets_status(self, net_list):
    """Get the status of the networks
    Params: the list of network identifiers
    Returns a dictionary with:
        net_id:         #VIM id of this network
            status:     #Mandatory. Text with one of:
                #  DELETED (not found at vim)
                #  VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
                #  OTHER (Vim reported other status not understood)
                #  ERROR (VIM indicates an ERROR status)
                #  ACTIVE, INACTIVE, DOWN (admin down),
                #  BUILD (on building process)
            error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR
            vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)
    """
    net_dict = {}

    for net_id in net_list:
        net = {}

        try:
            net_vim = self.get_network(net_id)

            if net_vim["status"] in netStatus2manoFormat:
                net["status"] = netStatus2manoFormat[net_vim["status"]]
            else:
                net["status"] = "OTHER"
                net["error_msg"] = "VIM status reported " + net_vim["status"]

            # Administratively-down networks are reported as DOWN.
            if net["status"] == "ACTIVE" and not net_vim["admin_state_up"]:
                net["status"] = "DOWN"

            net["vim_info"] = self.serialize(net_vim)

            if net_vim.get("fault"):  # TODO
                net["error_msg"] = str(net_vim["fault"])
        except vimconn.VimConnNotFoundException as e:
            self.logger.error("Exception getting net status: %s", str(e))
            net["status"] = "DELETED"
            net["error_msg"] = str(e)
        except vimconn.VimConnException as e:
            self.logger.error("Exception getting net status: %s", str(e))
            net["status"] = "VIM_ERROR"
            net["error_msg"] = str(e)
        net_dict[net_id] = net

    return net_dict
def get_flavor(self, flavor_id):
    """Obtain flavor details from the VIM. Returns the flavor dict details"""
    self.logger.debug("Getting flavor '%s'", flavor_id)
    try:
        self._reload_connection()
        flavor = self.nova.flavors.find(id=flavor_id)
        return flavor.to_dict()
    except (
        nvExceptions.NotFound,
        nvExceptions.ClientException,
        ksExceptions.ClientException,
        ConnectionError,
    ) as e:
        # Translates VIM exceptions into vimconn exceptions (raises).
        self._format_exception(e)
def get_flavor_id_from_data(self, flavor_dict):
    """Obtain flavor id that match the flavor description
    Returns the flavor_id or raises a vimconnNotFoundException
    flavor_dict: contains the required ram, vcpus, disk
    If 'use_existing_flavors' is set to True at config, the closer flavor that provides same or more ram, vcpus
    and disk is returned. Otherwise a flavor with exactly same ram, vcpus and disk is returned or a
    vimconnNotFoundException is raised
    """
    exact_match = False if self.config.get("use_existing_flavors") else True

    try:
        self._reload_connection()
        flavor_candidate_id = None
        # Sentinel "worst candidate" so any real flavor compares smaller.
        flavor_candidate_data = (10000, 10000, 10000)
        flavor_target = (
            flavor_dict["ram"],
            flavor_dict["vcpus"],
            flavor_dict["disk"],
            flavor_dict.get("ephemeral", 0),
            flavor_dict.get("swap", 0),
        )
        extended = flavor_dict.get("extended", {})

        if extended:
            raise vimconn.VimConnNotFoundException(
                "Flavor with EPA still not implemented"
            )
        # if len(numas) > 1:
        #     raise vimconn.VimConnNotFoundException("Cannot find any flavor with more than one numa")
        # numas = extended.get("numas")
        for flavor in self.nova.flavors.list():
            epa = flavor.get_keys()

            # Flavors carrying EPA extra-specs are not comparable here.
            if epa:
                continue

            flavor_data = (
                flavor.ram,
                flavor.vcpus,
                flavor.disk,
                flavor.ephemeral,
                flavor.swap if isinstance(flavor.swap, int) else 0,
            )

            if flavor_data == flavor_target:
                return flavor.id
            elif (
                not exact_match
                and flavor_target < flavor_data < flavor_candidate_data
            ):
                # Keep the smallest flavor that still satisfies the target.
                flavor_candidate_id = flavor.id
                flavor_candidate_data = flavor_data

        if not exact_match and flavor_candidate_id:
            return flavor_candidate_id

        raise vimconn.VimConnNotFoundException(
            "Cannot find any flavor matching '{}'".format(flavor_dict)
        )
    except (
        nvExceptions.NotFound,
        nvExceptions.BadRequest,
        nvExceptions.ClientException,
        ksExceptions.ClientException,
        ConnectionError,
    ) as e:
        self._format_exception(e)
1316 def process_resource_quota(quota
: dict, prefix
: str, extra_specs
: dict) -> None:
1317 """Process resource quota and fill up extra_specs.
1319 quota (dict): Keeping the quota of resurces
1321 extra_specs (dict) Dict to be filled to be used during flavor creation
1324 if "limit" in quota
:
1325 extra_specs
["quota:" + prefix
+ "_limit"] = quota
["limit"]
1327 if "reserve" in quota
:
1328 extra_specs
["quota:" + prefix
+ "_reservation"] = quota
["reserve"]
1330 if "shares" in quota
:
1331 extra_specs
["quota:" + prefix
+ "_shares_level"] = "custom"
1332 extra_specs
["quota:" + prefix
+ "_shares_share"] = quota
["shares"]
1335 def process_numa_memory(
1336 numa
: dict, node_id
: Optional
[int], extra_specs
: dict
1338 """Set the memory in extra_specs.
1340 numa (dict): A dictionary which includes numa information
1341 node_id (int): ID of numa node
1342 extra_specs (dict): To be filled.
1345 if not numa
.get("memory"):
1347 memory_mb
= numa
["memory"] * 1024
1348 memory
= "hw:numa_mem.{}".format(node_id
)
1349 extra_specs
[memory
] = int(memory_mb
)
1352 def process_numa_vcpu(numa
: dict, node_id
: int, extra_specs
: dict) -> None:
1353 """Set the cpu in extra_specs.
1355 numa (dict): A dictionary which includes numa information
1356 node_id (int): ID of numa node
1357 extra_specs (dict): To be filled.
1360 if not numa
.get("vcpu"):
1363 cpu
= "hw:numa_cpus.{}".format(node_id
)
1364 vcpu
= ",".join(map(str, vcpu
))
1365 extra_specs
[cpu
] = vcpu
1368 def process_numa_paired_threads(numa
: dict, extra_specs
: dict) -> Optional
[int]:
1369 """Fill up extra_specs if numa has paired-threads.
1371 numa (dict): A dictionary which includes numa information
1372 extra_specs (dict): To be filled.
1375 threads (int) Number of virtual cpus
1378 if not numa
.get("paired-threads"):
1381 # cpu_thread_policy "require" implies that compute node must have an STM architecture
1382 threads
= numa
["paired-threads"] * 2
1383 extra_specs
["hw:cpu_thread_policy"] = "require"
1384 extra_specs
["hw:cpu_policy"] = "dedicated"
1388 def process_numa_cores(numa
: dict, extra_specs
: dict) -> Optional
[int]:
1389 """Fill up extra_specs if numa has cores.
1391 numa (dict): A dictionary which includes numa information
1392 extra_specs (dict): To be filled.
1395 cores (int) Number of virtual cpus
1398 # cpu_thread_policy "isolate" implies that the host must not have an SMT
1399 # architecture, or a non-SMT architecture will be emulated
1400 if not numa
.get("cores"):
1402 cores
= numa
["cores"]
1403 extra_specs
["hw:cpu_thread_policy"] = "isolate"
1404 extra_specs
["hw:cpu_policy"] = "dedicated"
1408 def process_numa_threads(numa
: dict, extra_specs
: dict) -> Optional
[int]:
1409 """Fill up extra_specs if numa has threads.
1411 numa (dict): A dictionary which includes numa information
1412 extra_specs (dict): To be filled.
1415 threads (int) Number of virtual cpus
1418 # cpu_thread_policy "prefer" implies that the host may or may not have an SMT architecture
1419 if not numa
.get("threads"):
1421 threads
= numa
["threads"]
1422 extra_specs
["hw:cpu_thread_policy"] = "prefer"
1423 extra_specs
["hw:cpu_policy"] = "dedicated"
def _process_numa_parameters_of_flavor(
    self, numas: List, extra_specs: Dict
) -> None:
    """Process numa parameters and fill up extra_specs.

    Args:
        numas       (list):     List of dictionary which includes numa information
        extra_specs (dict):     To be filled.
    """
    numa_nodes = len(numas)
    extra_specs["hw:numa_nodes"] = str(numa_nodes)
    cpu_cores, cpu_threads = 0, 0

    if self.vim_type == "VIO":
        self.process_vio_numa_nodes(numa_nodes, extra_specs)

    for numa in numas:
        if "id" in numa:
            node_id = numa["id"]
            # overwrite ram and vcpus
            # check if key "memory" is present in numa else use ram value at flavor
            self.process_numa_memory(numa, node_id, extra_specs)
            self.process_numa_vcpu(numa, node_id, extra_specs)

        # See for reference: https://specs.openstack.org/openstack/nova-specs/specs/mitaka/implemented/virt-driver-cpu-thread-pinning.html
        extra_specs["hw:cpu_sockets"] = str(numa_nodes)

        if "paired-threads" in numa:
            threads = self.process_numa_paired_threads(numa, extra_specs)
            cpu_threads += threads

        elif "cores" in numa:
            cores = self.process_numa_cores(numa, extra_specs)
            cpu_cores += cores

        elif "threads" in numa:
            threads = self.process_numa_threads(numa, extra_specs)
            cpu_threads += threads

    # Only publish the aggregated counters when something was accumulated.
    if cpu_cores:
        extra_specs["hw:cpu_cores"] = str(cpu_cores)

    if cpu_threads:
        extra_specs["hw:cpu_threads"] = str(cpu_threads)
1472 def process_vio_numa_nodes(numa_nodes
: int, extra_specs
: Dict
) -> None:
1473 """According to number of numa nodes, updates the extra_specs for VIO.
1477 numa_nodes (int): List keeps the numa node numbers
1478 extra_specs (dict): Extra specs dict to be updated
1481 # If there are several numas, we do not define specific affinity.
1482 extra_specs
["vmware:latency_sensitivity_level"] = "high"
1484 def _change_flavor_name(
1485 self
, name
: str, name_suffix
: int, flavor_data
: dict
1487 """Change the flavor name if the name already exists.
1490 name (str): Flavor name to be checked
1491 name_suffix (int): Suffix to be appended to name
1492 flavor_data (dict): Flavor dict
1495 name (str): New flavor name to be used
1499 fl
= self
.nova
.flavors
.list()
1500 fl_names
= [f
.name
for f
in fl
]
1502 while name
in fl_names
:
1504 name
= flavor_data
["name"] + "-" + str(name_suffix
)
def _process_extended_config_of_flavor(
    self, extended: dict, extra_specs: dict
) -> None:
    """Process the extended dict to fill up extra_specs.

    Args:
        extended    (dict):     Keeping the extra specification of flavor
        extra_specs (dict)      Dict to be filled to be used during flavor creation
    """
    # Descriptor quota keys -> Nova quota extra-spec prefixes.
    quotas = {
        "cpu-quota": "cpu",
        "mem-quota": "memory",
        "vif-quota": "vif",
        "disk-io-quota": "disk_io",
    }

    # Descriptor mempage sizes -> Nova hw:mem_page_size values.
    page_sizes = {
        "LARGE": "large",
        "SMALL": "small",
        "SIZE_2MB": "2MB",
        "SIZE_1GB": "1GB",
        "PREFER_LARGE": "any",
    }

    # Descriptor policy keys -> Nova policy extra-spec keys.
    policies = {
        "cpu-pinning-policy": "hw:cpu_policy",
        "cpu-thread-pinning-policy": "hw:cpu_thread_policy",
        "mem-policy": "hw:numa_mempolicy",
    }

    numas = extended.get("numas")
    if numas:
        self._process_numa_parameters_of_flavor(numas, extra_specs)

    for quota, item in quotas.items():
        if quota in extended.keys():
            self.process_resource_quota(extended.get(quota), item, extra_specs)

    # Set the mempage size as specified in the descriptor
    if extended.get("mempage-size"):
        if extended["mempage-size"] in page_sizes.keys():
            extra_specs["hw:mem_page_size"] = page_sizes[extended["mempage-size"]]
        else:
            # Normally, validations in NBI should not allow to this condition.
            self.logger.debug(
                "Invalid mempage-size %s. Will be ignored",
                extended.get("mempage-size"),
            )

    for policy, hw_policy in policies.items():
        if extended.get(policy):
            extra_specs[hw_policy] = extended[policy].lower()
1563 def _get_flavor_details(flavor_data
: dict) -> Tuple
:
1564 """Returns the details of flavor
1566 flavor_data (dict): Dictionary that includes required flavor details
1569 ram, vcpus, extra_specs, extended (tuple): Main items of required flavor
1573 flavor_data
.get("ram", 64),
1574 flavor_data
.get("vcpus", 1),
1576 flavor_data
.get("extended"),
@catch_any_exception
def new_flavor(self, flavor_data: dict, change_name_if_used: bool = True) -> str:
    """Adds a tenant flavor to openstack VIM.
    if change_name_if_used is True, it will change name in case of conflict,
    because it is not supported name repetition.

    Args:
        flavor_data             (dict):     Flavor details to be processed
        change_name_if_used     (bool):     Change name in case of conflict

    Returns:
        flavor_id               (str):      flavor identifier
    """
    self.logger.debug("Adding flavor '%s'", str(flavor_data))
    retry = 0
    max_retries = 3
    name_suffix = 0
    name = flavor_data["name"]

    while retry < max_retries:
        retry += 1
        try:
            self._reload_connection()

            if change_name_if_used:
                name = self._change_flavor_name(name, name_suffix, flavor_data)

            ram, vcpus, extra_specs, extended = self._get_flavor_details(
                flavor_data
            )
            if extended:
                self._process_extended_config_of_flavor(extended, extra_specs)

            # Create flavor
            new_flavor = self.nova.flavors.create(
                name=name,
                ram=ram,
                vcpus=vcpus,
                disk=flavor_data.get("disk", 0),
                ephemeral=flavor_data.get("ephemeral", 0),
                swap=flavor_data.get("swap", 0),
                is_public=flavor_data.get("is_public", True),
            )

            # Add metadata
            if extra_specs:
                new_flavor.set_keys(extra_specs)

            return new_flavor.id

        except nvExceptions.Conflict as e:
            # Name collision: retry with a new derived name when allowed.
            if change_name_if_used and retry < max_retries:
                continue

            self._format_exception(e)
@catch_any_exception
def delete_flavor(self, flavor_id):
    """Deletes a tenant flavor from openstack VIM. Returns the old flavor_id"""
    try:
        self._reload_connection()
        self.nova.flavors.delete(flavor_id)

        return flavor_id
    except (nvExceptions.NotFound, nvExceptions.ResourceNotFound) as e:
        # If flavor is not found, it does not raise.
        self.logger.warning(
            f"Error deleting flavor: {flavor_id} is not found, {str(e.message)}"
        )
def new_image(self, image_dict):
    """
    Adds a tenant image to VIM. imge_dict is a dictionary with:
        name: name
        disk_format: qcow2, vhd, vmdk, raw (by default), ...
        location: path or URI
        public: "yes" or "no"
        metadata: metadata of the image
    Returns the image_id
    """
    retry = 0
    max_retries = 3

    while retry < max_retries:
        retry += 1
        try:
            self._reload_connection()

            # determine format http://docs.openstack.org/developer/glance/formats.html
            if "disk_format" in image_dict:
                disk_format = image_dict["disk_format"]
            else:  # autodiscover based on extension
                if image_dict["location"].endswith(".qcow2"):
                    disk_format = "qcow2"
                elif image_dict["location"].endswith(".vhd"):
                    disk_format = "vhd"
                elif image_dict["location"].endswith(".vmdk"):
                    disk_format = "vmdk"
                elif image_dict["location"].endswith(".vdi"):
                    disk_format = "vdi"
                elif image_dict["location"].endswith(".iso"):
                    disk_format = "iso"
                elif image_dict["location"].endswith(".aki"):
                    disk_format = "aki"
                elif image_dict["location"].endswith(".ari"):
                    disk_format = "ari"
                elif image_dict["location"].endswith(".ami"):
                    disk_format = "ami"
                else:
                    disk_format = "raw"

            self.logger.debug(
                "new_image: '%s' loading from '%s'",
                image_dict["name"],
                image_dict["location"],
            )
            if self.vim_type == "VIO":
                container_format = "bare"
                if "container_format" in image_dict:
                    container_format = image_dict["container_format"]

                new_image = self.glance.images.create(
                    name=image_dict["name"],
                    container_format=container_format,
                    disk_format=disk_format,
                )
            else:
                new_image = self.glance.images.create(name=image_dict["name"])

            if image_dict["location"].startswith("http"):
                # TODO there is not a method to direct download. It must be downloaded locally with requests
                raise vimconn.VimConnNotImplemented("Cannot create image from URL")
            else:  # local path
                with open(image_dict["location"]) as fimage:
                    self.glance.images.upload(new_image.id, fimage)
                    # new_image = self.glancev1.images.create(name=image_dict["name"], is_public=
                    # image_dict.get("public","yes")=="yes",
                    # container_format="bare", data=fimage, disk_format=disk_format)

            # Fix: guard against a missing/None "metadata" key; the location
            # entry is always added below, which would fail on None.
            metadata_to_load = image_dict.get("metadata") or {}

            # TODO location is a reserved word for current openstack versions. fixed for VIO please check
            # for openstack
            if self.vim_type == "VIO":
                metadata_to_load["upload_location"] = image_dict["location"]
            else:
                metadata_to_load["location"] = image_dict["location"]

            self.glance.images.update(new_image.id, **metadata_to_load)

            return new_image.id
        except (
            HTTPException,
            gl1Exceptions.HTTPException,
            gl1Exceptions.CommunicationError,
            ConnectionError,
        ) as e:
            # NOTE(review): retry/raise ordering reconstructed from fragments —
            # transient errors loop again except on the last attempt; confirm.
            if retry == max_retries:
                continue

            self._format_exception(e)
        except IOError as e:  # can not open the file
            raise vimconn.VimConnConnectionException(
                "{}: {} for {}".format(type(e).__name__, e, image_dict["location"]),
                http_code=vimconn.HTTP_Bad_Request,
            ) from e
        except Exception as e:
            self._format_exception(e)
@catch_any_exception
def delete_image(self, image_id):
    """Deletes a tenant image from openstack VIM. Returns the old id"""
    try:
        self._reload_connection()
        self.glance.images.delete(image_id)

        return image_id
    except gl1Exceptions.NotFound as e:
        # If image is not found, it does not raise.
        self.logger.warning(
            f"Error deleting image: {image_id} is not found, {str(e)}"
        )
@catch_any_exception
def get_image_id_from_path(self, path):
    """Get the image id from image path in the VIM database. Returns the image_id"""
    self._reload_connection()
    images = self.glance.images.list()

    for image in images:
        if image.metadata.get("location") == path:
            return image.id

    raise vimconn.VimConnNotFoundException(
        "image with location '{}' not found".format(path)
    )
def get_image_list(self, filter_dict=None):
    """Obtain tenant images from VIM

    Filter_dict can be:
        id: image id
        name: image name
        checksum: image checksum
    Returns the image list of dictionaries:
        [{<the fields at Filter_dict plus some VIM specific>}, ...]
        List can be empty
    """
    # Fix: avoid a mutable default argument; None behaves as "no filter".
    if filter_dict is None:
        filter_dict = {}

    self.logger.debug("Getting image list from VIM filter: '%s'", str(filter_dict))

    try:
        self._reload_connection()
        # filter_dict_os = filter_dict.copy()
        # First we filter by the available filter fields: name, id. The others are removed.
        image_list = self.glance.images.list()
        filtered_list = []

        for image in image_list:
            try:
                if filter_dict.get("name") and image["name"] != filter_dict["name"]:
                    continue

                if filter_dict.get("id") and image["id"] != filter_dict["id"]:
                    continue

                if (
                    filter_dict.get("checksum")
                    and image["checksum"] != filter_dict["checksum"]
                ):
                    continue

                filtered_list.append(image.copy())
            except gl1Exceptions.HTTPNotFound:
                # Image disappeared between list and access; skip it.
                pass

        return filtered_list
    except (
        ksExceptions.ClientException,
        nvExceptions.ClientException,
        gl1Exceptions.CommunicationError,
        ConnectionError,
    ) as e:
        self._format_exception(e)
def __wait_for_vm(self, vm_id, status):
    """wait until vm is in the desired status and return True.
    If the VM gets in ERROR status, return false.
    If the timeout is reached generate an exception"""
    elapsed_time = 0

    while elapsed_time < server_timeout:
        vm_status = self.nova.servers.get(vm_id).status

        if vm_status == status:
            return True

        if vm_status == "ERROR":
            return False

        time.sleep(5)
        elapsed_time += 5

    # if we exceeded the timeout rollback
    if elapsed_time >= server_timeout:
        raise vimconn.VimConnException(
            "Timeout waiting for instance " + vm_id + " to get " + status,
            http_code=vimconn.HTTP_Request_Timeout,
        )
1847 def _get_openstack_availablity_zones(self
):
1849 Get from openstack availability zones available
1853 openstack_availability_zone
= self
.nova
.availability_zones
.list()
1854 openstack_availability_zone
= [
1856 for zone
in openstack_availability_zone
1857 if zone
.zoneName
!= "internal"
1860 return openstack_availability_zone
1864 def _set_availablity_zones(self
):
1866 Set vim availablity zone
1869 if "availability_zone" in self
.config
:
1870 vim_availability_zones
= self
.config
.get("availability_zone")
1872 if isinstance(vim_availability_zones
, str):
1873 self
.availability_zone
= [vim_availability_zones
]
1874 elif isinstance(vim_availability_zones
, list):
1875 self
.availability_zone
= vim_availability_zones
1877 self
.availability_zone
= self
._get
_openstack
_availablity
_zones
()
1878 if "storage_availability_zone" in self
.config
:
1879 self
.storage_availability_zone
= self
.config
.get(
1880 "storage_availability_zone"
1883 def _get_vm_availability_zone(
1884 self
, availability_zone_index
, availability_zone_list
1887 Return thge availability zone to be used by the created VM.
1888 :return: The VIM availability zone to be used or None
1890 if availability_zone_index
is None:
1891 if not self
.config
.get("availability_zone"):
1893 elif isinstance(self
.config
.get("availability_zone"), str):
1894 return self
.config
["availability_zone"]
1896 # TODO consider using a different parameter at config for default AV and AV list match
1897 return self
.config
["availability_zone"][0]
1899 vim_availability_zones
= self
.availability_zone
1900 # check if VIM offer enough availability zones describe in the VNFD
1901 if vim_availability_zones
and len(availability_zone_list
) <= len(
1902 vim_availability_zones
1904 # check if all the names of NFV AV match VIM AV names
1905 match_by_index
= False
1906 for av
in availability_zone_list
:
1907 if av
not in vim_availability_zones
:
1908 match_by_index
= True
1912 return vim_availability_zones
[availability_zone_index
]
1914 return availability_zone_list
[availability_zone_index
]
1916 raise vimconn
.VimConnConflictException(
1917 "No enough availability zones at VIM for this deployment"
1920 def _prepare_port_dict_security_groups(self
, net
: dict, port_dict
: dict) -> None:
1921 """Fill up the security_groups in the port_dict.
1924 net (dict): Network details
1925 port_dict (dict): Port details
1929 self
.config
.get("security_groups")
1930 and net
.get("port_security") is not False
1931 and not self
.config
.get("no_port_security_extension")
1933 if not self
.security_groups_id
:
1934 self
._get
_ids
_from
_name
()
1936 port_dict
["security_groups"] = self
.security_groups_id
1938 def _prepare_port_dict_binding(self
, net
: dict, port_dict
: dict) -> None:
1939 """Fill up the network binding depending on network type in the port_dict.
1942 net (dict): Network details
1943 port_dict (dict): Port details
1946 if not net
.get("type"):
1947 raise vimconn
.VimConnException("Type is missing in the network details.")
1949 if net
["type"] == "virtual":
1953 elif net
["type"] == "VF" or net
["type"] == "SR-IOV":
1954 port_dict
["binding:vnic_type"] = "direct"
1956 # VIO specific Changes
1957 if self
.vim_type
== "VIO":
1958 # Need to create port with port_security_enabled = False and no-security-groups
1959 port_dict
["port_security_enabled"] = False
1960 port_dict
["provider_security_groups"] = []
1961 port_dict
["security_groups"] = []
1964 # For PT PCI-PASSTHROUGH
1965 port_dict
["binding:vnic_type"] = "direct-physical"
1968 def _set_fixed_ip(new_port
: dict, net
: dict) -> None:
1969 """Set the "ip" parameter in net dictionary.
1972 new_port (dict): New created port
1973 net (dict): Network details
1976 fixed_ips
= new_port
["port"].get("fixed_ips")
1979 net
["ip"] = fixed_ips
[0].get("ip_address")
1984 def _prepare_port_dict_mac_ip_addr(net
: dict, port_dict
: dict) -> None:
1985 """Fill up the mac_address and fixed_ips in port_dict.
1988 net (dict): Network details
1989 port_dict (dict): Port details
1992 if net
.get("mac_address"):
1993 port_dict
["mac_address"] = net
["mac_address"]
1996 if ip_list
:= net
.get("ip_address"):
1997 if not isinstance(ip_list
, list):
2000 ip_dict
= {"ip_address": ip
}
2001 ip_dual_list
.append(ip_dict
)
2002 port_dict
["fixed_ips"] = ip_dual_list
2003 # TODO add "subnet_id": <subnet_id>
2005 def _create_new_port(self
, port_dict
: dict, created_items
: dict, net
: dict) -> Dict
:
2006 """Create new port using neutron.
2009 port_dict (dict): Port details
2010 created_items (dict): All created items
2011 net (dict): Network details
2014 new_port (dict): New created port
2017 new_port
= self
.neutron
.create_port({"port": port_dict
})
2018 created_items
["port:" + str(new_port
["port"]["id"])] = True
2019 net
["mac_address"] = new_port
["port"]["mac_address"]
2020 net
["vim_id"] = new_port
["port"]["id"]
def _create_port(
    self, net: dict, name: str, created_items: dict
) -> Tuple[dict, dict]:
    """Create port using net details.

    Args:
        net (dict):             Network details
        name (str):             Name to be used as network name if net dict does not include name
        created_items (dict):   All created items

    Returns:
        new_port, port          New created port, port dictionary
    """
    port_dict = {
        "network_id": net["net_id"],
        "name": net.get("name"),
        "admin_state_up": True,
    }

    if not port_dict["name"]:
        port_dict["name"] = name

    self._prepare_port_dict_security_groups(net, port_dict)

    self._prepare_port_dict_binding(net, port_dict)

    vimconnector._prepare_port_dict_mac_ip_addr(net, port_dict)

    new_port = self._create_new_port(port_dict, created_items, net)

    vimconnector._set_fixed_ip(new_port, net)

    port = {"port-id": new_port["port"]["id"]}

    # Device tags are only supported from Nova API microversion 2.32 on.
    if float(self.nova.api_version.get_string()) >= 2.32:
        port["tag"] = new_port["port"]["name"]

    return new_port, port
def _prepare_network_for_vminstance(
    self,
    name: str,
    net_list: list,
    created_items: dict,
    net_list_vim: list,
    external_network: list,
    no_secured_ports: list,
) -> None:
    """Create port and fill up net dictionary for new VM instance creation.

    Args:
        name (str):                     Name of network
        net_list (list):                List of networks
        created_items (dict):           All created items belongs to a VM
        net_list_vim (list):            List of ports
        external_network (list):        List of external-networks
        no_secured_ports (list):        Port security disabled ports
    """
    self._reload_connection()

    for net in net_list:
        # Skip non-connected iface
        if not net.get("net_id"):
            continue

        new_port, port = self._create_port(net, name, created_items)

        net_list_vim.append(port)

        if net.get("floating_ip", False):
            net["exit_on_floating_ip_error"] = True
            external_network.append(net)
        elif net["use"] == "mgmt" and self.config.get("use_floating_ip"):
            net["exit_on_floating_ip_error"] = False
            external_network.append(net)
            net["floating_ip"] = self.config.get("use_floating_ip")

        # If port security is disabled when the port has not yet been attached to the VM, then all vm traffic
        # is dropped. As a workaround we wait until the VM is active and then disable the port-security
        if net.get("port_security") is False and not self.config.get(
            "no_port_security_extension"
        ):
            no_secured_ports.append(
                (
                    new_port["port"]["id"],
                    net.get("port_security_disable_strategy"),
                )
            )
def _prepare_persistent_root_volumes(
    self,
    name: str,
    storage_av_zone: list,
    disk: dict,
    base_disk_index: int,
    block_device_mapping: dict,
    existing_vim_volumes: list,
    created_items: dict,
) -> Optional[str]:
    """Prepare persistent root volumes for new VM instance.

    Args:
        name (str):                     Name of VM instance
        storage_av_zone (list):         Storage of availability zones
        disk (dict):                    Disk details
        base_disk_index (int):          Disk index
        block_device_mapping (dict):    Block device details
        existing_vim_volumes (list):    Existing disk details
        created_items (dict):           All created items belongs to VM

    Returns:
        boot_volume_id (str):           ID of boot volume
    """
    # Disk may include only vim_volume_id or only vim_id."
    # Use existing persistent root volume finding with volume_id or vim_id
    key_id = "vim_volume_id" if "vim_volume_id" in disk.keys() else "vim_id"

    if disk.get(key_id):
        # Reuse the referenced volume instead of creating one.
        block_device_mapping["vd" + chr(base_disk_index)] = disk[key_id]
        existing_vim_volumes.append({"id": disk[key_id]})
    else:
        # Create persistent root volume
        volume = self.cinder.volumes.create(
            size=disk["size"],
            name=name + "vd" + chr(base_disk_index),
            imageRef=disk["image_id"],
            # Make sure volume is in the same AZ as the VM to be attached to
            availability_zone=storage_av_zone,
        )
        boot_volume_id = volume.id
        self.update_block_device_mapping(
            volume=volume,
            block_device_mapping=block_device_mapping,
            base_disk_index=base_disk_index,
            disk=disk,
            created_items=created_items,
        )

        return boot_volume_id
def update_block_device_mapping(
    self,
    volume,
    block_device_mapping: dict,
    base_disk_index: int,
    disk: dict,
    created_items: dict,
) -> None:
    """Add volume information to block device mapping dict.

    Args:
        volume (object):                Created volume object
        block_device_mapping (dict):    Block device details
        base_disk_index (int):          Disk index
        disk (dict):                    Disk details
        created_items (dict):           All created items belongs to VM
    """
    if not volume:
        raise vimconn.VimConnException("Volume is empty.")

    if not hasattr(volume, "id"):
        raise vimconn.VimConnException(
            "Created volume is not valid, does not have id attribute."
        )

    block_device_mapping["vd" + chr(base_disk_index)] = volume.id

    if disk.get("multiattach"):  # multiattach volumes do not belong to VDUs
        return

    volume_txt = "volume:" + str(volume.id)

    if disk.get("keep"):
        volume_txt += ":keep"

    created_items[volume_txt] = True
@catch_any_exception
def new_shared_volumes(self, shared_volume_data) -> (str, str):
    """Create a multiattach volume and return its (name, id).

    The storage availability zone takes precedence; otherwise the
    VM availability zone is used.
    """
    availability_zone = (
        self.storage_availability_zone
        if self.storage_availability_zone
        else self.vm_av_zone
    )
    volume = self.cinder.volumes.create(
        size=shared_volume_data["size"],
        name=shared_volume_data["name"],
        volume_type="multiattach",
        availability_zone=availability_zone,
    )
    return volume.name, volume.id
def _prepare_shared_volumes(
    self,
    name: str,
    disk: dict,
    base_disk_index: int,
    block_device_mapping: dict,
    existing_vim_volumes: list,
    created_items: dict,
):
    """Attach an already-created shared (multiattach) volume to the VM.

    Looks the volume up by name, waits (with retries) until its status
    allows attachment, then records it in the block device mapping.
    """
    volumes = {volume.name: volume.id for volume in self.cinder.volumes.list()}

    if volumes.get(disk["name"]):
        sv_id = volumes[disk["name"]]
        max_retries = 3
        vol_status = ""

        # If this is not the first VM to attach the volume, volume status may be "reserved" for a short time
        while max_retries:
            max_retries -= 1
            volume = self.cinder.volumes.get(sv_id)
            vol_status = volume.status

            if volume.status not in ("in-use", "available"):
                time.sleep(5)
                continue

            self.update_block_device_mapping(
                volume=volume,
                block_device_mapping=block_device_mapping,
                base_disk_index=base_disk_index,
                disk=disk,
                created_items=created_items,
            )

            return

        raise vimconn.VimConnException(
            "Shared volume is not prepared, status is: {}".format(vol_status),
            http_code=vimconn.HTTP_Internal_Server_Error,
        )
def _prepare_non_root_persistent_volumes(
    self,
    name: str,
    disk: dict,
    storage_av_zone: list,
    block_device_mapping: dict,
    base_disk_index: int,
    existing_vim_volumes: list,
    created_items: dict,
) -> None:
    """Prepare persistent volumes for new VM instance.

    Args:
        name (str):                     Name of VM instance
        disk (dict):                    Disk details
        storage_av_zone (list):         Storage of availability zones
        block_device_mapping (dict):    Block device details
        base_disk_index (int):          Disk index
        existing_vim_volumes (list):    Existing disk details
        created_items (dict):           All created items belongs to VM
    """
    # Non-root persistent volumes
    # Disk may include only vim_volume_id or only vim_id."
    key_id = "vim_volume_id" if "vim_volume_id" in disk.keys() else "vim_id"

    if disk.get(key_id):
        # Use existing persistent volume
        block_device_mapping["vd" + chr(base_disk_index)] = disk[key_id]
        existing_vim_volumes.append({"id": disk[key_id]})
    else:
        volume_name = f"{name}vd{chr(base_disk_index)}"
        volume = self.cinder.volumes.create(
            size=disk["size"],
            name=volume_name,
            # Make sure volume is in the same AZ as the VM to be attached to
            availability_zone=storage_av_zone,
        )
        self.update_block_device_mapping(
            volume=volume,
            block_device_mapping=block_device_mapping,
            base_disk_index=base_disk_index,
            disk=disk,
            created_items=created_items,
        )
2294 def _wait_for_created_volumes_availability(
2295 self
, elapsed_time
: int, created_items
: dict
2297 """Wait till created volumes become available.
2300 elapsed_time (int): Passed time while waiting
2301 created_items (dict): All created items belongs to VM
2304 elapsed_time (int): Time spent while waiting
2307 while elapsed_time
< volume_timeout
:
2308 for created_item
in created_items
:
2310 created_item
.split(":")[0],
2311 created_item
.split(":")[1],
2314 volume
= self
.cinder
.volumes
.get(volume_id
)
2316 volume
.volume_type
== "multiattach"
2317 and volume
.status
== "in-use"
2320 elif volume
.status
!= "available":
2323 # All ready: break from while
2331 def _wait_for_existing_volumes_availability(
2332 self
, elapsed_time
: int, existing_vim_volumes
: list
2334 """Wait till existing volumes become available.
2337 elapsed_time (int): Passed time while waiting
2338 existing_vim_volumes (list): Existing volume details
2341 elapsed_time (int): Time spent while waiting
2345 while elapsed_time
< volume_timeout
:
2346 for volume
in existing_vim_volumes
:
2347 v
= self
.cinder
.volumes
.get(volume
["id"])
2348 if v
.volume_type
== "multiattach" and v
.status
== "in-use":
2350 elif v
.status
!= "available":
2352 else: # all ready: break from while
2360 def _prepare_disk_for_vminstance(
2363 existing_vim_volumes
: list,
2364 created_items
: dict,
2365 storage_av_zone
: list,
2366 block_device_mapping
: dict,
2367 disk_list
: list = None,
2369 """Prepare all volumes for new VM instance.
2372 name (str): Name of Instance
2373 existing_vim_volumes (list): List of existing volumes
2374 created_items (dict): All created items belongs to VM
2375 storage_av_zone (list): Storage availability zone
2376 block_device_mapping (dict): Block devices to be attached to VM
2377 disk_list (list): List of disks
2380 # Create additional volumes in case these are present in disk_list
2381 base_disk_index
= ord("b")
2382 boot_volume_id
= None
2384 for disk
in disk_list
:
2385 if "image_id" in disk
:
2386 # Root persistent volume
2387 base_disk_index
= ord("a")
2388 boot_volume_id
= self
._prepare
_persistent
_root
_volumes
(
2390 storage_av_zone
=storage_av_zone
,
2392 base_disk_index
=base_disk_index
,
2393 block_device_mapping
=block_device_mapping
,
2394 existing_vim_volumes
=existing_vim_volumes
,
2395 created_items
=created_items
,
2397 elif disk
.get("multiattach"):
2398 self
._prepare
_shared
_volumes
(
2401 base_disk_index
=base_disk_index
,
2402 block_device_mapping
=block_device_mapping
,
2403 existing_vim_volumes
=existing_vim_volumes
,
2404 created_items
=created_items
,
2407 # Non-root persistent volume
2408 self
._prepare
_non
_root
_persistent
_volumes
(
2411 storage_av_zone
=storage_av_zone
,
2412 block_device_mapping
=block_device_mapping
,
2413 base_disk_index
=base_disk_index
,
2414 existing_vim_volumes
=existing_vim_volumes
,
2415 created_items
=created_items
,
2417 base_disk_index
+= 1
2419 # Wait until created volumes are with status available
2420 elapsed_time
= self
._wait
_for
_created
_volumes
_availability
(
2421 elapsed_time
, created_items
2423 # Wait until existing volumes in vim are with status available
2424 elapsed_time
= self
._wait
_for
_existing
_volumes
_availability
(
2425 elapsed_time
, existing_vim_volumes
2427 # If we exceeded the timeout rollback
2428 if elapsed_time
>= volume_timeout
:
2429 raise vimconn
.VimConnException(
2430 "Timeout creating volumes for instance " + name
,
2431 http_code
=vimconn
.HTTP_Request_Timeout
,
2434 self
.cinder
.volumes
.set_bootable(boot_volume_id
, True)
def _find_the_external_network_for_floating_ip(self):
    """Locate the single external (router:external) network.

    Returns:
        str: Pool (network) id of the external network.

    Raises:
        VimConnException: When no external network exists, or when more
            than one does (the choice would be ambiguous).
    """
    # Collect every network neutron flags as external.
    external_nets = [
        net
        for net in self.neutron.list_networks()["networks"]
        if net["router:external"]
    ]

    if len(external_nets) == 0:
        raise vimconn.VimConnException(
            "Cannot create floating_ip automatically since "
            "no external network is present",
            http_code=vimconn.HTTP_Conflict,
        )

    if len(external_nets) > 1:
        raise vimconn.VimConnException(
            "Cannot create floating_ip automatically since "
            "multiple external networks are present",
            http_code=vimconn.HTTP_Conflict,
        )

    # Exactly one external network: its id is the floating-ip pool.
    return external_nets[0].get("id")
def _neutron_create_float_ip(self, param: dict, created_items: dict) -> None:
    """Trigger neutron to create a new floating IP using external network ID.

    Args:
        param (dict): Input parameters to create a floating IP
        created_items (dict): All created items belongs to new VM instance

    Raises:
        VimConnException: If neutron fails to create the floating IP.
    """
    try:
        self.logger.debug("Creating floating IP")
        # Neutron replies {"floatingip": {...}}; keep the id in
        # created_items so the IP can be released on VM deletion.
        reply = self.neutron.create_floatingip(param)
        fip_id = reply["floatingip"]["id"]
        created_items["floating_ip:" + str(fip_id)] = True
    except Exception as e:
        raise vimconn.VimConnException(
            type(e).__name__ + ": Cannot create new floating_ip " + str(e),
            http_code=vimconn.HTTP_Conflict,
        )
2491 def _create_floating_ip(
2492 self
, floating_network
: dict, server
: object, created_items
: dict
2494 """Get the available Pool ID and create a new floating IP.
2497 floating_network (dict): Dict including external network ID
2498 server (object): Server object
2499 created_items (dict): All created items belongs to new VM instance
2503 # Pool_id is available
2505 isinstance(floating_network
["floating_ip"], str)
2506 and floating_network
["floating_ip"].lower() != "true"
2508 pool_id
= floating_network
["floating_ip"]
2512 pool_id
= self
._find
_the
_external
_network
_for
_floating
_ip
()
2516 "floating_network_id": pool_id
,
2517 "tenant_id": server
.tenant_id
,
2521 self
._neutron
_create
_float
_ip
(param
, created_items
)
2523 def _find_floating_ip(
2527 floating_network
: dict,
2529 """Find the available free floating IPs if there are.
2532 server (object): Server object
2533 floating_ips (list): List of floating IPs
2534 floating_network (dict): Details of floating network such as ID
2537 free_floating_ip (str): Free floating ip address
2540 for fip
in floating_ips
:
2541 if fip
.get("port_id") or fip
.get("tenant_id") != server
.tenant_id
:
2544 if isinstance(floating_network
["floating_ip"], str):
2545 if fip
.get("floating_network_id") != floating_network
["floating_ip"]:
def _assign_floating_ip(self, free_floating_ip: str, floating_network: dict) -> dict:
    """Assign the free floating ip address to port.

    Args:
        free_floating_ip (str): Floating IP to be assigned
        floating_network (dict): ID of floating network

    Returns:
        fip (dict) (dict): Floating ip details
    """
    # The vim_id key contains the neutron.port_id
    self.neutron.update_floatingip(
        free_floating_ip,
        {"floatingip": {"port_id": floating_network["vim_id"]}},
    )

    # For race condition ensure not re-assigned to other VM after 5 seconds
    time.sleep(5)

    return self.neutron.show_floatingip(free_floating_ip)
def _get_free_floating_ip(self, server: object, floating_network: dict) -> Optional[str]:
    """Get the free floating IP address.

    Args:
        server (object): Server Object
        floating_network (dict): Floating network details

    Returns:
        free_floating_ip (str): Free floating ip addr
    """
    # All floating IPs known to neutron (may belong to other tenants;
    # _find_floating_ip filters them).
    floating_ips = self.neutron.list_floatingips().get("floatingips", ())

    # Shuffle so that concurrent RO instances racing for a free IP are
    # less likely to pick the same one.
    random.shuffle(floating_ips)

    return self._find_floating_ip(server, floating_ips, floating_network)
2594 def _prepare_external_network_for_vminstance(
2596 external_network
: list,
2598 created_items
: dict,
2599 vm_start_time
: float,
2601 """Assign floating IP address for VM instance.
2604 external_network (list): ID of External network
2605 server (object): Server Object
2606 created_items (dict): All created items belongs to new VM instance
2607 vm_start_time (float): Time as a floating point number expressed in seconds since the epoch, in UTC
2613 for floating_network
in external_network
:
2616 floating_ip_retries
= 3
2617 # In case of RO in HA there can be conflicts, two RO trying to assign same floating IP, so retry
2620 free_floating_ip
= self
._get
_free
_floating
_ip
(
2621 server
, floating_network
2624 if not free_floating_ip
:
2625 self
._create
_floating
_ip
(
2626 floating_network
, server
, created_items
2630 # For race condition ensure not already assigned
2631 fip
= self
.neutron
.show_floatingip(free_floating_ip
)
2633 if fip
["floatingip"].get("port_id"):
2636 # Assign floating ip
2637 fip
= self
._assign
_floating
_ip
(
2638 free_floating_ip
, floating_network
2641 if fip
["floatingip"]["port_id"] != floating_network
["vim_id"]:
2642 self
.logger
.warning(
2643 "floating_ip {} re-assigned to other port".format(
2650 "Assigned floating_ip {} to VM {}".format(
2651 free_floating_ip
, server
.id
2657 except Exception as e
:
2658 # Openstack need some time after VM creation to assign an IP. So retry if fails
2659 vm_status
= self
.nova
.servers
.get(server
.id).status
2661 if vm_status
not in ("ACTIVE", "ERROR"):
2662 if time
.time() - vm_start_time
< server_timeout
:
2665 elif floating_ip_retries
> 0:
2666 floating_ip_retries
-= 1
2669 raise vimconn
.VimConnException(
2670 "Cannot create floating_ip: {} {}".format(
2673 http_code
=vimconn
.HTTP_Conflict
,
2676 except Exception as e
:
2677 if not floating_network
["exit_on_floating_ip_error"]:
2678 self
.logger
.error("Cannot create floating_ip. %s", str(e
))
2683 def _update_port_security_for_vminstance(
2685 no_secured_ports
: list,
2688 """Updates the port security according to no_secured_ports list.
2691 no_secured_ports (list): List of ports that security will be disabled
2692 server (object): Server Object
2698 # Wait until the VM is active and then disable the port-security
2699 if no_secured_ports
:
2700 self
.__wait
_for
_vm
(server
.id, "ACTIVE")
2702 for port
in no_secured_ports
:
2704 "port": {"port_security_enabled": False, "security_groups": None}
2707 if port
[1] == "allow-address-pairs":
2709 "port": {"allowed_address_pairs": [{"ip_address": "0.0.0.0/0"}]}
2713 self
.neutron
.update_port(port
[0], port_update
)
2716 raise vimconn
.VimConnException(
2717 "It was not possible to disable port security for port {}".format(
2729 affinity_group_list
: list,
2733 availability_zone_index
=None,
2734 availability_zone_list
=None,
2736 """Adds a VM instance to VIM.
2739 name (str): name of VM
2740 description (str): description
2741 start (bool): indicates if VM must start or boot in pause mode. Ignored
2742 image_id (str) image uuid
2743 flavor_id (str) flavor uuid
2744 affinity_group_list (list): list of affinity groups, each one is a dictionary.Ignore if empty.
2745 net_list (list): list of interfaces, each one is a dictionary with:
2746 name: name of network
2747 net_id: network uuid to connect
2748 vpci: virtual vcpi to assign, ignored because openstack lack #TODO
2749 model: interface model, ignored #TODO
2750 mac_address: used for SR-IOV ifaces #TODO for other types
2751 use: 'data', 'bridge', 'mgmt'
2752 type: 'virtual', 'PCI-PASSTHROUGH'('PF'), 'SR-IOV'('VF'), 'VFnotShared'
2753 vim_id: filled/added by this function
2754 floating_ip: True/False (or it can be None)
2755 port_security: True/False
2756 cloud_config (dict): (optional) dictionary with:
2757 key-pairs: (optional) list of strings with the public key to be inserted to the default user
2758 users: (optional) list of users to be inserted, each item is a dict with:
2759 name: (mandatory) user name,
2760 key-pairs: (optional) list of strings with the public key to be inserted to the user
2761 user-data: (optional) string is a text script to be passed directly to cloud-init
2762 config-files: (optional). List of files to be transferred. Each item is a dict with:
2763 dest: (mandatory) string with the destination absolute path
2764 encoding: (optional, by default text). Can be one of:
2765 'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
2766 content : (mandatory) string with the content of the file
2767 permissions: (optional) string with file permissions, typically octal notation '0644'
2768 owner: (optional) file owner, string with the format 'owner:group'
2769 boot-data-drive: boolean to indicate if user-data must be passed using a boot drive (hard disk)
2770 disk_list: (optional) list with additional disks to the VM. Each item is a dict with:
2771 image_id: (optional). VIM id of an existing image. If not provided an empty disk must be mounted
2772 size: (mandatory) string with the size of the disk in GB
2773 vim_id: (optional) should use this existing volume id
2774 availability_zone_index: Index of availability_zone_list to use for this this VM. None if not AV required
2775 availability_zone_list: list of availability zones given by user in the VNFD descriptor. Ignore if
2776 availability_zone_index is None
2777 #TODO ip, security groups
2780 A tuple with the instance identifier and created_items or raises an exception on error
2781 created_items can be None or a dictionary where this method can include key-values that will be passed to
2782 the method delete_vminstance and action_vminstance. Can be used to store created ports, volumes, etc.
2783 Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
2788 "new_vminstance input: image='%s' flavor='%s' nics='%s'",
2796 # list of external networks to be connected to instance, later on used to create floating_ip
2797 external_network
= []
2798 # List of ports with port-security disabled
2799 no_secured_ports
= []
2800 block_device_mapping
= {}
2801 existing_vim_volumes
= []
2802 server_group_id
= None
2803 scheduller_hints
= {}
2806 # Check the Openstack Connection
2807 self
._reload
_connection
()
2809 # Prepare network list
2810 self
._prepare
_network
_for
_vminstance
(
2813 created_items
=created_items
,
2814 net_list_vim
=net_list_vim
,
2815 external_network
=external_network
,
2816 no_secured_ports
=no_secured_ports
,
2820 config_drive
, userdata
= self
._create
_user
_data
(cloud_config
)
2822 # Get availability Zone
2823 self
.vm_av_zone
= self
._get
_vm
_availability
_zone
(
2824 availability_zone_index
, availability_zone_list
2828 self
.storage_availability_zone
2829 if self
.storage_availability_zone
2830 else self
.vm_av_zone
2835 self
._prepare
_disk
_for
_vminstance
(
2837 existing_vim_volumes
=existing_vim_volumes
,
2838 created_items
=created_items
,
2839 storage_av_zone
=storage_av_zone
,
2840 block_device_mapping
=block_device_mapping
,
2841 disk_list
=disk_list
,
2844 if affinity_group_list
:
2845 # Only first id on the list will be used. Openstack restriction
2846 server_group_id
= affinity_group_list
[0]["affinity_group_id"]
2847 scheduller_hints
["group"] = server_group_id
2850 "nova.servers.create({}, {}, {}, nics={}, security_groups={}, "
2851 "availability_zone={}, key_name={}, userdata={}, config_drive={}, "
2852 "block_device_mapping={}, server_group={})".format(
2857 self
.config
.get("security_groups"),
2859 self
.config
.get("keypair"),
2862 block_device_mapping
,
2867 server
= self
.nova
.servers
.create(
2872 security_groups
=self
.config
.get("security_groups"),
2873 # TODO remove security_groups in future versions. Already at neutron port
2874 availability_zone
=self
.vm_av_zone
,
2875 key_name
=self
.config
.get("keypair"),
2877 config_drive
=config_drive
,
2878 block_device_mapping
=block_device_mapping
,
2879 scheduler_hints
=scheduller_hints
,
2882 vm_start_time
= time
.time()
2884 self
._update
_port
_security
_for
_vminstance
(no_secured_ports
, server
)
2886 self
._prepare
_external
_network
_for
_vminstance
(
2887 external_network
=external_network
,
2889 created_items
=created_items
,
2890 vm_start_time
=vm_start_time
,
2893 return server
.id, created_items
2895 except Exception as e
:
2898 server_id
= server
.id
2901 created_items
= self
.remove_keep_tag_from_persistent_volumes(
2905 self
.delete_vminstance(server_id
, created_items
)
2907 except Exception as e2
:
2908 self
.logger
.error("new_vminstance rollback fail {}".format(e2
))
2910 self
._format
_exception
(e
)
def remove_keep_tag_from_persistent_volumes(created_items: Dict) -> Dict:
    """Removes the keep flag from persistent volumes. So, those volumes could be removed.

    Args:
        created_items (dict): All created items belongs to VM

    Returns:
        updated_created_items (dict): Dict which does not include keep flag for volumes.
    """
    # Strip the ":keep" marker from every key; values are passed through
    # untouched, so delete_vminstance will treat the volumes as removable.
    return {
        item_key.replace(":keep", ""): item_value
        for item_key, item_value in created_items.items()
    }
def get_vminstance(self, vm_id):
    """Returns the VM instance information from VIM.

    Args:
        vm_id: VIM identifier of the virtual machine.

    Returns:
        Server information as returned by self._find_nova_server.
    """
    return self._find_nova_server(vm_id)
2931 @catch_any_exception
2932 def get_vminstance_console(self
, vm_id
, console_type
="vnc"):
2934 Get a console for the virtual machine
2936 vm_id: uuid of the VM
2937 console_type, can be:
2938 "novnc" (by default), "xvpvnc" for VNC types,
2939 "rdp-html5" for RDP types, "spice-html5" for SPICE types
2940 Returns dict with the console parameters:
2941 protocol: ssh, ftp, http, https, ...
2942 server: usually ip address
2943 port: the http, ssh, ... port
2944 suffix: extra text, e.g. the http path and query string
2946 self
.logger
.debug("Getting VM CONSOLE from VIM")
2947 self
._reload
_connection
()
2948 server
= self
.nova
.servers
.find(id=vm_id
)
2950 if console_type
is None or console_type
== "novnc":
2951 console_dict
= server
.get_vnc_console("novnc")
2952 elif console_type
== "xvpvnc":
2953 console_dict
= server
.get_vnc_console(console_type
)
2954 elif console_type
== "rdp-html5":
2955 console_dict
= server
.get_rdp_console(console_type
)
2956 elif console_type
== "spice-html5":
2957 console_dict
= server
.get_spice_console(console_type
)
2959 raise vimconn
.VimConnException(
2960 "console type '{}' not allowed".format(console_type
),
2961 http_code
=vimconn
.HTTP_Bad_Request
,
2964 console_dict1
= console_dict
.get("console")
2967 console_url
= console_dict1
.get("url")
2971 protocol_index
= console_url
.find("//")
2973 console_url
[protocol_index
+ 2 :].find("/") + protocol_index
+ 2
2976 console_url
[protocol_index
+ 2 : suffix_index
].find(":")
2981 if protocol_index
< 0 or port_index
< 0 or suffix_index
< 0:
2983 -vimconn
.HTTP_Internal_Server_Error
,
2984 "Unexpected response from VIM",
2988 "protocol": console_url
[0:protocol_index
],
2989 "server": console_url
[protocol_index
+ 2 : port_index
],
2990 "port": console_url
[port_index
:suffix_index
],
2991 "suffix": console_url
[suffix_index
+ 1 :],
2996 raise vimconn
.VimConnUnexpectedResponse("Unexpected response from VIM")
def _delete_ports_by_id_wth_neutron(self, k_id: str) -> None:
    """Neutron delete ports by id.

    Args:
        k_id (str): Port id in the VIM
    """
    try:
        self.neutron.delete_port(k_id)
    except (neExceptions.ConnectionFailed, ConnectionError) as e:
        msg = "Error deleting port: {}: {}".format(type(e).__name__, e)
        self.logger.error(msg)
        # If there is connection error, raise.
        self._format_exception(e)
    except Exception as e:
        # Best-effort delete: anything other than a connection failure is
        # only logged, not propagated.
        msg = "Error deleting port: {}: {}".format(type(e).__name__, e)
        self.logger.error(msg)
3013 def delete_shared_volumes(self
, shared_volume_vim_id
: str) -> bool:
3014 """Cinder delete volume by id.
3016 shared_volume_vim_id (str): ID of shared volume in VIM
3020 while elapsed_time
< server_timeout
:
3021 vol_status
= self
.cinder
.volumes
.get(shared_volume_vim_id
).status
3022 if vol_status
== "available":
3023 self
.cinder
.volumes
.delete(shared_volume_vim_id
)
3029 if elapsed_time
>= server_timeout
:
3030 raise vimconn
.VimConnException(
3031 "Timeout waiting for volume "
3032 + shared_volume_vim_id
3033 + " to be available",
3034 http_code
=vimconn
.HTTP_Request_Timeout
,
3037 except Exception as e
:
3039 "Error deleting volume: {}: {}".format(type(e
).__name
__, e
)
3041 self
._format
_exception
(e
)
3043 def _delete_volumes_by_id_wth_cinder(
3044 self
, k
: str, k_id
: str, volumes_to_hold
: list, created_items
: dict
3046 """Cinder delete volume by id.
3048 k (str): Full item name in created_items
3049 k_id (str): ID of floating ip in VIM
3050 volumes_to_hold (list): Volumes not to delete
3051 created_items (dict): All created items belongs to VM
3054 if k_id
in volumes_to_hold
:
3057 if self
.cinder
.volumes
.get(k_id
).status
!= "available":
3061 self
.cinder
.volumes
.delete(k_id
)
3062 created_items
[k
] = None
3064 except (cExceptions
.ConnectionError
, ConnectionError
) as e
:
3066 "Error deleting volume: {}: {}".format(type(e
).__name
__, e
)
3068 self
._format
_exception
(e
)
3069 except Exception as e
:
3071 "Error deleting volume: {}: {}".format(type(e
).__name
__, e
)
def _delete_floating_ip_by_id(self, k: str, k_id: str, created_items: dict) -> None:
    """Neutron delete floating ip by id.

    Args:
        k (str): Full item name in created_items
        k_id (str): ID of floating ip in VIM
        created_items (dict): All created items belongs to VM
    """
    try:
        self.neutron.delete_floatingip(k_id)
        # Mark the item as deleted so later passes skip it.
        created_items[k] = None
    except (neExceptions.ConnectionFailed, ConnectionError) as e:
        msg = "Error deleting floating ip: {}: {}".format(type(e).__name__, e)
        self.logger.error(msg)
        # Connection errors are fatal for the cleanup loop: re-raise.
        self._format_exception(e)
    except Exception as e:
        # Other failures are logged and tolerated (best-effort cleanup).
        msg = "Error deleting floating ip: {}: {}".format(type(e).__name__, e)
        self.logger.error(msg)
3096 def _get_item_name_id(k
: str) -> Tuple
[str, str]:
3097 k_item
, _
, k_id
= k
.partition(":")
3100 def _delete_vm_ports_attached_to_network(self
, created_items
: dict) -> None:
3101 """Delete VM ports attached to the networks before deleting virtual machine.
3103 created_items (dict): All created items belongs to VM
3106 for k
, v
in created_items
.items():
3107 if not v
: # skip already deleted
3111 k_item
, k_id
= self
._get
_item
_name
_id
(k
)
3112 if k_item
== "port":
3113 self
._delete
_ports
_by
_id
_wth
_neutron
(k_id
)
3115 except (neExceptions
.ConnectionFailed
, ConnectionError
) as e
:
3117 "Error deleting port: {}: {}".format(type(e
).__name
__, e
)
3119 self
._format
_exception
(e
)
3120 except Exception as e
:
3122 "Error deleting port: {}: {}".format(type(e
).__name
__, e
)
3125 def _delete_created_items(
3126 self
, created_items
: dict, volumes_to_hold
: list, keep_waiting
: bool
3128 """Delete Volumes and floating ip if they exist in created_items."""
3129 for k
, v
in created_items
.items():
3130 if not v
: # skip already deleted
3134 k_item
, k_id
= self
._get
_item
_name
_id
(k
)
3135 if k_item
== "volume":
3136 unavailable_vol
= self
._delete
_volumes
_by
_id
_wth
_cinder
(
3137 k
, k_id
, volumes_to_hold
, created_items
3143 elif k_item
== "floating_ip":
3144 self
._delete
_floating
_ip
_by
_id
(k
, k_id
, created_items
)
3147 cExceptions
.ConnectionError
,
3148 neExceptions
.ConnectionFailed
,
3153 self
.logger
.error("Error deleting {}: {}".format(k
, e
))
3154 self
._format
_exception
(e
)
3156 except Exception as e
:
3157 self
.logger
.error("Error deleting {}: {}".format(k
, e
))
3162 def _extract_items_wth_keep_flag_from_created_items(created_items
: dict) -> dict:
3163 """Remove the volumes which has key flag from created_items
3166 created_items (dict): All created items belongs to VM
3169 created_items (dict): Persistent volumes eliminated created_items
3173 for (key
, value
) in created_items
.items()
3174 if len(key
.split(":")) == 2
3177 @catch_any_exception
3178 def delete_vminstance(
3179 self
, vm_id
: str, created_items
: dict = None, volumes_to_hold
: list = None
3181 """Removes a VM instance from VIM. Returns the old identifier.
3183 vm_id (str): Identifier of VM instance
3184 created_items (dict): All created items belongs to VM
3185 volumes_to_hold (list): Volumes_to_hold
3187 if created_items
is None:
3189 if volumes_to_hold
is None:
3190 volumes_to_hold
= []
3193 created_items
= self
._extract
_items
_wth
_keep
_flag
_from
_created
_items
(
3197 self
._reload
_connection
()
3199 # Delete VM ports attached to the networks before the virtual machine
3201 self
._delete
_vm
_ports
_attached
_to
_network
(created_items
)
3204 self
.nova
.servers
.delete(vm_id
)
3206 # Although having detached, volumes should have in active status before deleting.
3207 # We ensure in this loop
3211 while keep_waiting
and elapsed_time
< volume_timeout
:
3212 keep_waiting
= False
3214 # Delete volumes and floating IP.
3215 keep_waiting
= self
._delete
_created
_items
(
3216 created_items
, volumes_to_hold
, keep_waiting
3222 except (nvExceptions
.NotFound
, nvExceptions
.ResourceNotFound
) as e
:
3223 # If VM does not exist, it does not raise
3224 self
.logger
.warning(f
"Error deleting VM: {vm_id} is not found, {str(e)}")
3226 def refresh_vms_status(self
, vm_list
):
3227 """Get the status of the virtual machines and their interfaces/ports
3228 Params: the list of VM identifiers
3229 Returns a dictionary with:
3230 vm_id: #VIM id of this Virtual Machine
3231 status: #Mandatory. Text with one of:
3232 # DELETED (not found at vim)
3233 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
3234 # OTHER (Vim reported other status not understood)
3235 # ERROR (VIM indicates an ERROR status)
3236 # ACTIVE, PAUSED, SUSPENDED, INACTIVE (not running),
3237 # CREATING (on building process), ERROR
3238 # ACTIVE:NoMgmtIP (Active but any of its interface has an IP address
3240 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
3241 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
3243 - vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
3244 mac_address: #Text format XX:XX:XX:XX:XX:XX
3245 vim_net_id: #network id where this interface is connected
3246 vim_interface_id: #interface/port VIM id
3247 ip_address: #null, or text with IPv4, IPv6 address
3248 compute_node: #identification of compute node where PF,VF interface is allocated
3249 pci: #PCI address of the NIC that hosts the PF,VF
3250 vlan: #physical VLAN used for VF
3254 "refresh_vms status: Getting tenant VM instance information from VIM"
3256 for vm_id
in vm_list
:
3260 vm_vim
= self
.get_vminstance(vm_id
)
3262 if vm_vim
["status"] in vmStatus2manoFormat
:
3263 vm
["status"] = vmStatus2manoFormat
[vm_vim
["status"]]
3265 vm
["status"] = "OTHER"
3266 vm
["error_msg"] = "VIM status reported " + vm_vim
["status"]
3268 vm_vim
.pop("OS-EXT-SRV-ATTR:user_data", None)
3269 vm_vim
.pop("user_data", None)
3270 vm
["vim_info"] = self
.serialize(vm_vim
)
3272 vm
["interfaces"] = []
3273 if vm_vim
.get("fault"):
3274 vm
["error_msg"] = str(vm_vim
["fault"])
3278 self
._reload
_connection
()
3279 port_dict
= self
.neutron
.list_ports(device_id
=vm_id
)
3281 for port
in port_dict
["ports"]:
3283 interface
["vim_info"] = self
.serialize(port
)
3284 interface
["mac_address"] = port
.get("mac_address")
3285 interface
["vim_net_id"] = port
["network_id"]
3286 interface
["vim_interface_id"] = port
["id"]
3287 # check if OS-EXT-SRV-ATTR:host is there,
3288 # in case of non-admin credentials, it will be missing
3290 if vm_vim
.get("OS-EXT-SRV-ATTR:host"):
3291 interface
["compute_node"] = vm_vim
["OS-EXT-SRV-ATTR:host"]
3293 interface
["pci"] = None
3295 # check if binding:profile is there,
3296 # in case of non-admin credentials, it will be missing
3297 if port
.get("binding:profile"):
3298 if port
["binding:profile"].get("pci_slot"):
3299 # TODO: At the moment sr-iov pci addresses are converted to PF pci addresses by setting
3301 # TODO: This is just a workaround valid for niantinc. Find a better way to do so
3302 # CHANGE DDDD:BB:SS.F to DDDD:BB:00.(F%2) assuming there are 2 ports per nic
3303 pci
= port
["binding:profile"]["pci_slot"]
3304 # interface["pci"] = pci[:-4] + "00." + str(int(pci[-1]) % 2)
3305 interface
["pci"] = pci
3307 interface
["vlan"] = None
3309 if port
.get("binding:vif_details"):
3310 interface
["vlan"] = port
["binding:vif_details"].get("vlan")
3312 # Get vlan from network in case not present in port for those old openstacks and cases where
3313 # it is needed vlan at PT
3314 if not interface
["vlan"]:
3315 # if network is of type vlan and port is of type direct (sr-iov) then set vlan id
3316 network
= self
.neutron
.show_network(port
["network_id"])
3319 network
["network"].get("provider:network_type")
3322 # and port.get("binding:vnic_type") in ("direct", "direct-physical"):
3323 interface
["vlan"] = network
["network"].get(
3324 "provider:segmentation_id"
3328 # look for floating ip address
3330 floating_ip_dict
= self
.neutron
.list_floatingips(
3334 if floating_ip_dict
.get("floatingips"):
3336 floating_ip_dict
["floatingips"][0].get(
3337 "floating_ip_address"
3343 for subnet
in port
["fixed_ips"]:
3344 ips
.append(subnet
["ip_address"])
3346 interface
["ip_address"] = ";".join(ips
)
3347 vm
["interfaces"].append(interface
)
3348 except Exception as e
:
3350 "Error getting vm interface information {}: {}".format(
3355 except vimconn
.VimConnNotFoundException
as e
:
3356 self
.logger
.error("Exception getting vm status: %s", str(e
))
3357 vm
["status"] = "DELETED"
3358 vm
["error_msg"] = str(e
)
3359 except vimconn
.VimConnException
as e
:
3360 self
.logger
.error("Exception getting vm status: %s", str(e
))
3361 vm
["status"] = "VIM_ERROR"
3362 vm
["error_msg"] = str(e
)
3368 @catch_any_exception
3369 def action_vminstance(self
, vm_id
, action_dict
, created_items
={}):
3370 """Send and action over a VM instance from VIM
3371 Returns None or the console dict if the action was successfully sent to the VIM
3373 self
.logger
.debug("Action over VM '%s': %s", vm_id
, str(action_dict
))
3374 self
._reload
_connection
()
3375 server
= self
.nova
.servers
.find(id=vm_id
)
3376 if "start" in action_dict
:
3377 if action_dict
["start"] == "rebuild":
3379 vm_state
= self
.__wait
_for
_vm
(vm_id
, "ACTIVE")
3381 raise nvExceptions
.BadRequest(
3383 message
="Cannot 'REBUILD' vm_state is in ERROR",
3386 if server
.status
== "PAUSED":
3388 elif server
.status
== "SUSPENDED":
3390 elif server
.status
== "SHUTOFF":
3392 vm_state
= self
.__wait
_for
_vm
(vm_id
, "ACTIVE")
3394 raise nvExceptions
.BadRequest(
3396 message
="Cannot 'START' vm_state is in ERROR",
3400 "ERROR : Instance is not in SHUTOFF/PAUSE/SUSPEND state"
3402 raise vimconn
.VimConnException(
3403 "Cannot 'start' instance while it is in active state",
3404 http_code
=vimconn
.HTTP_Bad_Request
,
3406 elif "pause" in action_dict
:
3408 elif "resume" in action_dict
:
3410 elif "shutoff" in action_dict
or "shutdown" in action_dict
:
3411 self
.logger
.debug("server status %s", server
.status
)
3412 if server
.status
== "ACTIVE":
3414 vm_state
= self
.__wait
_for
_vm
(vm_id
, "SHUTOFF")
3416 raise nvExceptions
.BadRequest(
3418 message
="Cannot 'STOP' vm_state is in ERROR",
3421 self
.logger
.debug("ERROR: VM is not in Active state")
3422 raise vimconn
.VimConnException(
3423 "VM is not in active state, stop operation is not allowed",
3424 http_code
=vimconn
.HTTP_Bad_Request
,
3426 elif "forceOff" in action_dict
:
3427 server
.stop() # TODO
3428 elif "terminate" in action_dict
:
3430 elif "createImage" in action_dict
:
3431 server
.create_image()
3432 # "path":path_schema,
3433 # "description":description_schema,
3434 # "name":name_schema,
3435 # "metadata":metadata_schema,
3436 # "imageRef": id_schema,
3437 # "disk": {"oneOf":[{"type": "null"}, {"type":"string"}] },
3438 elif "rebuild" in action_dict
:
3439 server
.rebuild(server
.image
["id"])
3440 elif "reboot" in action_dict
:
3441 server
.reboot() # reboot_type="SOFT"
3442 elif "console" in action_dict
:
3443 console_type
= action_dict
["console"]
3445 if console_type
is None or console_type
== "novnc":
3446 console_dict
= server
.get_vnc_console("novnc")
3447 elif console_type
== "xvpvnc":
3448 console_dict
= server
.get_vnc_console(console_type
)
3449 elif console_type
== "rdp-html5":
3450 console_dict
= server
.get_rdp_console(console_type
)
3451 elif console_type
== "spice-html5":
3452 console_dict
= server
.get_spice_console(console_type
)
3454 raise vimconn
.VimConnException(
3455 "console type '{}' not allowed".format(console_type
),
3456 http_code
=vimconn
.HTTP_Bad_Request
,
3460 console_url
= console_dict
["console"]["url"]
3462 protocol_index
= console_url
.find("//")
3464 console_url
[protocol_index
+ 2 :].find("/") + protocol_index
+ 2
3467 console_url
[protocol_index
+ 2 : suffix_index
].find(":")
3472 if protocol_index
< 0 or port_index
< 0 or suffix_index
< 0:
3473 raise vimconn
.VimConnException(
3474 "Unexpected response from VIM " + str(console_dict
)
3478 "protocol": console_url
[0:protocol_index
],
3479 "server": console_url
[protocol_index
+ 2 : port_index
],
3480 "port": int(console_url
[port_index
+ 1 : suffix_index
]),
3481 "suffix": console_url
[suffix_index
+ 1 :],
3484 return console_dict2
3486 raise vimconn
.VimConnException(
3487 "Unexpected response from VIM " + str(console_dict
)
3492 # ###### VIO Specific Changes #########
3493 def _generate_vlanID(self
):
3495 Method to get unused vlanID
3503 networks
= self
.get_network_list()
3505 for net
in networks
:
3506 if net
.get("provider:segmentation_id"):
3507 usedVlanIDs
.append(net
.get("provider:segmentation_id"))
3509 used_vlanIDs
= set(usedVlanIDs
)
3511 # find unused VLAN ID
3512 for vlanID_range
in self
.config
.get("dataplane_net_vlan_range"):
3514 start_vlanid
, end_vlanid
= map(
3515 int, vlanID_range
.replace(" ", "").split("-")
3518 for vlanID
in range(start_vlanid
, end_vlanid
+ 1):
3519 if vlanID
not in used_vlanIDs
:
3521 except Exception as exp
:
3522 raise vimconn
.VimConnException(
3523 "Exception {} occurred while generating VLAN ID.".format(exp
)
3526 raise vimconn
.VimConnConflictException(
3527 "Unable to create the SRIOV VLAN network. All given Vlan IDs {} are in use.".format(
3528 self
.config
.get("dataplane_net_vlan_range")
3532 def _generate_multisegment_vlanID(self
):
3534 Method to get unused vlanID
3542 networks
= self
.get_network_list()
3543 for net
in networks
:
3544 if net
.get("provider:network_type") == "vlan" and net
.get(
3545 "provider:segmentation_id"
3547 usedVlanIDs
.append(net
.get("provider:segmentation_id"))
3548 elif net
.get("segments"):
3549 for segment
in net
.get("segments"):
3550 if segment
.get("provider:network_type") == "vlan" and segment
.get(
3551 "provider:segmentation_id"
3553 usedVlanIDs
.append(segment
.get("provider:segmentation_id"))
3555 used_vlanIDs
= set(usedVlanIDs
)
3557 # find unused VLAN ID
3558 for vlanID_range
in self
.config
.get("multisegment_vlan_range"):
3560 start_vlanid
, end_vlanid
= map(
3561 int, vlanID_range
.replace(" ", "").split("-")
3564 for vlanID
in range(start_vlanid
, end_vlanid
+ 1):
3565 if vlanID
not in used_vlanIDs
:
3567 except Exception as exp
:
3568 raise vimconn
.VimConnException(
3569 "Exception {} occurred while generating VLAN ID.".format(exp
)
3572 raise vimconn
.VimConnConflictException(
3573 "Unable to create the VLAN segment. All VLAN IDs {} are in use.".format(
3574 self
.config
.get("multisegment_vlan_range")
3578 def _validate_vlan_ranges(self
, input_vlan_range
, text_vlan_range
):
3580 Method to validate user given vlanID ranges
3584 for vlanID_range
in input_vlan_range
:
3585 vlan_range
= vlanID_range
.replace(" ", "")
3587 vlanID_pattern
= r
"(\d)*-(\d)*$"
3588 match_obj
= re
.match(vlanID_pattern
, vlan_range
)
3590 raise vimconn
.VimConnConflictException(
3591 "Invalid VLAN range for {}: {}.You must provide "
3592 "'{}' in format [start_ID - end_ID].".format(
3593 text_vlan_range
, vlanID_range
, text_vlan_range
3597 start_vlanid
, end_vlanid
= map(int, vlan_range
.split("-"))
3598 if start_vlanid
<= 0:
3599 raise vimconn
.VimConnConflictException(
3600 "Invalid VLAN range for {}: {}. Start ID can not be zero. For VLAN "
3601 "networks valid IDs are 1 to 4094 ".format(
3602 text_vlan_range
, vlanID_range
3606 if end_vlanid
> 4094:
3607 raise vimconn
.VimConnConflictException(
3608 "Invalid VLAN range for {}: {}. End VLAN ID can not be "
3609 "greater than 4094. For VLAN networks valid IDs are 1 to 4094 ".format(
3610 text_vlan_range
, vlanID_range
3614 if start_vlanid
> end_vlanid
:
3615 raise vimconn
.VimConnConflictException(
3616 "Invalid VLAN range for {}: {}. You must provide '{}'"
3617 " in format start_ID - end_ID and start_ID < end_ID ".format(
3618 text_vlan_range
, vlanID_range
, text_vlan_range
def get_hosts_info(self):
    """Get the information of deployed hosts
    Returns the hosts content"""
    # NOTE(review): the guard line before the print was lost in this copy;
    # reconstructed as the usual debug flag check — confirm upstream.
    if self.debug:
        print("osconnector: Getting Host info from VIM")

    try:
        self._reload_connection()
        # One plain dict per hypervisor reported by nova.
        host_dicts = [hype.to_dict() for hype in self.nova.hypervisors.list()]

        return 1, {"hosts": host_dicts}
    except nvExceptions.NotFound as e:
        error_value = -vimconn.HTTP_Not_Found
        error_text = str(e) if len(e.args) == 0 else str(e.args[0])
    except (ksExceptions.ClientException, nvExceptions.ClientException) as e:
        error_value = -vimconn.HTTP_Bad_Request
        error_text = (
            type(e).__name__
            + ": "
            + (str(e) if len(e.args) == 0 else str(e.args[0]))
        )

    # TODO insert exception vimconn.HTTP_Unauthorized
    # if reaching here is because an exception
    self.logger.debug("get_hosts_info " + error_text)

    return error_value, error_text
def get_hosts(self, vim_tenant):
    """Get the hosts and deployed instances
    Returns the hosts content"""
    ok, hype_dict = self.get_hosts_info()

    # NOTE(review): the early-return guard was lost in this copy; an error
    # pair from get_hosts_info is propagated unchanged — confirm upstream.
    if ok < 0:
        return ok, hype_dict

    hypervisors = hype_dict["hosts"]

    try:
        servers = self.nova.servers.list()
        # Attach to each hypervisor the ids of the servers running on it.
        for hype in hypervisors:
            for server in servers:
                if (
                    server.to_dict()["OS-EXT-SRV-ATTR:hypervisor_hostname"]
                    == hype["hypervisor_hostname"]
                ):
                    if "vm" in hype:
                        hype["vm"].append(server.id)
                    else:
                        hype["vm"] = [server.id]

        return 1, hype_dict
    except nvExceptions.NotFound as e:
        error_value = -vimconn.HTTP_Not_Found
        error_text = str(e) if len(e.args) == 0 else str(e.args[0])
    except (ksExceptions.ClientException, nvExceptions.ClientException) as e:
        error_value = -vimconn.HTTP_Bad_Request
        error_text = (
            type(e).__name__
            + ": "
            + (str(e) if len(e.args) == 0 else str(e.args[0]))
        )

    # TODO insert exception vimconn.HTTP_Unauthorized
    # if reaching here is because an exception
    self.logger.debug("get_hosts " + error_text)

    return error_value, error_text
def new_classification(self, name, ctype, definition):
    """Create a Neutron flow classifier for an OSM (Traffic) Classification.

    Returns the id of the created flow classifier.
    """
    self.logger.debug(
        "Adding a new (Traffic) Classification to VIM, named %s", name
    )

    try:
        self._reload_connection()

        if ctype not in supportedClassificationTypes:
            raise vimconn.VimConnNotSupportedException(
                "OpenStack VIM connector does not support provided "
                "Classification Type {}, supported ones are: {}".format(
                    ctype, supportedClassificationTypes
                )
            )

        if not self._validate_classification(ctype, definition):
            raise vimconn.VimConnException(
                "Incorrect Classification definition for the type specified."
            )

        classification_dict = definition
        classification_dict["name"] = name

        self.logger.info(
            "Adding a new (Traffic) Classification to VIM, named {} and {}.".format(
                name, classification_dict
            )
        )
        new_class = self.neutron.create_sfc_flow_classifier(
            {"flow_classifier": classification_dict}
        )

        return new_class["flow_classifier"]["id"]
    except (
        neExceptions.ConnectionFailed,
        ksExceptions.ClientException,
        neExceptions.NeutronException,
        ConnectionError,
    ) as e:
        self.logger.error("Creation of Classification failed.")
        self._format_exception(e)
def get_classification(self, class_id):
    """Return the single Classification whose id is *class_id*.

    Raises:
        VimConnNotFoundException: no Classification matches.
        VimConnConflictException: more than one Classification matches.
    """
    self.logger.debug(" Getting Classification %s from VIM", class_id)
    matches = self.get_classification_list({"id": class_id})

    if not matches:
        raise vimconn.VimConnNotFoundException(
            "Classification '{}' not found".format(class_id)
        )

    if len(matches) > 1:
        raise vimconn.VimConnConflictException(
            "Found more than one Classification with this criteria"
        )

    return matches[0]
def get_classification_list(self, filter_dict=None):
    """List Neutron flow classifiers matching *filter_dict*, translated to
    OSM/mano format.

    Bug fix: the default was the shared mutable literal ``{}``; replaced by
    None (behavior-compatible for all callers).
    """
    if filter_dict is None:
        filter_dict = {}

    self.logger.debug(
        "Getting Classifications from VIM filter: '%s'", str(filter_dict)
    )

    try:
        filter_dict_os = filter_dict.copy()
        self._reload_connection()

        # Keystone v3 names the field project_id rather than tenant_id.
        if self.api_version3 and "tenant_id" in filter_dict_os:
            filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")

        classification_dict = self.neutron.list_sfc_flow_classifiers(
            **filter_dict_os
        )
        classification_list = classification_dict["flow_classifiers"]
        # Translate OpenStack field names to mano field names in place.
        self.__classification_os2mano(classification_list)

        return classification_list
    except (
        neExceptions.ConnectionFailed,
        ksExceptions.ClientException,
        neExceptions.NeutronException,
        ConnectionError,
    ) as e:
        self._format_exception(e)
def delete_classification(self, class_id):
    """Delete flow classifier *class_id* from the VIM; returns the id.

    Fix: the except tuple listed neExceptions.NeutronException twice; the
    duplicate entry is removed (no behavior change).
    """
    self.logger.debug("Deleting Classification '%s' from VIM", class_id)

    try:
        self._reload_connection()
        self.neutron.delete_sfc_flow_classifier(class_id)

        return class_id
    except (
        neExceptions.ConnectionFailed,
        neExceptions.NeutronException,
        ksExceptions.ClientException,
        ConnectionError,
    ) as e:
        self._format_exception(e)
def new_sfi(self, name, ingress_ports, egress_ports, sfc_encap=True):
    """Create a Neutron port pair (OSM Service Function Instance).

    Exactly one ingress and one egress port are supported.
    Returns the id of the new port pair.
    """
    self.logger.debug(
        "Adding a new Service Function Instance to VIM, named '%s'", name
    )

    new_sfi = None

    try:
        self._reload_connection()
        # "nsh" selects full SFC encapsulation; None disables correlation.
        correlation = "nsh" if sfc_encap else None

        if len(ingress_ports) != 1:
            raise vimconn.VimConnNotSupportedException(
                "OpenStack VIM connector can only have 1 ingress port per SFI"
            )

        if len(egress_ports) != 1:
            raise vimconn.VimConnNotSupportedException(
                "OpenStack VIM connector can only have 1 egress port per SFI"
            )

        sfi_dict = {
            "name": name,
            "ingress": ingress_ports[0],
            "egress": egress_ports[0],
            "service_function_parameters": {"correlation": correlation},
        }
        self.logger.info("Adding a new SFI to VIM, {}.".format(sfi_dict))
        new_sfi = self.neutron.create_sfc_port_pair({"port_pair": sfi_dict})

        return new_sfi["port_pair"]["id"]
    except (
        neExceptions.ConnectionFailed,
        ksExceptions.ClientException,
        neExceptions.NeutronException,
        ConnectionError,
    ) as e:
        # Best-effort rollback of a half-created port pair.
        if new_sfi:
            try:
                self.neutron.delete_sfc_port_pair(new_sfi["port_pair"]["id"])
            except Exception:
                self.logger.error(
                    "Creation of Service Function Instance failed, with "
                    "subsequent deletion failure as well."
                )

        self._format_exception(e)
def get_sfi(self, sfi_id):
    """Return the single Service Function Instance whose id is *sfi_id*.

    Raises:
        VimConnNotFoundException: no SFI matches.
        VimConnConflictException: more than one SFI matches.
    """
    self.logger.debug("Getting Service Function Instance %s from VIM", sfi_id)
    matches = self.get_sfi_list({"id": sfi_id})

    if not matches:
        raise vimconn.VimConnNotFoundException(
            "Service Function Instance '{}' not found".format(sfi_id)
        )

    if len(matches) > 1:
        raise vimconn.VimConnConflictException(
            "Found more than one Service Function Instance with this criteria"
        )

    return matches[0]
def get_sfi_list(self, filter_dict=None):
    """List Neutron port pairs (SFIs) matching *filter_dict*, translated to
    OSM/mano format.

    Bug fix: the default was the shared mutable literal ``{}``; replaced by
    None (behavior-compatible for all callers).
    """
    if filter_dict is None:
        filter_dict = {}

    self.logger.debug(
        "Getting Service Function Instances from VIM filter: '%s'", str(filter_dict)
    )

    try:
        self._reload_connection()
        filter_dict_os = filter_dict.copy()

        # Keystone v3 names the field project_id rather than tenant_id.
        if self.api_version3 and "tenant_id" in filter_dict_os:
            filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")

        sfi_dict = self.neutron.list_sfc_port_pairs(**filter_dict_os)
        sfi_list = sfi_dict["port_pairs"]
        # Translate OpenStack field names to mano field names in place.
        self.__sfi_os2mano(sfi_list)

        return sfi_list
    except (
        neExceptions.ConnectionFailed,
        ksExceptions.ClientException,
        neExceptions.NeutronException,
        ConnectionError,
    ) as e:
        self._format_exception(e)
def delete_sfi(self, sfi_id):
    """Delete port pair (SFI) *sfi_id* from the VIM; returns the id.

    Fix: the except tuple listed neExceptions.NeutronException twice; the
    duplicate entry is removed (no behavior change).
    """
    self.logger.debug("Deleting Service Function Instance '%s' from VIM", sfi_id)

    try:
        self._reload_connection()
        self.neutron.delete_sfc_port_pair(sfi_id)

        return sfi_id
    except (
        neExceptions.ConnectionFailed,
        neExceptions.NeutronException,
        ksExceptions.ClientException,
        ConnectionError,
    ) as e:
        self._format_exception(e)
def new_sf(self, name, sfis, sfc_encap=True):
    """Create a Neutron port pair group (OSM Service Function) from the given
    list of SFI (port pair) ids.

    All member SFIs must share the same SFC encapsulation setting.
    Returns the id of the new port pair group.
    """
    self.logger.debug("Adding a new Service Function to VIM, named '%s'", name)

    new_sf = None

    try:
        self._reload_connection()

        # Reject mixed encapsulation inside one SF.
        for instance in sfis:
            sfi = self.get_sfi(instance)

            if sfi.get("sfc_encap") != sfc_encap:
                raise vimconn.VimConnNotSupportedException(
                    "OpenStack VIM connector requires all SFIs of the "
                    "same SF to share the same SFC Encapsulation"
                )

        sf_dict = {"name": name, "port_pairs": sfis}

        self.logger.info("Adding a new SF to VIM, {}.".format(sf_dict))
        new_sf = self.neutron.create_sfc_port_pair_group(
            {"port_pair_group": sf_dict}
        )

        return new_sf["port_pair_group"]["id"]
    except (
        neExceptions.ConnectionFailed,
        ksExceptions.ClientException,
        neExceptions.NeutronException,
        ConnectionError,
    ) as e:
        # Best-effort rollback of a half-created port pair group.
        if new_sf:
            try:
                new_sf_id = new_sf.get("port_pair_group").get("id")
                self.neutron.delete_sfc_port_pair_group(new_sf_id)
            except Exception:
                self.logger.error(
                    "Creation of Service Function failed, with "
                    "subsequent deletion failure as well."
                )

        self._format_exception(e)
def get_sf(self, sf_id):
    """Return the single Service Function whose id is *sf_id*.

    Raises:
        VimConnNotFoundException: no SF matches.
        VimConnConflictException: more than one SF matches.
    """
    self.logger.debug("Getting Service Function %s from VIM", sf_id)
    matches = self.get_sf_list({"id": sf_id})

    if not matches:
        raise vimconn.VimConnNotFoundException(
            "Service Function '{}' not found".format(sf_id)
        )

    if len(matches) > 1:
        raise vimconn.VimConnConflictException(
            "Found more than one Service Function with this criteria"
        )

    return matches[0]
def get_sf_list(self, filter_dict=None):
    """List Neutron port pair groups (SFs) matching *filter_dict*, translated
    to OSM/mano format.

    Bug fix: the default was the shared mutable literal ``{}``; replaced by
    None (behavior-compatible for all callers).
    """
    if filter_dict is None:
        filter_dict = {}

    self.logger.debug(
        "Getting Service Function from VIM filter: '%s'", str(filter_dict)
    )

    try:
        self._reload_connection()
        filter_dict_os = filter_dict.copy()

        # Keystone v3 names the field project_id rather than tenant_id.
        if self.api_version3 and "tenant_id" in filter_dict_os:
            filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")

        sf_dict = self.neutron.list_sfc_port_pair_groups(**filter_dict_os)
        sf_list = sf_dict["port_pair_groups"]
        # Translate OpenStack field names to mano field names in place.
        self.__sf_os2mano(sf_list)

        return sf_list
    except (
        neExceptions.ConnectionFailed,
        ksExceptions.ClientException,
        neExceptions.NeutronException,
        ConnectionError,
    ) as e:
        self._format_exception(e)
def delete_sf(self, sf_id):
    """Delete port pair group (SF) *sf_id* from the VIM; returns the id.

    Fix: the except tuple listed neExceptions.NeutronException twice; the
    duplicate entry is removed (no behavior change).
    """
    self.logger.debug("Deleting Service Function '%s' from VIM", sf_id)

    try:
        self._reload_connection()
        self.neutron.delete_sfc_port_pair_group(sf_id)

        return sf_id
    except (
        neExceptions.ConnectionFailed,
        neExceptions.NeutronException,
        ksExceptions.ClientException,
        ConnectionError,
    ) as e:
        self._format_exception(e)
def new_sfp(self, name, classifications, sfs, sfc_encap=True, spi=None):
    """Create a Neutron port chain (OSM Service Function Path).

    Args:
        classifications: flow classifier ids for the chain.
        sfs: port pair group ids, in traversal order.
        sfc_encap: True selects "nsh" correlation, False legacy "mpls".
        spi: optional chain id to request.

    Returns the id of the new port chain.
    """
    self.logger.debug("Adding a new Service Function Path to VIM, named '%s'", name)

    new_sfp = None

    try:
        self._reload_connection()
        # In networking-sfc the MPLS encapsulation is legacy
        # should be used when no full SFC Encapsulation is intended
        correlation = "nsh" if sfc_encap else "mpls"

        sfp_dict = {
            "name": name,
            "flow_classifiers": classifications,
            "port_pair_groups": sfs,
            "chain_parameters": {"correlation": correlation},
        }

        if spi:
            sfp_dict["chain_id"] = spi

        self.logger.info("Adding a new SFP to VIM, {}.".format(sfp_dict))
        new_sfp = self.neutron.create_sfc_port_chain({"port_chain": sfp_dict})

        return new_sfp["port_chain"]["id"]
    except (
        neExceptions.ConnectionFailed,
        ksExceptions.ClientException,
        neExceptions.NeutronException,
        ConnectionError,
    ) as e:
        # Best-effort rollback of a half-created port chain.
        if new_sfp:
            try:
                new_sfp_id = new_sfp.get("port_chain").get("id")
                self.neutron.delete_sfc_port_chain(new_sfp_id)
            except Exception:
                self.logger.error(
                    "Creation of Service Function Path failed, with "
                    "subsequent deletion failure as well."
                )

        self._format_exception(e)
def get_sfp(self, sfp_id):
    """Return the single Service Function Path whose id is *sfp_id*.

    Raises:
        VimConnNotFoundException: no SFP matches.
        VimConnConflictException: more than one SFP matches.
    """
    self.logger.debug(" Getting Service Function Path %s from VIM", sfp_id)
    matches = self.get_sfp_list({"id": sfp_id})

    if not matches:
        raise vimconn.VimConnNotFoundException(
            "Service Function Path '{}' not found".format(sfp_id)
        )

    if len(matches) > 1:
        raise vimconn.VimConnConflictException(
            "Found more than one Service Function Path with this criteria"
        )

    return matches[0]
def get_sfp_list(self, filter_dict=None):
    """List Neutron port chains (SFPs) matching *filter_dict*, translated to
    OSM/mano format.

    Bug fix: the default was the shared mutable literal ``{}``; replaced by
    None (behavior-compatible for all callers).
    """
    if filter_dict is None:
        filter_dict = {}

    self.logger.debug(
        "Getting Service Function Paths from VIM filter: '%s'", str(filter_dict)
    )

    try:
        self._reload_connection()
        filter_dict_os = filter_dict.copy()

        # Keystone v3 names the field project_id rather than tenant_id.
        if self.api_version3 and "tenant_id" in filter_dict_os:
            filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")

        sfp_dict = self.neutron.list_sfc_port_chains(**filter_dict_os)
        sfp_list = sfp_dict["port_chains"]
        # Translate OpenStack field names to mano field names in place.
        self.__sfp_os2mano(sfp_list)

        return sfp_list
    except (
        neExceptions.ConnectionFailed,
        ksExceptions.ClientException,
        neExceptions.NeutronException,
        ConnectionError,
    ) as e:
        self._format_exception(e)
def delete_sfp(self, sfp_id):
    """Delete port chain (SFP) *sfp_id* from the VIM; returns the id.

    Fix: the except tuple listed neExceptions.NeutronException twice; the
    duplicate entry is removed (no behavior change).
    """
    self.logger.debug("Deleting Service Function Path '%s' from VIM", sfp_id)

    try:
        self._reload_connection()
        self.neutron.delete_sfc_port_chain(sfp_id)

        return sfp_id
    except (
        neExceptions.ConnectionFailed,
        neExceptions.NeutronException,
        ksExceptions.ClientException,
        ConnectionError,
    ) as e:
        self._format_exception(e)
def refresh_sfps_status(self, sfp_list):
    """Get the status of the service function path
    Params: the list of sfp identifiers
    Returns a dictionary with:
        vm_id: #VIM id of this service function path
        status: #Mandatory. Text with one of:
            # DELETED (not found at vim)
            # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
            # OTHER (Vim reported other status not understood)
            # ERROR (VIM indicates an ERROR status)
            # ACTIVE
            # CREATING (on building process)
        error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
        vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
    """
    sfp_dict = {}
    self.logger.debug(
        "refresh_sfps status: Getting tenant SFP information from VIM"
    )

    for sfp_id in sfp_list:
        sfp = {}

        try:
            sfp_vim = self.get_sfp(sfp_id)

            # NOTE(review): the guard line was lost in this copy; reconstructed
            # as the port chain "spi" field — confirm against upstream.
            if sfp_vim["spi"]:
                sfp["status"] = vmStatus2manoFormat["ACTIVE"]
            else:
                sfp["status"] = "OTHER"
                sfp["error_msg"] = "VIM status reported " + sfp["status"]

            sfp["vim_info"] = self.serialize(sfp_vim)

            if sfp_vim.get("fault"):
                sfp["error_msg"] = str(sfp_vim["fault"])
        except vimconn.VimConnNotFoundException as e:
            self.logger.error("Exception getting sfp status: %s", str(e))
            sfp["status"] = "DELETED"
            sfp["error_msg"] = str(e)
        except vimconn.VimConnException as e:
            self.logger.error("Exception getting sfp status: %s", str(e))
            sfp["status"] = "VIM_ERROR"
            sfp["error_msg"] = str(e)

        sfp_dict[sfp_id] = sfp

    return sfp_dict
def refresh_sfis_status(self, sfi_list):
    """Get the status of the service function instances
    Params: the list of sfi identifiers
    Returns a dictionary with:
        vm_id: #VIM id of this service function instance
        status: #Mandatory. Text with one of:
            # DELETED (not found at vim)
            # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
            # OTHER (Vim reported other status not understood)
            # ERROR (VIM indicates an ERROR status)
            # ACTIVE
            # CREATING (on building process)
        error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
        vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
    """
    sfi_dict = {}
    self.logger.debug(
        "refresh_sfis status: Getting tenant sfi information from VIM"
    )

    for sfi_id in sfi_list:
        sfi = {}

        try:
            sfi_vim = self.get_sfi(sfi_id)

            # NOTE(review): the guard line was lost in this copy; reconstructed
            # as the port pair parameters field — confirm against upstream.
            if sfi_vim["service_function_parameters"]:
                sfi["status"] = vmStatus2manoFormat["ACTIVE"]
            else:
                sfi["status"] = "OTHER"
                sfi["error_msg"] = "VIM status reported " + sfi["status"]

            sfi["vim_info"] = self.serialize(sfi_vim)

            if sfi_vim.get("fault"):
                sfi["error_msg"] = str(sfi_vim["fault"])
        except vimconn.VimConnNotFoundException as e:
            self.logger.error("Exception getting sfi status: %s", str(e))
            sfi["status"] = "DELETED"
            sfi["error_msg"] = str(e)
        except vimconn.VimConnException as e:
            self.logger.error("Exception getting sfi status: %s", str(e))
            sfi["status"] = "VIM_ERROR"
            sfi["error_msg"] = str(e)

        sfi_dict[sfi_id] = sfi

    return sfi_dict
def refresh_sfs_status(self, sf_list):
    """Get the status of the service functions
    Params: the list of sf identifiers
    Returns a dictionary with:
        vm_id: #VIM id of this service function
        status: #Mandatory. Text with one of:
            # DELETED (not found at vim)
            # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
            # OTHER (Vim reported other status not understood)
            # ERROR (VIM indicates an ERROR status)
            # ACTIVE
            # CREATING (on building process)
        error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
        vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
    """
    sf_dict = {}
    self.logger.debug("refresh_sfs status: Getting tenant sf information from VIM")

    for sf_id in sf_list:
        sf = {}

        try:
            sf_vim = self.get_sf(sf_id)

            # NOTE(review): the guard line was lost in this copy; reconstructed
            # as the port pair group membership field — confirm upstream.
            if sf_vim["port_pairs"]:
                sf["status"] = vmStatus2manoFormat["ACTIVE"]
            else:
                sf["status"] = "OTHER"
                # NOTE(review): this reads sf_vim["status"], unlike the sibling
                # refresh_* methods which reuse their own "status" — kept as
                # found in the source; likely a latent KeyError to confirm.
                sf["error_msg"] = "VIM status reported " + sf_vim["status"]

            sf["vim_info"] = self.serialize(sf_vim)

            if sf_vim.get("fault"):
                sf["error_msg"] = str(sf_vim["fault"])
        except vimconn.VimConnNotFoundException as e:
            self.logger.error("Exception getting sf status: %s", str(e))
            sf["status"] = "DELETED"
            sf["error_msg"] = str(e)
        except vimconn.VimConnException as e:
            self.logger.error("Exception getting sf status: %s", str(e))
            sf["status"] = "VIM_ERROR"
            sf["error_msg"] = str(e)

        sf_dict[sf_id] = sf

    return sf_dict
def refresh_classifications_status(self, classification_list):
    """Get the status of the classifications
    Params: the list of classification identifiers
    Returns a dictionary with:
        vm_id: #VIM id of this classifier
        status: #Mandatory. Text with one of:
            # DELETED (not found at vim)
            # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
            # OTHER (Vim reported other status not understood)
            # ERROR (VIM indicates an ERROR status)
            # ACTIVE
            # CREATING (on building process)
        error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
        vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
    """
    classification_dict = {}
    self.logger.debug(
        "refresh_classifications status: Getting tenant classification information from VIM"
    )

    for classification_id in classification_list:
        classification = {}

        try:
            classification_vim = self.get_classification(classification_id)

            if classification_vim:
                classification["status"] = vmStatus2manoFormat["ACTIVE"]
            else:
                classification["status"] = "OTHER"
                classification["error_msg"] = (
                    "VIM status reported " + classification["status"]
                )

            classification["vim_info"] = self.serialize(classification_vim)

            if classification_vim.get("fault"):
                classification["error_msg"] = str(classification_vim["fault"])
        except vimconn.VimConnNotFoundException as e:
            self.logger.error("Exception getting classification status: %s", str(e))
            classification["status"] = "DELETED"
            classification["error_msg"] = str(e)
        except vimconn.VimConnException as e:
            self.logger.error("Exception getting classification status: %s", str(e))
            classification["status"] = "VIM_ERROR"
            classification["error_msg"] = str(e)

        classification_dict[classification_id] = classification

    return classification_dict
@catch_any_exception
def new_affinity_group(self, affinity_group_data):
    """Adds a server group to VIM
    affinity_group_data contains a dictionary with information, keys:
        name: name in VIM for the server group
        type: affinity or anti-affinity
        scope: Only nfvi-node allowed
    Returns the server group identifier"""
    self.logger.debug("Adding Server Group '%s'", str(affinity_group_data))
    group_name = affinity_group_data["name"]
    group_policy = affinity_group_data["type"]
    self._reload_connection()
    created_group = self.nova.server_groups.create(group_name, group_policy)
    return created_group.id
@catch_any_exception
def get_affinity_group(self, affinity_group_id):
    """Obtain server group details from the VIM. Returns the server group details as a dict"""
    # Fix: the log message previously said "Getting flavor" — a copy-paste
    # error, since this method looks up a server group.
    self.logger.debug("Getting server group '%s'", affinity_group_id)
    self._reload_connection()
    server_group = self.nova.server_groups.find(id=affinity_group_id)
    return server_group.to_dict()
@catch_any_exception
def delete_affinity_group(self, affinity_group_id):
    """Deletes a server group from the VIM. Returns the old affinity_group_id"""
    # Fix: the log message previously said "Getting server group" even though
    # this method deletes it.
    self.logger.debug("Deleting server group '%s'", affinity_group_id)
    self._reload_connection()
    self.nova.server_groups.delete(affinity_group_id)
    return affinity_group_id
@catch_any_exception
def get_vdu_state(self, vm_id, host_is_required=False) -> list:
    """Getting the state of a VDU.
    Args:
        vm_id   (str): ID of an instance
        host_is_required    (Boolean): If the VIM account is non-admin, host info does not appear in server_dict
            and if this is set to True, it raises KeyError.
    Returns:
        vdu_data    (list): VDU details including state, flavor, host_info, AZ
    """
    self.logger.debug("Getting the status of VM")
    self.logger.debug("VIM VM ID %s", vm_id)
    self._reload_connection()
    server_dict = self._find_nova_server(vm_id)
    srv_attr = "OS-EXT-SRV-ATTR:host"
    # Strict lookup (KeyError) only when the caller insists on host info.
    host_info = (
        server_dict[srv_attr] if host_is_required else server_dict.get(srv_attr)
    )
    vdu_data = [
        server_dict["status"],
        server_dict["flavor"]["id"],
        host_info,
        server_dict["OS-EXT-AZ:availability_zone"],
    ]
    self.logger.debug("vdu_data %s", vdu_data)
    return vdu_data
def check_compute_availability(self, host, server_flavor_details):
    """Return *host* if its hypervisor has strictly more free RAM, free disk
    and free vCPUs than required by *server_flavor_details* (a list of
    [ram, disk, vcpus]); otherwise return None (implicitly).

    Fix: removed a pointless json.dumps/json.loads round-trip — the value
    returned by to_dict() is already a plain dict.
    """
    self._reload_connection()
    hypervisor_search = self.nova.hypervisors.search(
        hypervisor_match=host, servers=True
    )

    for hypervisor in hypervisor_search:
        hypervisor_id = hypervisor.to_dict()["id"]
        hypervisor_details = self.nova.hypervisors.get(hypervisor=hypervisor_id)
        hypervisor_dict = hypervisor_details.to_dict()
        # Free capacity in the same order as server_flavor_details.
        resources_available = [
            hypervisor_dict["free_ram_mb"],
            hypervisor_dict["disk_available_least"],
            hypervisor_dict["vcpus"] - hypervisor_dict["vcpus_used"],
        ]
        compute_available = all(
            x > y for x, y in zip(resources_available, server_flavor_details)
        )

        if compute_available:
            return host
def check_availability_zone(
    self, old_az, server_flavor_details, old_host, host=None
):
    """Check whether availability zone *old_az* can receive the instance.

    Args:
        old_az: availability zone the instance currently runs in.
        server_flavor_details: required [ram, disk, vcpus].
        old_host: current hypervisor hostname (excluded as a target).
        host: optional explicit target host.

    Returns:
        dict {"zone_check": bool, "compute_availability": host-or-None}.

    Fix: removed a pointless json.dumps/json.loads round-trip — the value
    returned by to_dict() is already a plain dict.
    """
    self._reload_connection()
    az_check = {"zone_check": False, "compute_availability": None}
    aggregates_list = self.nova.aggregates.list()

    for aggregate in aggregates_list:
        aggregate_json = aggregate.to_dict()

        if aggregate_json["availability_zone"] == old_az:
            hosts_list = aggregate_json["hosts"]

            if host is not None:
                # Explicit target: verify membership in the AZ and capacity.
                if host in hosts_list:
                    az_check["zone_check"] = True
                    available_compute_id = self.check_compute_availability(
                        host, server_flavor_details
                    )

                    if available_compute_id is not None:
                        az_check["compute_availability"] = available_compute_id
            else:
                # No explicit target: pick any other host in the AZ with
                # enough free capacity.
                for check_host in hosts_list:
                    if check_host != old_host:
                        available_compute_id = self.check_compute_availability(
                            check_host, server_flavor_details
                        )

                        if available_compute_id is not None:
                            az_check["zone_check"] = True
                            az_check["compute_availability"] = available_compute_id
                            break
                else:
                    # Loop exhausted without break: zone exists but no
                    # candidate host had capacity.
                    az_check["zone_check"] = True

    return az_check
@catch_any_exception
def migrate_instance(self, vm_id, compute_host=None):
    """Migrate a vdu
    Params:
        vm_id: ID of an instance
        compute_host: Host to migrate the vdu to
    """
    self._reload_connection()
    vm_state = False
    instance_state = self.get_vdu_state(vm_id, host_is_required=True)
    server_flavor_id = instance_state[1]
    server_hypervisor_name = instance_state[2]
    server_availability_zone = instance_state[3]
    # Resource demand of the instance: [ram, disk, vcpus].
    server_flavor = self.nova.flavors.find(id=server_flavor_id).to_dict()
    server_flavor_details = [
        server_flavor["ram"],
        server_flavor["disk"],
        server_flavor["vcpus"],
    ]

    if compute_host == server_hypervisor_name:
        raise vimconn.VimConnException(
            "Unable to migrate instance '{}' to the same host '{}'".format(
                vm_id, compute_host
            ),
            http_code=vimconn.HTTP_Bad_Request,
        )

    az_status = self.check_availability_zone(
        server_availability_zone,
        server_flavor_details,
        server_hypervisor_name,
        compute_host,
    )
    availability_zone_check = az_status["zone_check"]
    available_compute_id = az_status.get("compute_availability")

    if availability_zone_check is False:
        raise vimconn.VimConnException(
            "Unable to migrate instance '{}' to a different availability zone".format(
                vm_id
            ),
            http_code=vimconn.HTTP_Bad_Request,
        )

    if available_compute_id is not None:
        # disk_over_commit parameter for live_migrate method is not valid for Nova API version >= 2.25
        self.nova.servers.live_migrate(
            server=vm_id,
            host=available_compute_id,
            block_migration=True,
        )
        state = "MIGRATING"
        changed_compute_host = ""

        if state == "MIGRATING":
            vm_state = self.__wait_for_vm(vm_id, "ACTIVE")
            changed_compute_host = self.get_vdu_state(vm_id, host_is_required=True)[
                2
            ]

        if vm_state and changed_compute_host == available_compute_id:
            self.logger.debug(
                "Instance '{}' migrated to the new compute host '{}'".format(
                    vm_id, changed_compute_host
                )
            )

            return state, available_compute_id
        else:
            raise vimconn.VimConnException(
                "Migration Failed. Instance '{}' not moved to the new host {}".format(
                    vm_id, available_compute_id
                ),
                http_code=vimconn.HTTP_Bad_Request,
            )
    else:
        raise vimconn.VimConnException(
            "Compute '{}' not available or does not have enough resources to migrate the instance".format(
                available_compute_id
            ),
            http_code=vimconn.HTTP_Bad_Request,
        )
@catch_any_exception
def resize_instance(self, vm_id, new_flavor_id):
    """
    For resizing the vm based on the given
    flavor details
    Args:
        vm_id : ID of an instance
        new_flavor_id : Flavor id to be resized
    Returns the status of a resized instance
    """
    self._reload_connection()
    self.logger.debug("resize the flavor of an instance")
    instance_status, old_flavor_id, compute_host, az = self.get_vdu_state(vm_id)
    old_flavor_disk = self.nova.flavors.find(id=old_flavor_id).to_dict()["disk"]
    new_flavor_disk = self.nova.flavors.find(id=new_flavor_id).to_dict()["disk"]

    if instance_status == "ACTIVE" or instance_status == "SHUTOFF":
        # Nova cannot shrink the root disk.
        if old_flavor_disk > new_flavor_disk:
            raise nvExceptions.BadRequest(
                400,
                message="Server disk resize failed. Resize to lower disk flavor is not allowed",
            )
        else:
            self.nova.servers.resize(server=vm_id, flavor=new_flavor_id)
            vm_state = self.__wait_for_vm(vm_id, "VERIFY_RESIZE")

            if vm_state:
                instance_resized_status = self.confirm_resize(
                    vm_id, instance_status
                )
                return instance_resized_status
            else:
                raise nvExceptions.BadRequest(
                    409,
                    message="Cannot 'resize' vm_state is in ERROR",
                )
    else:
        self.logger.debug("ERROR : Instance is not in ACTIVE or SHUTOFF state")
        raise nvExceptions.BadRequest(
            409,
            message="Cannot 'resize' instance while it is in vm_state resized",
        )
def confirm_resize(self, vm_id, instance_state):
    """
    Confirm the resize of an instance
    Args:
        vm_id: ID of an instance
        instance_state: state to wait for after confirmation
    Returns the resulting instance status string.
    """
    self._reload_connection()
    self.nova.servers.confirm_resize(server=vm_id)

    # If nova still reports VERIFY_RESIZE, wait until the instance settles
    # back into the expected state.
    if self.get_vdu_state(vm_id)[0] == "VERIFY_RESIZE":
        self.__wait_for_vm(vm_id, instance_state)

    instance_status = self.get_vdu_state(vm_id)[0]

    return instance_status
4565 def get_monitoring_data(self
):
4567 self
.logger
.debug("Getting servers and ports data from Openstack VIMs.")
4568 self
._reload
_connection
()
4569 all_servers
= self
.nova
.servers
.list(detailed
=True)
4571 for server
in all_servers
:
4572 if server
.flavor
.get("original_name"):
4573 server
.flavor
["id"] = self
.nova
.flavors
.find(
4574 name
=server
.flavor
["original_name"]
4576 except nClient
.exceptions
.NotFound
as e
:
4577 self
.logger
.warning(str(e
.message
))
4578 all_ports
= self
.neutron
.list_ports()
4579 return all_servers
, all_ports
4580 except Exception as e
:
4581 raise vimconn
.VimConnException(
4582 f
"Exception in monitoring while getting VMs and ports status: {str(e)}"