1 # -*- coding: utf-8 -*-
4 # Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
5 # This file is part of openmano
8 # Licensed under the Apache License, Version 2.0 (the "License"); you may
9 # not use this file except in compliance with the License. You may obtain
10 # a copy of the License at
12 # http://www.apache.org/licenses/LICENSE-2.0
14 # Unless required by applicable law or agreed to in writing, software
15 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
16 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
17 # License for the specific language governing permissions and limitations
22 osconnector implements all the methods to interact with openstack using the python-neutronclient.
24 For the VNF forwarding graph, The OpenStack VIM connector calls the
25 networking-sfc Neutron extension methods, whose resources are mapped
26 to the VIM connector's SFC resources as follows:
27 - Classification (OSM) -> Flow Classifier (Neutron)
28 - Service Function Instance (OSM) -> Port Pair (Neutron)
29 - Service Function (OSM) -> Port Pair Group (Neutron)
30 - Service Function Path (OSM) -> Port Chain (Neutron)
34 from http
.client
import HTTPException
37 from pprint
import pformat
41 from typing
import Dict
, List
, Optional
, Tuple
43 from cinderclient
import client
as cClient
44 from glanceclient
import client
as glClient
45 import glanceclient
.exc
as gl1Exceptions
46 from keystoneauth1
import session
47 from keystoneauth1
.identity
import v2
, v3
48 import keystoneclient
.exceptions
as ksExceptions
49 import keystoneclient
.v2_0
.client
as ksClient_v2
50 import keystoneclient
.v3
.client
as ksClient_v3
52 from neutronclient
.common
import exceptions
as neExceptions
53 from neutronclient
.neutron
import client
as neClient
54 from novaclient
import client
as nClient
, exceptions
as nvExceptions
55 from osm_ro_plugin
import vimconn
56 from requests
.exceptions
import ConnectionError
__author__ = "Alfonso Tierno, Gerardo Garcia, Pablo Montes, xFlow Research, Igor D.C., Eduardo Sousa"
__date__ = "$22-sep-2017 23:59:59$"

"""contain the openstack virtual machine status to openmano status"""
# Map of OpenStack VM states -> openmano states; any state not listed here
# is reported as "OTHER" by the refresh methods.
vmStatus2manoFormat = {
    "ACTIVE": "ACTIVE",
    "PAUSED": "PAUSED",
    "SUSPENDED": "SUSPENDED",
    "SHUTOFF": "INACTIVE",
    "BUILD": "BUILD",
    "ERROR": "ERROR",
    "DELETED": "DELETED",
}

# Map of OpenStack network states -> openmano states (see refresh_nets_status)
netStatus2manoFormat = {
    "ACTIVE": "ACTIVE",
    "PAUSED": "PAUSED",
    "INACTIVE": "INACTIVE",
    "BUILD": "BUILD",
    "ERROR": "ERROR",
    "DELETED": "DELETED",
    "DOWN": "DOWN",
}

# Only the legacy flow classifier type of networking-sfc is supported
supportedClassificationTypes = ["legacy_flow_classifier"]

# global var to have a timeout creating and deleting volumes
volume_timeout = 1800
server_timeout = 1800
class SafeDumper(yaml.SafeDumper):
    """yaml.SafeDumper variant tolerant of OpenStack's dict subclasses."""

    def represent_data(self, data):
        # Openstack APIs use custom subclasses of dict and YAML safe dumper
        # is designed to not handle that (reference issue 142 of pyyaml).
        # A simple solution is to convert those items back to plain dicts
        # before the base dumper represents them.
        if isinstance(data, dict) and data.__class__ != dict:
            data = dict(data.items())

        return super().represent_data(data)
99 class vimconnector(vimconn
.VimConnector
):
114 """using common constructor parameters. In this case
115 'url' is the keystone authorization url,
116 'url_admin' is not use
118 api_version
= config
.get("APIversion")
120 if api_version
and api_version
not in ("v3.3", "v2.0", "2", "3"):
121 raise vimconn
.VimConnException(
122 "Invalid value '{}' for config:APIversion. "
123 "Allowed values are 'v3.3', 'v2.0', '2' or '3'".format(api_version
)
126 vim_type
= config
.get("vim_type")
128 if vim_type
and vim_type
not in ("vio", "VIO"):
129 raise vimconn
.VimConnException(
130 "Invalid value '{}' for config:vim_type."
131 "Allowed values are 'vio' or 'VIO'".format(vim_type
)
134 if config
.get("dataplane_net_vlan_range") is not None:
135 # validate vlan ranges provided by user
136 self
._validate
_vlan
_ranges
(
137 config
.get("dataplane_net_vlan_range"), "dataplane_net_vlan_range"
140 if config
.get("multisegment_vlan_range") is not None:
141 # validate vlan ranges provided by user
142 self
._validate
_vlan
_ranges
(
143 config
.get("multisegment_vlan_range"), "multisegment_vlan_range"
146 vimconn
.VimConnector
.__init
__(
160 if self
.config
.get("insecure") and self
.config
.get("ca_cert"):
161 raise vimconn
.VimConnException(
162 "options insecure and ca_cert are mutually exclusive"
167 if self
.config
.get("insecure"):
170 if self
.config
.get("ca_cert"):
171 self
.verify
= self
.config
.get("ca_cert")
174 raise TypeError("url param can not be NoneType")
176 self
.persistent_info
= persistent_info
177 self
.availability_zone
= persistent_info
.get("availability_zone", None)
178 self
.session
= persistent_info
.get("session", {"reload_client": True})
179 self
.my_tenant_id
= self
.session
.get("my_tenant_id")
180 self
.nova
= self
.session
.get("nova")
181 self
.neutron
= self
.session
.get("neutron")
182 self
.cinder
= self
.session
.get("cinder")
183 self
.glance
= self
.session
.get("glance")
184 # self.glancev1 = self.session.get("glancev1")
185 self
.keystone
= self
.session
.get("keystone")
186 self
.api_version3
= self
.session
.get("api_version3")
187 self
.vim_type
= self
.config
.get("vim_type")
190 self
.vim_type
= self
.vim_type
.upper()
192 if self
.config
.get("use_internal_endpoint"):
193 self
.endpoint_type
= "internalURL"
195 self
.endpoint_type
= None
197 logging
.getLogger("urllib3").setLevel(logging
.WARNING
)
198 logging
.getLogger("keystoneauth").setLevel(logging
.WARNING
)
199 logging
.getLogger("novaclient").setLevel(logging
.WARNING
)
200 self
.logger
= logging
.getLogger("ro.vim.openstack")
202 # allow security_groups to be a list or a single string
203 if isinstance(self
.config
.get("security_groups"), str):
204 self
.config
["security_groups"] = [self
.config
["security_groups"]]
206 self
.security_groups_id
= None
208 # ###### VIO Specific Changes #########
209 if self
.vim_type
== "VIO":
210 self
.logger
= logging
.getLogger("ro.vim.vio")
213 self
.logger
.setLevel(getattr(logging
, log_level
))
215 def __getitem__(self
, index
):
216 """Get individuals parameters.
218 if index
== "project_domain_id":
219 return self
.config
.get("project_domain_id")
220 elif index
== "user_domain_id":
221 return self
.config
.get("user_domain_id")
223 return vimconn
.VimConnector
.__getitem
__(self
, index
)
225 def __setitem__(self
, index
, value
):
226 """Set individuals parameters and it is marked as dirty so to force connection reload.
228 if index
== "project_domain_id":
229 self
.config
["project_domain_id"] = value
230 elif index
== "user_domain_id":
231 self
.config
["user_domain_id"] = value
233 vimconn
.VimConnector
.__setitem
__(self
, index
, value
)
235 self
.session
["reload_client"] = True
237 def serialize(self
, value
):
238 """Serialization of python basic types.
240 In the case value is not serializable a message will be logged and a
241 simple representation of the data that cannot be converted back to
244 if isinstance(value
, str):
249 value
, Dumper
=SafeDumper
, default_flow_style
=True, width
=256
251 except yaml
.representer
.RepresenterError
:
253 "The following entity cannot be serialized in YAML:\n\n%s\n\n",
260 def _reload_connection(self
):
261 """Called before any operation, it check if credentials has changed
262 Throw keystoneclient.apiclient.exceptions.AuthorizationFailure
264 # TODO control the timing and possible token timeout, but it seams that python client does this task for us :-)
265 if self
.session
["reload_client"]:
266 if self
.config
.get("APIversion"):
267 self
.api_version3
= (
268 self
.config
["APIversion"] == "v3.3"
269 or self
.config
["APIversion"] == "3"
271 else: # get from ending auth_url that end with v3 or with v2.0
272 self
.api_version3
= self
.url
.endswith("/v3") or self
.url
.endswith(
276 self
.session
["api_version3"] = self
.api_version3
278 if self
.api_version3
:
279 if self
.config
.get("project_domain_id") or self
.config
.get(
280 "project_domain_name"
282 project_domain_id_default
= None
284 project_domain_id_default
= "default"
286 if self
.config
.get("user_domain_id") or self
.config
.get(
289 user_domain_id_default
= None
291 user_domain_id_default
= "default"
295 password
=self
.passwd
,
296 project_name
=self
.tenant_name
,
297 project_id
=self
.tenant_id
,
298 project_domain_id
=self
.config
.get(
299 "project_domain_id", project_domain_id_default
301 user_domain_id
=self
.config
.get(
302 "user_domain_id", user_domain_id_default
304 project_domain_name
=self
.config
.get("project_domain_name"),
305 user_domain_name
=self
.config
.get("user_domain_name"),
311 password
=self
.passwd
,
312 tenant_name
=self
.tenant_name
,
313 tenant_id
=self
.tenant_id
,
316 sess
= session
.Session(auth
=auth
, verify
=self
.verify
)
317 # addedd region_name to keystone, nova, neutron and cinder to support distributed cloud for Wind River
318 # Titanium cloud and StarlingX
319 region_name
= self
.config
.get("region_name")
321 if self
.api_version3
:
322 self
.keystone
= ksClient_v3
.Client(
324 endpoint_type
=self
.endpoint_type
,
325 region_name
=region_name
,
328 self
.keystone
= ksClient_v2
.Client(
329 session
=sess
, endpoint_type
=self
.endpoint_type
332 self
.session
["keystone"] = self
.keystone
333 # In order to enable microversion functionality an explicit microversion must be specified in "config".
334 # This implementation approach is due to the warning message in
335 # https://developer.openstack.org/api-guide/compute/microversions.html
336 # where it is stated that microversion backwards compatibility is not guaranteed and clients should
337 # always require an specific microversion.
338 # To be able to use "device role tagging" functionality define "microversion: 2.32" in datacenter config
339 version
= self
.config
.get("microversion")
344 # addedd region_name to keystone, nova, neutron and cinder to support distributed cloud for Wind River
345 # Titanium cloud and StarlingX
346 self
.nova
= self
.session
["nova"] = nClient
.Client(
349 endpoint_type
=self
.endpoint_type
,
350 region_name
=region_name
,
352 self
.neutron
= self
.session
["neutron"] = neClient
.Client(
355 endpoint_type
=self
.endpoint_type
,
356 region_name
=region_name
,
359 if sess
.get_all_version_data(service_type
="volumev2"):
360 self
.cinder
= self
.session
["cinder"] = cClient
.Client(
363 endpoint_type
=self
.endpoint_type
,
364 region_name
=region_name
,
367 self
.cinder
= self
.session
["cinder"] = cClient
.Client(
370 endpoint_type
=self
.endpoint_type
,
371 region_name
=region_name
,
375 self
.my_tenant_id
= self
.session
["my_tenant_id"] = sess
.get_project_id()
377 self
.logger
.error("Cannot get project_id from session", exc_info
=True)
379 if self
.endpoint_type
== "internalURL":
380 glance_service_id
= self
.keystone
.services
.list(name
="glance")[0].id
381 glance_endpoint
= self
.keystone
.endpoints
.list(
382 glance_service_id
, interface
="internal"
385 glance_endpoint
= None
387 self
.glance
= self
.session
["glance"] = glClient
.Client(
388 2, session
=sess
, endpoint
=glance_endpoint
390 # using version 1 of glance client in new_image()
391 # self.glancev1 = self.session["glancev1"] = glClient.Client("1", session=sess,
392 # endpoint=glance_endpoint)
393 self
.session
["reload_client"] = False
394 self
.persistent_info
["session"] = self
.session
395 # add availablity zone info inside self.persistent_info
396 self
._set
_availablity
_zones
()
397 self
.persistent_info
["availability_zone"] = self
.availability_zone
398 # force to get again security_groups_ids next time they are needed
399 self
.security_groups_id
= None
401 def __net_os2mano(self
, net_list_dict
):
402 """Transform the net openstack format to mano format
403 net_list_dict can be a list of dict or a single dict"""
404 if type(net_list_dict
) is dict:
405 net_list_
= (net_list_dict
,)
406 elif type(net_list_dict
) is list:
407 net_list_
= net_list_dict
409 raise TypeError("param net_list_dict must be a list or a dictionary")
410 for net
in net_list_
:
411 if net
.get("provider:network_type") == "vlan":
414 net
["type"] = "bridge"
416 def __classification_os2mano(self
, class_list_dict
):
417 """Transform the openstack format (Flow Classifier) to mano format
418 (Classification) class_list_dict can be a list of dict or a single dict
420 if isinstance(class_list_dict
, dict):
421 class_list_
= [class_list_dict
]
422 elif isinstance(class_list_dict
, list):
423 class_list_
= class_list_dict
425 raise TypeError("param class_list_dict must be a list or a dictionary")
426 for classification
in class_list_
:
427 id = classification
.pop("id")
428 name
= classification
.pop("name")
429 description
= classification
.pop("description")
430 project_id
= classification
.pop("project_id")
431 tenant_id
= classification
.pop("tenant_id")
432 original_classification
= copy
.deepcopy(classification
)
433 classification
.clear()
434 classification
["ctype"] = "legacy_flow_classifier"
435 classification
["definition"] = original_classification
436 classification
["id"] = id
437 classification
["name"] = name
438 classification
["description"] = description
439 classification
["project_id"] = project_id
440 classification
["tenant_id"] = tenant_id
442 def __sfi_os2mano(self
, sfi_list_dict
):
443 """Transform the openstack format (Port Pair) to mano format (SFI)
444 sfi_list_dict can be a list of dict or a single dict
446 if isinstance(sfi_list_dict
, dict):
447 sfi_list_
= [sfi_list_dict
]
448 elif isinstance(sfi_list_dict
, list):
449 sfi_list_
= sfi_list_dict
451 raise TypeError("param sfi_list_dict must be a list or a dictionary")
453 for sfi
in sfi_list_
:
454 sfi
["ingress_ports"] = []
455 sfi
["egress_ports"] = []
457 if sfi
.get("ingress"):
458 sfi
["ingress_ports"].append(sfi
["ingress"])
460 if sfi
.get("egress"):
461 sfi
["egress_ports"].append(sfi
["egress"])
465 params
= sfi
.get("service_function_parameters")
469 correlation
= params
.get("correlation")
474 sfi
["sfc_encap"] = sfc_encap
475 del sfi
["service_function_parameters"]
477 def __sf_os2mano(self
, sf_list_dict
):
478 """Transform the openstack format (Port Pair Group) to mano format (SF)
479 sf_list_dict can be a list of dict or a single dict
481 if isinstance(sf_list_dict
, dict):
482 sf_list_
= [sf_list_dict
]
483 elif isinstance(sf_list_dict
, list):
484 sf_list_
= sf_list_dict
486 raise TypeError("param sf_list_dict must be a list or a dictionary")
489 del sf
["port_pair_group_parameters"]
490 sf
["sfis"] = sf
["port_pairs"]
493 def __sfp_os2mano(self
, sfp_list_dict
):
494 """Transform the openstack format (Port Chain) to mano format (SFP)
495 sfp_list_dict can be a list of dict or a single dict
497 if isinstance(sfp_list_dict
, dict):
498 sfp_list_
= [sfp_list_dict
]
499 elif isinstance(sfp_list_dict
, list):
500 sfp_list_
= sfp_list_dict
502 raise TypeError("param sfp_list_dict must be a list or a dictionary")
504 for sfp
in sfp_list_
:
505 params
= sfp
.pop("chain_parameters")
509 correlation
= params
.get("correlation")
514 sfp
["sfc_encap"] = sfc_encap
515 sfp
["spi"] = sfp
.pop("chain_id")
516 sfp
["classifications"] = sfp
.pop("flow_classifiers")
517 sfp
["service_functions"] = sfp
.pop("port_pair_groups")
519 # placeholder for now; read TODO note below
520 def _validate_classification(self
, type, definition
):
521 # only legacy_flow_classifier Type is supported at this point
523 # TODO(igordcard): this method should be an abstract method of an
524 # abstract Classification class to be implemented by the specific
525 # Types. Also, abstract vimconnector should call the validation
526 # method before the implemented VIM connectors are called.
528 def _format_exception(self
, exception
):
529 """Transform a keystone, nova, neutron exception into a vimconn exception discovering the cause"""
530 message_error
= str(exception
)
536 neExceptions
.NetworkNotFoundClient
,
537 nvExceptions
.NotFound
,
538 ksExceptions
.NotFound
,
539 gl1Exceptions
.HTTPNotFound
,
542 raise vimconn
.VimConnNotFoundException(
543 type(exception
).__name
__ + ": " + message_error
549 gl1Exceptions
.HTTPException
,
550 gl1Exceptions
.CommunicationError
,
552 ksExceptions
.ConnectionError
,
553 neExceptions
.ConnectionFailed
,
556 if type(exception
).__name
__ == "SSLError":
557 tip
= " (maybe option 'insecure' must be added to the VIM)"
559 raise vimconn
.VimConnConnectionException(
560 "Invalid URL or credentials{}: {}".format(tip
, message_error
)
566 nvExceptions
.BadRequest
,
567 ksExceptions
.BadRequest
,
570 if message_error
== "OS-EXT-SRV-ATTR:host":
571 tip
= " (If the user does not have non-admin credentials, this attribute will be missing)"
572 raise vimconn
.VimConnInsufficientCredentials(
573 type(exception
).__name
__ + ": " + message_error
+ tip
575 raise vimconn
.VimConnException(
576 type(exception
).__name
__ + ": " + message_error
582 nvExceptions
.ClientException
,
583 ksExceptions
.ClientException
,
584 neExceptions
.NeutronException
,
587 raise vimconn
.VimConnUnexpectedResponse(
588 type(exception
).__name
__ + ": " + message_error
590 elif isinstance(exception
, nvExceptions
.Conflict
):
591 raise vimconn
.VimConnConflictException(
592 type(exception
).__name
__ + ": " + message_error
594 elif isinstance(exception
, vimconn
.VimConnException
):
597 self
.logger
.error("General Exception " + message_error
, exc_info
=True)
599 raise vimconn
.VimConnConnectionException(
600 type(exception
).__name
__ + ": " + message_error
603 def _get_ids_from_name(self
):
605 Obtain ids from name of tenant and security_groups. Store at self .security_groups_id"
608 # get tenant_id if only tenant_name is supplied
609 self
._reload
_connection
()
611 if not self
.my_tenant_id
:
612 raise vimconn
.VimConnConnectionException(
613 "Error getting tenant information from name={} id={}".format(
614 self
.tenant_name
, self
.tenant_id
618 if self
.config
.get("security_groups") and not self
.security_groups_id
:
619 # convert from name to id
620 neutron_sg_list
= self
.neutron
.list_security_groups(
621 tenant_id
=self
.my_tenant_id
624 self
.security_groups_id
= []
625 for sg
in self
.config
.get("security_groups"):
626 for neutron_sg
in neutron_sg_list
:
627 if sg
in (neutron_sg
["id"], neutron_sg
["name"]):
628 self
.security_groups_id
.append(neutron_sg
["id"])
631 self
.security_groups_id
= None
633 raise vimconn
.VimConnConnectionException(
634 "Not found security group {} for this tenant".format(sg
)
637 def _find_nova_server(self
, vm_id
):
639 Returns the VM instance from Openstack and completes it with flavor ID
640 Do not call nova.servers.find directly, as it does not return flavor ID with microversion>=2.47
643 self
._reload
_connection
()
644 server
= self
.nova
.servers
.find(id=vm_id
)
645 # TODO parse input and translate to VIM format (openmano_schemas.new_vminstance_response_schema)
646 server_dict
= server
.to_dict()
648 server_dict
["flavor"]["id"] = self
.nova
.flavors
.find(
649 name
=server_dict
["flavor"]["original_name"]
651 except nClient
.exceptions
.NotFound
as e
:
652 self
.logger
.warning(str(e
.message
))
655 ksExceptions
.ClientException
,
656 nvExceptions
.ClientException
,
657 nvExceptions
.NotFound
,
660 self
._format
_exception
(e
)
    def check_vim_connectivity(self):
        """Check that the VIM is reachable and credentials are valid.

        Issues a cheap read-only request; failures surface as the vimconn
        exception raised by get_network_list.
        """
        # just get network list to check connectivity and credentials
        self.get_network_list(filter_dict={})
666 def get_tenant_list(self
, filter_dict
={}):
667 """Obtain tenants of VIM
668 filter_dict can contain the following keys:
669 name: filter by tenant name
670 id: filter by tenant uuid/id
672 Returns the tenant list of dictionaries: [{'name':'<name>, 'id':'<id>, ...}, ...]
674 self
.logger
.debug("Getting tenants from VIM filter: '%s'", str(filter_dict
))
677 self
._reload
_connection
()
679 if self
.api_version3
:
680 project_class_list
= self
.keystone
.projects
.list(
681 name
=filter_dict
.get("name")
684 project_class_list
= self
.keystone
.tenants
.findall(**filter_dict
)
688 for project
in project_class_list
:
689 if filter_dict
.get("id") and filter_dict
["id"] != project
.id:
692 project_list
.append(project
.to_dict())
696 ksExceptions
.ConnectionError
,
697 ksExceptions
.ClientException
,
700 self
._format
_exception
(e
)
702 def new_tenant(self
, tenant_name
, tenant_description
):
703 """Adds a new tenant to openstack VIM. Returns the tenant identifier"""
704 self
.logger
.debug("Adding a new tenant name: %s", tenant_name
)
707 self
._reload
_connection
()
709 if self
.api_version3
:
710 project
= self
.keystone
.projects
.create(
712 self
.config
.get("project_domain_id", "default"),
713 description
=tenant_description
,
717 project
= self
.keystone
.tenants
.create(tenant_name
, tenant_description
)
721 ksExceptions
.ConnectionError
,
722 ksExceptions
.ClientException
,
723 ksExceptions
.BadRequest
,
726 self
._format
_exception
(e
)
728 def delete_tenant(self
, tenant_id
):
729 """Delete a tenant from openstack VIM. Returns the old tenant identifier"""
730 self
.logger
.debug("Deleting tenant %s from VIM", tenant_id
)
733 self
._reload
_connection
()
735 if self
.api_version3
:
736 self
.keystone
.projects
.delete(tenant_id
)
738 self
.keystone
.tenants
.delete(tenant_id
)
742 ksExceptions
.ConnectionError
,
743 ksExceptions
.ClientException
,
744 ksExceptions
.NotFound
,
747 self
._format
_exception
(e
)
755 provider_network_profile
=None,
757 """Adds a tenant network to VIM
759 'net_name': name of the network
761 'bridge': overlay isolated network
762 'data': underlay E-LAN network for Passthrough and SRIOV interfaces
763 'ptp': underlay E-LINE network for Passthrough and SRIOV interfaces.
764 'ip_profile': is a dict containing the IP parameters of the network
765 'ip_version': can be "IPv4" or "IPv6" (Currently only IPv4 is implemented)
766 'subnet_address': ip_prefix_schema, that is X.X.X.X/Y
767 'gateway_address': (Optional) ip_schema, that is X.X.X.X
768 'dns_address': (Optional) comma separated list of ip_schema, e.g. X.X.X.X[,X,X,X,X]
769 'dhcp_enabled': True or False
770 'dhcp_start_address': ip_schema, first IP to grant
771 'dhcp_count': number of IPs to grant.
772 'shared': if this network can be seen/use by other tenants/organization
773 'provider_network_profile': (optional) contains {segmentation-id: vlan, network-type: vlan|vxlan,
774 physical-network: physnet-label}
775 Returns a tuple with the network identifier and created_items, or raises an exception on error
776 created_items can be None or a dictionary where this method can include key-values that will be passed to
777 the method delete_network. Can be used to store created segments, created l2gw connections, etc.
778 Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
782 "Adding a new network to VIM name '%s', type '%s'", net_name
, net_type
784 # self.logger.debug(">>>>>>>>>>>>>>>>>> IP profile %s", str(ip_profile))
789 if provider_network_profile
:
790 vlan
= provider_network_profile
.get("segmentation-id")
794 self
._reload
_connection
()
795 network_dict
= {"name": net_name
, "admin_state_up": True}
797 if net_type
in ("data", "ptp") or provider_network_profile
:
798 provider_physical_network
= None
800 if provider_network_profile
and provider_network_profile
.get(
803 provider_physical_network
= provider_network_profile
.get(
807 # provider-network must be one of the dataplane_physcial_netowrk if this is a list. If it is string
808 # or not declared, just ignore the checking
811 self
.config
.get("dataplane_physical_net"), (tuple, list)
813 and provider_physical_network
814 not in self
.config
["dataplane_physical_net"]
816 raise vimconn
.VimConnConflictException(
817 "Invalid parameter 'provider-network:physical-network' "
818 "for network creation. '{}' is not one of the declared "
819 "list at VIM_config:dataplane_physical_net".format(
820 provider_physical_network
824 # use the default dataplane_physical_net
825 if not provider_physical_network
:
826 provider_physical_network
= self
.config
.get(
827 "dataplane_physical_net"
830 # if it is non empty list, use the first value. If it is a string use the value directly
832 isinstance(provider_physical_network
, (tuple, list))
833 and provider_physical_network
835 provider_physical_network
= provider_physical_network
[0]
837 if not provider_physical_network
:
838 raise vimconn
.VimConnConflictException(
839 "missing information needed for underlay networks. Provide "
840 "'dataplane_physical_net' configuration at VIM or use the NS "
841 "instantiation parameter 'provider-network.physical-network'"
845 if not self
.config
.get("multisegment_support"):
847 "provider:physical_network"
848 ] = provider_physical_network
851 provider_network_profile
852 and "network-type" in provider_network_profile
855 "provider:network_type"
856 ] = provider_network_profile
["network-type"]
858 network_dict
["provider:network_type"] = self
.config
.get(
859 "dataplane_network_type", "vlan"
863 network_dict
["provider:segmentation_id"] = vlan
868 "provider:physical_network": "",
869 "provider:network_type": "vxlan",
871 segment_list
.append(segment1_dict
)
873 "provider:physical_network": provider_physical_network
,
874 "provider:network_type": "vlan",
878 segment2_dict
["provider:segmentation_id"] = vlan
879 elif self
.config
.get("multisegment_vlan_range"):
880 vlanID
= self
._generate
_multisegment
_vlanID
()
881 segment2_dict
["provider:segmentation_id"] = vlanID
884 # raise vimconn.VimConnConflictException(
885 # "You must provide "multisegment_vlan_range" at config dict before creating a multisegment
887 segment_list
.append(segment2_dict
)
888 network_dict
["segments"] = segment_list
890 # VIO Specific Changes. It needs a concrete VLAN
891 if self
.vim_type
== "VIO" and vlan
is None:
892 if self
.config
.get("dataplane_net_vlan_range") is None:
893 raise vimconn
.VimConnConflictException(
894 "You must provide 'dataplane_net_vlan_range' in format "
895 "[start_ID - end_ID] at VIM_config for creating underlay "
899 network_dict
["provider:segmentation_id"] = self
._generate
_vlanID
()
901 network_dict
["shared"] = shared
903 if self
.config
.get("disable_network_port_security"):
904 network_dict
["port_security_enabled"] = False
906 if self
.config
.get("neutron_availability_zone_hints"):
907 hints
= self
.config
.get("neutron_availability_zone_hints")
909 if isinstance(hints
, str):
912 network_dict
["availability_zone_hints"] = hints
914 new_net
= self
.neutron
.create_network({"network": network_dict
})
916 # create subnetwork, even if there is no profile
921 if not ip_profile
.get("subnet_address"):
922 # Fake subnet is required
923 subnet_rand
= random
.SystemRandom().randint(0, 255)
924 ip_profile
["subnet_address"] = "192.168.{}.0/24".format(subnet_rand
)
926 if "ip_version" not in ip_profile
:
927 ip_profile
["ip_version"] = "IPv4"
930 "name": net_name
+ "-subnet",
931 "network_id": new_net
["network"]["id"],
932 "ip_version": 4 if ip_profile
["ip_version"] == "IPv4" else 6,
933 "cidr": ip_profile
["subnet_address"],
936 # Gateway should be set to None if not needed. Otherwise openstack assigns one by default
937 if ip_profile
.get("gateway_address"):
938 subnet
["gateway_ip"] = ip_profile
["gateway_address"]
940 subnet
["gateway_ip"] = None
942 if ip_profile
.get("dns_address"):
943 subnet
["dns_nameservers"] = ip_profile
["dns_address"].split(";")
945 if "dhcp_enabled" in ip_profile
:
946 subnet
["enable_dhcp"] = (
948 if ip_profile
["dhcp_enabled"] == "false"
949 or ip_profile
["dhcp_enabled"] is False
953 if ip_profile
.get("dhcp_start_address"):
954 subnet
["allocation_pools"] = []
955 subnet
["allocation_pools"].append(dict())
956 subnet
["allocation_pools"][0]["start"] = ip_profile
[
960 if ip_profile
.get("dhcp_count"):
961 # parts = ip_profile["dhcp_start_address"].split(".")
962 # ip_int = (int(parts[0]) << 24) + (int(parts[1]) << 16) + (int(parts[2]) << 8) + int(parts[3])
963 ip_int
= int(netaddr
.IPAddress(ip_profile
["dhcp_start_address"]))
964 ip_int
+= ip_profile
["dhcp_count"] - 1
965 ip_str
= str(netaddr
.IPAddress(ip_int
))
966 subnet
["allocation_pools"][0]["end"] = ip_str
969 ip_profile
.get("ipv6_address_mode")
970 and ip_profile
["ip_version"] != "IPv4"
972 subnet
["ipv6_address_mode"] = ip_profile
["ipv6_address_mode"]
973 # ipv6_ra_mode can be set to the same value for most use cases, see documentation:
974 # https://docs.openstack.org/neutron/latest/admin/config-ipv6.html#ipv6-ra-mode-and-ipv6-address-mode-combinations
975 subnet
["ipv6_ra_mode"] = ip_profile
["ipv6_address_mode"]
977 # self.logger.debug(">>>>>>>>>>>>>>>>>> Subnet: %s", str(subnet))
978 self
.neutron
.create_subnet({"subnet": subnet
})
980 if net_type
== "data" and self
.config
.get("multisegment_support"):
981 if self
.config
.get("l2gw_support"):
982 l2gw_list
= self
.neutron
.list_l2_gateways().get("l2_gateways", ())
983 for l2gw
in l2gw_list
:
985 "l2_gateway_id": l2gw
["id"],
986 "network_id": new_net
["network"]["id"],
987 "segmentation_id": str(vlanID
),
989 new_l2gw_conn
= self
.neutron
.create_l2_gateway_connection(
990 {"l2_gateway_connection": l2gw_conn
}
994 + str(new_l2gw_conn
["l2_gateway_connection"]["id"])
997 return new_net
["network"]["id"], created_items
998 except Exception as e
:
999 # delete l2gw connections (if any) before deleting the network
1000 for k
, v
in created_items
.items():
1001 if not v
: # skip already deleted
1005 k_item
, _
, k_id
= k
.partition(":")
1007 if k_item
== "l2gwconn":
1008 self
.neutron
.delete_l2_gateway_connection(k_id
)
1009 except Exception as e2
:
1011 "Error deleting l2 gateway connection: {}: {}".format(
1012 type(e2
).__name
__, e2
1017 self
.neutron
.delete_network(new_net
["network"]["id"])
1019 self
._format
_exception
(e
)
1021 def get_network_list(self
, filter_dict
={}):
1022 """Obtain tenant networks of VIM
1028 admin_state_up: boolean
1030 Returns the network list of dictionaries
1032 self
.logger
.debug("Getting network from VIM filter: '%s'", str(filter_dict
))
1035 self
._reload
_connection
()
1036 filter_dict_os
= filter_dict
.copy()
1038 if self
.api_version3
and "tenant_id" in filter_dict_os
:
1040 filter_dict_os
["project_id"] = filter_dict_os
.pop("tenant_id")
1042 net_dict
= self
.neutron
.list_networks(**filter_dict_os
)
1043 net_list
= net_dict
["networks"]
1044 self
.__net
_os
2mano
(net_list
)
1048 neExceptions
.ConnectionFailed
,
1049 ksExceptions
.ClientException
,
1050 neExceptions
.NeutronException
,
1053 self
._format
_exception
(e
)
1055 def get_network(self
, net_id
):
1056 """Obtain details of network from VIM
1057 Returns the network information from a network id"""
1058 self
.logger
.debug(" Getting tenant network %s from VIM", net_id
)
1059 filter_dict
= {"id": net_id
}
1060 net_list
= self
.get_network_list(filter_dict
)
1062 if len(net_list
) == 0:
1063 raise vimconn
.VimConnNotFoundException(
1064 "Network '{}' not found".format(net_id
)
1066 elif len(net_list
) > 1:
1067 raise vimconn
.VimConnConflictException(
1068 "Found more than one network with this criteria"
1073 for subnet_id
in net
.get("subnets", ()):
1075 subnet
= self
.neutron
.show_subnet(subnet_id
)
1076 except Exception as e
:
1078 "osconnector.get_network(): Error getting subnet %s %s"
1081 subnet
= {"id": subnet_id
, "fault": str(e
)}
1083 subnets
.append(subnet
)
1085 net
["subnets"] = subnets
1086 net
["encapsulation"] = net
.get("provider:network_type")
1087 net
["encapsulation_type"] = net
.get("provider:network_type")
1088 net
["segmentation_id"] = net
.get("provider:segmentation_id")
1089 net
["encapsulation_id"] = net
.get("provider:segmentation_id")
def delete_network(self, net_id, created_items=None):
    """
    Removes a tenant network from VIM and its associated elements
    :param net_id: VIM identifier of the network, provided by method new_network
    :param created_items: dictionary with extra items to be deleted. provided by method new_network
    Returns the network identifier or raises an exception upon error or when network is not found
    """
    self.logger.debug("Deleting network '%s' from VIM", net_id)

    if created_items is None:
        created_items = {}

    try:
        self._reload_connection()
        # delete l2gw connections (if any) before deleting the network
        for k, v in created_items.items():
            if not v:  # skip already deleted
                continue

            try:
                k_item, _, k_id = k.partition(":")
                if k_item == "l2gwconn":
                    self.neutron.delete_l2_gateway_connection(k_id)
            except Exception as e:
                # best-effort: log and keep deleting the remaining items
                self.logger.error(
                    "Error deleting l2 gateway connection: {}: {}".format(
                        type(e).__name__, e
                    )
                )

        # delete VM ports attached to this networks before the network
        ports = self.neutron.list_ports(network_id=net_id)
        for p in ports["ports"]:
            try:
                self.neutron.delete_port(p["id"])
            except Exception as e:
                # best-effort: a leftover port must not block network deletion
                self.logger.error("Error deleting port %s: %s", p["id"], str(e))

        self.neutron.delete_network(net_id)

        return net_id
    except (
        neExceptions.ConnectionFailed,
        neExceptions.NetworkNotFoundClient,
        # fix: NeutronException was listed twice in this tuple
        neExceptions.NeutronException,
        ksExceptions.ClientException,
        ConnectionError,
    ) as e:
        self._format_exception(e)
def refresh_nets_status(self, net_list):
    """Get the status of the networks
    Params: the list of network identifiers
    Returns a dictionary with:
        net_id:         #VIM id of this network
            status:     #Mandatory. Text with one of:
                        #  DELETED (not found at vim)
                        #  VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
                        #  OTHER (Vim reported other status not understood)
                        #  ERROR (VIM indicates an ERROR status)
                        #  ACTIVE, INACTIVE, DOWN (admin down),
                        #  BUILD (on building process)
            error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR
            vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)
    """
    net_dict = {}

    for net_id in net_list:
        net = {}

        try:
            net_vim = self.get_network(net_id)

            # Map the VIM-native status to the mano vocabulary.
            if net_vim["status"] in netStatus2manoFormat:
                net["status"] = netStatus2manoFormat[net_vim["status"]]
            else:
                net["status"] = "OTHER"
                net["error_msg"] = "VIM status reported " + net_vim["status"]

            # An administratively-down network is reported as DOWN even if active.
            if net["status"] == "ACTIVE" and not net_vim["admin_state_up"]:
                net["status"] = "DOWN"

            net["vim_info"] = self.serialize(net_vim)

            if net_vim.get("fault"):  # TODO
                net["error_msg"] = str(net_vim["fault"])
        except vimconn.VimConnNotFoundException as e:
            self.logger.error("Exception getting net status: %s", str(e))
            net["status"] = "DELETED"
            net["error_msg"] = str(e)
        except vimconn.VimConnException as e:
            self.logger.error("Exception getting net status: %s", str(e))
            net["status"] = "VIM_ERROR"
            net["error_msg"] = str(e)
        net_dict[net_id] = net

    return net_dict
def get_flavor(self, flavor_id):
    """Obtain flavor details from the VIM. Returns the flavor dict details"""
    self.logger.debug("Getting flavor '%s'", flavor_id)

    try:
        self._reload_connection()
        flavor = self.nova.flavors.find(id=flavor_id)
        # TODO parse input and translate to VIM format (openmano_schemas.new_vminstance_response_schema)

        return flavor.to_dict()
    except (
        nvExceptions.NotFound,
        nvExceptions.ClientException,
        ksExceptions.ClientException,
        ConnectionError,
    ) as e:
        self._format_exception(e)
def get_flavor_id_from_data(self, flavor_dict):
    """Obtain flavor id that match the flavor description
    Returns the flavor_id or raises a vimconnNotFoundException
    flavor_dict: contains the required ram, vcpus, disk
    If 'use_existing_flavors' is set to True at config, the closer flavor that provides same or more ram, vcpus
    and disk is returned. Otherwise a flavor with exactly same ram, vcpus and disk is returned or a
    vimconnNotFoundException is raised
    """
    exact_match = False if self.config.get("use_existing_flavors") else True

    try:
        self._reload_connection()
        flavor_candidate_id = None
        # sentinel "worst" candidate; tuples compare lexicographically
        # NOTE(review): sentinel has 3 elements while targets have 5 — works
        # because comparison normally decides on the first element; confirm
        flavor_candidate_data = (10000, 10000, 10000)
        flavor_target = (
            flavor_dict["ram"],
            flavor_dict["vcpus"],
            flavor_dict["disk"],
            flavor_dict.get("ephemeral", 0),
            flavor_dict.get("swap", 0),
        )
        # numa=None
        extended = flavor_dict.get("extended", {})

        if extended:
            # TODO
            raise vimconn.VimConnNotFoundException(
                "Flavor with EPA still not implemented"
            )
        # if len(numas) > 1:
        #     raise vimconn.VimConnNotFoundException("Cannot find any flavor with more than one numa")
        # numa=numas[0]
        # numas = extended.get("numas")
        for flavor in self.nova.flavors.list():
            epa = flavor.get_keys()

            if epa:
                continue
                # TODO

            flavor_data = (
                flavor.ram,
                flavor.vcpus,
                flavor.disk,
                flavor.ephemeral,
                flavor.swap if isinstance(flavor.swap, int) else 0,
            )

            if flavor_data == flavor_target:
                return flavor.id
            elif (
                not exact_match
                and flavor_target < flavor_data < flavor_candidate_data
            ):
                flavor_candidate_id = flavor.id
                flavor_candidate_data = flavor_data

        if not exact_match and flavor_candidate_id:
            return flavor_candidate_id

        raise vimconn.VimConnNotFoundException(
            "Cannot find any flavor matching '{}'".format(flavor_dict)
        )
    except (
        nvExceptions.NotFound,
        nvExceptions.ClientException,
        ksExceptions.ClientException,
        ConnectionError,
    ) as e:
        self._format_exception(e)
@staticmethod
def process_resource_quota(quota: dict, prefix: str, extra_specs: dict) -> None:
    """Process resource quota and fill up extra_specs.

    Args:
        quota       (dict):     Keeping the quota of resurces
        prefix      (str):      Prefix prepended to the generated extra-spec keys
        extra_specs (dict):     Dict to be filled to be used during flavor creation
    """
    if "limit" in quota:
        extra_specs["quota:" + prefix + "_limit"] = quota["limit"]

    if "reserve" in quota:
        extra_specs["quota:" + prefix + "_reservation"] = quota["reserve"]

    if "shares" in quota:
        # "custom" share level is required for an explicit share value
        extra_specs["quota:" + prefix + "_shares_level"] = "custom"
        extra_specs["quota:" + prefix + "_shares_share"] = quota["shares"]
@staticmethod
def process_numa_memory(
    numa: dict, node_id: Optional[int], extra_specs: dict
) -> None:
    """Set the memory in extra_specs.

    Args:
        numa        (dict):         A dictionary which includes numa information
        node_id     (int):          ID of numa node
        extra_specs (dict):         To be filled.
    """
    if not numa.get("memory"):
        return

    # descriptor memory is given in GB; nova expects MB
    memory_mb = numa["memory"] * 1024
    memory = "hw:numa_mem.{}".format(node_id)
    extra_specs[memory] = int(memory_mb)
@staticmethod
def process_numa_vcpu(numa: dict, node_id: int, extra_specs: dict) -> None:
    """Set the cpu in extra_specs.

    Args:
        numa        (dict):         A dictionary which includes numa information
        node_id     (int):          ID of numa node
        extra_specs (dict):         To be filled.
    """
    if not numa.get("vcpu"):
        return

    vcpu = numa["vcpu"]
    cpu = "hw:numa_cpus.{}".format(node_id)
    # nova expects the vcpu list as a comma-separated string
    vcpu = ",".join(map(str, vcpu))
    extra_specs[cpu] = vcpu
@staticmethod
def process_numa_paired_threads(numa: dict, extra_specs: dict) -> Optional[int]:
    """Fill up extra_specs if numa has paired-threads.

    Args:
        numa        (dict):         A dictionary which includes numa information
        extra_specs (dict):         To be filled.

    Returns:
        threads     (int):          Number of virtual cpus
    """
    if not numa.get("paired-threads"):
        return

    # cpu_thread_policy "require" implies that compute node must have an STM architecture
    threads = numa["paired-threads"] * 2
    extra_specs["hw:cpu_thread_policy"] = "require"
    extra_specs["hw:cpu_policy"] = "dedicated"
    return threads
@staticmethod
def process_numa_cores(numa: dict, extra_specs: dict) -> Optional[int]:
    """Fill up extra_specs if numa has cores.

    Args:
        numa        (dict):         A dictionary which includes numa information
        extra_specs (dict):         To be filled.

    Returns:
        cores       (int):          Number of virtual cpus
    """
    # cpu_thread_policy "isolate" implies that the host must not have an SMT
    # architecture, or a non-SMT architecture will be emulated
    if not numa.get("cores"):
        return

    cores = numa["cores"]
    extra_specs["hw:cpu_thread_policy"] = "isolate"
    extra_specs["hw:cpu_policy"] = "dedicated"
    return cores
@staticmethod
def process_numa_threads(numa: dict, extra_specs: dict) -> Optional[int]:
    """Fill up extra_specs if numa has threads.

    Args:
        numa        (dict):         A dictionary which includes numa information
        extra_specs (dict):         To be filled.

    Returns:
        threads     (int):          Number of virtual cpus
    """
    # cpu_thread_policy "prefer" implies that the host may or may not have an SMT architecture
    if not numa.get("threads"):
        return

    threads = numa["threads"]
    extra_specs["hw:cpu_thread_policy"] = "prefer"
    extra_specs["hw:cpu_policy"] = "dedicated"
    return threads
def _process_numa_parameters_of_flavor(
    self, numas: List, extra_specs: Dict
) -> None:
    """Process numa parameters and fill up extra_specs.

    Args:
        numas       (list):         List of dictionary which includes numa information
        extra_specs (dict):         To be filled.
    """
    numa_nodes = len(numas)
    extra_specs["hw:numa_nodes"] = str(numa_nodes)
    cpu_cores, cpu_threads = 0, 0

    if self.vim_type == "VIO":
        self.process_vio_numa_nodes(numa_nodes, extra_specs)

    for numa in numas:
        if "id" in numa:
            node_id = numa["id"]
            # overwrite ram and vcpus
            # check if key "memory" is present in numa else use ram value at flavor
            self.process_numa_memory(numa, node_id, extra_specs)
            self.process_numa_vcpu(numa, node_id, extra_specs)

        # See for reference: https://specs.openstack.org/openstack/nova-specs/specs/mitaka/implemented/virt-driver-cpu-thread-pinning.html
        extra_specs["hw:cpu_sockets"] = str(numa_nodes)

        if "paired-threads" in numa:
            threads = self.process_numa_paired_threads(numa, extra_specs)
            cpu_threads += threads

        elif "cores" in numa:
            cores = self.process_numa_cores(numa, extra_specs)
            cpu_cores += cores

        elif "threads" in numa:
            threads = self.process_numa_threads(numa, extra_specs)
            cpu_threads += threads

    if cpu_cores:
        extra_specs["hw:cpu_cores"] = str(cpu_cores)
    if cpu_threads:
        extra_specs["hw:cpu_threads"] = str(cpu_threads)
@staticmethod
def process_vio_numa_nodes(numa_nodes: int, extra_specs: Dict) -> None:
    """According to number of numa nodes, updates the extra_specs for VIO.

    Args:
        numa_nodes  (int):          List keeps the numa node numbers
        extra_specs (dict):         Extra specs dict to be updated
    """
    # If there are several numas, we do not define specific affinity.
    extra_specs["vmware:latency_sensitivity_level"] = "high"
def _change_flavor_name(
    self, name: str, name_suffix: int, flavor_data: dict
) -> str:
    """Change the flavor name if the name already exists.

    Args:
        name        (str):          Flavor name to be checked
        name_suffix (int):          Suffix to be appended to name
        flavor_data (dict):         Flavor dict

    Returns:
        name        (str):          New flavor name to be used
    """
    # Get used names in the VIM and bump the suffix until the name is free.
    fl = self.nova.flavors.list()
    fl_names = [f.name for f in fl]

    while name in fl_names:
        name_suffix += 1
        name = flavor_data["name"] + "-" + str(name_suffix)

    return name
def _process_extended_config_of_flavor(
    self, extended: dict, extra_specs: dict
) -> None:
    """Process the extended dict to fill up extra_specs.

    Args:
        extended    (dict):         Keeping the extra specification of flavor
        extra_specs (dict):         Dict to be filled to be used during flavor creation
    """
    # descriptor quota keys -> extra-spec resource prefixes
    quotas = {
        "cpu-quota": "cpu",
        "mem-quota": "memory",
        "vif-quota": "vif",
        "disk-io-quota": "disk_io",
    }

    # descriptor mempage sizes -> nova hw:mem_page_size values
    page_sizes = {
        "LARGE": "large",
        "SMALL": "small",
        "SIZE_2MB": "2MB",
        "SIZE_1GB": "1GB",
        "PREFER_LARGE": "any",
    }

    # descriptor policy keys -> nova extra-spec keys
    policies = {
        "cpu-pinning-policy": "hw:cpu_policy",
        "cpu-thread-pinning-policy": "hw:cpu_thread_policy",
        "mem-policy": "hw:numa_mempolicy",
    }

    numas = extended.get("numas")
    if numas:
        self._process_numa_parameters_of_flavor(numas, extra_specs)

    for quota, item in quotas.items():
        if quota in extended.keys():
            self.process_resource_quota(extended.get(quota), item, extra_specs)

    # Set the mempage size as specified in the descriptor
    if extended.get("mempage-size"):
        if extended["mempage-size"] in page_sizes.keys():
            extra_specs["hw:mem_page_size"] = page_sizes[extended["mempage-size"]]
        else:
            # Normally, validations in NBI should not allow to this condition.
            self.logger.debug(
                "Invalid mempage-size %s. Will be ignored",
                extended.get("mempage-size"),
            )

    for policy, hw_policy in policies.items():
        if extended.get(policy):
            extra_specs[hw_policy] = extended[policy].lower()
@staticmethod
def _get_flavor_details(flavor_data: dict) -> Tuple:
    """Returns the details of flavor

    Args:
        flavor_data     (dict):     Dictionary that includes required flavor details

    Returns:
        ram, vcpus, extra_specs, extended   (tuple):    Main items of required flavor
    """
    return (
        flavor_data.get("ram", 64),
        flavor_data.get("vcpus", 1),
        {},
        flavor_data.get("extended"),
    )
def new_flavor(self, flavor_data: dict, change_name_if_used: bool = True) -> str:
    """Adds a tenant flavor to openstack VIM.
    if change_name_if_used is True, it will change name in case of conflict,
    because it is not supported name repetition.

    Args:
        flavor_data         (dict):     Flavor details to be processed
        change_name_if_used (bool):     Change name in case of conflict

    Returns:
        flavor_id           (str):      flavor identifier
    """
    self.logger.debug("Adding flavor '%s'", str(flavor_data))
    retry = 0
    max_retries = 3
    name_suffix = 0

    try:
        name = flavor_data["name"]

        while retry < max_retries:
            retry += 1

            try:
                self._reload_connection()

                if change_name_if_used:
                    name = self._change_flavor_name(name, name_suffix, flavor_data)

                ram, vcpus, extra_specs, extended = self._get_flavor_details(
                    flavor_data
                )
                if extended:
                    self._process_extended_config_of_flavor(extended, extra_specs)

                # Create flavor
                new_flavor = self.nova.flavors.create(
                    name=name,
                    ram=ram,
                    vcpus=vcpus,
                    disk=flavor_data.get("disk", 0),
                    ephemeral=flavor_data.get("ephemeral", 0),
                    swap=flavor_data.get("swap", 0),
                    is_public=flavor_data.get("is_public", True),
                )

                # Add metadata
                if extra_specs:
                    new_flavor.set_keys(extra_specs)

                return new_flavor.id

            except nvExceptions.Conflict as e:
                # name clash: retry with a new derived name while retries remain
                if change_name_if_used and retry < max_retries:
                    continue

                self._format_exception(e)
    except (
        ksExceptions.ClientException,
        nvExceptions.ClientException,
        ConnectionError,
        KeyError,
    ) as e:
        self._format_exception(e)
def delete_flavor(self, flavor_id):
    """Deletes a tenant flavor from openstack VIM. Returns the old flavor_id"""
    try:
        self._reload_connection()
        self.nova.flavors.delete(flavor_id)

        return flavor_id
    # except nvExceptions.BadRequest as e:
    except (
        nvExceptions.NotFound,
        ksExceptions.ClientException,
        nvExceptions.ClientException,
        ConnectionError,
    ) as e:  # TODO remove
        self._format_exception(e)
def new_image(self, image_dict):
    """
    Adds a tenant image to VIM. imge_dict is a dictionary with:
        name: name
        disk_format: qcow2, vhd, vmdk, raw (by default), ...
        location: path or URI
        public: "yes" or "no"
        metadata: metadata of the image
    Returns the image_id
    """
    retry = 0
    max_retries = 3

    while retry < max_retries:
        retry += 1

        try:
            self._reload_connection()

            # determine format http://docs.openstack.org/developer/glance/formats.html
            if "disk_format" in image_dict:
                disk_format = image_dict["disk_format"]
            else:  # autodiscover based on extension
                if image_dict["location"].endswith(".qcow2"):
                    disk_format = "qcow2"
                elif image_dict["location"].endswith(".vhd"):
                    disk_format = "vhd"
                elif image_dict["location"].endswith(".vmdk"):
                    disk_format = "vmdk"
                elif image_dict["location"].endswith(".vdi"):
                    disk_format = "vdi"
                elif image_dict["location"].endswith(".iso"):
                    disk_format = "iso"
                elif image_dict["location"].endswith(".aki"):
                    disk_format = "aki"
                elif image_dict["location"].endswith(".ari"):
                    disk_format = "ari"
                elif image_dict["location"].endswith(".ami"):
                    disk_format = "ami"
                else:
                    disk_format = "raw"

            self.logger.debug(
                "new_image: '%s' loading from '%s'",
                image_dict["name"],
                image_dict["location"],
            )
            if self.vim_type == "VIO":
                container_format = "bare"
                if "container_format" in image_dict:
                    container_format = image_dict["container_format"]

                new_image = self.glance.images.create(
                    name=image_dict["name"],
                    container_format=container_format,
                    disk_format=disk_format,
                )
            else:
                new_image = self.glance.images.create(name=image_dict["name"])

            if image_dict["location"].startswith("http"):
                # TODO there is not a method to direct download. It must be downloaded locally with requests
                raise vimconn.VimConnNotImplemented("Cannot create image from URL")
            else:  # local path
                with open(image_dict["location"]) as fimage:
                    self.glance.images.upload(new_image.id, fimage)
                    # new_image = self.glancev1.images.create(name=image_dict["name"], is_public=
                    #  image_dict.get("public","yes")=="yes",
                    #    container_format="bare", data=fimage, disk_format=disk_format)

            metadata_to_load = image_dict.get("metadata")
            # NOTE(review): metadata_to_load may be None here and is indexed
            # below — looks like callers always supply metadata; confirm.

            # TODO location is a reserved word for current openstack versions. fixed for VIO please check
            #  for openstack
            if self.vim_type == "VIO":
                metadata_to_load["upload_location"] = image_dict["location"]
            else:
                metadata_to_load["location"] = image_dict["location"]

            self.glance.images.update(new_image.id, **metadata_to_load)

            return new_image.id
        except (
            nvExceptions.Conflict,
            ksExceptions.ClientException,
            nvExceptions.ClientException,
        ) as e:
            self._format_exception(e)
        except (
            HTTPException,
            gl1Exceptions.HTTPException,
            gl1Exceptions.CommunicationError,
            ConnectionError,
        ) as e:
            # transient communication error: retry unless retries are exhausted
            if retry == max_retries:
                continue

            self._format_exception(e)
        except IOError as e:  # can not open the file
            raise vimconn.VimConnConnectionException(
                "{}: {} for {}".format(type(e).__name__, e, image_dict["location"]),
                http_code=vimconn.HTTP_Bad_Request,
            )
def delete_image(self, image_id):
    """Deletes a tenant image from openstack VIM. Returns the old id"""
    try:
        self._reload_connection()
        self.glance.images.delete(image_id)

        return image_id
    except (
        nvExceptions.NotFound,
        ksExceptions.ClientException,
        nvExceptions.ClientException,
        gl1Exceptions.CommunicationError,
        gl1Exceptions.HTTPNotFound,
        ConnectionError,
    ) as e:  # TODO remove
        self._format_exception(e)
def get_image_id_from_path(self, path):
    """Get the image id from image path in the VIM database. Returns the image_id"""
    try:
        self._reload_connection()
        images = self.glance.images.list()

        for image in images:
            if image.metadata.get("location") == path:
                return image.id

        raise vimconn.VimConnNotFoundException(
            "image with location '{}' not found".format(path)
        )
    except (
        ksExceptions.ClientException,
        nvExceptions.ClientException,
        gl1Exceptions.CommunicationError,
        ConnectionError,
    ) as e:
        self._format_exception(e)
def get_image_list(self, filter_dict=None):
    """Obtain tenant images from VIM
    Filter_dict can be:
        id: image id
        name: image name
        checksum: image checksum
    Returns the image list of dictionaries:
        [{<the fields at Filter_dict plus some VIM specific>}, ...]
        List can be empty
    """
    # fix: a mutable default argument ({}) was shared across calls
    if filter_dict is None:
        filter_dict = {}

    self.logger.debug("Getting image list from VIM filter: '%s'", str(filter_dict))

    try:
        self._reload_connection()
        # filter_dict_os = filter_dict.copy()
        # First we filter by the available filter fields: name, id. The others are removed.
        image_list = self.glance.images.list()
        filtered_list = []

        for image in image_list:
            try:
                if filter_dict.get("name") and image["name"] != filter_dict["name"]:
                    continue

                if filter_dict.get("id") and image["id"] != filter_dict["id"]:
                    continue

                if (
                    filter_dict.get("checksum")
                    and image["checksum"] != filter_dict["checksum"]
                ):
                    continue

                filtered_list.append(image.copy())
            except gl1Exceptions.HTTPNotFound:
                # image vanished between list() and attribute access: skip it
                pass

        return filtered_list
    except (
        ksExceptions.ClientException,
        nvExceptions.ClientException,
        gl1Exceptions.CommunicationError,
        ConnectionError,
    ) as e:
        self._format_exception(e)
def __wait_for_vm(self, vm_id, status):
    """wait until vm is in the desired status and return True.
    If the VM gets in ERROR status, return false.
    If the timeout is reached generate an exception"""
    elapsed_time = 0

    while elapsed_time < server_timeout:
        vm_status = self.nova.servers.get(vm_id).status

        if vm_status == status:
            return True

        if vm_status == "ERROR":
            return False

        time.sleep(5)
        elapsed_time += 5

    # if we exceeded the timeout rollback
    if elapsed_time >= server_timeout:
        raise vimconn.VimConnException(
            "Timeout waiting for instance " + vm_id + " to get " + status,
            http_code=vimconn.HTTP_Request_Timeout,
        )
def _get_openstack_availablity_zones(self):
    """
    Get from openstack availability zones available
    :return: list of zone names (the "internal" zone is excluded), or None on error
    """
    try:
        openstack_availability_zone = self.nova.availability_zones.list()
        openstack_availability_zone = [
            str(zone.zoneName)
            for zone in openstack_availability_zone
            if zone.zoneName != "internal"
        ]

        return openstack_availability_zone
    except Exception:
        # best-effort query: callers treat None as "no zone information"
        return None
def _set_availablity_zones(self):
    """
    Set vim availablity zone
    :return:
    """
    if "availability_zone" in self.config:
        vim_availability_zones = self.config.get("availability_zone")

        # normalize a single zone name into a one-element list
        if isinstance(vim_availability_zones, str):
            self.availability_zone = [vim_availability_zones]
        elif isinstance(vim_availability_zones, list):
            self.availability_zone = vim_availability_zones
    else:
        self.availability_zone = self._get_openstack_availablity_zones()
def _get_vm_availability_zone(
    self, availability_zone_index, availability_zone_list
):
    """
    Return thge availability zone to be used by the created VM.
    :return: The VIM availability zone to be used or None
    """
    if availability_zone_index is None:
        if not self.config.get("availability_zone"):
            return None
        elif isinstance(self.config.get("availability_zone"), str):
            return self.config["availability_zone"]
        else:
            # TODO consider using a different parameter at config for default AV and AV list match
            return self.config["availability_zone"][0]

    vim_availability_zones = self.availability_zone

    # check if VIM offer enough availability zones describe in the VNFD
    if vim_availability_zones and len(availability_zone_list) <= len(
        vim_availability_zones
    ):
        # check if all the names of NFV AV match VIM AV names
        match_by_index = False

        for av in availability_zone_list:
            if av not in vim_availability_zones:
                match_by_index = True
                break

        if match_by_index:
            return vim_availability_zones[availability_zone_index]
        else:
            return availability_zone_list[availability_zone_index]
    else:
        raise vimconn.VimConnConflictException(
            "No enough availability zones at VIM for this deployment"
        )
def _prepare_port_dict_security_groups(self, net: dict, port_dict: dict) -> None:
    """Fill up the security_groups in the port_dict.

    Args:
        net         (dict):         Network details
        port_dict   (dict):         Port details
    """
    if (
        self.config.get("security_groups")
        and net.get("port_security") is not False
        and not self.config.get("no_port_security_extension")
    ):
        # resolve configured group names into ids once, then reuse them
        if not self.security_groups_id:
            self._get_ids_from_name()

        port_dict["security_groups"] = self.security_groups_id
def _prepare_port_dict_binding(self, net: dict, port_dict: dict) -> None:
    """Fill up the network binding depending on network type in the port_dict.

    Args:
        net         (dict):         Network details
        port_dict   (dict):         Port details
    """
    if not net.get("type"):
        raise vimconn.VimConnException("Type is missing in the network details.")

    if net["type"] == "virtual":
        pass

    # For VF
    elif net["type"] == "VF" or net["type"] == "SR-IOV":
        port_dict["binding:vnic_type"] = "direct"

        # VIO specific Changes
        if self.vim_type == "VIO":
            # Need to create port with port_security_enabled = False and no-security-groups
            port_dict["port_security_enabled"] = False
            port_dict["provider_security_groups"] = []
            port_dict["security_groups"] = []
    else:
        # For PT PCI-PASSTHROUGH
        port_dict["binding:vnic_type"] = "direct-physical"
@staticmethod
def _set_fixed_ip(new_port: dict, net: dict) -> None:
    """Set the "ip" parameter in net dictionary.

    Args:
        new_port    (dict):         New created port
        net         (dict):         Network details
    """
    fixed_ips = new_port["port"].get("fixed_ips")

    if fixed_ips:
        net["ip"] = fixed_ips[0].get("ip_address")
    else:
        net["ip"] = None
@staticmethod
def _prepare_port_dict_mac_ip_addr(net: dict, port_dict: dict) -> None:
    """Fill up the mac_address and fixed_ips in port_dict.

    Args:
        net         (dict):         Network details
        port_dict   (dict):         Port details
    """
    if net.get("mac_address"):
        port_dict["mac_address"] = net["mac_address"]

    ip_dual_list = []
    if ip_list := net.get("ip_address"):
        # a single address may be given as a bare string
        if not isinstance(ip_list, list):
            ip_list = [ip_list]
        for ip in ip_list:
            ip_dict = {"ip_address": ip}
            ip_dual_list.append(ip_dict)
        port_dict["fixed_ips"] = ip_dual_list
        # TODO add "subnet_id": <subnet_id>
def _create_new_port(self, port_dict: dict, created_items: dict, net: dict) -> Dict:
    """Create new port using neutron.

    Args:
        port_dict       (dict):     Port details
        created_items   (dict):     All created items
        net             (dict):     Network details

    Returns:
        new_port        (dict):     New created port
    """
    new_port = self.neutron.create_port({"port": port_dict})
    # record the port so it can be rolled back / deleted later
    created_items["port:" + str(new_port["port"]["id"])] = True
    net["mac_address"] = new_port["port"]["mac_address"]
    net["vim_id"] = new_port["port"]["id"]

    return new_port
def _create_port(
    self, net: dict, name: str, created_items: dict
) -> Tuple[dict, dict]:
    """Create port using net details.

    Args:
        net             (dict):     Network details
        name            (str):      Name to be used as network name if net dict does not include name
        created_items   (dict):     All created items

    Returns:
        new_port, port  (tuple):    New created port, port dictionary
    """
    port_dict = {
        "network_id": net["net_id"],
        "name": net.get("name"),
        "admin_state_up": True,
    }

    if not port_dict["name"]:
        port_dict["name"] = name

    self._prepare_port_dict_security_groups(net, port_dict)

    self._prepare_port_dict_binding(net, port_dict)

    vimconnector._prepare_port_dict_mac_ip_addr(net, port_dict)

    new_port = self._create_new_port(port_dict, created_items, net)

    vimconnector._set_fixed_ip(new_port, net)

    port = {"port-id": new_port["port"]["id"]}

    # nova microversion >= 2.32 supports tagged interface attach
    if float(self.nova.api_version.get_string()) >= 2.32:
        port["tag"] = new_port["port"]["name"]

    return new_port, port
def _prepare_network_for_vminstance(
    self,
    name: str,
    net_list: list,
    created_items: dict,
    net_list_vim: list,
    external_network: list,
    no_secured_ports: list,
) -> None:
    """Create port and fill up net dictionary for new VM instance creation.

    Args:
        name                (str):      Name of network
        net_list            (list):     List of networks
        created_items       (dict):     All created items belongs to a VM
        net_list_vim        (list):     List of ports
        external_network    (list):     List of external-networks
        no_secured_ports    (list):     Port security disabled ports
    """
    self._reload_connection()

    for net in net_list:
        # Skip non-connected iface
        if not net.get("net_id"):
            continue

        new_port, port = self._create_port(net, name, created_items)

        net_list_vim.append(port)

        if net.get("floating_ip", False):
            net["exit_on_floating_ip_error"] = True
            external_network.append(net)
        elif net["use"] == "mgmt" and self.config.get("use_floating_ip"):
            net["exit_on_floating_ip_error"] = False
            external_network.append(net)
            net["floating_ip"] = self.config.get("use_floating_ip")

        # If port security is disabled when the port has not yet been attached to the VM, then all vm traffic
        # is dropped. As a workaround we wait until the VM is active and then disable the port-security
        if net.get("port_security") is False and not self.config.get(
            "no_port_security_extension"
        ):
            no_secured_ports.append(
                (
                    new_port["port"]["id"],
                    net.get("port_security_disable_strategy"),
                )
            )
def _prepare_persistent_root_volumes(
    self,
    name: str,
    vm_av_zone: list,
    disk: dict,
    base_disk_index: int,
    block_device_mapping: dict,
    existing_vim_volumes: list,
    created_items: dict,
) -> Optional[str]:
    """Prepare persistent root volumes for new VM instance.

    Args:
        name                    (str):      Name of VM instance
        vm_av_zone              (list):     List of availability zones
        disk                    (dict):     Disk details
        base_disk_index         (int):      Disk index
        block_device_mapping    (dict):     Block device details
        existing_vim_volumes    (list):     Existing disk details
        created_items           (dict):     All created items belongs to VM

    Returns:
        boot_volume_id          (str):      ID of boot volume
    """
    # Disk may include only vim_volume_id or only vim_id."
    # Use existing persistent root volume finding with volume_id or vim_id
    key_id = "vim_volume_id" if "vim_volume_id" in disk.keys() else "vim_id"

    if disk.get(key_id):
        block_device_mapping["vd" + chr(base_disk_index)] = disk[key_id]
        existing_vim_volumes.append({"id": disk[key_id]})
    else:
        # Create persistent root volume
        volume = self.cinder.volumes.create(
            size=disk["size"],
            name=name + "vd" + chr(base_disk_index),
            imageRef=disk["image_id"],
            # Make sure volume is in the same AZ as the VM to be attached to
            availability_zone=vm_av_zone,
        )
        boot_volume_id = volume.id
        self.update_block_device_mapping(
            volume=volume,
            block_device_mapping=block_device_mapping,
            base_disk_index=base_disk_index,
            disk=disk,
            created_items=created_items,
        )

        return boot_volume_id
def update_block_device_mapping(
    self,
    volume: object,
    block_device_mapping: dict,
    base_disk_index: int,
    disk: dict,
    created_items: dict,
) -> None:
    """Add volume information to block device mapping dict.

    Args:
        volume                  (object):   Created volume object
        block_device_mapping    (dict):     Block device details
        base_disk_index         (int):      Disk index
        disk                    (dict):     Disk details
        created_items           (dict):     All created items belongs to VM
    """
    if not volume:
        raise vimconn.VimConnException("Volume is empty.")

    if not hasattr(volume, "id"):
        raise vimconn.VimConnException(
            "Created volume is not valid, does not have id attribute."
        )

    block_device_mapping["vd" + chr(base_disk_index)] = volume.id

    if disk.get("multiattach"):  # multiattach volumes do not belong to VDUs
        return

    volume_txt = "volume:" + str(volume.id)
    if disk.get("keep"):
        volume_txt += ":keep"
    created_items[volume_txt] = True
def new_shared_volumes(self, shared_volume_data) -> (str, str):
    """Create a multiattach (shared) volume in cinder.

    Args:
        shared_volume_data (dict):  must carry "size" (GB) and "name"

    Returns:
        (name, id) of the created volume
    """
    try:
        volume = self.cinder.volumes.create(
            size=shared_volume_data["size"],
            name=shared_volume_data["name"],
            volume_type="multiattach",
        )
        return volume.name, volume.id
    except (ConnectionError, KeyError) as e:
        self._format_exception(e)
def _prepare_shared_volumes(
    self,
    name: str,
    disk: dict,
    base_disk_index: int,
    block_device_mapping: dict,
    existing_vim_volumes: list,
    created_items: dict,
):
    """Attach an already-existing shared (multiattach) volume by name.

    Args:
        name                    (str):      Name of VM instance
        disk                    (dict):     Disk details (must carry "name")
        base_disk_index         (int):      Disk index
        block_device_mapping    (dict):     Block device details
        existing_vim_volumes    (list):     Existing disk details
        created_items           (dict):     All created items belongs to VM
    """
    volumes = {volume.name: volume.id for volume in self.cinder.volumes.list()}

    if volumes.get(disk["name"]):
        sv_id = volumes[disk["name"]]
        max_retries = 3
        vol_status = ""

        # If this is not the first VM to attach the volume, volume status may be "reserved" for a short time
        while max_retries:
            max_retries -= 1
            volume = self.cinder.volumes.get(sv_id)
            vol_status = volume.status

            if volume.status not in ("in-use", "available"):
                time.sleep(5)
                continue

            self.update_block_device_mapping(
                volume=volume,
                block_device_mapping=block_device_mapping,
                base_disk_index=base_disk_index,
                disk=disk,
                created_items=created_items,
            )

            return

        raise vimconn.VimConnException(
            "Shared volume is not prepared, status is: {}".format(vol_status),
            http_code=vimconn.HTTP_Internal_Server_Error,
        )
def _prepare_non_root_persistent_volumes(
    self,
    name: str,
    disk: dict,
    vm_av_zone: list,
    block_device_mapping: dict,
    base_disk_index: int,
    existing_vim_volumes: list,
    created_items: dict,
) -> None:
    """Prepare persistent volumes for new VM instance.

    Args:
        name                    (str):      Name of VM instance
        disk                    (dict):     Disk details
        vm_av_zone              (list):     List of availability zones
        block_device_mapping    (dict):     Block device details
        base_disk_index         (int):      Disk index
        existing_vim_volumes    (list):     Existing disk details
        created_items           (dict):     All created items belongs to VM
    """
    # Non-root persistent volumes
    # Disk may include only vim_volume_id or only vim_id."
    key_id = "vim_volume_id" if "vim_volume_id" in disk.keys() else "vim_id"

    if disk.get(key_id):
        # Use existing persistent volume
        block_device_mapping["vd" + chr(base_disk_index)] = disk[key_id]
        existing_vim_volumes.append({"id": disk[key_id]})
    else:
        volume_name = f"{name}vd{chr(base_disk_index)}"
        # Create persistent volume
        volume = self.cinder.volumes.create(
            size=disk["size"],
            name=volume_name,
            # Make sure volume is in the same AZ as the VM to be attached to
            availability_zone=vm_av_zone,
        )
        self.update_block_device_mapping(
            volume=volume,
            block_device_mapping=block_device_mapping,
            base_disk_index=base_disk_index,
            disk=disk,
            created_items=created_items,
        )
def _wait_for_created_volumes_availability(
    self, elapsed_time: int, created_items: dict
) -> Optional[int]:
    """Wait till created volumes become available.

    Args:
        elapsed_time    (int):      Passed time while waiting
        created_items   (dict):     All created items belongs to VM

    Returns:
        elapsed_time    (int):      Time spent while waiting
    """
    while elapsed_time < volume_timeout:
        # created_items keys look like "volume:<id>[:keep]"
        for created_item in created_items:
            v, volume_id = (
                created_item.split(":")[0],
                created_item.split(":")[1],
            )
            if v == "volume":
                volume = self.cinder.volumes.get(volume_id)

                if (
                    volume.volume_type == "multiattach"
                    and volume.status == "in-use"
                ):
                    return elapsed_time
                elif volume.status != "available":
                    break
        else:
            # All ready: break from while
            break

        time.sleep(5)
        elapsed_time += 5

    return elapsed_time
def _wait_for_existing_volumes_availability(
    self, elapsed_time: int, existing_vim_volumes: list
):
    """Wait till existing volumes become available.

    Args:
        elapsed_time (int): Passed time while waiting
        existing_vim_volumes (list): Existing volume details

    Returns:
        elapsed_time (int): Time spent while waiting
    """
    # Same polling scheme as for created volumes, but over volumes that
    # already existed in the VIM before this instantiation.
    while elapsed_time < volume_timeout:
        for volume in existing_vim_volumes:
            v = self.cinder.volumes.get(volume["id"])

            if v.volume_type == "multiattach" and v.status == "in-use":
                return elapsed_time
            elif v.status != "available":
                # This one is not ready: sleep and re-check everything.
                break
        else:  # all ready: break from while
            break

        time.sleep(5)
        elapsed_time += 5

    return elapsed_time
def _prepare_disk_for_vminstance(
    self,
    name: str,
    existing_vim_volumes: list,
    created_items: dict,
    vm_av_zone: list,
    block_device_mapping: dict,
    disk_list: list = None,
) -> None:
    """Prepare all volumes for new VM instance.

    Args:
        name (str): Name of Instance
        existing_vim_volumes (list): List of existing volumes
        created_items (dict): All created items belongs to VM
        vm_av_zone (list): VM availability zone
        block_device_mapping (dict): Block devices to be attached to VM
        disk_list (list): List of disks
    """
    # Create additional volumes in case these are present in disk_list
    base_disk_index = ord("b")
    boot_volume_id = None
    elapsed_time = 0

    for disk in disk_list:
        if "image_id" in disk:
            # Root persistent volume
            base_disk_index = ord("a")
            boot_volume_id = self._prepare_persistent_root_volumes(
                name=name,
                vm_av_zone=vm_av_zone,
                disk=disk,
                base_disk_index=base_disk_index,
                block_device_mapping=block_device_mapping,
                existing_vim_volumes=existing_vim_volumes,
                created_items=created_items,
            )
        elif disk.get("multiattach"):
            # Shared (multiattach) volume.
            self._prepare_shared_volumes(
                name=name,
                disk=disk,
                base_disk_index=base_disk_index,
                block_device_mapping=block_device_mapping,
                existing_vim_volumes=existing_vim_volumes,
                created_items=created_items,
            )
        else:
            # Non-root persistent volume
            self._prepare_non_root_persistent_volumes(
                name=name,
                disk=disk,
                vm_av_zone=vm_av_zone,
                block_device_mapping=block_device_mapping,
                base_disk_index=base_disk_index,
                existing_vim_volumes=existing_vim_volumes,
                created_items=created_items,
            )

        base_disk_index += 1

    # Wait until created volumes are with status available
    elapsed_time = self._wait_for_created_volumes_availability(
        elapsed_time, created_items
    )
    # Wait until existing volumes in vim are with status available
    elapsed_time = self._wait_for_existing_volumes_availability(
        elapsed_time, existing_vim_volumes
    )
    # If we exceeded the timeout rollback
    if elapsed_time >= volume_timeout:
        raise vimconn.VimConnException(
            "Timeout creating volumes for instance " + name,
            http_code=vimconn.HTTP_Request_Timeout,
        )

    if boot_volume_id:
        self.cinder.volumes.set_bootable(boot_volume_id, True)
2419 def _find_the_external_network_for_floating_ip(self
):
2420 """Get the external network ip in order to create floating IP.
2423 pool_id (str): External network pool ID
2427 # Find the external network
2428 external_nets
= list()
2430 for net
in self
.neutron
.list_networks()["networks"]:
2431 if net
["router:external"]:
2432 external_nets
.append(net
)
2434 if len(external_nets
) == 0:
2435 raise vimconn
.VimConnException(
2436 "Cannot create floating_ip automatically since "
2437 "no external network is present",
2438 http_code
=vimconn
.HTTP_Conflict
,
2441 if len(external_nets
) > 1:
2442 raise vimconn
.VimConnException(
2443 "Cannot create floating_ip automatically since "
2444 "multiple external networks are present",
2445 http_code
=vimconn
.HTTP_Conflict
,
2449 return external_nets
[0].get("id")
2451 def _neutron_create_float_ip(self
, param
: dict, created_items
: dict) -> None:
2452 """Trigger neutron to create a new floating IP using external network ID.
2455 param (dict): Input parameters to create a floating IP
2456 created_items (dict): All created items belongs to new VM instance
2463 self
.logger
.debug("Creating floating IP")
2464 new_floating_ip
= self
.neutron
.create_floatingip(param
)
2465 free_floating_ip
= new_floating_ip
["floatingip"]["id"]
2466 created_items
["floating_ip:" + str(free_floating_ip
)] = True
2468 except Exception as e
:
2469 raise vimconn
.VimConnException(
2470 type(e
).__name
__ + ": Cannot create new floating_ip " + str(e
),
2471 http_code
=vimconn
.HTTP_Conflict
,
2474 def _create_floating_ip(
2475 self
, floating_network
: dict, server
: object, created_items
: dict
2477 """Get the available Pool ID and create a new floating IP.
2480 floating_network (dict): Dict including external network ID
2481 server (object): Server object
2482 created_items (dict): All created items belongs to new VM instance
2486 # Pool_id is available
2488 isinstance(floating_network
["floating_ip"], str)
2489 and floating_network
["floating_ip"].lower() != "true"
2491 pool_id
= floating_network
["floating_ip"]
2495 pool_id
= self
._find
_the
_external
_network
_for
_floating
_ip
()
2499 "floating_network_id": pool_id
,
2500 "tenant_id": server
.tenant_id
,
2504 self
._neutron
_create
_float
_ip
(param
, created_items
)
2506 def _find_floating_ip(
2510 floating_network
: dict,
2512 """Find the available free floating IPs if there are.
2515 server (object): Server object
2516 floating_ips (list): List of floating IPs
2517 floating_network (dict): Details of floating network such as ID
2520 free_floating_ip (str): Free floating ip address
2523 for fip
in floating_ips
:
2524 if fip
.get("port_id") or fip
.get("tenant_id") != server
.tenant_id
:
2527 if isinstance(floating_network
["floating_ip"], str):
2528 if fip
.get("floating_network_id") != floating_network
["floating_ip"]:
def _assign_floating_ip(
    self, free_floating_ip: str, floating_network: dict
) -> dict:
    """Assign the free floating ip address to port.

    Args:
        free_floating_ip (str): Floating IP to be assigned
        floating_network (dict): ID of floating network

    Returns:
        fip (dict) (dict): Floating ip details
    """
    # The vim_id key contains the neutron.port_id
    self.neutron.update_floatingip(
        free_floating_ip,
        {"floatingip": {"port_id": floating_network["vim_id"]}},
    )
    # For race condition ensure not re-assigned to other VM after 5 seconds
    time.sleep(5)

    return self.neutron.show_floatingip(free_floating_ip)
2556 def _get_free_floating_ip(
2557 self
, server
: object, floating_network
: dict
2559 """Get the free floating IP address.
2562 server (object): Server Object
2563 floating_network (dict): Floating network details
2566 free_floating_ip (str): Free floating ip addr
2570 floating_ips
= self
.neutron
.list_floatingips().get("floatingips", ())
2573 random
.shuffle(floating_ips
)
2575 return self
._find
_floating
_ip
(server
, floating_ips
, floating_network
)
def _prepare_external_network_for_vminstance(
    self,
    external_network: list,
    server: object,
    created_items: dict,
    vm_start_time: float,
) -> None:
    """Assign floating IP address for VM instance.

    Args:
        external_network (list): ID of External network
        server (object): Server Object
        created_items (dict): All created items belongs to new VM instance
        vm_start_time (float): Time as a floating point number expressed in seconds since the epoch, in UTC

    Raises:
        VimConnException
    """
    for floating_network in external_network:
        try:
            assigned = False
            floating_ip_retries = 3
            # In case of RO in HA there can be conflicts, two RO trying to assign same floating IP, so retry
            # several times
            while not assigned:
                free_floating_ip = self._get_free_floating_ip(
                    server, floating_network
                )

                if not free_floating_ip:
                    self._create_floating_ip(
                        floating_network, server, created_items
                    )

                try:
                    # For race condition ensure not already assigned
                    fip = self.neutron.show_floatingip(free_floating_ip)

                    if fip["floatingip"].get("port_id"):
                        continue

                    # Assign floating ip
                    fip = self._assign_floating_ip(
                        free_floating_ip, floating_network
                    )

                    if fip["floatingip"]["port_id"] != floating_network["vim_id"]:
                        self.logger.warning(
                            "floating_ip {} re-assigned to other port".format(
                                free_floating_ip
                            )
                        )
                        continue

                    self.logger.debug(
                        "Assigned floating_ip {} to VM {}".format(
                            free_floating_ip, server.id
                        )
                    )

                    assigned = True
                except Exception as e:
                    # Openstack need some time after VM creation to assign an IP. So retry if fails
                    vm_status = self.nova.servers.get(server.id).status

                    if vm_status not in ("ACTIVE", "ERROR"):
                        if time.time() - vm_start_time < server_timeout:
                            time.sleep(5)
                            continue
                    elif floating_ip_retries > 0:
                        floating_ip_retries -= 1
                        continue

                    raise vimconn.VimConnException(
                        "Cannot create floating_ip: {} {}".format(
                            type(e).__name__, e
                        ),
                        http_code=vimconn.HTTP_Conflict,
                    )
        except Exception as e:
            # Treat the failure as best-effort unless the descriptor demanded
            # a floating IP (exit_on_floating_ip_error).
            if not floating_network["exit_on_floating_ip_error"]:
                self.logger.error("Cannot create floating_ip. %s", str(e))
                continue

            raise
2666 def _update_port_security_for_vminstance(
2668 no_secured_ports
: list,
2671 """Updates the port security according to no_secured_ports list.
2674 no_secured_ports (list): List of ports that security will be disabled
2675 server (object): Server Object
2681 # Wait until the VM is active and then disable the port-security
2682 if no_secured_ports
:
2683 self
.__wait
_for
_vm
(server
.id, "ACTIVE")
2685 for port
in no_secured_ports
:
2687 "port": {"port_security_enabled": False, "security_groups": None}
2690 if port
[1] == "allow-address-pairs":
2692 "port": {"allowed_address_pairs": [{"ip_address": "0.0.0.0/0"}]}
2696 self
.neutron
.update_port(port
[0], port_update
)
2699 raise vimconn
.VimConnException(
2700 "It was not possible to disable port security for port {}".format(
def new_vminstance(
    self,
    name: str,
    description: str,
    start: bool,
    image_id: str,
    flavor_id: str,
    affinity_group_list: list,
    net_list: list,
    cloud_config=None,
    disk_list=None,
    availability_zone_index=None,
    availability_zone_list=None,
) -> tuple:
    """Adds a VM instance to VIM.

    Args:
        name (str): name of VM
        description (str): description
        start (bool): indicates if VM must start or boot in pause mode. Ignored
        image_id (str) image uuid
        flavor_id (str) flavor uuid
        affinity_group_list (list): list of affinity groups, each one is a dictionary.Ignore if empty.
        net_list (list): list of interfaces, each one is a dictionary with:
            name, net_id, vpci, model, mac_address, use, type, vim_id,
            floating_ip, port_security (see vimconn base class for details)
        cloud_config (dict): (optional) dictionary with key-pairs, users,
            user-data, config-files and boot-data-drive entries
        disk_list: (optional) list with additional disks to the VM. Each item is a dict with:
            image_id: (optional). VIM id of an existing image. If not provided an empty disk must be mounted
            size: (mandatory) string with the size of the disk in GB
            vim_id: (optional) should use this existing volume id
        availability_zone_index: Index of availability_zone_list to use for this this VM. None if not AV required
        availability_zone_list: list of availability zones given by user in the VNFD descriptor. Ignore if
            availability_zone_index is None

    Returns:
        A tuple with the instance identifier and created_items or raises an exception on error
        created_items can be None or a dictionary where this method can include key-values that will be passed to
        the method delete_vminstance and action_vminstance. Can be used to store created ports, volumes, etc.
        Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
        as not present.
    """
    self.logger.debug(
        "new_vminstance input: image='%s' flavor='%s' nics='%s'",
        image_id,
        flavor_id,
        str(net_list),
    )
    server = None
    created_items = {}
    net_list_vim = []
    # list of external networks to be connected to instance, later on used to create floating_ip
    external_network = []
    # List of ports with port-security disabled
    no_secured_ports = []
    block_device_mapping = {}
    existing_vim_volumes = []
    server_group_id = None
    scheduller_hints = {}

    try:
        # Check the Openstack Connection
        self._reload_connection()

        # Prepare network list
        self._prepare_network_for_vminstance(
            name=name,
            net_list=net_list,
            created_items=created_items,
            net_list_vim=net_list_vim,
            external_network=external_network,
            no_secured_ports=no_secured_ports,
        )

        # Cloud config
        config_drive, userdata = self._create_user_data(cloud_config)

        # Get availability Zone
        vm_av_zone = self._get_vm_availability_zone(
            availability_zone_index, availability_zone_list
        )

        if disk_list:
            # Prepare disks
            self._prepare_disk_for_vminstance(
                name=name,
                existing_vim_volumes=existing_vim_volumes,
                created_items=created_items,
                vm_av_zone=vm_av_zone,
                block_device_mapping=block_device_mapping,
                disk_list=disk_list,
            )

        if affinity_group_list:
            # Only first id on the list will be used. Openstack restriction
            server_group_id = affinity_group_list[0]["affinity_group_id"]
            scheduller_hints["group"] = server_group_id

        self.logger.debug(
            "nova.servers.create({}, {}, {}, nics={}, security_groups={}, "
            "availability_zone={}, key_name={}, userdata={}, config_drive={}, "
            "block_device_mapping={}, server_group={})".format(
                name,
                image_id,
                flavor_id,
                net_list_vim,
                self.config.get("security_groups"),
                vm_av_zone,
                self.config.get("keypair"),
                userdata,
                config_drive,
                block_device_mapping,
                server_group_id,
            )
        )

        # Create VM
        server = self.nova.servers.create(
            name=name,
            image=image_id,
            flavor=flavor_id,
            nics=net_list_vim,
            security_groups=self.config.get("security_groups"),
            # TODO remove security_groups in future versions. Already at neutron port
            availability_zone=vm_av_zone,
            key_name=self.config.get("keypair"),
            userdata=userdata,
            config_drive=config_drive,
            block_device_mapping=block_device_mapping,
            scheduler_hints=scheduller_hints,
        )

        vm_start_time = time.time()

        self._update_port_security_for_vminstance(no_secured_ports, server)

        self._prepare_external_network_for_vminstance(
            external_network=external_network,
            server=server,
            created_items=created_items,
            vm_start_time=vm_start_time,
        )

        return server.id, created_items
    except Exception as e:
        server_id = None
        if server:
            server_id = server.id

        try:
            # Rollback: strip the keep flag so volumes created for this VM
            # can actually be deleted, then remove the half-created VM.
            created_items = self.remove_keep_tag_from_persistent_volumes(
                created_items
            )

            self.delete_vminstance(server_id, created_items)
        except Exception as e2:
            self.logger.error("new_vminstance rollback fail {}".format(e2))

        self._format_exception(e)
def remove_keep_tag_from_persistent_volumes(created_items: Dict) -> Dict:
    """Removes the keep flag from persistent volumes. So, those volumes could be removed.

    Args:
        created_items (dict): All created items belongs to VM

    Returns:
        updated_created_items (dict): Dict which does not include keep flag for volumes.
    """
    # Strip the ":keep" suffix from every key; values are untouched.
    return {
        key.replace(":keep", ""): value
        for (key, value) in created_items.items()
    }
def get_vminstance(self, vm_id):
    """Returns the VM instance information from VIM"""
    return self._find_nova_server(vm_id)
def get_vminstance_console(self, vm_id, console_type="vnc"):
    """
    Get a console for the virtual machine
    Params:
        vm_id: uuid of the VM
        console_type, can be:
            "novnc" (by default), "xvpvnc" for VNC types,
            "rdp-html5" for RDP types, "spice-html5" for SPICE types
    Returns dict with the console parameters:
        protocol: ssh, ftp, http, https, ...
        server: usually ip address
        port: the http, ssh, ... port
        suffix: extra text, e.g. the http path and query string
    """
    self.logger.debug("Getting VM CONSOLE from VIM")

    try:
        self._reload_connection()
        server = self.nova.servers.find(id=vm_id)

        # Map the requested console type onto the matching nova call.
        if console_type is None or console_type == "novnc":
            console_dict = server.get_vnc_console("novnc")
        elif console_type == "xvpvnc":
            console_dict = server.get_vnc_console(console_type)
        elif console_type == "rdp-html5":
            console_dict = server.get_rdp_console(console_type)
        elif console_type == "spice-html5":
            console_dict = server.get_spice_console(console_type)
        else:
            raise vimconn.VimConnException(
                "console type '{}' not allowed".format(console_type),
                http_code=vimconn.HTTP_Bad_Request,
            )

        console_dict1 = console_dict.get("console")

        if console_dict1:
            console_url = console_dict1.get("url")

            if console_url:
                # parse console_url
                protocol_index = console_url.find("//")
                suffix_index = (
                    console_url[protocol_index + 2 :].find("/") + protocol_index + 2
                )
                port_index = (
                    console_url[protocol_index + 2 : suffix_index].find(":")
                    + protocol_index
                    + 2
                )

                if protocol_index < 0 or port_index < 0 or suffix_index < 0:
                    # legacy error-tuple return kept for backwards compatibility
                    return (
                        -vimconn.HTTP_Internal_Server_Error,
                        "Unexpected response from VIM",
                    )

                console_dict = {
                    "protocol": console_url[0:protocol_index],
                    "server": console_url[protocol_index + 2 : port_index],
                    "port": console_url[port_index:suffix_index],
                    "suffix": console_url[suffix_index + 1 :],
                }

                return console_dict
        raise vimconn.VimConnUnexpectedResponse("Unexpected response from VIM")
    except (
        nvExceptions.NotFound,
        ksExceptions.ClientException,
        nvExceptions.ClientException,
        nvExceptions.BadRequest,
        ConnectionError,
    ) as e:
        self._format_exception(e)
2985 def _delete_ports_by_id_wth_neutron(self
, k_id
: str) -> None:
2986 """Neutron delete ports by id.
2988 k_id (str): Port id in the VIM
2991 port_dict
= self
.neutron
.list_ports()
2992 existing_ports
= [port
["id"] for port
in port_dict
["ports"] if port_dict
]
2994 if k_id
in existing_ports
:
2995 self
.neutron
.delete_port(k_id
)
2997 except Exception as e
:
2998 self
.logger
.error("Error deleting port: {}: {}".format(type(e
).__name
__, e
))
def delete_shared_volumes(self, shared_volume_vim_id: str) -> bool:
    """Cinder delete volume by id.

    Args:
        shared_volume_vim_id (str): ID of shared volume in VIM
    """
    elapsed_time = 0

    try:
        # Poll until the volume is detached ("available") before deleting it.
        while elapsed_time < server_timeout:
            vol_status = self.cinder.volumes.get(shared_volume_vim_id).status

            if vol_status == "available":
                self.cinder.volumes.delete(shared_volume_vim_id)
                return True

            time.sleep(5)
            elapsed_time += 5

        if elapsed_time >= server_timeout:
            raise vimconn.VimConnException(
                "Timeout waiting for volume "
                + shared_volume_vim_id
                + " to be available",
                http_code=vimconn.HTTP_Request_Timeout,
            )
    except Exception as e:
        self.logger.error(
            "Error deleting volume: {}: {}".format(type(e).__name__, e)
        )
        self._format_exception(e)
3030 def _delete_volumes_by_id_wth_cinder(
3031 self
, k
: str, k_id
: str, volumes_to_hold
: list, created_items
: dict
3033 """Cinder delete volume by id.
3035 k (str): Full item name in created_items
3036 k_id (str): ID of floating ip in VIM
3037 volumes_to_hold (list): Volumes not to delete
3038 created_items (dict): All created items belongs to VM
3041 if k_id
in volumes_to_hold
:
3044 if self
.cinder
.volumes
.get(k_id
).status
!= "available":
3048 self
.cinder
.volumes
.delete(k_id
)
3049 created_items
[k
] = None
3051 except Exception as e
:
3053 "Error deleting volume: {}: {}".format(type(e
).__name
__, e
)
3056 def _delete_floating_ip_by_id(self
, k
: str, k_id
: str, created_items
: dict) -> None:
3057 """Neutron delete floating ip by id.
3059 k (str): Full item name in created_items
3060 k_id (str): ID of floating ip in VIM
3061 created_items (dict): All created items belongs to VM
3064 self
.neutron
.delete_floatingip(k_id
)
3065 created_items
[k
] = None
3067 except Exception as e
:
3069 "Error deleting floating ip: {}: {}".format(type(e
).__name
__, e
)
3073 def _get_item_name_id(k
: str) -> Tuple
[str, str]:
3074 k_item
, _
, k_id
= k
.partition(":")
3077 def _delete_vm_ports_attached_to_network(self
, created_items
: dict) -> None:
3078 """Delete VM ports attached to the networks before deleting virtual machine.
3080 created_items (dict): All created items belongs to VM
3083 for k
, v
in created_items
.items():
3084 if not v
: # skip already deleted
3088 k_item
, k_id
= self
._get
_item
_name
_id
(k
)
3089 if k_item
== "port":
3090 self
._delete
_ports
_by
_id
_wth
_neutron
(k_id
)
3092 except Exception as e
:
3094 "Error deleting port: {}: {}".format(type(e
).__name
__, e
)
3097 def _delete_created_items(
3098 self
, created_items
: dict, volumes_to_hold
: list, keep_waiting
: bool
3100 """Delete Volumes and floating ip if they exist in created_items."""
3101 for k
, v
in created_items
.items():
3102 if not v
: # skip already deleted
3106 k_item
, k_id
= self
._get
_item
_name
_id
(k
)
3107 if k_item
== "volume":
3108 unavailable_vol
= self
._delete
_volumes
_by
_id
_wth
_cinder
(
3109 k
, k_id
, volumes_to_hold
, created_items
3115 elif k_item
== "floating_ip":
3116 self
._delete
_floating
_ip
_by
_id
(k
, k_id
, created_items
)
3118 except Exception as e
:
3119 self
.logger
.error("Error deleting {}: {}".format(k
, e
))
3124 def _extract_items_wth_keep_flag_from_created_items(created_items
: dict) -> dict:
3125 """Remove the volumes which has key flag from created_items
3128 created_items (dict): All created items belongs to VM
3131 created_items (dict): Persistent volumes eliminated created_items
3135 for (key
, value
) in created_items
.items()
3136 if len(key
.split(":")) == 2
def delete_vminstance(
    self, vm_id: str, created_items: dict = None, volumes_to_hold: list = None
) -> None:
    """Removes a VM instance from VIM. Returns the old identifier.

    Args:
        vm_id (str): Identifier of VM instance
        created_items (dict): All created items belongs to VM
        volumes_to_hold (list): Volumes_to_hold
    """
    if created_items is None:
        created_items = {}
    if volumes_to_hold is None:
        volumes_to_hold = []

    try:
        # Volumes flagged ":keep" must survive the VM deletion.
        created_items = self._extract_items_wth_keep_flag_from_created_items(
            created_items
        )

        self._reload_connection()

        # Delete VM ports attached to the networks before the virtual machine
        if created_items:
            self._delete_vm_ports_attached_to_network(created_items)

        if vm_id:
            self.nova.servers.delete(vm_id)

        # Although having detached, volumes should have in active status before deleting.
        # We ensure in this loop
        keep_waiting = True
        elapsed_time = 0

        while keep_waiting and elapsed_time < volume_timeout:
            keep_waiting = False

            # Delete volumes and floating IP.
            keep_waiting = self._delete_created_items(
                created_items, volumes_to_hold, keep_waiting
            )

            if keep_waiting:
                time.sleep(1)
                elapsed_time += 1
    except (
        nvExceptions.NotFound,
        ksExceptions.ClientException,
        nvExceptions.ClientException,
        ConnectionError,
    ) as e:
        self._format_exception(e)
def refresh_vms_status(self, vm_list):
    """Get the status of the virtual machines and their interfaces/ports
    Params: the list of VM identifiers
    Returns a dictionary with:
        vm_id:          #VIM id of this Virtual Machine
            status:     #Mandatory. Text with one of:
                        # DELETED (not found at vim)
                        # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
                        # OTHER (Vim reported other status not understood)
                        # ERROR (VIM indicates an ERROR status)
                        # ACTIVE, PAUSED, SUSPENDED, INACTIVE (not running),
                        # CREATING (on building process), ERROR
                        # ACTIVE:NoMgmtIP (Active but any of its interface has an IP address
            error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR
            vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)
            interfaces:
             -  vim_info:         #Text with plain information obtained from vim (yaml.safe_dump)
                mac_address:      #Text format XX:XX:XX:XX:XX:XX
                vim_net_id:       #network id where this interface is connected
                vim_interface_id: #interface/port VIM id
                ip_address:       #null, or text with IPv4, IPv6 address
                compute_node:     #identification of compute node where PF,VF interface is allocated
                pci:              #PCI address of the NIC that hosts the PF,VF
                vlan:             #physical VLAN used for VF
    """
    vm_dict = {}
    self.logger.debug(
        "refresh_vms status: Getting tenant VM instance information from VIM"
    )

    for vm_id in vm_list:
        vm = {}

        try:
            vm_vim = self.get_vminstance(vm_id)

            if vm_vim["status"] in vmStatus2manoFormat:
                vm["status"] = vmStatus2manoFormat[vm_vim["status"]]
            else:
                vm["status"] = "OTHER"
                vm["error_msg"] = "VIM status reported " + vm_vim["status"]

            # user_data can be huge and is not useful for status reporting
            vm_vim.pop("OS-EXT-SRV-ATTR:user_data", None)
            vm_vim.pop("user_data", None)
            vm["vim_info"] = self.serialize(vm_vim)

            vm["interfaces"] = []
            if vm_vim.get("fault"):
                vm["error_msg"] = str(vm_vim["fault"])

            # get interfaces
            try:
                self._reload_connection()
                port_dict = self.neutron.list_ports(device_id=vm_id)

                for port in port_dict["ports"]:
                    interface = {}
                    interface["vim_info"] = self.serialize(port)
                    interface["mac_address"] = port.get("mac_address")
                    interface["vim_net_id"] = port["network_id"]
                    interface["vim_interface_id"] = port["id"]
                    # check if OS-EXT-SRV-ATTR:host is there,
                    # in case of non-admin credentials, it will be missing

                    if vm_vim.get("OS-EXT-SRV-ATTR:host"):
                        interface["compute_node"] = vm_vim["OS-EXT-SRV-ATTR:host"]

                    interface["pci"] = None

                    # check if binding:profile is there,
                    # in case of non-admin credentials, it will be missing
                    if port.get("binding:profile"):
                        if port["binding:profile"].get("pci_slot"):
                            # TODO: At the moment sr-iov pci addresses are converted to PF pci addresses by setting
                            #  the slot to 0x00
                            # TODO: This is just a workaround valid for niantinc. Find a better way to do so
                            #   CHANGE DDDD:BB:SS.F to DDDD:BB:00.(F%2) assuming there are 2 ports per nic
                            pci = port["binding:profile"]["pci_slot"]
                            # interface["pci"] = pci[:-4] + "00." + str(int(pci[-1]) % 2)
                            interface["pci"] = pci

                    interface["vlan"] = None

                    if port.get("binding:vif_details"):
                        interface["vlan"] = port["binding:vif_details"].get("vlan")

                    # Get vlan from network in case not present in port for those old openstacks and cases where
                    # it is needed vlan at PT
                    if not interface["vlan"]:
                        # if network is of type vlan and port is of type direct (sr-iov) then set vlan id
                        network = self.neutron.show_network(port["network_id"])

                        if (
                            network["network"].get("provider:network_type")
                            == "vlan"
                        ):
                            # and port.get("binding:vnic_type") in ("direct", "direct-physical"):
                            interface["vlan"] = network["network"].get(
                                "provider:segmentation_id"
                            )

                    ips = []
                    # look for floating ip address
                    try:
                        floating_ip_dict = self.neutron.list_floatingips(
                            port_id=port["id"]
                        )

                        if floating_ip_dict.get("floatingips"):
                            ips.append(
                                floating_ip_dict["floatingips"][0].get(
                                    "floating_ip_address"
                                )
                            )
                    except Exception:
                        pass

                    for subnet in port["fixed_ips"]:
                        ips.append(subnet["ip_address"])

                    interface["ip_address"] = ";".join(ips)
                    vm["interfaces"].append(interface)
            except Exception as e:
                self.logger.error(
                    "Error getting vm interface information {}: {}".format(
                        type(e).__name__, e
                    )
                )
        except vimconn.VimConnNotFoundException as e:
            self.logger.error("Exception getting vm status: %s", str(e))
            vm["status"] = "DELETED"
            vm["error_msg"] = str(e)
        except vimconn.VimConnException as e:
            self.logger.error("Exception getting vm status: %s", str(e))
            vm["status"] = "VIM_ERROR"
            vm["error_msg"] = str(e)

        vm_dict[vm_id] = vm

    return vm_dict
def action_vminstance(self, vm_id, action_dict, created_items=None):
    """Send and action over a VM instance from VIM
    Returns None or the console dict if the action was successfully sent to the VIM

    Fix: the original used a mutable default argument (created_items={}),
    which is shared across calls; replaced with None + in-body init
    (backward compatible for all callers).
    """
    if created_items is None:
        created_items = {}

    self.logger.debug("Action over VM '%s': %s", vm_id, str(action_dict))

    try:
        self._reload_connection()
        server = self.nova.servers.find(id=vm_id)

        if "start" in action_dict:
            if action_dict["start"] == "rebuild":
                server.rebuild()
            else:
                if server.status == "PAUSED":
                    server.unpause()
                elif server.status == "SUSPENDED":
                    server.resume()
                elif server.status == "SHUTOFF":
                    server.start()
                else:
                    self.logger.debug(
                        "ERROR : Instance is not in SHUTOFF/PAUSE/SUSPEND state"
                    )
                    raise vimconn.VimConnException(
                        "Cannot 'start' instance while it is in active state",
                        http_code=vimconn.HTTP_Bad_Request,
                    )
        elif "pause" in action_dict:
            server.pause()
        elif "resume" in action_dict:
            server.resume()
        elif "shutoff" in action_dict or "shutdown" in action_dict:
            self.logger.debug("server status %s", server.status)

            if server.status == "ACTIVE":
                server.stop()
            else:
                self.logger.debug("ERROR: VM is not in Active state")
                raise vimconn.VimConnException(
                    "VM is not in active state, stop operation is not allowed",
                    http_code=vimconn.HTTP_Bad_Request,
                )
        elif "forceOff" in action_dict:
            server.stop()  # TODO
        elif "terminate" in action_dict:
            server.delete()
        elif "createImage" in action_dict:
            server.create_image()
            # "path":path_schema,
            # "description":description_schema,
            # "name":name_schema,
            # "metadata":metadata_schema,
            # "imageRef": id_schema,
            # "disk": {"oneOf":[{"type": "null"}, {"type":"string"}] },
        elif "rebuild" in action_dict:
            server.rebuild(server.image["id"])
        elif "reboot" in action_dict:
            server.reboot()  # reboot_type="SOFT"
        elif "console" in action_dict:
            console_type = action_dict["console"]

            if console_type is None or console_type == "novnc":
                console_dict = server.get_vnc_console("novnc")
            elif console_type == "xvpvnc":
                console_dict = server.get_vnc_console(console_type)
            elif console_type == "rdp-html5":
                console_dict = server.get_rdp_console(console_type)
            elif console_type == "spice-html5":
                console_dict = server.get_spice_console(console_type)
            else:
                raise vimconn.VimConnException(
                    "console type '{}' not allowed".format(console_type),
                    http_code=vimconn.HTTP_Bad_Request,
                )

            try:
                console_url = console_dict["console"]["url"]
                # parse console_url
                protocol_index = console_url.find("//")
                suffix_index = (
                    console_url[protocol_index + 2 :].find("/") + protocol_index + 2
                )
                port_index = (
                    console_url[protocol_index + 2 : suffix_index].find(":")
                    + protocol_index
                    + 2
                )

                if protocol_index < 0 or port_index < 0 or suffix_index < 0:
                    raise vimconn.VimConnException(
                        "Unexpected response from VIM " + str(console_dict)
                    )

                console_dict2 = {
                    "protocol": console_url[0:protocol_index],
                    "server": console_url[protocol_index + 2 : port_index],
                    "port": int(console_url[port_index + 1 : suffix_index]),
                    "suffix": console_url[suffix_index + 1 :],
                }

                return console_dict2
            except Exception:
                raise vimconn.VimConnException(
                    "Unexpected response from VIM " + str(console_dict)
                )

        return None
    except (
        ksExceptions.ClientException,
        nvExceptions.ClientException,
        nvExceptions.NotFound,
        ConnectionError,
    ) as e:
        self._format_exception(e)
    # TODO insert exception vimconn.HTTP_Unauthorized
3452 # ###### VIO Specific Changes #########
3453 def _generate_vlanID(self
):
3455 Method to get unused vlanID
3463 networks
= self
.get_network_list()
3465 for net
in networks
:
3466 if net
.get("provider:segmentation_id"):
3467 usedVlanIDs
.append(net
.get("provider:segmentation_id"))
3469 used_vlanIDs
= set(usedVlanIDs
)
3471 # find unused VLAN ID
3472 for vlanID_range
in self
.config
.get("dataplane_net_vlan_range"):
3474 start_vlanid
, end_vlanid
= map(
3475 int, vlanID_range
.replace(" ", "").split("-")
3478 for vlanID
in range(start_vlanid
, end_vlanid
+ 1):
3479 if vlanID
not in used_vlanIDs
:
3481 except Exception as exp
:
3482 raise vimconn
.VimConnException(
3483 "Exception {} occurred while generating VLAN ID.".format(exp
)
3486 raise vimconn
.VimConnConflictException(
3487 "Unable to create the SRIOV VLAN network. All given Vlan IDs {} are in use.".format(
3488 self
.config
.get("dataplane_net_vlan_range")
3492 def _generate_multisegment_vlanID(self
):
3494 Method to get unused vlanID
3502 networks
= self
.get_network_list()
3503 for net
in networks
:
3504 if net
.get("provider:network_type") == "vlan" and net
.get(
3505 "provider:segmentation_id"
3507 usedVlanIDs
.append(net
.get("provider:segmentation_id"))
3508 elif net
.get("segments"):
3509 for segment
in net
.get("segments"):
3510 if segment
.get("provider:network_type") == "vlan" and segment
.get(
3511 "provider:segmentation_id"
3513 usedVlanIDs
.append(segment
.get("provider:segmentation_id"))
3515 used_vlanIDs
= set(usedVlanIDs
)
3517 # find unused VLAN ID
3518 for vlanID_range
in self
.config
.get("multisegment_vlan_range"):
3520 start_vlanid
, end_vlanid
= map(
3521 int, vlanID_range
.replace(" ", "").split("-")
3524 for vlanID
in range(start_vlanid
, end_vlanid
+ 1):
3525 if vlanID
not in used_vlanIDs
:
3527 except Exception as exp
:
3528 raise vimconn
.VimConnException(
3529 "Exception {} occurred while generating VLAN ID.".format(exp
)
3532 raise vimconn
.VimConnConflictException(
3533 "Unable to create the VLAN segment. All VLAN IDs {} are in use.".format(
3534 self
.config
.get("multisegment_vlan_range")
3538 def _validate_vlan_ranges(self
, input_vlan_range
, text_vlan_range
):
3540 Method to validate user given vlanID ranges
3544 for vlanID_range
in input_vlan_range
:
3545 vlan_range
= vlanID_range
.replace(" ", "")
3547 vlanID_pattern
= r
"(\d)*-(\d)*$"
3548 match_obj
= re
.match(vlanID_pattern
, vlan_range
)
3550 raise vimconn
.VimConnConflictException(
3551 "Invalid VLAN range for {}: {}.You must provide "
3552 "'{}' in format [start_ID - end_ID].".format(
3553 text_vlan_range
, vlanID_range
, text_vlan_range
3557 start_vlanid
, end_vlanid
= map(int, vlan_range
.split("-"))
3558 if start_vlanid
<= 0:
3559 raise vimconn
.VimConnConflictException(
3560 "Invalid VLAN range for {}: {}. Start ID can not be zero. For VLAN "
3561 "networks valid IDs are 1 to 4094 ".format(
3562 text_vlan_range
, vlanID_range
3566 if end_vlanid
> 4094:
3567 raise vimconn
.VimConnConflictException(
3568 "Invalid VLAN range for {}: {}. End VLAN ID can not be "
3569 "greater than 4094. For VLAN networks valid IDs are 1 to 4094 ".format(
3570 text_vlan_range
, vlanID_range
3574 if start_vlanid
> end_vlanid
:
3575 raise vimconn
.VimConnConflictException(
3576 "Invalid VLAN range for {}: {}. You must provide '{}'"
3577 " in format start_ID - end_ID and start_ID < end_ID ".format(
3578 text_vlan_range
, vlanID_range
, text_vlan_range
def get_hosts_info(self):
    """Get the information of deployed hosts.

    Returns (1, {"hosts": [...]}) on success, or a negative HTTP error code
    and an error string on failure.
    """
    if self.debug:
        print("osconnector: Getting Host info from VIM")

    try:
        self._reload_connection()
        # One plain dict per hypervisor reported by Nova.
        host_dicts = [hyp.to_dict() for hyp in self.nova.hypervisors.list()]

        return 1, {"hosts": host_dicts}
    except nvExceptions.NotFound as e:
        error_value = -vimconn.HTTP_Not_Found
        error_text = str(e) if len(e.args) == 0 else str(e.args[0])
    except (ksExceptions.ClientException, nvExceptions.ClientException) as e:
        error_value = -vimconn.HTTP_Bad_Request
        error_text = (
            type(e).__name__
            + ": "
            + (str(e) if len(e.args) == 0 else str(e.args[0]))
        )
    # TODO insert exception vimconn.HTTP_Unauthorized
    # if reaching here is because an exception
    self.logger.debug("get_hosts_info " + error_text)

    return error_value, error_text
def get_hosts(self, vim_tenant):
    """Get the hosts and deployed instances.

    Args:
        vim_tenant: tenant identifier (unused here; kept for interface
            compatibility with other VIM connectors)

    Returns:
        (1, {"hosts": [...]}) with a "vm" list of server IDs added to each
        hypervisor dict that hosts at least one server, or a negative HTTP
        error code and an error string on failure.
    """
    r, hype_dict = self.get_hosts_info()

    if r < 0:
        return r, hype_dict

    hypervisors = hype_dict["hosts"]

    try:
        # Group server IDs by hypervisor hostname in a single pass, instead
        # of calling server.to_dict() inside a nested loop for every
        # (hypervisor, server) pair.
        vms_by_host = {}

        for server in self.nova.servers.list():
            host_name = server.to_dict()["OS-EXT-SRV-ATTR:hypervisor_hostname"]
            vms_by_host.setdefault(host_name, []).append(server.id)

        for hype in hypervisors:
            vms = vms_by_host.get(hype["hypervisor_hostname"])

            if vms:
                hype.setdefault("vm", []).extend(vms)

        return 1, hype_dict
    except nvExceptions.NotFound as e:
        error_value = -vimconn.HTTP_Not_Found
        error_text = str(e) if len(e.args) == 0 else str(e.args[0])
    except (ksExceptions.ClientException, nvExceptions.ClientException) as e:
        error_value = -vimconn.HTTP_Bad_Request
        error_text = (
            type(e).__name__
            + ": "
            + (str(e) if len(e.args) == 0 else str(e.args[0]))
        )
    # TODO insert exception vimconn.HTTP_Unauthorized
    # if reaching here is because an exception
    self.logger.debug("get_hosts " + error_text)

    return error_value, error_text
def new_affinity_group(self, affinity_group_data):
    """Create a Nova server group in the VIM.

    affinity_group_data keys:
        name: server group name in the VIM
        type: "affinity" or "anti-affinity" (used directly as the Nova policy)
        scope: only nfvi-node is allowed

    Returns the identifier of the new server group.
    """
    self.logger.debug("Adding Server Group '%s'", str(affinity_group_data))

    try:
        group_name = affinity_group_data["name"]
        group_policy = affinity_group_data["type"]

        self._reload_connection()
        created = self.nova.server_groups.create(group_name, group_policy)

        return created.id
    except (
        ksExceptions.ClientException,
        nvExceptions.ClientException,
        ConnectionError,
        KeyError,
    ) as e:
        self._format_exception(e)
def get_affinity_group(self, affinity_group_id):
    """Obtain server group details from the VIM.

    Args:
        affinity_group_id: identifier of the server group in the VIM

    Returns:
        dict: the server group details
    """
    # Log message fixed: this method fetches a server group, not a flavor
    # (the old text was a copy-paste from the flavor getter).
    self.logger.debug("Getting server group '%s'", affinity_group_id)

    try:
        self._reload_connection()
        server_group = self.nova.server_groups.find(id=affinity_group_id)

        return server_group.to_dict()
    except (
        nvExceptions.NotFound,
        nvExceptions.ClientException,
        ksExceptions.ClientException,
        ConnectionError,
    ) as e:
        self._format_exception(e)
def delete_affinity_group(self, affinity_group_id):
    """Delete a server group from the VIM.

    Args:
        affinity_group_id: identifier of the server group in the VIM

    Returns:
        The deleted affinity_group_id.
    """
    # Log message fixed: this method deletes the server group (the old text
    # said "Getting server group", copy-pasted from the getter).
    self.logger.debug("Deleting server group '%s'", affinity_group_id)

    try:
        self._reload_connection()
        self.nova.server_groups.delete(affinity_group_id)

        return affinity_group_id
    except (
        nvExceptions.NotFound,
        ksExceptions.ClientException,
        nvExceptions.ClientException,
        ConnectionError,
    ) as e:
        self._format_exception(e)
def get_vdu_state(self, vm_id, host_is_required=False) -> list:
    """Get the state of a VDU.

    Args:
        vm_id (str): ID of an instance
        host_is_required (Boolean): If the VIM account is non-admin, host info
            does not appear in server_dict and if this is set to True, it
            raises KeyError.

    Returns:
        vdu_data (list): [status, flavor id, host info, availability zone]
    """
    self.logger.debug("Getting the status of VM")
    self.logger.debug("VIM VM ID %s", vm_id)

    try:
        self._reload_connection()
        details = self._find_nova_server(vm_id)

        host_key = "OS-EXT-SRV-ATTR:host"
        if host_is_required:
            # Hard lookup: KeyError for non-admin accounts without host info.
            host_info = details[host_key]
        else:
            host_info = details.get(host_key)

        vdu_data = [
            details["status"],
            details["flavor"]["id"],
            host_info,
            details["OS-EXT-AZ:availability_zone"],
        ]
        self.logger.debug("vdu_data %s", vdu_data)

        return vdu_data
    except Exception as exc:
        self._format_exception(exc)
def check_compute_availability(self, host, server_flavor_details):
    """Check whether a compute host has enough free resources for a flavor.

    Args:
        host: hypervisor hostname to check
        server_flavor_details: [ram, disk, vcpus] required by the instance

    Returns:
        host when some matching hypervisor has strictly more free RAM, disk
        and vCPUs than required; None otherwise (implicit).
    """
    self._reload_connection()
    hypervisor_search = self.nova.hypervisors.search(
        hypervisor_match=host, servers=True
    )

    for hypervisor in hypervisor_search:
        hypervisor_id = hypervisor.to_dict()["id"]
        hypervisor_details = self.nova.hypervisors.get(hypervisor=hypervisor_id)
        # to_dict() already yields a plain dict; the former
        # json.dumps/json.loads round-trip was pure overhead and was removed.
        hypervisor_dict = hypervisor_details.to_dict()
        resources_available = [
            hypervisor_dict["free_ram_mb"],
            hypervisor_dict["disk_available_least"],
            hypervisor_dict["vcpus"] - hypervisor_dict["vcpus_used"],
        ]
        # The host qualifies only if every resource strictly exceeds the demand.
        compute_available = all(
            free > needed
            for free, needed in zip(resources_available, server_flavor_details)
        )

        if compute_available:
            return host
def check_availability_zone(
    self, old_az, server_flavor_details, old_host, host=None
):
    """Check whether the instance can be (re)placed within its availability zone.

    Args:
        old_az: availability zone the instance currently runs in
        server_flavor_details: [ram, disk, vcpus] required by the instance
        old_host: hypervisor hostname the instance currently runs on
        host: optional explicit target host; when None, any other host in
            the zone with enough capacity is considered

    Returns:
        dict with keys:
            zone_check (bool): True when placement in the zone looks possible
            compute_availability: a host with enough free resources, or None
    """
    self._reload_connection()
    az_check = {"zone_check": False, "compute_availability": None}
    aggregates_list = self.nova.aggregates.list()

    for aggregate in aggregates_list:
        # json round-trip produces a plain-dict copy of the aggregate details
        aggregate_details = aggregate.to_dict()
        aggregate_temp = json.dumps(aggregate_details)
        aggregate_json = json.loads(aggregate_temp)

        if aggregate_json["availability_zone"] == old_az:
            hosts_list = aggregate_json["hosts"]

            if host is not None:
                # Explicit target: it must belong to this zone and have room.
                if host in hosts_list:
                    az_check["zone_check"] = True
                    available_compute_id = self.check_compute_availability(
                        host, server_flavor_details
                    )

                    if available_compute_id is not None:
                        az_check["compute_availability"] = available_compute_id
            else:
                # No target given: pick the first other host with capacity.
                for check_host in hosts_list:
                    if check_host != old_host:
                        available_compute_id = self.check_compute_availability(
                            check_host, server_flavor_details
                        )

                        if available_compute_id is not None:
                            az_check["zone_check"] = True
                            az_check["compute_availability"] = available_compute_id
                            break
        else:
            # NOTE(review): this sets zone_check True whenever ANY aggregate
            # belongs to a different AZ, regardless of capacity in the
            # instance's own zone — looks suspicious; confirm the intended
            # semantics before relying on it.
            az_check["zone_check"] = True

    return az_check
def migrate_instance(self, vm_id, compute_host=None):
    """Live-migrate an instance to another compute host.

    Args:
        vm_id: ID of an instance
        compute_host: Host to migrate the vdu to; when None, a suitable host
            in the same availability zone is chosen automatically

    Returns:
        (state, host) tuple on successful migration

    Raises:
        vimconn.VimConnException: same-host target, availability-zone
            mismatch, no host with enough capacity, or the instance did not
            land on the expected host
    """
    self._reload_connection()
    vm_state = False
    instance_state = self.get_vdu_state(vm_id, host_is_required=True)
    server_flavor_id = instance_state[1]
    server_hypervisor_name = instance_state[2]
    server_availability_zone = instance_state[3]

    try:
        # Resources the target host must be able to provide.
        server_flavor = self.nova.flavors.find(id=server_flavor_id).to_dict()
        server_flavor_details = [
            server_flavor["ram"],
            server_flavor["disk"],
            server_flavor["vcpus"],
        ]

        if compute_host == server_hypervisor_name:
            raise vimconn.VimConnException(
                "Unable to migrate instance '{}' to the same host '{}'".format(
                    vm_id, compute_host
                ),
                http_code=vimconn.HTTP_Bad_Request,
            )

        az_status = self.check_availability_zone(
            server_availability_zone,
            server_flavor_details,
            server_hypervisor_name,
            compute_host,
        )
        availability_zone_check = az_status["zone_check"]
        available_compute_id = az_status.get("compute_availability")

        if availability_zone_check is False:
            raise vimconn.VimConnException(
                "Unable to migrate instance '{}' to a different availability zone".format(
                    vm_id
                ),
                http_code=vimconn.HTTP_Bad_Request,
            )

        if available_compute_id is not None:
            # disk_over_commit parameter for live_migrate method is not valid for Nova API version >= 2.25
            self.nova.servers.live_migrate(
                server=vm_id,
                host=available_compute_id,
                block_migration=True,
            )

            state = "MIGRATING"
            changed_compute_host = ""

            if state == "MIGRATING":
                # Wait until the migration settles, then read the new host.
                vm_state = self.__wait_for_vm(vm_id, "ACTIVE")
                changed_compute_host = self.get_vdu_state(
                    vm_id, host_is_required=True
                )[2]

            if vm_state and changed_compute_host == available_compute_id:
                self.logger.debug(
                    "Instance '{}' migrated to the new compute host '{}'".format(
                        vm_id, changed_compute_host
                    )
                )

                return state, available_compute_id
            else:
                raise vimconn.VimConnException(
                    "Migration Failed. Instance '{}' not moved to the new host {}".format(
                        vm_id, available_compute_id
                    ),
                    http_code=vimconn.HTTP_Bad_Request,
                )
        else:
            raise vimconn.VimConnException(
                "Compute '{}' not available or does not have enough resources to migrate the instance".format(
                    available_compute_id
                ),
                http_code=vimconn.HTTP_Bad_Request,
            )
    except (
        nvExceptions.BadRequest,
        nvExceptions.ClientException,
        nvExceptions.NotFound,
    ) as e:
        self._format_exception(e)
def resize_instance(self, vm_id, new_flavor_id):
    """Resize an instance to a new flavor.

    Args:
        vm_id: ID of an instance
        new_flavor_id: Flavor id to be resized

    Returns:
        The status of the resized instance.

    Raises:
        nvExceptions.BadRequest: shrinking to a smaller disk, resize failure,
            or instance not in a resizable state (translated by
            self._format_exception)
    """
    self._reload_connection()
    self.logger.debug("resize the flavor of an instance")
    instance_status, old_flavor_id, compute_host, az = self.get_vdu_state(vm_id)
    old_flavor_disk = self.nova.flavors.find(id=old_flavor_id).to_dict()["disk"]
    new_flavor_disk = self.nova.flavors.find(id=new_flavor_id).to_dict()["disk"]

    try:
        if instance_status == "ACTIVE" or instance_status == "SHUTOFF":
            # Nova does not support resizing to a flavor with a smaller disk.
            if old_flavor_disk > new_flavor_disk:
                raise nvExceptions.BadRequest(
                    400,
                    message="Server disk resize failed. Resize to lower disk flavor is not allowed",
                )
            else:
                self.nova.servers.resize(server=vm_id, flavor=new_flavor_id)
                vm_state = self.__wait_for_vm(vm_id, "VERIFY_RESIZE")

                if vm_state:
                    instance_resized_status = self.confirm_resize(vm_id)

                    return instance_resized_status
                else:
                    raise nvExceptions.BadRequest(
                        409,
                        message="Cannot 'resize' vm_state is in ERROR",
                    )
        else:
            # NOTE(review): the message below mentions "vm_state resized" but
            # this branch is reached for any non-ACTIVE/SHUTOFF state —
            # confirm the wording is intentional.
            self.logger.debug("ERROR : Instance is not in ACTIVE or SHUTOFF state")
            raise nvExceptions.BadRequest(
                409,
                message="Cannot 'resize' instance while it is in vm_state resized",
            )
    except (
        nvExceptions.BadRequest,
        nvExceptions.ClientException,
        nvExceptions.NotFound,
    ) as e:
        self._format_exception(e)
def confirm_resize(self, vm_id):
    """Confirm a pending resize so the instance leaves VERIFY_RESIZE.

    Args:
        vm_id: ID of an instance

    Returns:
        The instance status after the confirmation.
    """
    self._reload_connection()
    self.nova.servers.confirm_resize(server=vm_id)

    # If Nova has not finished applying the confirmation yet, wait for the
    # instance to settle back into ACTIVE before reporting the final status.
    if self.get_vdu_state(vm_id)[0] == "VERIFY_RESIZE":
        self.__wait_for_vm(vm_id, "ACTIVE")

    return self.get_vdu_state(vm_id)[0]
3942 def get_monitoring_data(self
):
3944 self
.logger
.debug("Getting servers and ports data from Openstack VIMs.")
3945 self
._reload
_connection
()
3946 all_servers
= self
.nova
.servers
.list(detailed
=True)
3948 for server
in all_servers
:
3949 server
.flavor
["id"] = self
.nova
.flavors
.find(
3950 name
=server
.flavor
["original_name"]
3952 except nClient
.exceptions
.NotFound
as e
:
3953 self
.logger
.warning(str(e
.message
))
3954 all_ports
= self
.neutron
.list_ports()
3955 return all_servers
, all_ports
3957 vimconn
.VimConnException
,
3958 vimconn
.VimConnNotFoundException
,
3959 vimconn
.VimConnConnectionException
,
3961 raise vimconn
.VimConnException(
3962 f
"Exception in monitoring while getting VMs and ports status: {str(e)}"