1 # -*- coding: utf-8 -*-
4 # Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
5 # This file is part of openmano
8 # Licensed under the Apache License, Version 2.0 (the "License"); you may
9 # not use this file except in compliance with the License. You may obtain
10 # a copy of the License at
12 # http://www.apache.org/licenses/LICENSE-2.0
14 # Unless required by applicable law or agreed to in writing, software
15 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
16 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
17 # License for the specific language governing permissions and limitations
22 osconnector implements all the methods to interact with openstack using the python-neutronclient.
24 For the VNF forwarding graph, The OpenStack VIM connector calls the
25 networking-sfc Neutron extension methods, whose resources are mapped
26 to the VIM connector's SFC resources as follows:
27 - Classification (OSM) -> Flow Classifier (Neutron)
28 - Service Function Instance (OSM) -> Port Pair (Neutron)
29 - Service Function (OSM) -> Port Pair Group (Neutron)
30 - Service Function Path (OSM) -> Port Chain (Neutron)
34 from http
.client
import HTTPException
37 from pprint
import pformat
41 from typing
import Dict
, List
, Optional
, Tuple
43 from cinderclient
import client
as cClient
44 from glanceclient
import client
as glClient
45 import glanceclient
.exc
as gl1Exceptions
46 from keystoneauth1
import session
47 from keystoneauth1
.identity
import v2
, v3
48 import keystoneclient
.exceptions
as ksExceptions
49 import keystoneclient
.v2_0
.client
as ksClient_v2
50 import keystoneclient
.v3
.client
as ksClient_v3
52 from neutronclient
.common
import exceptions
as neExceptions
53 from neutronclient
.neutron
import client
as neClient
54 from novaclient
import client
as nClient
, exceptions
as nvExceptions
55 from osm_ro_plugin
import vimconn
56 from requests
.exceptions
import ConnectionError
59 __author__
= "Alfonso Tierno, Gerardo Garcia, Pablo Montes, xFlow Research, Igor D.C., Eduardo Sousa"
60 __date__
= "$22-sep-2017 23:59:59$"
62 """contain the openstack virtual machine status to openmano status"""
63 vmStatus2manoFormat
= {
66 "SUSPENDED": "SUSPENDED",
67 "SHUTOFF": "INACTIVE",
72 netStatus2manoFormat
= {
75 "INACTIVE": "INACTIVE",
# Classification (SFC flow-classifier) types this connector accepts; only the
# networking-sfc legacy flow classifier is implemented (see module docstring
# mapping: Classification (OSM) -> Flow Classifier (Neutron)).
supportedClassificationTypes = ["legacy_flow_classifier"]
83 # global var to have a timeout creating and deleting volumes
class SafeDumper(yaml.SafeDumper):
    """yaml.SafeDumper variant tolerant of the dict subclasses that the
    Openstack API clients return (reference issue 142 of pyyaml)."""

    def represent_data(self, data):
        # The stock safe dumper refuses custom subclasses of dict; downgrade
        # such objects to plain dicts before handing them to the base class.
        if isinstance(data, dict) and type(data) is not dict:
            data = dict(data.items())

        return super().represent_data(data)
99 class vimconnector(vimconn
.VimConnector
):
114 """using common constructor parameters. In this case
115 'url' is the keystone authorization url,
116 'url_admin' is not use
118 api_version
= config
.get("APIversion")
120 if api_version
and api_version
not in ("v3.3", "v2.0", "2", "3"):
121 raise vimconn
.VimConnException(
122 "Invalid value '{}' for config:APIversion. "
123 "Allowed values are 'v3.3', 'v2.0', '2' or '3'".format(api_version
)
126 vim_type
= config
.get("vim_type")
128 if vim_type
and vim_type
not in ("vio", "VIO"):
129 raise vimconn
.VimConnException(
130 "Invalid value '{}' for config:vim_type."
131 "Allowed values are 'vio' or 'VIO'".format(vim_type
)
134 if config
.get("dataplane_net_vlan_range") is not None:
135 # validate vlan ranges provided by user
136 self
._validate
_vlan
_ranges
(
137 config
.get("dataplane_net_vlan_range"), "dataplane_net_vlan_range"
140 if config
.get("multisegment_vlan_range") is not None:
141 # validate vlan ranges provided by user
142 self
._validate
_vlan
_ranges
(
143 config
.get("multisegment_vlan_range"), "multisegment_vlan_range"
146 vimconn
.VimConnector
.__init
__(
160 if self
.config
.get("insecure") and self
.config
.get("ca_cert"):
161 raise vimconn
.VimConnException(
162 "options insecure and ca_cert are mutually exclusive"
167 if self
.config
.get("insecure"):
170 if self
.config
.get("ca_cert"):
171 self
.verify
= self
.config
.get("ca_cert")
174 raise TypeError("url param can not be NoneType")
176 self
.persistent_info
= persistent_info
177 self
.availability_zone
= persistent_info
.get("availability_zone", None)
178 self
.session
= persistent_info
.get("session", {"reload_client": True})
179 self
.my_tenant_id
= self
.session
.get("my_tenant_id")
180 self
.nova
= self
.session
.get("nova")
181 self
.neutron
= self
.session
.get("neutron")
182 self
.cinder
= self
.session
.get("cinder")
183 self
.glance
= self
.session
.get("glance")
184 # self.glancev1 = self.session.get("glancev1")
185 self
.keystone
= self
.session
.get("keystone")
186 self
.api_version3
= self
.session
.get("api_version3")
187 self
.vim_type
= self
.config
.get("vim_type")
190 self
.vim_type
= self
.vim_type
.upper()
192 if self
.config
.get("use_internal_endpoint"):
193 self
.endpoint_type
= "internalURL"
195 self
.endpoint_type
= None
197 logging
.getLogger("urllib3").setLevel(logging
.WARNING
)
198 logging
.getLogger("keystoneauth").setLevel(logging
.WARNING
)
199 logging
.getLogger("novaclient").setLevel(logging
.WARNING
)
200 self
.logger
= logging
.getLogger("ro.vim.openstack")
202 # allow security_groups to be a list or a single string
203 if isinstance(self
.config
.get("security_groups"), str):
204 self
.config
["security_groups"] = [self
.config
["security_groups"]]
206 self
.security_groups_id
= None
208 # ###### VIO Specific Changes #########
209 if self
.vim_type
== "VIO":
210 self
.logger
= logging
.getLogger("ro.vim.vio")
213 self
.logger
.setLevel(getattr(logging
, log_level
))
215 def __getitem__(self
, index
):
216 """Get individuals parameters.
218 if index
== "project_domain_id":
219 return self
.config
.get("project_domain_id")
220 elif index
== "user_domain_id":
221 return self
.config
.get("user_domain_id")
223 return vimconn
.VimConnector
.__getitem
__(self
, index
)
225 def __setitem__(self
, index
, value
):
226 """Set individuals parameters and it is marked as dirty so to force connection reload.
228 if index
== "project_domain_id":
229 self
.config
["project_domain_id"] = value
230 elif index
== "user_domain_id":
231 self
.config
["user_domain_id"] = value
233 vimconn
.VimConnector
.__setitem
__(self
, index
, value
)
235 self
.session
["reload_client"] = True
237 def serialize(self
, value
):
238 """Serialization of python basic types.
240 In the case value is not serializable a message will be logged and a
241 simple representation of the data that cannot be converted back to
244 if isinstance(value
, str):
249 value
, Dumper
=SafeDumper
, default_flow_style
=True, width
=256
251 except yaml
.representer
.RepresenterError
:
253 "The following entity cannot be serialized in YAML:\n\n%s\n\n",
260 def _reload_connection(self
):
261 """Called before any operation, it check if credentials has changed
262 Throw keystoneclient.apiclient.exceptions.AuthorizationFailure
264 # TODO control the timing and possible token timeout, but it seams that python client does this task for us :-)
265 if self
.session
["reload_client"]:
266 if self
.config
.get("APIversion"):
267 self
.api_version3
= (
268 self
.config
["APIversion"] == "v3.3"
269 or self
.config
["APIversion"] == "3"
271 else: # get from ending auth_url that end with v3 or with v2.0
272 self
.api_version3
= self
.url
.endswith("/v3") or self
.url
.endswith(
276 self
.session
["api_version3"] = self
.api_version3
278 if self
.api_version3
:
279 if self
.config
.get("project_domain_id") or self
.config
.get(
280 "project_domain_name"
282 project_domain_id_default
= None
284 project_domain_id_default
= "default"
286 if self
.config
.get("user_domain_id") or self
.config
.get(
289 user_domain_id_default
= None
291 user_domain_id_default
= "default"
295 password
=self
.passwd
,
296 project_name
=self
.tenant_name
,
297 project_id
=self
.tenant_id
,
298 project_domain_id
=self
.config
.get(
299 "project_domain_id", project_domain_id_default
301 user_domain_id
=self
.config
.get(
302 "user_domain_id", user_domain_id_default
304 project_domain_name
=self
.config
.get("project_domain_name"),
305 user_domain_name
=self
.config
.get("user_domain_name"),
311 password
=self
.passwd
,
312 tenant_name
=self
.tenant_name
,
313 tenant_id
=self
.tenant_id
,
316 sess
= session
.Session(auth
=auth
, verify
=self
.verify
)
317 # addedd region_name to keystone, nova, neutron and cinder to support distributed cloud for Wind River
318 # Titanium cloud and StarlingX
319 region_name
= self
.config
.get("region_name")
321 if self
.api_version3
:
322 self
.keystone
= ksClient_v3
.Client(
324 endpoint_type
=self
.endpoint_type
,
325 region_name
=region_name
,
328 self
.keystone
= ksClient_v2
.Client(
329 session
=sess
, endpoint_type
=self
.endpoint_type
332 self
.session
["keystone"] = self
.keystone
333 # In order to enable microversion functionality an explicit microversion must be specified in "config".
334 # This implementation approach is due to the warning message in
335 # https://developer.openstack.org/api-guide/compute/microversions.html
336 # where it is stated that microversion backwards compatibility is not guaranteed and clients should
337 # always require an specific microversion.
338 # To be able to use "device role tagging" functionality define "microversion: 2.32" in datacenter config
339 version
= self
.config
.get("microversion")
344 # addedd region_name to keystone, nova, neutron and cinder to support distributed cloud for Wind River
345 # Titanium cloud and StarlingX
346 self
.nova
= self
.session
["nova"] = nClient
.Client(
349 endpoint_type
=self
.endpoint_type
,
350 region_name
=region_name
,
352 self
.neutron
= self
.session
["neutron"] = neClient
.Client(
355 endpoint_type
=self
.endpoint_type
,
356 region_name
=region_name
,
359 if sess
.get_all_version_data(service_type
="volumev2"):
360 self
.cinder
= self
.session
["cinder"] = cClient
.Client(
363 endpoint_type
=self
.endpoint_type
,
364 region_name
=region_name
,
367 self
.cinder
= self
.session
["cinder"] = cClient
.Client(
370 endpoint_type
=self
.endpoint_type
,
371 region_name
=region_name
,
375 self
.my_tenant_id
= self
.session
["my_tenant_id"] = sess
.get_project_id()
377 self
.logger
.error("Cannot get project_id from session", exc_info
=True)
379 if self
.endpoint_type
== "internalURL":
380 glance_service_id
= self
.keystone
.services
.list(name
="glance")[0].id
381 glance_endpoint
= self
.keystone
.endpoints
.list(
382 glance_service_id
, interface
="internal"
385 glance_endpoint
= None
387 self
.glance
= self
.session
["glance"] = glClient
.Client(
388 2, session
=sess
, endpoint
=glance_endpoint
390 # using version 1 of glance client in new_image()
391 # self.glancev1 = self.session["glancev1"] = glClient.Client("1", session=sess,
392 # endpoint=glance_endpoint)
393 self
.session
["reload_client"] = False
394 self
.persistent_info
["session"] = self
.session
395 # add availablity zone info inside self.persistent_info
396 self
._set
_availablity
_zones
()
397 self
.persistent_info
["availability_zone"] = self
.availability_zone
398 # force to get again security_groups_ids next time they are needed
399 self
.security_groups_id
= None
401 def __net_os2mano(self
, net_list_dict
):
402 """Transform the net openstack format to mano format
403 net_list_dict can be a list of dict or a single dict"""
404 if type(net_list_dict
) is dict:
405 net_list_
= (net_list_dict
,)
406 elif type(net_list_dict
) is list:
407 net_list_
= net_list_dict
409 raise TypeError("param net_list_dict must be a list or a dictionary")
410 for net
in net_list_
:
411 if net
.get("provider:network_type") == "vlan":
414 net
["type"] = "bridge"
416 def __classification_os2mano(self
, class_list_dict
):
417 """Transform the openstack format (Flow Classifier) to mano format
418 (Classification) class_list_dict can be a list of dict or a single dict
420 if isinstance(class_list_dict
, dict):
421 class_list_
= [class_list_dict
]
422 elif isinstance(class_list_dict
, list):
423 class_list_
= class_list_dict
425 raise TypeError("param class_list_dict must be a list or a dictionary")
426 for classification
in class_list_
:
427 id = classification
.pop("id")
428 name
= classification
.pop("name")
429 description
= classification
.pop("description")
430 project_id
= classification
.pop("project_id")
431 tenant_id
= classification
.pop("tenant_id")
432 original_classification
= copy
.deepcopy(classification
)
433 classification
.clear()
434 classification
["ctype"] = "legacy_flow_classifier"
435 classification
["definition"] = original_classification
436 classification
["id"] = id
437 classification
["name"] = name
438 classification
["description"] = description
439 classification
["project_id"] = project_id
440 classification
["tenant_id"] = tenant_id
442 def __sfi_os2mano(self
, sfi_list_dict
):
443 """Transform the openstack format (Port Pair) to mano format (SFI)
444 sfi_list_dict can be a list of dict or a single dict
446 if isinstance(sfi_list_dict
, dict):
447 sfi_list_
= [sfi_list_dict
]
448 elif isinstance(sfi_list_dict
, list):
449 sfi_list_
= sfi_list_dict
451 raise TypeError("param sfi_list_dict must be a list or a dictionary")
453 for sfi
in sfi_list_
:
454 sfi
["ingress_ports"] = []
455 sfi
["egress_ports"] = []
457 if sfi
.get("ingress"):
458 sfi
["ingress_ports"].append(sfi
["ingress"])
460 if sfi
.get("egress"):
461 sfi
["egress_ports"].append(sfi
["egress"])
465 params
= sfi
.get("service_function_parameters")
469 correlation
= params
.get("correlation")
474 sfi
["sfc_encap"] = sfc_encap
475 del sfi
["service_function_parameters"]
477 def __sf_os2mano(self
, sf_list_dict
):
478 """Transform the openstack format (Port Pair Group) to mano format (SF)
479 sf_list_dict can be a list of dict or a single dict
481 if isinstance(sf_list_dict
, dict):
482 sf_list_
= [sf_list_dict
]
483 elif isinstance(sf_list_dict
, list):
484 sf_list_
= sf_list_dict
486 raise TypeError("param sf_list_dict must be a list or a dictionary")
489 del sf
["port_pair_group_parameters"]
490 sf
["sfis"] = sf
["port_pairs"]
493 def __sfp_os2mano(self
, sfp_list_dict
):
494 """Transform the openstack format (Port Chain) to mano format (SFP)
495 sfp_list_dict can be a list of dict or a single dict
497 if isinstance(sfp_list_dict
, dict):
498 sfp_list_
= [sfp_list_dict
]
499 elif isinstance(sfp_list_dict
, list):
500 sfp_list_
= sfp_list_dict
502 raise TypeError("param sfp_list_dict must be a list or a dictionary")
504 for sfp
in sfp_list_
:
505 params
= sfp
.pop("chain_parameters")
509 correlation
= params
.get("correlation")
514 sfp
["sfc_encap"] = sfc_encap
515 sfp
["spi"] = sfp
.pop("chain_id")
516 sfp
["classifications"] = sfp
.pop("flow_classifiers")
517 sfp
["service_functions"] = sfp
.pop("port_pair_groups")
519 # placeholder for now; read TODO note below
520 def _validate_classification(self
, type, definition
):
521 # only legacy_flow_classifier Type is supported at this point
523 # TODO(igordcard): this method should be an abstract method of an
524 # abstract Classification class to be implemented by the specific
525 # Types. Also, abstract vimconnector should call the validation
526 # method before the implemented VIM connectors are called.
528 def _format_exception(self
, exception
):
529 """Transform a keystone, nova, neutron exception into a vimconn exception discovering the cause"""
530 message_error
= str(exception
)
536 neExceptions
.NetworkNotFoundClient
,
537 nvExceptions
.NotFound
,
538 ksExceptions
.NotFound
,
539 gl1Exceptions
.HTTPNotFound
,
542 raise vimconn
.VimConnNotFoundException(
543 type(exception
).__name
__ + ": " + message_error
549 gl1Exceptions
.HTTPException
,
550 gl1Exceptions
.CommunicationError
,
552 ksExceptions
.ConnectionError
,
553 neExceptions
.ConnectionFailed
,
556 if type(exception
).__name
__ == "SSLError":
557 tip
= " (maybe option 'insecure' must be added to the VIM)"
559 raise vimconn
.VimConnConnectionException(
560 "Invalid URL or credentials{}: {}".format(tip
, message_error
)
566 nvExceptions
.BadRequest
,
567 ksExceptions
.BadRequest
,
570 raise vimconn
.VimConnException(
571 type(exception
).__name
__ + ": " + message_error
576 nvExceptions
.ClientException
,
577 ksExceptions
.ClientException
,
578 neExceptions
.NeutronException
,
581 raise vimconn
.VimConnUnexpectedResponse(
582 type(exception
).__name
__ + ": " + message_error
584 elif isinstance(exception
, nvExceptions
.Conflict
):
585 raise vimconn
.VimConnConflictException(
586 type(exception
).__name
__ + ": " + message_error
588 elif isinstance(exception
, vimconn
.VimConnException
):
591 self
.logger
.error("General Exception " + message_error
, exc_info
=True)
593 raise vimconn
.VimConnConnectionException(
594 type(exception
).__name
__ + ": " + message_error
597 def _get_ids_from_name(self
):
599 Obtain ids from name of tenant and security_groups. Store at self .security_groups_id"
602 # get tenant_id if only tenant_name is supplied
603 self
._reload
_connection
()
605 if not self
.my_tenant_id
:
606 raise vimconn
.VimConnConnectionException(
607 "Error getting tenant information from name={} id={}".format(
608 self
.tenant_name
, self
.tenant_id
612 if self
.config
.get("security_groups") and not self
.security_groups_id
:
613 # convert from name to id
614 neutron_sg_list
= self
.neutron
.list_security_groups(
615 tenant_id
=self
.my_tenant_id
618 self
.security_groups_id
= []
619 for sg
in self
.config
.get("security_groups"):
620 for neutron_sg
in neutron_sg_list
:
621 if sg
in (neutron_sg
["id"], neutron_sg
["name"]):
622 self
.security_groups_id
.append(neutron_sg
["id"])
625 self
.security_groups_id
= None
627 raise vimconn
.VimConnConnectionException(
628 "Not found security group {} for this tenant".format(sg
)
631 def _find_nova_server(self
, vm_id
):
633 Returns the VM instance from Openstack and completes it with flavor ID
634 Do not call nova.servers.find directly, as it does not return flavor ID with microversion>=2.47
637 self
._reload
_connection
()
638 server
= self
.nova
.servers
.find(id=vm_id
)
639 # TODO parse input and translate to VIM format (openmano_schemas.new_vminstance_response_schema)
640 server_dict
= server
.to_dict()
642 server_dict
["flavor"]["id"] = self
.nova
.flavors
.find(
643 name
=server_dict
["flavor"]["original_name"]
645 except nClient
.exceptions
.NotFound
as e
:
646 self
.logger
.warning(str(e
.message
))
649 ksExceptions
.ClientException
,
650 nvExceptions
.ClientException
,
651 nvExceptions
.NotFound
,
654 self
._format
_exception
(e
)
656 def check_vim_connectivity(self
):
657 # just get network list to check connectivity and credentials
658 self
.get_network_list(filter_dict
={})
660 def get_tenant_list(self
, filter_dict
={}):
661 """Obtain tenants of VIM
662 filter_dict can contain the following keys:
663 name: filter by tenant name
664 id: filter by tenant uuid/id
666 Returns the tenant list of dictionaries: [{'name':'<name>, 'id':'<id>, ...}, ...]
668 self
.logger
.debug("Getting tenants from VIM filter: '%s'", str(filter_dict
))
671 self
._reload
_connection
()
673 if self
.api_version3
:
674 project_class_list
= self
.keystone
.projects
.list(
675 name
=filter_dict
.get("name")
678 project_class_list
= self
.keystone
.tenants
.findall(**filter_dict
)
682 for project
in project_class_list
:
683 if filter_dict
.get("id") and filter_dict
["id"] != project
.id:
686 project_list
.append(project
.to_dict())
690 ksExceptions
.ConnectionError
,
691 ksExceptions
.ClientException
,
694 self
._format
_exception
(e
)
696 def new_tenant(self
, tenant_name
, tenant_description
):
697 """Adds a new tenant to openstack VIM. Returns the tenant identifier"""
698 self
.logger
.debug("Adding a new tenant name: %s", tenant_name
)
701 self
._reload
_connection
()
703 if self
.api_version3
:
704 project
= self
.keystone
.projects
.create(
706 self
.config
.get("project_domain_id", "default"),
707 description
=tenant_description
,
711 project
= self
.keystone
.tenants
.create(tenant_name
, tenant_description
)
715 ksExceptions
.ConnectionError
,
716 ksExceptions
.ClientException
,
717 ksExceptions
.BadRequest
,
720 self
._format
_exception
(e
)
722 def delete_tenant(self
, tenant_id
):
723 """Delete a tenant from openstack VIM. Returns the old tenant identifier"""
724 self
.logger
.debug("Deleting tenant %s from VIM", tenant_id
)
727 self
._reload
_connection
()
729 if self
.api_version3
:
730 self
.keystone
.projects
.delete(tenant_id
)
732 self
.keystone
.tenants
.delete(tenant_id
)
736 ksExceptions
.ConnectionError
,
737 ksExceptions
.ClientException
,
738 ksExceptions
.NotFound
,
741 self
._format
_exception
(e
)
749 provider_network_profile
=None,
751 """Adds a tenant network to VIM
753 'net_name': name of the network
755 'bridge': overlay isolated network
756 'data': underlay E-LAN network for Passthrough and SRIOV interfaces
757 'ptp': underlay E-LINE network for Passthrough and SRIOV interfaces.
758 'ip_profile': is a dict containing the IP parameters of the network
759 'ip_version': can be "IPv4" or "IPv6" (Currently only IPv4 is implemented)
760 'subnet_address': ip_prefix_schema, that is X.X.X.X/Y
761 'gateway_address': (Optional) ip_schema, that is X.X.X.X
762 'dns_address': (Optional) comma separated list of ip_schema, e.g. X.X.X.X[,X,X,X,X]
763 'dhcp_enabled': True or False
764 'dhcp_start_address': ip_schema, first IP to grant
765 'dhcp_count': number of IPs to grant.
766 'shared': if this network can be seen/use by other tenants/organization
767 'provider_network_profile': (optional) contains {segmentation-id: vlan, network-type: vlan|vxlan,
768 physical-network: physnet-label}
769 Returns a tuple with the network identifier and created_items, or raises an exception on error
770 created_items can be None or a dictionary where this method can include key-values that will be passed to
771 the method delete_network. Can be used to store created segments, created l2gw connections, etc.
772 Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
776 "Adding a new network to VIM name '%s', type '%s'", net_name
, net_type
778 # self.logger.debug(">>>>>>>>>>>>>>>>>> IP profile %s", str(ip_profile))
783 if provider_network_profile
:
784 vlan
= provider_network_profile
.get("segmentation-id")
788 self
._reload
_connection
()
789 network_dict
= {"name": net_name
, "admin_state_up": True}
791 if net_type
in ("data", "ptp") or provider_network_profile
:
792 provider_physical_network
= None
794 if provider_network_profile
and provider_network_profile
.get(
797 provider_physical_network
= provider_network_profile
.get(
801 # provider-network must be one of the dataplane_physcial_netowrk if this is a list. If it is string
802 # or not declared, just ignore the checking
805 self
.config
.get("dataplane_physical_net"), (tuple, list)
807 and provider_physical_network
808 not in self
.config
["dataplane_physical_net"]
810 raise vimconn
.VimConnConflictException(
811 "Invalid parameter 'provider-network:physical-network' "
812 "for network creation. '{}' is not one of the declared "
813 "list at VIM_config:dataplane_physical_net".format(
814 provider_physical_network
818 # use the default dataplane_physical_net
819 if not provider_physical_network
:
820 provider_physical_network
= self
.config
.get(
821 "dataplane_physical_net"
824 # if it is non empty list, use the first value. If it is a string use the value directly
826 isinstance(provider_physical_network
, (tuple, list))
827 and provider_physical_network
829 provider_physical_network
= provider_physical_network
[0]
831 if not provider_physical_network
:
832 raise vimconn
.VimConnConflictException(
833 "missing information needed for underlay networks. Provide "
834 "'dataplane_physical_net' configuration at VIM or use the NS "
835 "instantiation parameter 'provider-network.physical-network'"
839 if not self
.config
.get("multisegment_support"):
841 "provider:physical_network"
842 ] = provider_physical_network
845 provider_network_profile
846 and "network-type" in provider_network_profile
849 "provider:network_type"
850 ] = provider_network_profile
["network-type"]
852 network_dict
["provider:network_type"] = self
.config
.get(
853 "dataplane_network_type", "vlan"
857 network_dict
["provider:segmentation_id"] = vlan
862 "provider:physical_network": "",
863 "provider:network_type": "vxlan",
865 segment_list
.append(segment1_dict
)
867 "provider:physical_network": provider_physical_network
,
868 "provider:network_type": "vlan",
872 segment2_dict
["provider:segmentation_id"] = vlan
873 elif self
.config
.get("multisegment_vlan_range"):
874 vlanID
= self
._generate
_multisegment
_vlanID
()
875 segment2_dict
["provider:segmentation_id"] = vlanID
878 # raise vimconn.VimConnConflictException(
879 # "You must provide "multisegment_vlan_range" at config dict before creating a multisegment
881 segment_list
.append(segment2_dict
)
882 network_dict
["segments"] = segment_list
884 # VIO Specific Changes. It needs a concrete VLAN
885 if self
.vim_type
== "VIO" and vlan
is None:
886 if self
.config
.get("dataplane_net_vlan_range") is None:
887 raise vimconn
.VimConnConflictException(
888 "You must provide 'dataplane_net_vlan_range' in format "
889 "[start_ID - end_ID] at VIM_config for creating underlay "
893 network_dict
["provider:segmentation_id"] = self
._generate
_vlanID
()
895 network_dict
["shared"] = shared
897 if self
.config
.get("disable_network_port_security"):
898 network_dict
["port_security_enabled"] = False
900 if self
.config
.get("neutron_availability_zone_hints"):
901 hints
= self
.config
.get("neutron_availability_zone_hints")
903 if isinstance(hints
, str):
906 network_dict
["availability_zone_hints"] = hints
908 new_net
= self
.neutron
.create_network({"network": network_dict
})
910 # create subnetwork, even if there is no profile
915 if not ip_profile
.get("subnet_address"):
916 # Fake subnet is required
917 subnet_rand
= random
.SystemRandom().randint(0, 255)
918 ip_profile
["subnet_address"] = "192.168.{}.0/24".format(subnet_rand
)
920 if "ip_version" not in ip_profile
:
921 ip_profile
["ip_version"] = "IPv4"
924 "name": net_name
+ "-subnet",
925 "network_id": new_net
["network"]["id"],
926 "ip_version": 4 if ip_profile
["ip_version"] == "IPv4" else 6,
927 "cidr": ip_profile
["subnet_address"],
930 # Gateway should be set to None if not needed. Otherwise openstack assigns one by default
931 if ip_profile
.get("gateway_address"):
932 subnet
["gateway_ip"] = ip_profile
["gateway_address"]
934 subnet
["gateway_ip"] = None
936 if ip_profile
.get("dns_address"):
937 subnet
["dns_nameservers"] = ip_profile
["dns_address"].split(";")
939 if "dhcp_enabled" in ip_profile
:
940 subnet
["enable_dhcp"] = (
942 if ip_profile
["dhcp_enabled"] == "false"
943 or ip_profile
["dhcp_enabled"] is False
947 if ip_profile
.get("dhcp_start_address"):
948 subnet
["allocation_pools"] = []
949 subnet
["allocation_pools"].append(dict())
950 subnet
["allocation_pools"][0]["start"] = ip_profile
[
954 if ip_profile
.get("dhcp_count"):
955 # parts = ip_profile["dhcp_start_address"].split(".")
956 # ip_int = (int(parts[0]) << 24) + (int(parts[1]) << 16) + (int(parts[2]) << 8) + int(parts[3])
957 ip_int
= int(netaddr
.IPAddress(ip_profile
["dhcp_start_address"]))
958 ip_int
+= ip_profile
["dhcp_count"] - 1
959 ip_str
= str(netaddr
.IPAddress(ip_int
))
960 subnet
["allocation_pools"][0]["end"] = ip_str
963 ip_profile
.get("ipv6_address_mode")
964 and ip_profile
["ip_version"] != "IPv4"
966 subnet
["ipv6_address_mode"] = ip_profile
["ipv6_address_mode"]
967 # ipv6_ra_mode can be set to the same value for most use cases, see documentation:
968 # https://docs.openstack.org/neutron/latest/admin/config-ipv6.html#ipv6-ra-mode-and-ipv6-address-mode-combinations
969 subnet
["ipv6_ra_mode"] = ip_profile
["ipv6_address_mode"]
971 # self.logger.debug(">>>>>>>>>>>>>>>>>> Subnet: %s", str(subnet))
972 self
.neutron
.create_subnet({"subnet": subnet
})
974 if net_type
== "data" and self
.config
.get("multisegment_support"):
975 if self
.config
.get("l2gw_support"):
976 l2gw_list
= self
.neutron
.list_l2_gateways().get("l2_gateways", ())
977 for l2gw
in l2gw_list
:
979 "l2_gateway_id": l2gw
["id"],
980 "network_id": new_net
["network"]["id"],
981 "segmentation_id": str(vlanID
),
983 new_l2gw_conn
= self
.neutron
.create_l2_gateway_connection(
984 {"l2_gateway_connection": l2gw_conn
}
988 + str(new_l2gw_conn
["l2_gateway_connection"]["id"])
991 return new_net
["network"]["id"], created_items
992 except Exception as e
:
993 # delete l2gw connections (if any) before deleting the network
994 for k
, v
in created_items
.items():
995 if not v
: # skip already deleted
999 k_item
, _
, k_id
= k
.partition(":")
1001 if k_item
== "l2gwconn":
1002 self
.neutron
.delete_l2_gateway_connection(k_id
)
1003 except Exception as e2
:
1005 "Error deleting l2 gateway connection: {}: {}".format(
1006 type(e2
).__name
__, e2
1011 self
.neutron
.delete_network(new_net
["network"]["id"])
1013 self
._format
_exception
(e
)
1015 def get_network_list(self
, filter_dict
={}):
1016 """Obtain tenant networks of VIM
1022 admin_state_up: boolean
1024 Returns the network list of dictionaries
1026 self
.logger
.debug("Getting network from VIM filter: '%s'", str(filter_dict
))
1029 self
._reload
_connection
()
1030 filter_dict_os
= filter_dict
.copy()
1032 if self
.api_version3
and "tenant_id" in filter_dict_os
:
1034 filter_dict_os
["project_id"] = filter_dict_os
.pop("tenant_id")
1036 net_dict
= self
.neutron
.list_networks(**filter_dict_os
)
1037 net_list
= net_dict
["networks"]
1038 self
.__net
_os
2mano
(net_list
)
1042 neExceptions
.ConnectionFailed
,
1043 ksExceptions
.ClientException
,
1044 neExceptions
.NeutronException
,
1047 self
._format
_exception
(e
)
1049 def get_network(self
, net_id
):
1050 """Obtain details of network from VIM
1051 Returns the network information from a network id"""
1052 self
.logger
.debug(" Getting tenant network %s from VIM", net_id
)
1053 filter_dict
= {"id": net_id
}
1054 net_list
= self
.get_network_list(filter_dict
)
1056 if len(net_list
) == 0:
1057 raise vimconn
.VimConnNotFoundException(
1058 "Network '{}' not found".format(net_id
)
1060 elif len(net_list
) > 1:
1061 raise vimconn
.VimConnConflictException(
1062 "Found more than one network with this criteria"
1067 for subnet_id
in net
.get("subnets", ()):
1069 subnet
= self
.neutron
.show_subnet(subnet_id
)
1070 except Exception as e
:
1072 "osconnector.get_network(): Error getting subnet %s %s"
1075 subnet
= {"id": subnet_id
, "fault": str(e
)}
1077 subnets
.append(subnet
)
1079 net
["subnets"] = subnets
1080 net
["encapsulation"] = net
.get("provider:network_type")
1081 net
["encapsulation_type"] = net
.get("provider:network_type")
1082 net
["segmentation_id"] = net
.get("provider:segmentation_id")
1083 net
["encapsulation_id"] = net
.get("provider:segmentation_id")
1087 def delete_network(self
, net_id
, created_items
=None):
1089 Removes a tenant network from VIM and its associated elements
1090 :param net_id: VIM identifier of the network, provided by method new_network
1091 :param created_items: dictionary with extra items to be deleted. provided by method new_network
1092 Returns the network identifier or raises an exception upon error or when network is not found
1094 self
.logger
.debug("Deleting network '%s' from VIM", net_id
)
1096 if created_items
is None:
1100 self
._reload
_connection
()
1101 # delete l2gw connections (if any) before deleting the network
1102 for k
, v
in created_items
.items():
1103 if not v
: # skip already deleted
1107 k_item
, _
, k_id
= k
.partition(":")
1108 if k_item
== "l2gwconn":
1109 self
.neutron
.delete_l2_gateway_connection(k_id
)
1110 except Exception as e
:
1112 "Error deleting l2 gateway connection: {}: {}".format(
1117 # delete VM ports attached to this networks before the network
1118 ports
= self
.neutron
.list_ports(network_id
=net_id
)
1119 for p
in ports
["ports"]:
1121 self
.neutron
.delete_port(p
["id"])
1122 except Exception as e
:
1123 self
.logger
.error("Error deleting port %s: %s", p
["id"], str(e
))
1125 self
.neutron
.delete_network(net_id
)
1129 neExceptions
.ConnectionFailed
,
1130 neExceptions
.NetworkNotFoundClient
,
1131 neExceptions
.NeutronException
,
1132 ksExceptions
.ClientException
,
1133 neExceptions
.NeutronException
,
1136 self
._format
_exception
(e
)
1138 def refresh_nets_status(self
, net_list
):
1139 """Get the status of the networks
1140 Params: the list of network identifiers
1141 Returns a dictionary with:
1142 net_id: #VIM id of this network
1143 status: #Mandatory. Text with one of:
1144 # DELETED (not found at vim)
1145 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
1146 # OTHER (Vim reported other status not understood)
1147 # ERROR (VIM indicates an ERROR status)
1148 # ACTIVE, INACTIVE, DOWN (admin down),
1149 # BUILD (on building process)
1151 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
1152 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
1156 for net_id
in net_list
:
1160 net_vim
= self
.get_network(net_id
)
1162 if net_vim
["status"] in netStatus2manoFormat
:
1163 net
["status"] = netStatus2manoFormat
[net_vim
["status"]]
1165 net
["status"] = "OTHER"
1166 net
["error_msg"] = "VIM status reported " + net_vim
["status"]
1168 if net
["status"] == "ACTIVE" and not net_vim
["admin_state_up"]:
1169 net
["status"] = "DOWN"
1171 net
["vim_info"] = self
.serialize(net_vim
)
1173 if net_vim
.get("fault"): # TODO
1174 net
["error_msg"] = str(net_vim
["fault"])
1175 except vimconn
.VimConnNotFoundException
as e
:
1176 self
.logger
.error("Exception getting net status: %s", str(e
))
1177 net
["status"] = "DELETED"
1178 net
["error_msg"] = str(e
)
1179 except vimconn
.VimConnException
as e
:
1180 self
.logger
.error("Exception getting net status: %s", str(e
))
1181 net
["status"] = "VIM_ERROR"
1182 net
["error_msg"] = str(e
)
1183 net_dict
[net_id
] = net
1186 def get_flavor(self
, flavor_id
):
1187 """Obtain flavor details from the VIM. Returns the flavor dict details"""
1188 self
.logger
.debug("Getting flavor '%s'", flavor_id
)
1191 self
._reload
_connection
()
1192 flavor
= self
.nova
.flavors
.find(id=flavor_id
)
1193 # TODO parse input and translate to VIM format (openmano_schemas.new_vminstance_response_schema)
1195 return flavor
.to_dict()
1197 nvExceptions
.NotFound
,
1198 nvExceptions
.ClientException
,
1199 ksExceptions
.ClientException
,
1202 self
._format
_exception
(e
)
1204 def get_flavor_id_from_data(self
, flavor_dict
):
1205 """Obtain flavor id that match the flavor description
1206 Returns the flavor_id or raises a vimconnNotFoundException
1207 flavor_dict: contains the required ram, vcpus, disk
1208 If 'use_existing_flavors' is set to True at config, the closer flavor that provides same or more ram, vcpus
1209 and disk is returned. Otherwise a flavor with exactly same ram, vcpus and disk is returned or a
1210 vimconnNotFoundException is raised
1212 exact_match
= False if self
.config
.get("use_existing_flavors") else True
1215 self
._reload
_connection
()
1216 flavor_candidate_id
= None
1217 flavor_candidate_data
= (10000, 10000, 10000)
1220 flavor_dict
["vcpus"],
1221 flavor_dict
["disk"],
1222 flavor_dict
.get("ephemeral", 0),
1223 flavor_dict
.get("swap", 0),
1226 extended
= flavor_dict
.get("extended", {})
1229 raise vimconn
.VimConnNotFoundException(
1230 "Flavor with EPA still not implemented"
1232 # if len(numas) > 1:
1233 # raise vimconn.VimConnNotFoundException("Cannot find any flavor with more than one numa")
1235 # numas = extended.get("numas")
1236 for flavor
in self
.nova
.flavors
.list():
1237 epa
= flavor
.get_keys()
1248 flavor
.swap
if isinstance(flavor
.swap
, int) else 0,
1250 if flavor_data
== flavor_target
:
1254 and flavor_target
< flavor_data
< flavor_candidate_data
1256 flavor_candidate_id
= flavor
.id
1257 flavor_candidate_data
= flavor_data
1259 if not exact_match
and flavor_candidate_id
:
1260 return flavor_candidate_id
1262 raise vimconn
.VimConnNotFoundException(
1263 "Cannot find any flavor matching '{}'".format(flavor_dict
)
1266 nvExceptions
.NotFound
,
1267 nvExceptions
.ClientException
,
1268 ksExceptions
.ClientException
,
1271 self
._format
_exception
(e
)
1274 def process_resource_quota(quota
: dict, prefix
: str, extra_specs
: dict) -> None:
1275 """Process resource quota and fill up extra_specs.
1277 quota (dict): Keeping the quota of resurces
1279 extra_specs (dict) Dict to be filled to be used during flavor creation
1282 if "limit" in quota
:
1283 extra_specs
["quota:" + prefix
+ "_limit"] = quota
["limit"]
1285 if "reserve" in quota
:
1286 extra_specs
["quota:" + prefix
+ "_reservation"] = quota
["reserve"]
1288 if "shares" in quota
:
1289 extra_specs
["quota:" + prefix
+ "_shares_level"] = "custom"
1290 extra_specs
["quota:" + prefix
+ "_shares_share"] = quota
["shares"]
1293 def process_numa_memory(
1294 numa
: dict, node_id
: Optional
[int], extra_specs
: dict
1296 """Set the memory in extra_specs.
1298 numa (dict): A dictionary which includes numa information
1299 node_id (int): ID of numa node
1300 extra_specs (dict): To be filled.
1303 if not numa
.get("memory"):
1305 memory_mb
= numa
["memory"] * 1024
1306 memory
= "hw:numa_mem.{}".format(node_id
)
1307 extra_specs
[memory
] = int(memory_mb
)
1310 def process_numa_vcpu(numa
: dict, node_id
: int, extra_specs
: dict) -> None:
1311 """Set the cpu in extra_specs.
1313 numa (dict): A dictionary which includes numa information
1314 node_id (int): ID of numa node
1315 extra_specs (dict): To be filled.
1318 if not numa
.get("vcpu"):
1321 cpu
= "hw:numa_cpus.{}".format(node_id
)
1322 vcpu
= ",".join(map(str, vcpu
))
1323 extra_specs
[cpu
] = vcpu
1326 def process_numa_paired_threads(numa
: dict, extra_specs
: dict) -> Optional
[int]:
1327 """Fill up extra_specs if numa has paired-threads.
1329 numa (dict): A dictionary which includes numa information
1330 extra_specs (dict): To be filled.
1333 threads (int) Number of virtual cpus
1336 if not numa
.get("paired-threads"):
1339 # cpu_thread_policy "require" implies that compute node must have an STM architecture
1340 threads
= numa
["paired-threads"] * 2
1341 extra_specs
["hw:cpu_thread_policy"] = "require"
1342 extra_specs
["hw:cpu_policy"] = "dedicated"
1346 def process_numa_cores(numa
: dict, extra_specs
: dict) -> Optional
[int]:
1347 """Fill up extra_specs if numa has cores.
1349 numa (dict): A dictionary which includes numa information
1350 extra_specs (dict): To be filled.
1353 cores (int) Number of virtual cpus
1356 # cpu_thread_policy "isolate" implies that the host must not have an SMT
1357 # architecture, or a non-SMT architecture will be emulated
1358 if not numa
.get("cores"):
1360 cores
= numa
["cores"]
1361 extra_specs
["hw:cpu_thread_policy"] = "isolate"
1362 extra_specs
["hw:cpu_policy"] = "dedicated"
1366 def process_numa_threads(numa
: dict, extra_specs
: dict) -> Optional
[int]:
1367 """Fill up extra_specs if numa has threads.
1369 numa (dict): A dictionary which includes numa information
1370 extra_specs (dict): To be filled.
1373 threads (int) Number of virtual cpus
1376 # cpu_thread_policy "prefer" implies that the host may or may not have an SMT architecture
1377 if not numa
.get("threads"):
1379 threads
= numa
["threads"]
1380 extra_specs
["hw:cpu_thread_policy"] = "prefer"
1381 extra_specs
["hw:cpu_policy"] = "dedicated"
1384 def _process_numa_parameters_of_flavor(
1385 self
, numas
: List
, extra_specs
: Dict
1387 """Process numa parameters and fill up extra_specs.
1390 numas (list): List of dictionary which includes numa information
1391 extra_specs (dict): To be filled.
1394 numa_nodes
= len(numas
)
1395 extra_specs
["hw:numa_nodes"] = str(numa_nodes
)
1396 cpu_cores
, cpu_threads
= 0, 0
1398 if self
.vim_type
== "VIO":
1399 self
.process_vio_numa_nodes(numa_nodes
, extra_specs
)
1403 node_id
= numa
["id"]
1404 # overwrite ram and vcpus
1405 # check if key "memory" is present in numa else use ram value at flavor
1406 self
.process_numa_memory(numa
, node_id
, extra_specs
)
1407 self
.process_numa_vcpu(numa
, node_id
, extra_specs
)
1409 # See for reference: https://specs.openstack.org/openstack/nova-specs/specs/mitaka/implemented/virt-driver-cpu-thread-pinning.html
1410 extra_specs
["hw:cpu_sockets"] = str(numa_nodes
)
1412 if "paired-threads" in numa
:
1413 threads
= self
.process_numa_paired_threads(numa
, extra_specs
)
1414 cpu_threads
+= threads
1416 elif "cores" in numa
:
1417 cores
= self
.process_numa_cores(numa
, extra_specs
)
1420 elif "threads" in numa
:
1421 threads
= self
.process_numa_threads(numa
, extra_specs
)
1422 cpu_threads
+= threads
1425 extra_specs
["hw:cpu_cores"] = str(cpu_cores
)
1427 extra_specs
["hw:cpu_threads"] = str(cpu_threads
)
1430 def process_vio_numa_nodes(numa_nodes
: int, extra_specs
: Dict
) -> None:
1431 """According to number of numa nodes, updates the extra_specs for VIO.
1435 numa_nodes (int): List keeps the numa node numbers
1436 extra_specs (dict): Extra specs dict to be updated
1439 # If there are several numas, we do not define specific affinity.
1440 extra_specs
["vmware:latency_sensitivity_level"] = "high"
1442 def _change_flavor_name(
1443 self
, name
: str, name_suffix
: int, flavor_data
: dict
1445 """Change the flavor name if the name already exists.
1448 name (str): Flavor name to be checked
1449 name_suffix (int): Suffix to be appended to name
1450 flavor_data (dict): Flavor dict
1453 name (str): New flavor name to be used
1457 fl
= self
.nova
.flavors
.list()
1458 fl_names
= [f
.name
for f
in fl
]
1460 while name
in fl_names
:
1462 name
= flavor_data
["name"] + "-" + str(name_suffix
)
1466 def _process_extended_config_of_flavor(
1467 self
, extended
: dict, extra_specs
: dict
1469 """Process the extended dict to fill up extra_specs.
1472 extended (dict): Keeping the extra specification of flavor
1473 extra_specs (dict) Dict to be filled to be used during flavor creation
1478 "mem-quota": "memory",
1480 "disk-io-quota": "disk_io",
1488 "PREFER_LARGE": "any",
1492 "cpu-pinning-policy": "hw:cpu_policy",
1493 "cpu-thread-pinning-policy": "hw:cpu_thread_policy",
1494 "mem-policy": "hw:numa_mempolicy",
1497 numas
= extended
.get("numas")
1499 self
._process
_numa
_parameters
_of
_flavor
(numas
, extra_specs
)
1501 for quota
, item
in quotas
.items():
1502 if quota
in extended
.keys():
1503 self
.process_resource_quota(extended
.get(quota
), item
, extra_specs
)
1505 # Set the mempage size as specified in the descriptor
1506 if extended
.get("mempage-size"):
1507 if extended
["mempage-size"] in page_sizes
.keys():
1508 extra_specs
["hw:mem_page_size"] = page_sizes
[extended
["mempage-size"]]
1510 # Normally, validations in NBI should not allow to this condition.
1512 "Invalid mempage-size %s. Will be ignored",
1513 extended
.get("mempage-size"),
1516 for policy
, hw_policy
in policies
.items():
1517 if extended
.get(policy
):
1518 extra_specs
[hw_policy
] = extended
[policy
].lower()
1521 def _get_flavor_details(flavor_data
: dict) -> Tuple
:
1522 """Returns the details of flavor
1524 flavor_data (dict): Dictionary that includes required flavor details
1527 ram, vcpus, extra_specs, extended (tuple): Main items of required flavor
1531 flavor_data
.get("ram", 64),
1532 flavor_data
.get("vcpus", 1),
1534 flavor_data
.get("extended"),
1537 def new_flavor(self
, flavor_data
: dict, change_name_if_used
: bool = True) -> str:
1538 """Adds a tenant flavor to openstack VIM.
1539 if change_name_if_used is True, it will change name in case of conflict,
1540 because it is not supported name repetition.
1543 flavor_data (dict): Flavor details to be processed
1544 change_name_if_used (bool): Change name in case of conflict
1547 flavor_id (str): flavor identifier
1550 self
.logger
.debug("Adding flavor '%s'", str(flavor_data
))
1556 name
= flavor_data
["name"]
1557 while retry
< max_retries
:
1560 self
._reload
_connection
()
1562 if change_name_if_used
:
1563 name
= self
._change
_flavor
_name
(name
, name_suffix
, flavor_data
)
1565 ram
, vcpus
, extra_specs
, extended
= self
._get
_flavor
_details
(
1569 self
._process
_extended
_config
_of
_flavor
(extended
, extra_specs
)
1573 new_flavor
= self
.nova
.flavors
.create(
1577 disk
=flavor_data
.get("disk", 0),
1578 ephemeral
=flavor_data
.get("ephemeral", 0),
1579 swap
=flavor_data
.get("swap", 0),
1580 is_public
=flavor_data
.get("is_public", True),
1585 new_flavor
.set_keys(extra_specs
)
1587 return new_flavor
.id
1589 except nvExceptions
.Conflict
as e
:
1590 if change_name_if_used
and retry
< max_retries
:
1593 self
._format
_exception
(e
)
1596 ksExceptions
.ClientException
,
1597 nvExceptions
.ClientException
,
1601 self
._format
_exception
(e
)
1603 def delete_flavor(self
, flavor_id
):
1604 """Deletes a tenant flavor from openstack VIM. Returns the old flavor_id"""
1606 self
._reload
_connection
()
1607 self
.nova
.flavors
.delete(flavor_id
)
1610 # except nvExceptions.BadRequest as e:
1612 nvExceptions
.NotFound
,
1613 ksExceptions
.ClientException
,
1614 nvExceptions
.ClientException
,
1617 self
._format
_exception
(e
)
1619 def new_image(self
, image_dict
):
1621 Adds a tenant image to VIM. imge_dict is a dictionary with:
1623 disk_format: qcow2, vhd, vmdk, raw (by default), ...
1624 location: path or URI
1625 public: "yes" or "no"
1626 metadata: metadata of the image
1627 Returns the image_id
1632 while retry
< max_retries
:
1635 self
._reload
_connection
()
1637 # determine format http://docs.openstack.org/developer/glance/formats.html
1638 if "disk_format" in image_dict
:
1639 disk_format
= image_dict
["disk_format"]
1640 else: # autodiscover based on extension
1641 if image_dict
["location"].endswith(".qcow2"):
1642 disk_format
= "qcow2"
1643 elif image_dict
["location"].endswith(".vhd"):
1645 elif image_dict
["location"].endswith(".vmdk"):
1646 disk_format
= "vmdk"
1647 elif image_dict
["location"].endswith(".vdi"):
1649 elif image_dict
["location"].endswith(".iso"):
1651 elif image_dict
["location"].endswith(".aki"):
1653 elif image_dict
["location"].endswith(".ari"):
1655 elif image_dict
["location"].endswith(".ami"):
1661 "new_image: '%s' loading from '%s'",
1663 image_dict
["location"],
1665 if self
.vim_type
== "VIO":
1666 container_format
= "bare"
1667 if "container_format" in image_dict
:
1668 container_format
= image_dict
["container_format"]
1670 new_image
= self
.glance
.images
.create(
1671 name
=image_dict
["name"],
1672 container_format
=container_format
,
1673 disk_format
=disk_format
,
1676 new_image
= self
.glance
.images
.create(name
=image_dict
["name"])
1678 if image_dict
["location"].startswith("http"):
1679 # TODO there is not a method to direct download. It must be downloaded locally with requests
1680 raise vimconn
.VimConnNotImplemented("Cannot create image from URL")
1682 with
open(image_dict
["location"]) as fimage
:
1683 self
.glance
.images
.upload(new_image
.id, fimage
)
1684 # new_image = self.glancev1.images.create(name=image_dict["name"], is_public=
1685 # image_dict.get("public","yes")=="yes",
1686 # container_format="bare", data=fimage, disk_format=disk_format)
1688 metadata_to_load
= image_dict
.get("metadata")
1690 # TODO location is a reserved word for current openstack versions. fixed for VIO please check
1692 if self
.vim_type
== "VIO":
1693 metadata_to_load
["upload_location"] = image_dict
["location"]
1695 metadata_to_load
["location"] = image_dict
["location"]
1697 self
.glance
.images
.update(new_image
.id, **metadata_to_load
)
1701 nvExceptions
.Conflict
,
1702 ksExceptions
.ClientException
,
1703 nvExceptions
.ClientException
,
1705 self
._format
_exception
(e
)
1708 gl1Exceptions
.HTTPException
,
1709 gl1Exceptions
.CommunicationError
,
1712 if retry
== max_retries
:
1715 self
._format
_exception
(e
)
1716 except IOError as e
: # can not open the file
1717 raise vimconn
.VimConnConnectionException(
1718 "{}: {} for {}".format(type(e
).__name
__, e
, image_dict
["location"]),
1719 http_code
=vimconn
.HTTP_Bad_Request
,
1722 def delete_image(self
, image_id
):
1723 """Deletes a tenant image from openstack VIM. Returns the old id"""
1725 self
._reload
_connection
()
1726 self
.glance
.images
.delete(image_id
)
1730 nvExceptions
.NotFound
,
1731 ksExceptions
.ClientException
,
1732 nvExceptions
.ClientException
,
1733 gl1Exceptions
.CommunicationError
,
1734 gl1Exceptions
.HTTPNotFound
,
1736 ) as e
: # TODO remove
1737 self
._format
_exception
(e
)
1739 def get_image_id_from_path(self
, path
):
1740 """Get the image id from image path in the VIM database. Returns the image_id"""
1742 self
._reload
_connection
()
1743 images
= self
.glance
.images
.list()
1745 for image
in images
:
1746 if image
.metadata
.get("location") == path
:
1749 raise vimconn
.VimConnNotFoundException(
1750 "image with location '{}' not found".format(path
)
1753 ksExceptions
.ClientException
,
1754 nvExceptions
.ClientException
,
1755 gl1Exceptions
.CommunicationError
,
1758 self
._format
_exception
(e
)
1760 def get_image_list(self
, filter_dict
={}):
1761 """Obtain tenant images from VIM
1765 checksum: image checksum
1766 Returns the image list of dictionaries:
1767 [{<the fields at Filter_dict plus some VIM specific>}, ...]
1770 self
.logger
.debug("Getting image list from VIM filter: '%s'", str(filter_dict
))
1773 self
._reload
_connection
()
1774 # filter_dict_os = filter_dict.copy()
1775 # First we filter by the available filter fields: name, id. The others are removed.
1776 image_list
= self
.glance
.images
.list()
1779 for image
in image_list
:
1781 if filter_dict
.get("name") and image
["name"] != filter_dict
["name"]:
1784 if filter_dict
.get("id") and image
["id"] != filter_dict
["id"]:
1788 filter_dict
.get("checksum")
1789 and image
["checksum"] != filter_dict
["checksum"]
1793 filtered_list
.append(image
.copy())
1794 except gl1Exceptions
.HTTPNotFound
:
1797 return filtered_list
1799 ksExceptions
.ClientException
,
1800 nvExceptions
.ClientException
,
1801 gl1Exceptions
.CommunicationError
,
1804 self
._format
_exception
(e
)
1806 def __wait_for_vm(self
, vm_id
, status
):
1807 """wait until vm is in the desired status and return True.
1808 If the VM gets in ERROR status, return false.
1809 If the timeout is reached generate an exception"""
1811 while elapsed_time
< server_timeout
:
1812 vm_status
= self
.nova
.servers
.get(vm_id
).status
1814 if vm_status
== status
:
1817 if vm_status
== "ERROR":
1823 # if we exceeded the timeout rollback
1824 if elapsed_time
>= server_timeout
:
1825 raise vimconn
.VimConnException(
1826 "Timeout waiting for instance " + vm_id
+ " to get " + status
,
1827 http_code
=vimconn
.HTTP_Request_Timeout
,
1830 def _get_openstack_availablity_zones(self
):
1832 Get from openstack availability zones available
1836 openstack_availability_zone
= self
.nova
.availability_zones
.list()
1837 openstack_availability_zone
= [
1839 for zone
in openstack_availability_zone
1840 if zone
.zoneName
!= "internal"
1843 return openstack_availability_zone
1847 def _set_availablity_zones(self
):
1849 Set vim availablity zone
1852 if "availability_zone" in self
.config
:
1853 vim_availability_zones
= self
.config
.get("availability_zone")
1855 if isinstance(vim_availability_zones
, str):
1856 self
.availability_zone
= [vim_availability_zones
]
1857 elif isinstance(vim_availability_zones
, list):
1858 self
.availability_zone
= vim_availability_zones
1860 self
.availability_zone
= self
._get
_openstack
_availablity
_zones
()
1862 def _get_vm_availability_zone(
1863 self
, availability_zone_index
, availability_zone_list
1866 Return thge availability zone to be used by the created VM.
1867 :return: The VIM availability zone to be used or None
1869 if availability_zone_index
is None:
1870 if not self
.config
.get("availability_zone"):
1872 elif isinstance(self
.config
.get("availability_zone"), str):
1873 return self
.config
["availability_zone"]
1875 # TODO consider using a different parameter at config for default AV and AV list match
1876 return self
.config
["availability_zone"][0]
1878 vim_availability_zones
= self
.availability_zone
1879 # check if VIM offer enough availability zones describe in the VNFD
1880 if vim_availability_zones
and len(availability_zone_list
) <= len(
1881 vim_availability_zones
1883 # check if all the names of NFV AV match VIM AV names
1884 match_by_index
= False
1885 for av
in availability_zone_list
:
1886 if av
not in vim_availability_zones
:
1887 match_by_index
= True
1891 return vim_availability_zones
[availability_zone_index
]
1893 return availability_zone_list
[availability_zone_index
]
1895 raise vimconn
.VimConnConflictException(
1896 "No enough availability zones at VIM for this deployment"
1899 def _prepare_port_dict_security_groups(self
, net
: dict, port_dict
: dict) -> None:
1900 """Fill up the security_groups in the port_dict.
1903 net (dict): Network details
1904 port_dict (dict): Port details
1908 self
.config
.get("security_groups")
1909 and net
.get("port_security") is not False
1910 and not self
.config
.get("no_port_security_extension")
1912 if not self
.security_groups_id
:
1913 self
._get
_ids
_from
_name
()
1915 port_dict
["security_groups"] = self
.security_groups_id
1917 def _prepare_port_dict_binding(self
, net
: dict, port_dict
: dict) -> None:
1918 """Fill up the network binding depending on network type in the port_dict.
1921 net (dict): Network details
1922 port_dict (dict): Port details
1925 if not net
.get("type"):
1926 raise vimconn
.VimConnException("Type is missing in the network details.")
1928 if net
["type"] == "virtual":
1932 elif net
["type"] == "VF" or net
["type"] == "SR-IOV":
1933 port_dict
["binding:vnic_type"] = "direct"
1935 # VIO specific Changes
1936 if self
.vim_type
== "VIO":
1937 # Need to create port with port_security_enabled = False and no-security-groups
1938 port_dict
["port_security_enabled"] = False
1939 port_dict
["provider_security_groups"] = []
1940 port_dict
["security_groups"] = []
1943 # For PT PCI-PASSTHROUGH
1944 port_dict
["binding:vnic_type"] = "direct-physical"
1947 def _set_fixed_ip(new_port
: dict, net
: dict) -> None:
1948 """Set the "ip" parameter in net dictionary.
1951 new_port (dict): New created port
1952 net (dict): Network details
1955 fixed_ips
= new_port
["port"].get("fixed_ips")
1958 net
["ip"] = fixed_ips
[0].get("ip_address")
1963 def _prepare_port_dict_mac_ip_addr(net
: dict, port_dict
: dict) -> None:
1964 """Fill up the mac_address and fixed_ips in port_dict.
1967 net (dict): Network details
1968 port_dict (dict): Port details
1971 if net
.get("mac_address"):
1972 port_dict
["mac_address"] = net
["mac_address"]
1975 if ip_list
:= net
.get("ip_address"):
1976 if not isinstance(ip_list
, list):
1979 ip_dict
= {"ip_address": ip
}
1980 ip_dual_list
.append(ip_dict
)
1981 port_dict
["fixed_ips"] = ip_dual_list
1982 # TODO add "subnet_id": <subnet_id>
1984 def _create_new_port(self
, port_dict
: dict, created_items
: dict, net
: dict) -> Dict
:
1985 """Create new port using neutron.
1988 port_dict (dict): Port details
1989 created_items (dict): All created items
1990 net (dict): Network details
1993 new_port (dict): New created port
1996 new_port
= self
.neutron
.create_port({"port": port_dict
})
1997 created_items
["port:" + str(new_port
["port"]["id"])] = True
1998 net
["mac_address"] = new_port
["port"]["mac_address"]
1999 net
["vim_id"] = new_port
["port"]["id"]
2004 self
, net
: dict, name
: str, created_items
: dict
2005 ) -> Tuple
[dict, dict]:
2006 """Create port using net details.
2009 net (dict): Network details
2010 name (str): Name to be used as network name if net dict does not include name
2011 created_items (dict): All created items
2014 new_port, port New created port, port dictionary
2019 "network_id": net
["net_id"],
2020 "name": net
.get("name"),
2021 "admin_state_up": True,
2024 if not port_dict
["name"]:
2025 port_dict
["name"] = name
2027 self
._prepare
_port
_dict
_security
_groups
(net
, port_dict
)
2029 self
._prepare
_port
_dict
_binding
(net
, port_dict
)
2031 vimconnector
._prepare
_port
_dict
_mac
_ip
_addr
(net
, port_dict
)
2033 new_port
= self
._create
_new
_port
(port_dict
, created_items
, net
)
2035 vimconnector
._set
_fixed
_ip
(new_port
, net
)
2037 port
= {"port-id": new_port
["port"]["id"]}
2039 if float(self
.nova
.api_version
.get_string()) >= 2.32:
2040 port
["tag"] = new_port
["port"]["name"]
2042 return new_port
, port
2044 def _prepare_network_for_vminstance(
2048 created_items
: dict,
2050 external_network
: list,
2051 no_secured_ports
: list,
2053 """Create port and fill up net dictionary for new VM instance creation.
2056 name (str): Name of network
2057 net_list (list): List of networks
2058 created_items (dict): All created items belongs to a VM
2059 net_list_vim (list): List of ports
2060 external_network (list): List of external-networks
2061 no_secured_ports (list): Port security disabled ports
2064 self
._reload
_connection
()
2066 for net
in net_list
:
2067 # Skip non-connected iface
2068 if not net
.get("net_id"):
2071 new_port
, port
= self
._create
_port
(net
, name
, created_items
)
2073 net_list_vim
.append(port
)
2075 if net
.get("floating_ip", False):
2076 net
["exit_on_floating_ip_error"] = True
2077 external_network
.append(net
)
2079 elif net
["use"] == "mgmt" and self
.config
.get("use_floating_ip"):
2080 net
["exit_on_floating_ip_error"] = False
2081 external_network
.append(net
)
2082 net
["floating_ip"] = self
.config
.get("use_floating_ip")
2084 # If port security is disabled when the port has not yet been attached to the VM, then all vm traffic
2085 # is dropped. As a workaround we wait until the VM is active and then disable the port-security
2086 if net
.get("port_security") is False and not self
.config
.get(
2087 "no_port_security_extension"
2089 no_secured_ports
.append(
2091 new_port
["port"]["id"],
2092 net
.get("port_security_disable_strategy"),
2096 def _prepare_persistent_root_volumes(
2101 base_disk_index
: int,
2102 block_device_mapping
: dict,
2103 existing_vim_volumes
: list,
2104 created_items
: dict,
2106 """Prepare persistent root volumes for new VM instance.
2109 name (str): Name of VM instance
2110 vm_av_zone (list): List of availability zones
2111 disk (dict): Disk details
2112 base_disk_index (int): Disk index
2113 block_device_mapping (dict): Block device details
2114 existing_vim_volumes (list): Existing disk details
2115 created_items (dict): All created items belongs to VM
2118 boot_volume_id (str): ID of boot volume
2121 # Disk may include only vim_volume_id or only vim_id."
2122 # Use existing persistent root volume finding with volume_id or vim_id
2123 key_id
= "vim_volume_id" if "vim_volume_id" in disk
.keys() else "vim_id"
2125 if disk
.get(key_id
):
2126 block_device_mapping
["vd" + chr(base_disk_index
)] = disk
[key_id
]
2127 existing_vim_volumes
.append({"id": disk
[key_id
]})
2130 # Create persistent root volume
2131 volume
= self
.cinder
.volumes
.create(
2133 name
=name
+ "vd" + chr(base_disk_index
),
2134 imageRef
=disk
["image_id"],
2135 # Make sure volume is in the same AZ as the VM to be attached to
2136 availability_zone
=vm_av_zone
,
2138 boot_volume_id
= volume
.id
2139 self
.update_block_device_mapping(
2141 block_device_mapping
=block_device_mapping
,
2142 base_disk_index
=base_disk_index
,
2144 created_items
=created_items
,
2147 return boot_volume_id
2150 def update_block_device_mapping(
2152 block_device_mapping
: dict,
2153 base_disk_index
: int,
2155 created_items
: dict,
2157 """Add volume information to block device mapping dict.
2159 volume (object): Created volume object
2160 block_device_mapping (dict): Block device details
2161 base_disk_index (int): Disk index
2162 disk (dict): Disk details
2163 created_items (dict): All created items belongs to VM
2166 raise vimconn
.VimConnException("Volume is empty.")
2168 if not hasattr(volume
, "id"):
2169 raise vimconn
.VimConnException(
2170 "Created volume is not valid, does not have id attribute."
2173 block_device_mapping
["vd" + chr(base_disk_index
)] = volume
.id
2174 if disk
.get("multiattach"): # multiattach volumes do not belong to VDUs
2176 volume_txt
= "volume:" + str(volume
.id)
2177 if disk
.get("keep"):
2178 volume_txt
+= ":keep"
2179 created_items
[volume_txt
] = True
2181 def new_shared_volumes(self
, shared_volume_data
) -> (str, str):
2183 volume
= self
.cinder
.volumes
.create(
2184 size
=shared_volume_data
["size"],
2185 name
=shared_volume_data
["name"],
2186 volume_type
="multiattach",
2188 return (volume
.name
, volume
.id)
2189 except (ConnectionError
, KeyError) as e
:
2190 self
._format
_exception
(e
)
2192 def _prepare_shared_volumes(
2196 base_disk_index
: int,
2197 block_device_mapping
: dict,
2198 existing_vim_volumes
: list,
2199 created_items
: dict,
2201 volumes
= {volume
.name
: volume
.id for volume
in self
.cinder
.volumes
.list()}
2202 if volumes
.get(disk
["name"]):
2203 sv_id
= volumes
[disk
["name"]]
2206 # If this is not the first VM to attach the volume, volume status may be "reserved" for a short time
2209 volume
= self
.cinder
.volumes
.get(sv_id
)
2210 vol_status
= volume
.status
2211 if volume
.status
not in ("in-use", "available"):
2214 self
.update_block_device_mapping(
2216 block_device_mapping
=block_device_mapping
,
2217 base_disk_index
=base_disk_index
,
2219 created_items
=created_items
,
2222 raise vimconn
.VimConnException(
2223 "Shared volume is not prepared, status is: {}".format(vol_status
),
2224 http_code
=vimconn
.HTTP_Internal_Server_Error
,
2227 def _prepare_non_root_persistent_volumes(
2232 block_device_mapping
: dict,
2233 base_disk_index
: int,
2234 existing_vim_volumes
: list,
2235 created_items
: dict,
2237 """Prepare persistent volumes for new VM instance.
2240 name (str): Name of VM instance
2241 disk (dict): Disk details
2242 vm_av_zone (list): List of availability zones
2243 block_device_mapping (dict): Block device details
2244 base_disk_index (int): Disk index
2245 existing_vim_volumes (list): Existing disk details
2246 created_items (dict): All created items belongs to VM
2248 # Non-root persistent volumes
2249 # Disk may include only vim_volume_id or only vim_id."
2250 key_id
= "vim_volume_id" if "vim_volume_id" in disk
.keys() else "vim_id"
2251 if disk
.get(key_id
):
2252 # Use existing persistent volume
2253 block_device_mapping
["vd" + chr(base_disk_index
)] = disk
[key_id
]
2254 existing_vim_volumes
.append({"id": disk
[key_id
]})
2256 volume_name
= f
"{name}vd{chr(base_disk_index)}"
2257 volume
= self
.cinder
.volumes
.create(
2260 # Make sure volume is in the same AZ as the VM to be attached to
2261 availability_zone
=vm_av_zone
,
2263 self
.update_block_device_mapping(
2265 block_device_mapping
=block_device_mapping
,
2266 base_disk_index
=base_disk_index
,
2268 created_items
=created_items
,
2271 def _wait_for_created_volumes_availability(
2272 self
, elapsed_time
: int, created_items
: dict
2274 """Wait till created volumes become available.
2277 elapsed_time (int): Passed time while waiting
2278 created_items (dict): All created items belongs to VM
2281 elapsed_time (int): Time spent while waiting
2284 while elapsed_time
< volume_timeout
:
2285 for created_item
in created_items
:
2287 created_item
.split(":")[0],
2288 created_item
.split(":")[1],
2291 volume
= self
.cinder
.volumes
.get(volume_id
)
2293 volume
.volume_type
== "multiattach"
2294 and volume
.status
== "in-use"
2297 elif volume
.status
!= "available":
2300 # All ready: break from while
2308 def _wait_for_existing_volumes_availability(
2309 self
, elapsed_time
: int, existing_vim_volumes
: list
2311 """Wait till existing volumes become available.
2314 elapsed_time (int): Passed time while waiting
2315 existing_vim_volumes (list): Existing volume details
2318 elapsed_time (int): Time spent while waiting
2322 while elapsed_time
< volume_timeout
:
2323 for volume
in existing_vim_volumes
:
2324 v
= self
.cinder
.volumes
.get(volume
["id"])
2325 if v
.volume_type
== "multiattach" and v
.status
== "in-use":
2327 elif v
.status
!= "available":
2329 else: # all ready: break from while
2337 def _prepare_disk_for_vminstance(
2340 existing_vim_volumes
: list,
2341 created_items
: dict,
2343 block_device_mapping
: dict,
2344 disk_list
: list = None,
2346 """Prepare all volumes for new VM instance.
2349 name (str): Name of Instance
2350 existing_vim_volumes (list): List of existing volumes
2351 created_items (dict): All created items belongs to VM
2352 vm_av_zone (list): VM availability zone
2353 block_device_mapping (dict): Block devices to be attached to VM
2354 disk_list (list): List of disks
2357 # Create additional volumes in case these are present in disk_list
2358 base_disk_index
= ord("b")
2359 boot_volume_id
= None
2361 for disk
in disk_list
:
2362 if "image_id" in disk
:
2363 # Root persistent volume
2364 base_disk_index
= ord("a")
2365 boot_volume_id
= self
._prepare
_persistent
_root
_volumes
(
2367 vm_av_zone
=vm_av_zone
,
2369 base_disk_index
=base_disk_index
,
2370 block_device_mapping
=block_device_mapping
,
2371 existing_vim_volumes
=existing_vim_volumes
,
2372 created_items
=created_items
,
2374 elif disk
.get("multiattach"):
2375 self
._prepare
_shared
_volumes
(
2378 base_disk_index
=base_disk_index
,
2379 block_device_mapping
=block_device_mapping
,
2380 existing_vim_volumes
=existing_vim_volumes
,
2381 created_items
=created_items
,
2384 # Non-root persistent volume
2385 self
._prepare
_non
_root
_persistent
_volumes
(
2388 vm_av_zone
=vm_av_zone
,
2389 block_device_mapping
=block_device_mapping
,
2390 base_disk_index
=base_disk_index
,
2391 existing_vim_volumes
=existing_vim_volumes
,
2392 created_items
=created_items
,
2394 base_disk_index
+= 1
2396 # Wait until created volumes are with status available
2397 elapsed_time
= self
._wait
_for
_created
_volumes
_availability
(
2398 elapsed_time
, created_items
2400 # Wait until existing volumes in vim are with status available
2401 elapsed_time
= self
._wait
_for
_existing
_volumes
_availability
(
2402 elapsed_time
, existing_vim_volumes
2404 # If we exceeded the timeout rollback
2405 if elapsed_time
>= volume_timeout
:
2406 raise vimconn
.VimConnException(
2407 "Timeout creating volumes for instance " + name
,
2408 http_code
=vimconn
.HTTP_Request_Timeout
,
2411 self
.cinder
.volumes
.set_bootable(boot_volume_id
, True)
2413 def _find_the_external_network_for_floating_ip(self
):
2414 """Get the external network ip in order to create floating IP.
2417 pool_id (str): External network pool ID
2421 # Find the external network
2422 external_nets
= list()
2424 for net
in self
.neutron
.list_networks()["networks"]:
2425 if net
["router:external"]:
2426 external_nets
.append(net
)
2428 if len(external_nets
) == 0:
2429 raise vimconn
.VimConnException(
2430 "Cannot create floating_ip automatically since "
2431 "no external network is present",
2432 http_code
=vimconn
.HTTP_Conflict
,
2435 if len(external_nets
) > 1:
2436 raise vimconn
.VimConnException(
2437 "Cannot create floating_ip automatically since "
2438 "multiple external networks are present",
2439 http_code
=vimconn
.HTTP_Conflict
,
2443 return external_nets
[0].get("id")
2445 def _neutron_create_float_ip(self
, param
: dict, created_items
: dict) -> None:
2446 """Trigger neutron to create a new floating IP using external network ID.
2449 param (dict): Input parameters to create a floating IP
2450 created_items (dict): All created items belongs to new VM instance
2457 self
.logger
.debug("Creating floating IP")
2458 new_floating_ip
= self
.neutron
.create_floatingip(param
)
2459 free_floating_ip
= new_floating_ip
["floatingip"]["id"]
2460 created_items
["floating_ip:" + str(free_floating_ip
)] = True
2462 except Exception as e
:
2463 raise vimconn
.VimConnException(
2464 type(e
).__name
__ + ": Cannot create new floating_ip " + str(e
),
2465 http_code
=vimconn
.HTTP_Conflict
,
2468 def _create_floating_ip(
2469 self
, floating_network
: dict, server
: object, created_items
: dict
2471 """Get the available Pool ID and create a new floating IP.
2474 floating_network (dict): Dict including external network ID
2475 server (object): Server object
2476 created_items (dict): All created items belongs to new VM instance
2480 # Pool_id is available
2482 isinstance(floating_network
["floating_ip"], str)
2483 and floating_network
["floating_ip"].lower() != "true"
2485 pool_id
= floating_network
["floating_ip"]
2489 pool_id
= self
._find
_the
_external
_network
_for
_floating
_ip
()
2493 "floating_network_id": pool_id
,
2494 "tenant_id": server
.tenant_id
,
2498 self
._neutron
_create
_float
_ip
(param
, created_items
)
2500 def _find_floating_ip(
2504 floating_network
: dict,
2506 """Find the available free floating IPs if there are.
2509 server (object): Server object
2510 floating_ips (list): List of floating IPs
2511 floating_network (dict): Details of floating network such as ID
2514 free_floating_ip (str): Free floating ip address
2517 for fip
in floating_ips
:
2518 if fip
.get("port_id") or fip
.get("tenant_id") != server
.tenant_id
:
2521 if isinstance(floating_network
["floating_ip"], str):
2522 if fip
.get("floating_network_id") != floating_network
["floating_ip"]:
2527 def _assign_floating_ip(
2528 self
, free_floating_ip
: str, floating_network
: dict
2530 """Assign the free floating ip address to port.
2533 free_floating_ip (str): Floating IP to be assigned
2534 floating_network (dict): ID of floating network
2537 fip (dict) (dict): Floating ip details
2540 # The vim_id key contains the neutron.port_id
2541 self
.neutron
.update_floatingip(
2543 {"floatingip": {"port_id": floating_network
["vim_id"]}},
2545 # For race condition ensure not re-assigned to other VM after 5 seconds
2548 return self
.neutron
.show_floatingip(free_floating_ip
)
2550 def _get_free_floating_ip(
2551 self
, server
: object, floating_network
: dict
2553 """Get the free floating IP address.
2556 server (object): Server Object
2557 floating_network (dict): Floating network details
2560 free_floating_ip (str): Free floating ip addr
2564 floating_ips
= self
.neutron
.list_floatingips().get("floatingips", ())
2567 random
.shuffle(floating_ips
)
2569 return self
._find
_floating
_ip
(server
, floating_ips
, floating_network
)
2571 def _prepare_external_network_for_vminstance(
2573 external_network
: list,
2575 created_items
: dict,
2576 vm_start_time
: float,
2578 """Assign floating IP address for VM instance.
2581 external_network (list): ID of External network
2582 server (object): Server Object
2583 created_items (dict): All created items belongs to new VM instance
2584 vm_start_time (float): Time as a floating point number expressed in seconds since the epoch, in UTC
2590 for floating_network
in external_network
:
2593 floating_ip_retries
= 3
2594 # In case of RO in HA there can be conflicts, two RO trying to assign same floating IP, so retry
2597 free_floating_ip
= self
._get
_free
_floating
_ip
(
2598 server
, floating_network
2601 if not free_floating_ip
:
2602 self
._create
_floating
_ip
(
2603 floating_network
, server
, created_items
2607 # For race condition ensure not already assigned
2608 fip
= self
.neutron
.show_floatingip(free_floating_ip
)
2610 if fip
["floatingip"].get("port_id"):
2613 # Assign floating ip
2614 fip
= self
._assign
_floating
_ip
(
2615 free_floating_ip
, floating_network
2618 if fip
["floatingip"]["port_id"] != floating_network
["vim_id"]:
2619 self
.logger
.warning(
2620 "floating_ip {} re-assigned to other port".format(
2627 "Assigned floating_ip {} to VM {}".format(
2628 free_floating_ip
, server
.id
2634 except Exception as e
:
2635 # Openstack need some time after VM creation to assign an IP. So retry if fails
2636 vm_status
= self
.nova
.servers
.get(server
.id).status
2638 if vm_status
not in ("ACTIVE", "ERROR"):
2639 if time
.time() - vm_start_time
< server_timeout
:
2642 elif floating_ip_retries
> 0:
2643 floating_ip_retries
-= 1
2646 raise vimconn
.VimConnException(
2647 "Cannot create floating_ip: {} {}".format(
2650 http_code
=vimconn
.HTTP_Conflict
,
2653 except Exception as e
:
2654 if not floating_network
["exit_on_floating_ip_error"]:
2655 self
.logger
.error("Cannot create floating_ip. %s", str(e
))
2660 def _update_port_security_for_vminstance(
2662 no_secured_ports
: list,
2665 """Updates the port security according to no_secured_ports list.
2668 no_secured_ports (list): List of ports that security will be disabled
2669 server (object): Server Object
2675 # Wait until the VM is active and then disable the port-security
2676 if no_secured_ports
:
2677 self
.__wait
_for
_vm
(server
.id, "ACTIVE")
2679 for port
in no_secured_ports
:
2681 "port": {"port_security_enabled": False, "security_groups": None}
2684 if port
[1] == "allow-address-pairs":
2686 "port": {"allowed_address_pairs": [{"ip_address": "0.0.0.0/0"}]}
2690 self
.neutron
.update_port(port
[0], port_update
)
2693 raise vimconn
.VimConnException(
2694 "It was not possible to disable port security for port {}".format(
2706 affinity_group_list
: list,
2710 availability_zone_index
=None,
2711 availability_zone_list
=None,
2713 """Adds a VM instance to VIM.
2716 name (str): name of VM
2717 description (str): description
2718 start (bool): indicates if VM must start or boot in pause mode. Ignored
2719 image_id (str) image uuid
2720 flavor_id (str) flavor uuid
2721 affinity_group_list (list): list of affinity groups, each one is a dictionary.Ignore if empty.
2722 net_list (list): list of interfaces, each one is a dictionary with:
2723 name: name of network
2724 net_id: network uuid to connect
2725 vpci: virtual vcpi to assign, ignored because openstack lack #TODO
2726 model: interface model, ignored #TODO
2727 mac_address: used for SR-IOV ifaces #TODO for other types
2728 use: 'data', 'bridge', 'mgmt'
2729 type: 'virtual', 'PCI-PASSTHROUGH'('PF'), 'SR-IOV'('VF'), 'VFnotShared'
2730 vim_id: filled/added by this function
2731 floating_ip: True/False (or it can be None)
2732 port_security: True/False
2733 cloud_config (dict): (optional) dictionary with:
2734 key-pairs: (optional) list of strings with the public key to be inserted to the default user
2735 users: (optional) list of users to be inserted, each item is a dict with:
2736 name: (mandatory) user name,
2737 key-pairs: (optional) list of strings with the public key to be inserted to the user
2738 user-data: (optional) string is a text script to be passed directly to cloud-init
2739 config-files: (optional). List of files to be transferred. Each item is a dict with:
2740 dest: (mandatory) string with the destination absolute path
2741 encoding: (optional, by default text). Can be one of:
2742 'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
2743 content : (mandatory) string with the content of the file
2744 permissions: (optional) string with file permissions, typically octal notation '0644'
2745 owner: (optional) file owner, string with the format 'owner:group'
2746 boot-data-drive: boolean to indicate if user-data must be passed using a boot drive (hard disk)
2747 disk_list: (optional) list with additional disks to the VM. Each item is a dict with:
2748 image_id: (optional). VIM id of an existing image. If not provided an empty disk must be mounted
2749 size: (mandatory) string with the size of the disk in GB
2750 vim_id: (optional) should use this existing volume id
2751 availability_zone_index: Index of availability_zone_list to use for this this VM. None if not AV required
2752 availability_zone_list: list of availability zones given by user in the VNFD descriptor. Ignore if
2753 availability_zone_index is None
2754 #TODO ip, security groups
2757 A tuple with the instance identifier and created_items or raises an exception on error
2758 created_items can be None or a dictionary where this method can include key-values that will be passed to
2759 the method delete_vminstance and action_vminstance. Can be used to store created ports, volumes, etc.
2760 Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
2765 "new_vminstance input: image='%s' flavor='%s' nics='%s'",
2775 # list of external networks to be connected to instance, later on used to create floating_ip
2776 external_network
= []
2777 # List of ports with port-security disabled
2778 no_secured_ports
= []
2779 block_device_mapping
= {}
2780 existing_vim_volumes
= []
2781 server_group_id
= None
2782 scheduller_hints
= {}
2784 # Check the Openstack Connection
2785 self
._reload
_connection
()
2787 # Prepare network list
2788 self
._prepare
_network
_for
_vminstance
(
2791 created_items
=created_items
,
2792 net_list_vim
=net_list_vim
,
2793 external_network
=external_network
,
2794 no_secured_ports
=no_secured_ports
,
2798 config_drive
, userdata
= self
._create
_user
_data
(cloud_config
)
2800 # Get availability Zone
2801 vm_av_zone
= self
._get
_vm
_availability
_zone
(
2802 availability_zone_index
, availability_zone_list
2807 self
._prepare
_disk
_for
_vminstance
(
2809 existing_vim_volumes
=existing_vim_volumes
,
2810 created_items
=created_items
,
2811 vm_av_zone
=vm_av_zone
,
2812 block_device_mapping
=block_device_mapping
,
2813 disk_list
=disk_list
,
2816 if affinity_group_list
:
2817 # Only first id on the list will be used. Openstack restriction
2818 server_group_id
= affinity_group_list
[0]["affinity_group_id"]
2819 scheduller_hints
["group"] = server_group_id
2822 "nova.servers.create({}, {}, {}, nics={}, security_groups={}, "
2823 "availability_zone={}, key_name={}, userdata={}, config_drive={}, "
2824 "block_device_mapping={}, server_group={})".format(
2829 self
.config
.get("security_groups"),
2831 self
.config
.get("keypair"),
2834 block_device_mapping
,
2839 server
= self
.nova
.servers
.create(
2844 security_groups
=self
.config
.get("security_groups"),
2845 # TODO remove security_groups in future versions. Already at neutron port
2846 availability_zone
=vm_av_zone
,
2847 key_name
=self
.config
.get("keypair"),
2849 config_drive
=config_drive
,
2850 block_device_mapping
=block_device_mapping
,
2851 scheduler_hints
=scheduller_hints
,
2854 vm_start_time
= time
.time()
2856 self
._update
_port
_security
_for
_vminstance
(no_secured_ports
, server
)
2858 self
._prepare
_external
_network
_for
_vminstance
(
2859 external_network
=external_network
,
2861 created_items
=created_items
,
2862 vm_start_time
=vm_start_time
,
2865 return server
.id, created_items
2867 except Exception as e
:
2870 server_id
= server
.id
2873 created_items
= self
.remove_keep_tag_from_persistent_volumes(
2877 self
.delete_vminstance(server_id
, created_items
)
2879 except Exception as e2
:
2880 self
.logger
.error("new_vminstance rollback fail {}".format(e2
))
2882 self
._format
_exception
(e
)
2885 def remove_keep_tag_from_persistent_volumes(created_items
: Dict
) -> Dict
:
2886 """Removes the keep flag from persistent volumes. So, those volumes could be removed.
2889 created_items (dict): All created items belongs to VM
2892 updated_created_items (dict): Dict which does not include keep flag for volumes.
2896 key
.replace(":keep", ""): value
for (key
, value
) in created_items
.items()
2899 def get_vminstance(self
, vm_id
):
2900 """Returns the VM instance information from VIM"""
2901 return self
._find
_nova
_server
(vm_id
)
2903 def get_vminstance_console(self
, vm_id
, console_type
="vnc"):
2905 Get a console for the virtual machine
2907 vm_id: uuid of the VM
2908 console_type, can be:
2909 "novnc" (by default), "xvpvnc" for VNC types,
2910 "rdp-html5" for RDP types, "spice-html5" for SPICE types
2911 Returns dict with the console parameters:
2912 protocol: ssh, ftp, http, https, ...
2913 server: usually ip address
2914 port: the http, ssh, ... port
2915 suffix: extra text, e.g. the http path and query string
2917 self
.logger
.debug("Getting VM CONSOLE from VIM")
2920 self
._reload
_connection
()
2921 server
= self
.nova
.servers
.find(id=vm_id
)
2923 if console_type
is None or console_type
== "novnc":
2924 console_dict
= server
.get_vnc_console("novnc")
2925 elif console_type
== "xvpvnc":
2926 console_dict
= server
.get_vnc_console(console_type
)
2927 elif console_type
== "rdp-html5":
2928 console_dict
= server
.get_rdp_console(console_type
)
2929 elif console_type
== "spice-html5":
2930 console_dict
= server
.get_spice_console(console_type
)
2932 raise vimconn
.VimConnException(
2933 "console type '{}' not allowed".format(console_type
),
2934 http_code
=vimconn
.HTTP_Bad_Request
,
2937 console_dict1
= console_dict
.get("console")
2940 console_url
= console_dict1
.get("url")
2944 protocol_index
= console_url
.find("//")
2946 console_url
[protocol_index
+ 2 :].find("/") + protocol_index
+ 2
2949 console_url
[protocol_index
+ 2 : suffix_index
].find(":")
2954 if protocol_index
< 0 or port_index
< 0 or suffix_index
< 0:
2956 -vimconn
.HTTP_Internal_Server_Error
,
2957 "Unexpected response from VIM",
2961 "protocol": console_url
[0:protocol_index
],
2962 "server": console_url
[protocol_index
+ 2 : port_index
],
2963 "port": console_url
[port_index
:suffix_index
],
2964 "suffix": console_url
[suffix_index
+ 1 :],
2969 raise vimconn
.VimConnUnexpectedResponse("Unexpected response from VIM")
2971 nvExceptions
.NotFound
,
2972 ksExceptions
.ClientException
,
2973 nvExceptions
.ClientException
,
2974 nvExceptions
.BadRequest
,
2977 self
._format
_exception
(e
)
2979 def _delete_ports_by_id_wth_neutron(self
, k_id
: str) -> None:
2980 """Neutron delete ports by id.
2982 k_id (str): Port id in the VIM
2985 port_dict
= self
.neutron
.list_ports()
2986 existing_ports
= [port
["id"] for port
in port_dict
["ports"] if port_dict
]
2988 if k_id
in existing_ports
:
2989 self
.neutron
.delete_port(k_id
)
2991 except Exception as e
:
2992 self
.logger
.error("Error deleting port: {}: {}".format(type(e
).__name
__, e
))
2994 def delete_shared_volumes(self
, shared_volume_vim_id
: str) -> bool:
2995 """Cinder delete volume by id.
2997 shared_volume_vim_id (str): ID of shared volume in VIM
3001 while elapsed_time
< server_timeout
:
3002 vol_status
= self
.cinder
.volumes
.get(shared_volume_vim_id
).status
3003 if vol_status
== "available":
3004 self
.cinder
.volumes
.delete(shared_volume_vim_id
)
3010 if elapsed_time
>= server_timeout
:
3011 raise vimconn
.VimConnException(
3012 "Timeout waiting for volume "
3013 + shared_volume_vim_id
3014 + " to be available",
3015 http_code
=vimconn
.HTTP_Request_Timeout
,
3018 except Exception as e
:
3020 "Error deleting volume: {}: {}".format(type(e
).__name
__, e
)
3022 self
._format
_exception
(e
)
3024 def _delete_volumes_by_id_wth_cinder(
3025 self
, k
: str, k_id
: str, volumes_to_hold
: list, created_items
: dict
3027 """Cinder delete volume by id.
3029 k (str): Full item name in created_items
3030 k_id (str): ID of floating ip in VIM
3031 volumes_to_hold (list): Volumes not to delete
3032 created_items (dict): All created items belongs to VM
3035 if k_id
in volumes_to_hold
:
3038 if self
.cinder
.volumes
.get(k_id
).status
!= "available":
3042 self
.cinder
.volumes
.delete(k_id
)
3043 created_items
[k
] = None
3045 except Exception as e
:
3047 "Error deleting volume: {}: {}".format(type(e
).__name
__, e
)
3050 def _delete_floating_ip_by_id(self
, k
: str, k_id
: str, created_items
: dict) -> None:
3051 """Neutron delete floating ip by id.
3053 k (str): Full item name in created_items
3054 k_id (str): ID of floating ip in VIM
3055 created_items (dict): All created items belongs to VM
3058 self
.neutron
.delete_floatingip(k_id
)
3059 created_items
[k
] = None
3061 except Exception as e
:
3063 "Error deleting floating ip: {}: {}".format(type(e
).__name
__, e
)
3067 def _get_item_name_id(k
: str) -> Tuple
[str, str]:
3068 k_item
, _
, k_id
= k
.partition(":")
3071 def _delete_vm_ports_attached_to_network(self
, created_items
: dict) -> None:
3072 """Delete VM ports attached to the networks before deleting virtual machine.
3074 created_items (dict): All created items belongs to VM
3077 for k
, v
in created_items
.items():
3078 if not v
: # skip already deleted
3082 k_item
, k_id
= self
._get
_item
_name
_id
(k
)
3083 if k_item
== "port":
3084 self
._delete
_ports
_by
_id
_wth
_neutron
(k_id
)
3086 except Exception as e
:
3088 "Error deleting port: {}: {}".format(type(e
).__name
__, e
)
3091 def _delete_created_items(
3092 self
, created_items
: dict, volumes_to_hold
: list, keep_waiting
: bool
3094 """Delete Volumes and floating ip if they exist in created_items."""
3095 for k
, v
in created_items
.items():
3096 if not v
: # skip already deleted
3100 k_item
, k_id
= self
._get
_item
_name
_id
(k
)
3101 if k_item
== "volume":
3102 unavailable_vol
= self
._delete
_volumes
_by
_id
_wth
_cinder
(
3103 k
, k_id
, volumes_to_hold
, created_items
3109 elif k_item
== "floating_ip":
3110 self
._delete
_floating
_ip
_by
_id
(k
, k_id
, created_items
)
3112 except Exception as e
:
3113 self
.logger
.error("Error deleting {}: {}".format(k
, e
))
3118 def _extract_items_wth_keep_flag_from_created_items(created_items
: dict) -> dict:
3119 """Remove the volumes which has key flag from created_items
3122 created_items (dict): All created items belongs to VM
3125 created_items (dict): Persistent volumes eliminated created_items
3129 for (key
, value
) in created_items
.items()
3130 if len(key
.split(":")) == 2
3133 def delete_vminstance(
3134 self
, vm_id
: str, created_items
: dict = None, volumes_to_hold
: list = None
3136 """Removes a VM instance from VIM. Returns the old identifier.
3138 vm_id (str): Identifier of VM instance
3139 created_items (dict): All created items belongs to VM
3140 volumes_to_hold (list): Volumes_to_hold
3142 if created_items
is None:
3144 if volumes_to_hold
is None:
3145 volumes_to_hold
= []
3148 created_items
= self
._extract
_items
_wth
_keep
_flag
_from
_created
_items
(
3152 self
._reload
_connection
()
3154 # Delete VM ports attached to the networks before the virtual machine
3156 self
._delete
_vm
_ports
_attached
_to
_network
(created_items
)
3159 self
.nova
.servers
.delete(vm_id
)
3161 # Although having detached, volumes should have in active status before deleting.
3162 # We ensure in this loop
3166 while keep_waiting
and elapsed_time
< volume_timeout
:
3167 keep_waiting
= False
3169 # Delete volumes and floating IP.
3170 keep_waiting
= self
._delete
_created
_items
(
3171 created_items
, volumes_to_hold
, keep_waiting
3179 nvExceptions
.NotFound
,
3180 ksExceptions
.ClientException
,
3181 nvExceptions
.ClientException
,
3184 self
._format
_exception
(e
)
3186 def refresh_vms_status(self
, vm_list
):
3187 """Get the status of the virtual machines and their interfaces/ports
3188 Params: the list of VM identifiers
3189 Returns a dictionary with:
3190 vm_id: #VIM id of this Virtual Machine
3191 status: #Mandatory. Text with one of:
3192 # DELETED (not found at vim)
3193 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
3194 # OTHER (Vim reported other status not understood)
3195 # ERROR (VIM indicates an ERROR status)
3196 # ACTIVE, PAUSED, SUSPENDED, INACTIVE (not running),
3197 # CREATING (on building process), ERROR
3198 # ACTIVE:NoMgmtIP (Active but any of its interface has an IP address
3200 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
3201 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
3203 - vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
3204 mac_address: #Text format XX:XX:XX:XX:XX:XX
3205 vim_net_id: #network id where this interface is connected
3206 vim_interface_id: #interface/port VIM id
3207 ip_address: #null, or text with IPv4, IPv6 address
3208 compute_node: #identification of compute node where PF,VF interface is allocated
3209 pci: #PCI address of the NIC that hosts the PF,VF
3210 vlan: #physical VLAN used for VF
3214 "refresh_vms status: Getting tenant VM instance information from VIM"
3217 for vm_id
in vm_list
:
3221 vm_vim
= self
.get_vminstance(vm_id
)
3223 if vm_vim
["status"] in vmStatus2manoFormat
:
3224 vm
["status"] = vmStatus2manoFormat
[vm_vim
["status"]]
3226 vm
["status"] = "OTHER"
3227 vm
["error_msg"] = "VIM status reported " + vm_vim
["status"]
3229 vm_vim
.pop("OS-EXT-SRV-ATTR:user_data", None)
3230 vm_vim
.pop("user_data", None)
3231 vm
["vim_info"] = self
.serialize(vm_vim
)
3233 vm
["interfaces"] = []
3234 if vm_vim
.get("fault"):
3235 vm
["error_msg"] = str(vm_vim
["fault"])
3239 self
._reload
_connection
()
3240 port_dict
= self
.neutron
.list_ports(device_id
=vm_id
)
3242 for port
in port_dict
["ports"]:
3244 interface
["vim_info"] = self
.serialize(port
)
3245 interface
["mac_address"] = port
.get("mac_address")
3246 interface
["vim_net_id"] = port
["network_id"]
3247 interface
["vim_interface_id"] = port
["id"]
3248 # check if OS-EXT-SRV-ATTR:host is there,
3249 # in case of non-admin credentials, it will be missing
3251 if vm_vim
.get("OS-EXT-SRV-ATTR:host"):
3252 interface
["compute_node"] = vm_vim
["OS-EXT-SRV-ATTR:host"]
3254 interface
["pci"] = None
3256 # check if binding:profile is there,
3257 # in case of non-admin credentials, it will be missing
3258 if port
.get("binding:profile"):
3259 if port
["binding:profile"].get("pci_slot"):
3260 # TODO: At the moment sr-iov pci addresses are converted to PF pci addresses by setting
3262 # TODO: This is just a workaround valid for niantinc. Find a better way to do so
3263 # CHANGE DDDD:BB:SS.F to DDDD:BB:00.(F%2) assuming there are 2 ports per nic
3264 pci
= port
["binding:profile"]["pci_slot"]
3265 # interface["pci"] = pci[:-4] + "00." + str(int(pci[-1]) % 2)
3266 interface
["pci"] = pci
3268 interface
["vlan"] = None
3270 if port
.get("binding:vif_details"):
3271 interface
["vlan"] = port
["binding:vif_details"].get("vlan")
3273 # Get vlan from network in case not present in port for those old openstacks and cases where
3274 # it is needed vlan at PT
3275 if not interface
["vlan"]:
3276 # if network is of type vlan and port is of type direct (sr-iov) then set vlan id
3277 network
= self
.neutron
.show_network(port
["network_id"])
3280 network
["network"].get("provider:network_type")
3283 # and port.get("binding:vnic_type") in ("direct", "direct-physical"):
3284 interface
["vlan"] = network
["network"].get(
3285 "provider:segmentation_id"
3289 # look for floating ip address
3291 floating_ip_dict
= self
.neutron
.list_floatingips(
3295 if floating_ip_dict
.get("floatingips"):
3297 floating_ip_dict
["floatingips"][0].get(
3298 "floating_ip_address"
3304 for subnet
in port
["fixed_ips"]:
3305 ips
.append(subnet
["ip_address"])
3307 interface
["ip_address"] = ";".join(ips
)
3308 vm
["interfaces"].append(interface
)
3309 except Exception as e
:
3311 "Error getting vm interface information {}: {}".format(
3316 except vimconn
.VimConnNotFoundException
as e
:
3317 self
.logger
.error("Exception getting vm status: %s", str(e
))
3318 vm
["status"] = "DELETED"
3319 vm
["error_msg"] = str(e
)
3320 except vimconn
.VimConnException
as e
:
3321 self
.logger
.error("Exception getting vm status: %s", str(e
))
3322 vm
["status"] = "VIM_ERROR"
3323 vm
["error_msg"] = str(e
)
3329 def action_vminstance(self
, vm_id
, action_dict
, created_items
={}):
3330 """Send and action over a VM instance from VIM
3331 Returns None or the console dict if the action was successfully sent to the VIM
3333 self
.logger
.debug("Action over VM '%s': %s", vm_id
, str(action_dict
))
3336 self
._reload
_connection
()
3337 server
= self
.nova
.servers
.find(id=vm_id
)
3339 if "start" in action_dict
:
3340 if action_dict
["start"] == "rebuild":
3343 if server
.status
== "PAUSED":
3345 elif server
.status
== "SUSPENDED":
3347 elif server
.status
== "SHUTOFF":
3351 "ERROR : Instance is not in SHUTOFF/PAUSE/SUSPEND state"
3353 raise vimconn
.VimConnException(
3354 "Cannot 'start' instance while it is in active state",
3355 http_code
=vimconn
.HTTP_Bad_Request
,
3358 elif "pause" in action_dict
:
3360 elif "resume" in action_dict
:
3362 elif "shutoff" in action_dict
or "shutdown" in action_dict
:
3363 self
.logger
.debug("server status %s", server
.status
)
3364 if server
.status
== "ACTIVE":
3367 self
.logger
.debug("ERROR: VM is not in Active state")
3368 raise vimconn
.VimConnException(
3369 "VM is not in active state, stop operation is not allowed",
3370 http_code
=vimconn
.HTTP_Bad_Request
,
3372 elif "forceOff" in action_dict
:
3373 server
.stop() # TODO
3374 elif "terminate" in action_dict
:
3376 elif "createImage" in action_dict
:
3377 server
.create_image()
3378 # "path":path_schema,
3379 # "description":description_schema,
3380 # "name":name_schema,
3381 # "metadata":metadata_schema,
3382 # "imageRef": id_schema,
3383 # "disk": {"oneOf":[{"type": "null"}, {"type":"string"}] },
3384 elif "rebuild" in action_dict
:
3385 server
.rebuild(server
.image
["id"])
3386 elif "reboot" in action_dict
:
3387 server
.reboot() # reboot_type="SOFT"
3388 elif "console" in action_dict
:
3389 console_type
= action_dict
["console"]
3391 if console_type
is None or console_type
== "novnc":
3392 console_dict
= server
.get_vnc_console("novnc")
3393 elif console_type
== "xvpvnc":
3394 console_dict
= server
.get_vnc_console(console_type
)
3395 elif console_type
== "rdp-html5":
3396 console_dict
= server
.get_rdp_console(console_type
)
3397 elif console_type
== "spice-html5":
3398 console_dict
= server
.get_spice_console(console_type
)
3400 raise vimconn
.VimConnException(
3401 "console type '{}' not allowed".format(console_type
),
3402 http_code
=vimconn
.HTTP_Bad_Request
,
3406 console_url
= console_dict
["console"]["url"]
3408 protocol_index
= console_url
.find("//")
3410 console_url
[protocol_index
+ 2 :].find("/") + protocol_index
+ 2
3413 console_url
[protocol_index
+ 2 : suffix_index
].find(":")
3418 if protocol_index
< 0 or port_index
< 0 or suffix_index
< 0:
3419 raise vimconn
.VimConnException(
3420 "Unexpected response from VIM " + str(console_dict
)
3424 "protocol": console_url
[0:protocol_index
],
3425 "server": console_url
[protocol_index
+ 2 : port_index
],
3426 "port": int(console_url
[port_index
+ 1 : suffix_index
]),
3427 "suffix": console_url
[suffix_index
+ 1 :],
3430 return console_dict2
3432 raise vimconn
.VimConnException(
3433 "Unexpected response from VIM " + str(console_dict
)
3438 ksExceptions
.ClientException
,
3439 nvExceptions
.ClientException
,
3440 nvExceptions
.NotFound
,
3443 self
._format
_exception
(e
)
3444 # TODO insert exception vimconn.HTTP_Unauthorized
3446 # ###### VIO Specific Changes #########
3447 def _generate_vlanID(self
):
3449 Method to get unused vlanID
3457 networks
= self
.get_network_list()
3459 for net
in networks
:
3460 if net
.get("provider:segmentation_id"):
3461 usedVlanIDs
.append(net
.get("provider:segmentation_id"))
3463 used_vlanIDs
= set(usedVlanIDs
)
3465 # find unused VLAN ID
3466 for vlanID_range
in self
.config
.get("dataplane_net_vlan_range"):
3468 start_vlanid
, end_vlanid
= map(
3469 int, vlanID_range
.replace(" ", "").split("-")
3472 for vlanID
in range(start_vlanid
, end_vlanid
+ 1):
3473 if vlanID
not in used_vlanIDs
:
3475 except Exception as exp
:
3476 raise vimconn
.VimConnException(
3477 "Exception {} occurred while generating VLAN ID.".format(exp
)
3480 raise vimconn
.VimConnConflictException(
3481 "Unable to create the SRIOV VLAN network. All given Vlan IDs {} are in use.".format(
3482 self
.config
.get("dataplane_net_vlan_range")
3486 def _generate_multisegment_vlanID(self
):
3488 Method to get unused vlanID
3496 networks
= self
.get_network_list()
3497 for net
in networks
:
3498 if net
.get("provider:network_type") == "vlan" and net
.get(
3499 "provider:segmentation_id"
3501 usedVlanIDs
.append(net
.get("provider:segmentation_id"))
3502 elif net
.get("segments"):
3503 for segment
in net
.get("segments"):
3504 if segment
.get("provider:network_type") == "vlan" and segment
.get(
3505 "provider:segmentation_id"
3507 usedVlanIDs
.append(segment
.get("provider:segmentation_id"))
3509 used_vlanIDs
= set(usedVlanIDs
)
3511 # find unused VLAN ID
3512 for vlanID_range
in self
.config
.get("multisegment_vlan_range"):
3514 start_vlanid
, end_vlanid
= map(
3515 int, vlanID_range
.replace(" ", "").split("-")
3518 for vlanID
in range(start_vlanid
, end_vlanid
+ 1):
3519 if vlanID
not in used_vlanIDs
:
3521 except Exception as exp
:
3522 raise vimconn
.VimConnException(
3523 "Exception {} occurred while generating VLAN ID.".format(exp
)
3526 raise vimconn
.VimConnConflictException(
3527 "Unable to create the VLAN segment. All VLAN IDs {} are in use.".format(
3528 self
.config
.get("multisegment_vlan_range")
3532 def _validate_vlan_ranges(self
, input_vlan_range
, text_vlan_range
):
3534 Method to validate user given vlanID ranges
3538 for vlanID_range
in input_vlan_range
:
3539 vlan_range
= vlanID_range
.replace(" ", "")
3541 vlanID_pattern
= r
"(\d)*-(\d)*$"
3542 match_obj
= re
.match(vlanID_pattern
, vlan_range
)
3544 raise vimconn
.VimConnConflictException(
3545 "Invalid VLAN range for {}: {}.You must provide "
3546 "'{}' in format [start_ID - end_ID].".format(
3547 text_vlan_range
, vlanID_range
, text_vlan_range
3551 start_vlanid
, end_vlanid
= map(int, vlan_range
.split("-"))
3552 if start_vlanid
<= 0:
3553 raise vimconn
.VimConnConflictException(
3554 "Invalid VLAN range for {}: {}. Start ID can not be zero. For VLAN "
3555 "networks valid IDs are 1 to 4094 ".format(
3556 text_vlan_range
, vlanID_range
3560 if end_vlanid
> 4094:
3561 raise vimconn
.VimConnConflictException(
3562 "Invalid VLAN range for {}: {}. End VLAN ID can not be "
3563 "greater than 4094. For VLAN networks valid IDs are 1 to 4094 ".format(
3564 text_vlan_range
, vlanID_range
3568 if start_vlanid
> end_vlanid
:
3569 raise vimconn
.VimConnConflictException(
3570 "Invalid VLAN range for {}: {}. You must provide '{}'"
3571 " in format start_ID - end_ID and start_ID < end_ID ".format(
3572 text_vlan_range
, vlanID_range
, text_vlan_range
3576 def get_hosts_info(self
):
3577 """Get the information of deployed hosts
3578 Returns the hosts content"""
3580 print("osconnector: Getting Host info from VIM")
3584 self
._reload
_connection
()
3585 hypervisors
= self
.nova
.hypervisors
.list()
3587 for hype
in hypervisors
:
3588 h_list
.append(hype
.to_dict())
3590 return 1, {"hosts": h_list
}
3591 except nvExceptions
.NotFound
as e
:
3592 error_value
= -vimconn
.HTTP_Not_Found
3593 error_text
= str(e
) if len(e
.args
) == 0 else str(e
.args
[0])
3594 except (ksExceptions
.ClientException
, nvExceptions
.ClientException
) as e
:
3595 error_value
= -vimconn
.HTTP_Bad_Request
3599 + (str(e
) if len(e
.args
) == 0 else str(e
.args
[0]))
3602 # TODO insert exception vimconn.HTTP_Unauthorized
3603 # if reaching here is because an exception
3604 self
.logger
.debug("get_hosts_info " + error_text
)
3606 return error_value
, error_text
3608 def get_hosts(self
, vim_tenant
):
3609 """Get the hosts and deployed instances
3610 Returns the hosts content"""
3611 r
, hype_dict
= self
.get_hosts_info()
3616 hypervisors
= hype_dict
["hosts"]
3619 servers
= self
.nova
.servers
.list()
3620 for hype
in hypervisors
:
3621 for server
in servers
:
3623 server
.to_dict()["OS-EXT-SRV-ATTR:hypervisor_hostname"]
3624 == hype
["hypervisor_hostname"]
3627 hype
["vm"].append(server
.id)
3629 hype
["vm"] = [server
.id]
3632 except nvExceptions
.NotFound
as e
:
3633 error_value
= -vimconn
.HTTP_Not_Found
3634 error_text
= str(e
) if len(e
.args
) == 0 else str(e
.args
[0])
3635 except (ksExceptions
.ClientException
, nvExceptions
.ClientException
) as e
:
3636 error_value
= -vimconn
.HTTP_Bad_Request
3640 + (str(e
) if len(e
.args
) == 0 else str(e
.args
[0]))
3643 # TODO insert exception vimconn.HTTP_Unauthorized
3644 # if reaching here is because an exception
3645 self
.logger
.debug("get_hosts " + error_text
)
3647 return error_value
, error_text
3649 def new_affinity_group(self
, affinity_group_data
):
3650 """Adds a server group to VIM
3651 affinity_group_data contains a dictionary with information, keys:
3652 name: name in VIM for the server group
3653 type: affinity or anti-affinity
3654 scope: Only nfvi-node allowed
3655 Returns the server group identifier"""
3656 self
.logger
.debug("Adding Server Group '%s'", str(affinity_group_data
))
3659 name
= affinity_group_data
["name"]
3660 policy
= affinity_group_data
["type"]
3662 self
._reload
_connection
()
3663 new_server_group
= self
.nova
.server_groups
.create(name
, policy
)
3665 return new_server_group
.id
3667 ksExceptions
.ClientException
,
3668 nvExceptions
.ClientException
,
3672 self
._format
_exception
(e
)
3674 def get_affinity_group(self
, affinity_group_id
):
3675 """Obtain server group details from the VIM. Returns the server group detais as a dict"""
3676 self
.logger
.debug("Getting flavor '%s'", affinity_group_id
)
3678 self
._reload
_connection
()
3679 server_group
= self
.nova
.server_groups
.find(id=affinity_group_id
)
3681 return server_group
.to_dict()
3683 nvExceptions
.NotFound
,
3684 nvExceptions
.ClientException
,
3685 ksExceptions
.ClientException
,
3688 self
._format
_exception
(e
)
3690 def delete_affinity_group(self
, affinity_group_id
):
3691 """Deletes a server group from the VIM. Returns the old affinity_group_id"""
3692 self
.logger
.debug("Getting server group '%s'", affinity_group_id
)
3694 self
._reload
_connection
()
3695 self
.nova
.server_groups
.delete(affinity_group_id
)
3697 return affinity_group_id
3699 nvExceptions
.NotFound
,
3700 ksExceptions
.ClientException
,
3701 nvExceptions
.ClientException
,
3704 self
._format
_exception
(e
)
3706 def get_vdu_state(self
, vm_id
):
3708 Getting the state of a vdu
3710 vm_id: ID of an instance
3712 self
.logger
.debug("Getting the status of VM")
3713 self
.logger
.debug("VIM VM ID %s", vm_id
)
3714 self
._reload
_connection
()
3715 server_dict
= self
._find
_nova
_server
(vm_id
)
3717 server_dict
["status"],
3718 server_dict
["flavor"]["id"],
3719 server_dict
["OS-EXT-SRV-ATTR:host"],
3720 server_dict
["OS-EXT-AZ:availability_zone"],
3722 self
.logger
.debug("vdu_data %s", vdu_data
)
3725 def check_compute_availability(self
, host
, server_flavor_details
):
3726 self
._reload
_connection
()
3727 hypervisor_search
= self
.nova
.hypervisors
.search(
3728 hypervisor_match
=host
, servers
=True
3730 for hypervisor
in hypervisor_search
:
3731 hypervisor_id
= hypervisor
.to_dict()["id"]
3732 hypervisor_details
= self
.nova
.hypervisors
.get(hypervisor
=hypervisor_id
)
3733 hypervisor_dict
= hypervisor_details
.to_dict()
3734 hypervisor_temp
= json
.dumps(hypervisor_dict
)
3735 hypervisor_json
= json
.loads(hypervisor_temp
)
3736 resources_available
= [
3737 hypervisor_json
["free_ram_mb"],
3738 hypervisor_json
["disk_available_least"],
3739 hypervisor_json
["vcpus"] - hypervisor_json
["vcpus_used"],
3741 compute_available
= all(
3742 x
> y
for x
, y
in zip(resources_available
, server_flavor_details
)
3744 if compute_available
:
    def check_availability_zone(
        self, old_az, server_flavor_details, old_host, host=None
    ):
        """Check whether the availability zone *old_az* can host the instance.

        Args:
            old_az: availability-zone name the instance currently runs in.
            server_flavor_details: [ram, disk, vcpus] the instance requires.
            old_host: hypervisor hostname the instance currently runs on.
            host: optional target hypervisor; when given, only that host is
                considered.

        Returns:
            dict with keys "zone_check" (bool: the zone/host is acceptable)
            and "compute_availability" (hostname with enough free capacity,
            or None).
        """
        self._reload_connection()
        az_check = {"zone_check": False, "compute_availability": None}
        aggregates_list = self.nova.aggregates.list()

        for aggregate in aggregates_list:
            aggregate_details = aggregate.to_dict()
            # JSON round-trip produces a plain-data copy of the aggregate dict
            aggregate_temp = json.dumps(aggregate_details)
            aggregate_json = json.loads(aggregate_temp)

            if aggregate_json["availability_zone"] == old_az:
                hosts_list = aggregate_json["hosts"]

                if host is not None:
                    # Explicit target: it must belong to this zone and have
                    # enough free resources.
                    if host in hosts_list:
                        az_check["zone_check"] = True
                        available_compute_id = self.check_compute_availability(
                            host, server_flavor_details
                        )

                        if available_compute_id is not None:
                            az_check["compute_availability"] = available_compute_id
                else:
                    # No target given: pick any other host in the zone with
                    # enough free capacity.
                    for check_host in hosts_list:
                        if check_host != old_host:
                            available_compute_id = self.check_compute_availability(
                                check_host, server_flavor_details
                            )

                            if available_compute_id is not None:
                                az_check["zone_check"] = True
                                az_check["compute_availability"] = available_compute_id
                                break
                    else:
                        # Loop completed without break: the zone exists but
                        # no other host had enough capacity.
                        az_check["zone_check"] = True

        return az_check
    def migrate_instance(self, vm_id, compute_host=None):
        """
        Migrate a vdu
        param:
            vm_id: ID of an instance
            compute_host: Host to migrate the vdu to
        """
        self._reload_connection()
        try:
            # get_vdu_state returns [status, flavor id, host, availability zone]
            instance_state = self.get_vdu_state(vm_id)
            server_flavor_id = instance_state[1]
            server_hypervisor_name = instance_state[2]
            server_availability_zone = instance_state[3]

            server_flavor = self.nova.flavors.find(id=server_flavor_id).to_dict()
            # Resources the target host must provide: ram, disk, vcpus
            server_flavor_details = [
                server_flavor["ram"],
                server_flavor["disk"],
                server_flavor["vcpus"],
            ]

            # Migrating onto the host the instance already runs on is an error
            if compute_host == server_hypervisor_name:
                raise vimconn.VimConnException(
                    "Unable to migrate instance '{}' to the same host '{}'".format(
                        vm_id, compute_host
                    ),
                    http_code=vimconn.HTTP_Bad_Request,
                )

            # The instance may only move within its current availability zone
            az_status = self.check_availability_zone(
                server_availability_zone,
                server_flavor_details,
                server_hypervisor_name,
                compute_host,
            )
            availability_zone_check = az_status["zone_check"]
            available_compute_id = az_status.get("compute_availability")

            if availability_zone_check is False:
                raise vimconn.VimConnException(
                    "Unable to migrate instance '{}' to a different availability zone".format(
                        vm_id
                    ),
                    http_code=vimconn.HTTP_Bad_Request,
                )

            if available_compute_id is not None:
                # Live-migrate, then wait for the instance to be ACTIVE again
                self.nova.servers.live_migrate(
                    server=vm_id,
                    host=available_compute_id,
                    block_migration=True,
                    disk_over_commit=False,
                )
                state = "MIGRATING"
                changed_compute_host = ""

                # NOTE(review): 'state' is assigned just above, so this check
                # is always true — confirm whether a real status poll was
                # intended here.
                if state == "MIGRATING":
                    vm_state = self.__wait_for_vm(vm_id, "ACTIVE")
                    changed_compute_host = self.get_vdu_state(vm_id)[2]

                if vm_state and changed_compute_host == available_compute_id:
                    self.logger.debug(
                        "Instance '{}' migrated to the new compute host '{}'".format(
                            vm_id, changed_compute_host
                        )
                    )

                    return state, available_compute_id
                else:
                    raise vimconn.VimConnException(
                        "Migration Failed. Instance '{}' not moved to the new host {}".format(
                            vm_id, available_compute_id
                        ),
                        http_code=vimconn.HTTP_Bad_Request,
                    )
            else:
                raise vimconn.VimConnException(
                    "Compute '{}' not available or does not have enough resources to migrate the instance".format(
                        available_compute_id
                    ),
                    http_code=vimconn.HTTP_Bad_Request,
                )
        except (
            nvExceptions.BadRequest,
            nvExceptions.ClientException,
            nvExceptions.NotFound,
        ) as e:
            self._format_exception(e)
    def resize_instance(self, vm_id, new_flavor_id):
        """
        For resizing the vm based on the given
        flavor details
        param:
            vm_id : ID of an instance
            new_flavor_id : Flavor id to be resized
        Return the status of a resized instance
        """
        self._reload_connection()
        self.logger.debug("resize the flavor of an instance")
        instance_status, old_flavor_id, compute_host, az = self.get_vdu_state(vm_id)
        # Compare disk sizes up front: shrinking the disk is rejected below
        old_flavor_disk = self.nova.flavors.find(id=old_flavor_id).to_dict()["disk"]
        new_flavor_disk = self.nova.flavors.find(id=new_flavor_id).to_dict()["disk"]

        try:
            # Resize is only attempted from ACTIVE or SHUTOFF
            if instance_status == "ACTIVE" or instance_status == "SHUTOFF":
                if old_flavor_disk > new_flavor_disk:
                    raise nvExceptions.BadRequest(
                        400,
                        message="Server disk resize failed. Resize to lower disk flavor is not allowed",
                    )
                else:
                    # Ask Nova to resize, then wait for VERIFY_RESIZE before
                    # confirming
                    self.nova.servers.resize(server=vm_id, flavor=new_flavor_id)
                    vm_state = self.__wait_for_vm(vm_id, "VERIFY_RESIZE")

                    if vm_state:
                        instance_resized_status = self.confirm_resize(vm_id)

                        return instance_resized_status
                    else:
                        raise nvExceptions.BadRequest(
                            409,
                            message="Cannot 'resize' vm_state is in ERROR",
                        )

            else:
                self.logger.debug("ERROR : Instance is not in ACTIVE or SHUTOFF state")
                raise nvExceptions.BadRequest(
                    409,
                    message="Cannot 'resize' instance while it is in vm_state resized",
                )
        except (
            nvExceptions.BadRequest,
            nvExceptions.ClientException,
            nvExceptions.NotFound,
        ) as e:
            self._format_exception(e)
3910 def confirm_resize(self
, vm_id
):
3912 Confirm the resize of an instance
3914 vm_id: ID of an instance
3916 self
._reload
_connection
()
3917 self
.nova
.servers
.confirm_resize(server
=vm_id
)
3918 if self
.get_vdu_state(vm_id
)[0] == "VERIFY_RESIZE":
3919 self
.__wait
_for
_vm
(vm_id
, "ACTIVE")
3920 instance_status
= self
.get_vdu_state(vm_id
)[0]
3921 return instance_status
3923 def get_monitoring_data(self
):
3925 self
.logger
.debug("Getting servers and ports data from Openstack VIMs.")
3926 self
._reload
_connection
()
3927 all_servers
= self
.nova
.servers
.list(detailed
=True)
3929 for server
in all_servers
:
3930 server
.flavor
["id"] = self
.nova
.flavors
.find(
3931 name
=server
.flavor
["original_name"]
3933 except nClient
.exceptions
.NotFound
as e
:
3934 self
.logger
.warning(str(e
.message
))
3935 all_ports
= self
.neutron
.list_ports()
3936 return all_servers
, all_ports
3938 vimconn
.VimConnException
,
3939 vimconn
.VimConnNotFoundException
,
3940 vimconn
.VimConnConnectionException
,
3942 raise vimconn
.VimConnException(
3943 f
"Exception in monitoring while getting VMs and ports status: {str(e)}"