1 # -*- coding: utf-8 -*-
4 # Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
5 # This file is part of openmano
8 # Licensed under the Apache License, Version 2.0 (the "License"); you may
9 # not use this file except in compliance with the License. You may obtain
10 # a copy of the License at
12 # http://www.apache.org/licenses/LICENSE-2.0
14 # Unless required by applicable law or agreed to in writing, software
15 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
16 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
17 # License for the specific language governing permissions and limitations
22 osconnector implements all the methods to interact with openstack using the python-neutronclient.
24 For the VNF forwarding graph, The OpenStack VIM connector calls the
25 networking-sfc Neutron extension methods, whose resources are mapped
26 to the VIM connector's SFC resources as follows:
27 - Classification (OSM) -> Flow Classifier (Neutron)
28 - Service Function Instance (OSM) -> Port Pair (Neutron)
29 - Service Function (OSM) -> Port Pair Group (Neutron)
30 - Service Function Path (OSM) -> Port Chain (Neutron)
34 from http
.client
import HTTPException
37 from pprint
import pformat
41 from typing
import Dict
, List
, Optional
, Tuple
43 from cinderclient
import client
as cClient
44 from glanceclient
import client
as glClient
45 import glanceclient
.exc
as gl1Exceptions
46 from keystoneauth1
import session
47 from keystoneauth1
.identity
import v2
, v3
48 import keystoneclient
.exceptions
as ksExceptions
49 import keystoneclient
.v2_0
.client
as ksClient_v2
50 import keystoneclient
.v3
.client
as ksClient_v3
52 from neutronclient
.common
import exceptions
as neExceptions
53 from neutronclient
.neutron
import client
as neClient
54 from novaclient
import client
as nClient
, exceptions
as nvExceptions
55 from osm_ro_plugin
import vimconn
56 from requests
.exceptions
import ConnectionError
59 __author__
= "Alfonso Tierno, Gerardo Garcia, Pablo Montes, xFlow Research, Igor D.C., Eduardo Sousa"
60 __date__
= "$22-sep-2017 23:59:59$"
62 """contain the openstack virtual machine status to openmano status"""
63 vmStatus2manoFormat
= {
66 "SUSPENDED": "SUSPENDED",
67 "SHUTOFF": "INACTIVE",
72 netStatus2manoFormat
= {
75 "INACTIVE": "INACTIVE",
81 supportedClassificationTypes
= ["legacy_flow_classifier"]
83 # global var to have a timeout creating and deleting volumes
class SafeDumper(yaml.SafeDumper):
    """YAML safe dumper that tolerates the dict subclasses returned by Openstack APIs."""

    def represent_data(self, data):
        # Openstack APIs use custom subclasses of dict and YAML safe dumper
        # is designed to not handle that (reference issue 142 of pyyaml)
        if isinstance(data, dict) and data.__class__ != dict:
            # A simple solution is to convert those items back to dicts
            data = dict(data.items())

        return super(SafeDumper, self).represent_data(data)
99 class vimconnector(vimconn
.VimConnector
):
114 """using common constructor parameters. In this case
115 'url' is the keystone authorization url,
116 'url_admin' is not use
118 api_version
= config
.get("APIversion")
120 if api_version
and api_version
not in ("v3.3", "v2.0", "2", "3"):
121 raise vimconn
.VimConnException(
122 "Invalid value '{}' for config:APIversion. "
123 "Allowed values are 'v3.3', 'v2.0', '2' or '3'".format(api_version
)
126 vim_type
= config
.get("vim_type")
128 if vim_type
and vim_type
not in ("vio", "VIO"):
129 raise vimconn
.VimConnException(
130 "Invalid value '{}' for config:vim_type."
131 "Allowed values are 'vio' or 'VIO'".format(vim_type
)
134 if config
.get("dataplane_net_vlan_range") is not None:
135 # validate vlan ranges provided by user
136 self
._validate
_vlan
_ranges
(
137 config
.get("dataplane_net_vlan_range"), "dataplane_net_vlan_range"
140 if config
.get("multisegment_vlan_range") is not None:
141 # validate vlan ranges provided by user
142 self
._validate
_vlan
_ranges
(
143 config
.get("multisegment_vlan_range"), "multisegment_vlan_range"
146 vimconn
.VimConnector
.__init
__(
160 if self
.config
.get("insecure") and self
.config
.get("ca_cert"):
161 raise vimconn
.VimConnException(
162 "options insecure and ca_cert are mutually exclusive"
167 if self
.config
.get("insecure"):
170 if self
.config
.get("ca_cert"):
171 self
.verify
= self
.config
.get("ca_cert")
174 raise TypeError("url param can not be NoneType")
176 self
.persistent_info
= persistent_info
177 self
.availability_zone
= persistent_info
.get("availability_zone", None)
178 self
.session
= persistent_info
.get("session", {"reload_client": True})
179 self
.my_tenant_id
= self
.session
.get("my_tenant_id")
180 self
.nova
= self
.session
.get("nova")
181 self
.neutron
= self
.session
.get("neutron")
182 self
.cinder
= self
.session
.get("cinder")
183 self
.glance
= self
.session
.get("glance")
184 # self.glancev1 = self.session.get("glancev1")
185 self
.keystone
= self
.session
.get("keystone")
186 self
.api_version3
= self
.session
.get("api_version3")
187 self
.vim_type
= self
.config
.get("vim_type")
190 self
.vim_type
= self
.vim_type
.upper()
192 if self
.config
.get("use_internal_endpoint"):
193 self
.endpoint_type
= "internalURL"
195 self
.endpoint_type
= None
197 logging
.getLogger("urllib3").setLevel(logging
.WARNING
)
198 logging
.getLogger("keystoneauth").setLevel(logging
.WARNING
)
199 logging
.getLogger("novaclient").setLevel(logging
.WARNING
)
200 self
.logger
= logging
.getLogger("ro.vim.openstack")
202 # allow security_groups to be a list or a single string
203 if isinstance(self
.config
.get("security_groups"), str):
204 self
.config
["security_groups"] = [self
.config
["security_groups"]]
206 self
.security_groups_id
= None
208 # ###### VIO Specific Changes #########
209 if self
.vim_type
== "VIO":
210 self
.logger
= logging
.getLogger("ro.vim.vio")
213 self
.logger
.setLevel(getattr(logging
, log_level
))
215 def __getitem__(self
, index
):
216 """Get individuals parameters.
218 if index
== "project_domain_id":
219 return self
.config
.get("project_domain_id")
220 elif index
== "user_domain_id":
221 return self
.config
.get("user_domain_id")
223 return vimconn
.VimConnector
.__getitem
__(self
, index
)
225 def __setitem__(self
, index
, value
):
226 """Set individuals parameters and it is marked as dirty so to force connection reload.
228 if index
== "project_domain_id":
229 self
.config
["project_domain_id"] = value
230 elif index
== "user_domain_id":
231 self
.config
["user_domain_id"] = value
233 vimconn
.VimConnector
.__setitem
__(self
, index
, value
)
235 self
.session
["reload_client"] = True
237 def serialize(self
, value
):
238 """Serialization of python basic types.
240 In the case value is not serializable a message will be logged and a
241 simple representation of the data that cannot be converted back to
244 if isinstance(value
, str):
249 value
, Dumper
=SafeDumper
, default_flow_style
=True, width
=256
251 except yaml
.representer
.RepresenterError
:
253 "The following entity cannot be serialized in YAML:\n\n%s\n\n",
260 def _reload_connection(self
):
261 """Called before any operation, it check if credentials has changed
262 Throw keystoneclient.apiclient.exceptions.AuthorizationFailure
264 # TODO control the timing and possible token timeout, but it seams that python client does this task for us :-)
265 if self
.session
["reload_client"]:
266 if self
.config
.get("APIversion"):
267 self
.api_version3
= (
268 self
.config
["APIversion"] == "v3.3"
269 or self
.config
["APIversion"] == "3"
271 else: # get from ending auth_url that end with v3 or with v2.0
272 self
.api_version3
= self
.url
.endswith("/v3") or self
.url
.endswith(
276 self
.session
["api_version3"] = self
.api_version3
278 if self
.api_version3
:
279 if self
.config
.get("project_domain_id") or self
.config
.get(
280 "project_domain_name"
282 project_domain_id_default
= None
284 project_domain_id_default
= "default"
286 if self
.config
.get("user_domain_id") or self
.config
.get(
289 user_domain_id_default
= None
291 user_domain_id_default
= "default"
295 password
=self
.passwd
,
296 project_name
=self
.tenant_name
,
297 project_id
=self
.tenant_id
,
298 project_domain_id
=self
.config
.get(
299 "project_domain_id", project_domain_id_default
301 user_domain_id
=self
.config
.get(
302 "user_domain_id", user_domain_id_default
304 project_domain_name
=self
.config
.get("project_domain_name"),
305 user_domain_name
=self
.config
.get("user_domain_name"),
311 password
=self
.passwd
,
312 tenant_name
=self
.tenant_name
,
313 tenant_id
=self
.tenant_id
,
316 sess
= session
.Session(auth
=auth
, verify
=self
.verify
)
317 # addedd region_name to keystone, nova, neutron and cinder to support distributed cloud for Wind River
318 # Titanium cloud and StarlingX
319 region_name
= self
.config
.get("region_name")
321 if self
.api_version3
:
322 self
.keystone
= ksClient_v3
.Client(
324 endpoint_type
=self
.endpoint_type
,
325 region_name
=region_name
,
328 self
.keystone
= ksClient_v2
.Client(
329 session
=sess
, endpoint_type
=self
.endpoint_type
332 self
.session
["keystone"] = self
.keystone
333 # In order to enable microversion functionality an explicit microversion must be specified in "config".
334 # This implementation approach is due to the warning message in
335 # https://developer.openstack.org/api-guide/compute/microversions.html
336 # where it is stated that microversion backwards compatibility is not guaranteed and clients should
337 # always require an specific microversion.
338 # To be able to use "device role tagging" functionality define "microversion: 2.32" in datacenter config
339 version
= self
.config
.get("microversion")
344 # addedd region_name to keystone, nova, neutron and cinder to support distributed cloud for Wind River
345 # Titanium cloud and StarlingX
346 self
.nova
= self
.session
["nova"] = nClient
.Client(
349 endpoint_type
=self
.endpoint_type
,
350 region_name
=region_name
,
352 self
.neutron
= self
.session
["neutron"] = neClient
.Client(
355 endpoint_type
=self
.endpoint_type
,
356 region_name
=region_name
,
358 self
.cinder
= self
.session
["cinder"] = cClient
.Client(
361 endpoint_type
=self
.endpoint_type
,
362 region_name
=region_name
,
366 self
.my_tenant_id
= self
.session
["my_tenant_id"] = sess
.get_project_id()
368 self
.logger
.error("Cannot get project_id from session", exc_info
=True)
370 if self
.endpoint_type
== "internalURL":
371 glance_service_id
= self
.keystone
.services
.list(name
="glance")[0].id
372 glance_endpoint
= self
.keystone
.endpoints
.list(
373 glance_service_id
, interface
="internal"
376 glance_endpoint
= None
378 self
.glance
= self
.session
["glance"] = glClient
.Client(
379 2, session
=sess
, endpoint
=glance_endpoint
381 # using version 1 of glance client in new_image()
382 # self.glancev1 = self.session["glancev1"] = glClient.Client("1", session=sess,
383 # endpoint=glance_endpoint)
384 self
.session
["reload_client"] = False
385 self
.persistent_info
["session"] = self
.session
386 # add availablity zone info inside self.persistent_info
387 self
._set
_availablity
_zones
()
388 self
.persistent_info
["availability_zone"] = self
.availability_zone
389 # force to get again security_groups_ids next time they are needed
390 self
.security_groups_id
= None
392 def __net_os2mano(self
, net_list_dict
):
393 """Transform the net openstack format to mano format
394 net_list_dict can be a list of dict or a single dict"""
395 if type(net_list_dict
) is dict:
396 net_list_
= (net_list_dict
,)
397 elif type(net_list_dict
) is list:
398 net_list_
= net_list_dict
400 raise TypeError("param net_list_dict must be a list or a dictionary")
401 for net
in net_list_
:
402 if net
.get("provider:network_type") == "vlan":
405 net
["type"] = "bridge"
407 def __classification_os2mano(self
, class_list_dict
):
408 """Transform the openstack format (Flow Classifier) to mano format
409 (Classification) class_list_dict can be a list of dict or a single dict
411 if isinstance(class_list_dict
, dict):
412 class_list_
= [class_list_dict
]
413 elif isinstance(class_list_dict
, list):
414 class_list_
= class_list_dict
416 raise TypeError("param class_list_dict must be a list or a dictionary")
417 for classification
in class_list_
:
418 id = classification
.pop("id")
419 name
= classification
.pop("name")
420 description
= classification
.pop("description")
421 project_id
= classification
.pop("project_id")
422 tenant_id
= classification
.pop("tenant_id")
423 original_classification
= copy
.deepcopy(classification
)
424 classification
.clear()
425 classification
["ctype"] = "legacy_flow_classifier"
426 classification
["definition"] = original_classification
427 classification
["id"] = id
428 classification
["name"] = name
429 classification
["description"] = description
430 classification
["project_id"] = project_id
431 classification
["tenant_id"] = tenant_id
433 def __sfi_os2mano(self
, sfi_list_dict
):
434 """Transform the openstack format (Port Pair) to mano format (SFI)
435 sfi_list_dict can be a list of dict or a single dict
437 if isinstance(sfi_list_dict
, dict):
438 sfi_list_
= [sfi_list_dict
]
439 elif isinstance(sfi_list_dict
, list):
440 sfi_list_
= sfi_list_dict
442 raise TypeError("param sfi_list_dict must be a list or a dictionary")
444 for sfi
in sfi_list_
:
445 sfi
["ingress_ports"] = []
446 sfi
["egress_ports"] = []
448 if sfi
.get("ingress"):
449 sfi
["ingress_ports"].append(sfi
["ingress"])
451 if sfi
.get("egress"):
452 sfi
["egress_ports"].append(sfi
["egress"])
456 params
= sfi
.get("service_function_parameters")
460 correlation
= params
.get("correlation")
465 sfi
["sfc_encap"] = sfc_encap
466 del sfi
["service_function_parameters"]
468 def __sf_os2mano(self
, sf_list_dict
):
469 """Transform the openstack format (Port Pair Group) to mano format (SF)
470 sf_list_dict can be a list of dict or a single dict
472 if isinstance(sf_list_dict
, dict):
473 sf_list_
= [sf_list_dict
]
474 elif isinstance(sf_list_dict
, list):
475 sf_list_
= sf_list_dict
477 raise TypeError("param sf_list_dict must be a list or a dictionary")
480 del sf
["port_pair_group_parameters"]
481 sf
["sfis"] = sf
["port_pairs"]
484 def __sfp_os2mano(self
, sfp_list_dict
):
485 """Transform the openstack format (Port Chain) to mano format (SFP)
486 sfp_list_dict can be a list of dict or a single dict
488 if isinstance(sfp_list_dict
, dict):
489 sfp_list_
= [sfp_list_dict
]
490 elif isinstance(sfp_list_dict
, list):
491 sfp_list_
= sfp_list_dict
493 raise TypeError("param sfp_list_dict must be a list or a dictionary")
495 for sfp
in sfp_list_
:
496 params
= sfp
.pop("chain_parameters")
500 correlation
= params
.get("correlation")
505 sfp
["sfc_encap"] = sfc_encap
506 sfp
["spi"] = sfp
.pop("chain_id")
507 sfp
["classifications"] = sfp
.pop("flow_classifiers")
508 sfp
["service_functions"] = sfp
.pop("port_pair_groups")
510 # placeholder for now; read TODO note below
511 def _validate_classification(self
, type, definition
):
512 # only legacy_flow_classifier Type is supported at this point
514 # TODO(igordcard): this method should be an abstract method of an
515 # abstract Classification class to be implemented by the specific
516 # Types. Also, abstract vimconnector should call the validation
517 # method before the implemented VIM connectors are called.
519 def _format_exception(self
, exception
):
520 """Transform a keystone, nova, neutron exception into a vimconn exception discovering the cause"""
521 message_error
= str(exception
)
527 neExceptions
.NetworkNotFoundClient
,
528 nvExceptions
.NotFound
,
529 ksExceptions
.NotFound
,
530 gl1Exceptions
.HTTPNotFound
,
533 raise vimconn
.VimConnNotFoundException(
534 type(exception
).__name
__ + ": " + message_error
540 gl1Exceptions
.HTTPException
,
541 gl1Exceptions
.CommunicationError
,
543 ksExceptions
.ConnectionError
,
544 neExceptions
.ConnectionFailed
,
547 if type(exception
).__name
__ == "SSLError":
548 tip
= " (maybe option 'insecure' must be added to the VIM)"
550 raise vimconn
.VimConnConnectionException(
551 "Invalid URL or credentials{}: {}".format(tip
, message_error
)
557 nvExceptions
.BadRequest
,
558 ksExceptions
.BadRequest
,
561 raise vimconn
.VimConnException(
562 type(exception
).__name
__ + ": " + message_error
567 nvExceptions
.ClientException
,
568 ksExceptions
.ClientException
,
569 neExceptions
.NeutronException
,
572 raise vimconn
.VimConnUnexpectedResponse(
573 type(exception
).__name
__ + ": " + message_error
575 elif isinstance(exception
, nvExceptions
.Conflict
):
576 raise vimconn
.VimConnConflictException(
577 type(exception
).__name
__ + ": " + message_error
579 elif isinstance(exception
, vimconn
.VimConnException
):
582 self
.logger
.error("General Exception " + message_error
, exc_info
=True)
584 raise vimconn
.VimConnConnectionException(
585 type(exception
).__name
__ + ": " + message_error
588 def _get_ids_from_name(self
):
590 Obtain ids from name of tenant and security_groups. Store at self .security_groups_id"
593 # get tenant_id if only tenant_name is supplied
594 self
._reload
_connection
()
596 if not self
.my_tenant_id
:
597 raise vimconn
.VimConnConnectionException(
598 "Error getting tenant information from name={} id={}".format(
599 self
.tenant_name
, self
.tenant_id
603 if self
.config
.get("security_groups") and not self
.security_groups_id
:
604 # convert from name to id
605 neutron_sg_list
= self
.neutron
.list_security_groups(
606 tenant_id
=self
.my_tenant_id
609 self
.security_groups_id
= []
610 for sg
in self
.config
.get("security_groups"):
611 for neutron_sg
in neutron_sg_list
:
612 if sg
in (neutron_sg
["id"], neutron_sg
["name"]):
613 self
.security_groups_id
.append(neutron_sg
["id"])
616 self
.security_groups_id
= None
618 raise vimconn
.VimConnConnectionException(
619 "Not found security group {} for this tenant".format(sg
)
622 def check_vim_connectivity(self
):
623 # just get network list to check connectivity and credentials
624 self
.get_network_list(filter_dict
={})
626 def get_tenant_list(self
, filter_dict
={}):
627 """Obtain tenants of VIM
628 filter_dict can contain the following keys:
629 name: filter by tenant name
630 id: filter by tenant uuid/id
632 Returns the tenant list of dictionaries: [{'name':'<name>, 'id':'<id>, ...}, ...]
634 self
.logger
.debug("Getting tenants from VIM filter: '%s'", str(filter_dict
))
637 self
._reload
_connection
()
639 if self
.api_version3
:
640 project_class_list
= self
.keystone
.projects
.list(
641 name
=filter_dict
.get("name")
644 project_class_list
= self
.keystone
.tenants
.findall(**filter_dict
)
648 for project
in project_class_list
:
649 if filter_dict
.get("id") and filter_dict
["id"] != project
.id:
652 project_list
.append(project
.to_dict())
656 ksExceptions
.ConnectionError
,
657 ksExceptions
.ClientException
,
660 self
._format
_exception
(e
)
662 def new_tenant(self
, tenant_name
, tenant_description
):
663 """Adds a new tenant to openstack VIM. Returns the tenant identifier"""
664 self
.logger
.debug("Adding a new tenant name: %s", tenant_name
)
667 self
._reload
_connection
()
669 if self
.api_version3
:
670 project
= self
.keystone
.projects
.create(
672 self
.config
.get("project_domain_id", "default"),
673 description
=tenant_description
,
677 project
= self
.keystone
.tenants
.create(tenant_name
, tenant_description
)
681 ksExceptions
.ConnectionError
,
682 ksExceptions
.ClientException
,
683 ksExceptions
.BadRequest
,
686 self
._format
_exception
(e
)
688 def delete_tenant(self
, tenant_id
):
689 """Delete a tenant from openstack VIM. Returns the old tenant identifier"""
690 self
.logger
.debug("Deleting tenant %s from VIM", tenant_id
)
693 self
._reload
_connection
()
695 if self
.api_version3
:
696 self
.keystone
.projects
.delete(tenant_id
)
698 self
.keystone
.tenants
.delete(tenant_id
)
702 ksExceptions
.ConnectionError
,
703 ksExceptions
.ClientException
,
704 ksExceptions
.NotFound
,
707 self
._format
_exception
(e
)
715 provider_network_profile
=None,
717 """Adds a tenant network to VIM
719 'net_name': name of the network
721 'bridge': overlay isolated network
722 'data': underlay E-LAN network for Passthrough and SRIOV interfaces
723 'ptp': underlay E-LINE network for Passthrough and SRIOV interfaces.
724 'ip_profile': is a dict containing the IP parameters of the network
725 'ip_version': can be "IPv4" or "IPv6" (Currently only IPv4 is implemented)
726 'subnet_address': ip_prefix_schema, that is X.X.X.X/Y
727 'gateway_address': (Optional) ip_schema, that is X.X.X.X
728 'dns_address': (Optional) comma separated list of ip_schema, e.g. X.X.X.X[,X,X,X,X]
729 'dhcp_enabled': True or False
730 'dhcp_start_address': ip_schema, first IP to grant
731 'dhcp_count': number of IPs to grant.
732 'shared': if this network can be seen/use by other tenants/organization
733 'provider_network_profile': (optional) contains {segmentation-id: vlan, network-type: vlan|vxlan,
734 physical-network: physnet-label}
735 Returns a tuple with the network identifier and created_items, or raises an exception on error
736 created_items can be None or a dictionary where this method can include key-values that will be passed to
737 the method delete_network. Can be used to store created segments, created l2gw connections, etc.
738 Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
742 "Adding a new network to VIM name '%s', type '%s'", net_name
, net_type
744 # self.logger.debug(">>>>>>>>>>>>>>>>>> IP profile %s", str(ip_profile))
749 if provider_network_profile
:
750 vlan
= provider_network_profile
.get("segmentation-id")
754 self
._reload
_connection
()
755 network_dict
= {"name": net_name
, "admin_state_up": True}
757 if net_type
in ("data", "ptp") or provider_network_profile
:
758 provider_physical_network
= None
760 if provider_network_profile
and provider_network_profile
.get(
763 provider_physical_network
= provider_network_profile
.get(
767 # provider-network must be one of the dataplane_physcial_netowrk if this is a list. If it is string
768 # or not declared, just ignore the checking
771 self
.config
.get("dataplane_physical_net"), (tuple, list)
773 and provider_physical_network
774 not in self
.config
["dataplane_physical_net"]
776 raise vimconn
.VimConnConflictException(
777 "Invalid parameter 'provider-network:physical-network' "
778 "for network creation. '{}' is not one of the declared "
779 "list at VIM_config:dataplane_physical_net".format(
780 provider_physical_network
784 # use the default dataplane_physical_net
785 if not provider_physical_network
:
786 provider_physical_network
= self
.config
.get(
787 "dataplane_physical_net"
790 # if it is non empty list, use the first value. If it is a string use the value directly
792 isinstance(provider_physical_network
, (tuple, list))
793 and provider_physical_network
795 provider_physical_network
= provider_physical_network
[0]
797 if not provider_physical_network
:
798 raise vimconn
.VimConnConflictException(
799 "missing information needed for underlay networks. Provide "
800 "'dataplane_physical_net' configuration at VIM or use the NS "
801 "instantiation parameter 'provider-network.physical-network'"
805 if not self
.config
.get("multisegment_support"):
807 "provider:physical_network"
808 ] = provider_physical_network
811 provider_network_profile
812 and "network-type" in provider_network_profile
815 "provider:network_type"
816 ] = provider_network_profile
["network-type"]
818 network_dict
["provider:network_type"] = self
.config
.get(
819 "dataplane_network_type", "vlan"
823 network_dict
["provider:segmentation_id"] = vlan
828 "provider:physical_network": "",
829 "provider:network_type": "vxlan",
831 segment_list
.append(segment1_dict
)
833 "provider:physical_network": provider_physical_network
,
834 "provider:network_type": "vlan",
838 segment2_dict
["provider:segmentation_id"] = vlan
839 elif self
.config
.get("multisegment_vlan_range"):
840 vlanID
= self
._generate
_multisegment
_vlanID
()
841 segment2_dict
["provider:segmentation_id"] = vlanID
844 # raise vimconn.VimConnConflictException(
845 # "You must provide "multisegment_vlan_range" at config dict before creating a multisegment
847 segment_list
.append(segment2_dict
)
848 network_dict
["segments"] = segment_list
850 # VIO Specific Changes. It needs a concrete VLAN
851 if self
.vim_type
== "VIO" and vlan
is None:
852 if self
.config
.get("dataplane_net_vlan_range") is None:
853 raise vimconn
.VimConnConflictException(
854 "You must provide 'dataplane_net_vlan_range' in format "
855 "[start_ID - end_ID] at VIM_config for creating underlay "
859 network_dict
["provider:segmentation_id"] = self
._generate
_vlanID
()
861 network_dict
["shared"] = shared
863 if self
.config
.get("disable_network_port_security"):
864 network_dict
["port_security_enabled"] = False
866 if self
.config
.get("neutron_availability_zone_hints"):
867 hints
= self
.config
.get("neutron_availability_zone_hints")
869 if isinstance(hints
, str):
872 network_dict
["availability_zone_hints"] = hints
874 new_net
= self
.neutron
.create_network({"network": network_dict
})
876 # create subnetwork, even if there is no profile
881 if not ip_profile
.get("subnet_address"):
882 # Fake subnet is required
883 subnet_rand
= random
.randint(0, 255)
884 ip_profile
["subnet_address"] = "192.168.{}.0/24".format(subnet_rand
)
886 if "ip_version" not in ip_profile
:
887 ip_profile
["ip_version"] = "IPv4"
890 "name": net_name
+ "-subnet",
891 "network_id": new_net
["network"]["id"],
892 "ip_version": 4 if ip_profile
["ip_version"] == "IPv4" else 6,
893 "cidr": ip_profile
["subnet_address"],
896 # Gateway should be set to None if not needed. Otherwise openstack assigns one by default
897 if ip_profile
.get("gateway_address"):
898 subnet
["gateway_ip"] = ip_profile
["gateway_address"]
900 subnet
["gateway_ip"] = None
902 if ip_profile
.get("dns_address"):
903 subnet
["dns_nameservers"] = ip_profile
["dns_address"].split(";")
905 if "dhcp_enabled" in ip_profile
:
906 subnet
["enable_dhcp"] = (
908 if ip_profile
["dhcp_enabled"] == "false"
909 or ip_profile
["dhcp_enabled"] is False
913 if ip_profile
.get("dhcp_start_address"):
914 subnet
["allocation_pools"] = []
915 subnet
["allocation_pools"].append(dict())
916 subnet
["allocation_pools"][0]["start"] = ip_profile
[
920 if ip_profile
.get("dhcp_count"):
921 # parts = ip_profile["dhcp_start_address"].split(".")
922 # ip_int = (int(parts[0]) << 24) + (int(parts[1]) << 16) + (int(parts[2]) << 8) + int(parts[3])
923 ip_int
= int(netaddr
.IPAddress(ip_profile
["dhcp_start_address"]))
924 ip_int
+= ip_profile
["dhcp_count"] - 1
925 ip_str
= str(netaddr
.IPAddress(ip_int
))
926 subnet
["allocation_pools"][0]["end"] = ip_str
928 # self.logger.debug(">>>>>>>>>>>>>>>>>> Subnet: %s", str(subnet))
929 self
.neutron
.create_subnet({"subnet": subnet
})
931 if net_type
== "data" and self
.config
.get("multisegment_support"):
932 if self
.config
.get("l2gw_support"):
933 l2gw_list
= self
.neutron
.list_l2_gateways().get("l2_gateways", ())
934 for l2gw
in l2gw_list
:
936 "l2_gateway_id": l2gw
["id"],
937 "network_id": new_net
["network"]["id"],
938 "segmentation_id": str(vlanID
),
940 new_l2gw_conn
= self
.neutron
.create_l2_gateway_connection(
941 {"l2_gateway_connection": l2gw_conn
}
945 + str(new_l2gw_conn
["l2_gateway_connection"]["id"])
948 return new_net
["network"]["id"], created_items
949 except Exception as e
:
950 # delete l2gw connections (if any) before deleting the network
951 for k
, v
in created_items
.items():
952 if not v
: # skip already deleted
956 k_item
, _
, k_id
= k
.partition(":")
958 if k_item
== "l2gwconn":
959 self
.neutron
.delete_l2_gateway_connection(k_id
)
960 except Exception as e2
:
962 "Error deleting l2 gateway connection: {}: {}".format(
963 type(e2
).__name
__, e2
968 self
.neutron
.delete_network(new_net
["network"]["id"])
970 self
._format
_exception
(e
)
972 def get_network_list(self
, filter_dict
={}):
973 """Obtain tenant networks of VIM
979 admin_state_up: boolean
981 Returns the network list of dictionaries
983 self
.logger
.debug("Getting network from VIM filter: '%s'", str(filter_dict
))
986 self
._reload
_connection
()
987 filter_dict_os
= filter_dict
.copy()
989 if self
.api_version3
and "tenant_id" in filter_dict_os
:
991 filter_dict_os
["project_id"] = filter_dict_os
.pop("tenant_id")
993 net_dict
= self
.neutron
.list_networks(**filter_dict_os
)
994 net_list
= net_dict
["networks"]
995 self
.__net
_os
2mano
(net_list
)
999 neExceptions
.ConnectionFailed
,
1000 ksExceptions
.ClientException
,
1001 neExceptions
.NeutronException
,
1004 self
._format
_exception
(e
)
1006 def get_network(self
, net_id
):
1007 """Obtain details of network from VIM
1008 Returns the network information from a network id"""
1009 self
.logger
.debug(" Getting tenant network %s from VIM", net_id
)
1010 filter_dict
= {"id": net_id
}
1011 net_list
= self
.get_network_list(filter_dict
)
1013 if len(net_list
) == 0:
1014 raise vimconn
.VimConnNotFoundException(
1015 "Network '{}' not found".format(net_id
)
1017 elif len(net_list
) > 1:
1018 raise vimconn
.VimConnConflictException(
1019 "Found more than one network with this criteria"
1024 for subnet_id
in net
.get("subnets", ()):
1026 subnet
= self
.neutron
.show_subnet(subnet_id
)
1027 except Exception as e
:
1029 "osconnector.get_network(): Error getting subnet %s %s"
1032 subnet
= {"id": subnet_id
, "fault": str(e
)}
1034 subnets
.append(subnet
)
1036 net
["subnets"] = subnets
1037 net
["encapsulation"] = net
.get("provider:network_type")
1038 net
["encapsulation_type"] = net
.get("provider:network_type")
1039 net
["segmentation_id"] = net
.get("provider:segmentation_id")
1040 net
["encapsulation_id"] = net
.get("provider:segmentation_id")
1044 def delete_network(self
, net_id
, created_items
=None):
1046 Removes a tenant network from VIM and its associated elements
1047 :param net_id: VIM identifier of the network, provided by method new_network
1048 :param created_items: dictionary with extra items to be deleted. provided by method new_network
1049 Returns the network identifier or raises an exception upon error or when network is not found
1051 self
.logger
.debug("Deleting network '%s' from VIM", net_id
)
1053 if created_items
is None:
1057 self
._reload
_connection
()
1058 # delete l2gw connections (if any) before deleting the network
1059 for k
, v
in created_items
.items():
1060 if not v
: # skip already deleted
1064 k_item
, _
, k_id
= k
.partition(":")
1065 if k_item
== "l2gwconn":
1066 self
.neutron
.delete_l2_gateway_connection(k_id
)
1067 except Exception as e
:
1069 "Error deleting l2 gateway connection: {}: {}".format(
1074 # delete VM ports attached to this networks before the network
1075 ports
= self
.neutron
.list_ports(network_id
=net_id
)
1076 for p
in ports
["ports"]:
1078 self
.neutron
.delete_port(p
["id"])
1079 except Exception as e
:
1080 self
.logger
.error("Error deleting port %s: %s", p
["id"], str(e
))
1082 self
.neutron
.delete_network(net_id
)
1086 neExceptions
.ConnectionFailed
,
1087 neExceptions
.NetworkNotFoundClient
,
1088 neExceptions
.NeutronException
,
1089 ksExceptions
.ClientException
,
1090 neExceptions
.NeutronException
,
1093 self
._format
_exception
(e
)
1095 def refresh_nets_status(self
, net_list
):
1096 """Get the status of the networks
1097 Params: the list of network identifiers
1098 Returns a dictionary with:
1099 net_id: #VIM id of this network
1100 status: #Mandatory. Text with one of:
1101 # DELETED (not found at vim)
1102 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
1103 # OTHER (Vim reported other status not understood)
1104 # ERROR (VIM indicates an ERROR status)
1105 # ACTIVE, INACTIVE, DOWN (admin down),
1106 # BUILD (on building process)
1108 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
1109 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
1113 for net_id
in net_list
:
1117 net_vim
= self
.get_network(net_id
)
1119 if net_vim
["status"] in netStatus2manoFormat
:
1120 net
["status"] = netStatus2manoFormat
[net_vim
["status"]]
1122 net
["status"] = "OTHER"
1123 net
["error_msg"] = "VIM status reported " + net_vim
["status"]
1125 if net
["status"] == "ACTIVE" and not net_vim
["admin_state_up"]:
1126 net
["status"] = "DOWN"
1128 net
["vim_info"] = self
.serialize(net_vim
)
1130 if net_vim
.get("fault"): # TODO
1131 net
["error_msg"] = str(net_vim
["fault"])
1132 except vimconn
.VimConnNotFoundException
as e
:
1133 self
.logger
.error("Exception getting net status: %s", str(e
))
1134 net
["status"] = "DELETED"
1135 net
["error_msg"] = str(e
)
1136 except vimconn
.VimConnException
as e
:
1137 self
.logger
.error("Exception getting net status: %s", str(e
))
1138 net
["status"] = "VIM_ERROR"
1139 net
["error_msg"] = str(e
)
1140 net_dict
[net_id
] = net
1143 def get_flavor(self
, flavor_id
):
1144 """Obtain flavor details from the VIM. Returns the flavor dict details"""
1145 self
.logger
.debug("Getting flavor '%s'", flavor_id
)
1148 self
._reload
_connection
()
1149 flavor
= self
.nova
.flavors
.find(id=flavor_id
)
1150 # TODO parse input and translate to VIM format (openmano_schemas.new_vminstance_response_schema)
1152 return flavor
.to_dict()
1154 nvExceptions
.NotFound
,
1155 nvExceptions
.ClientException
,
1156 ksExceptions
.ClientException
,
1159 self
._format
_exception
(e
)
1161 def get_flavor_id_from_data(self
, flavor_dict
):
1162 """Obtain flavor id that match the flavor description
1163 Returns the flavor_id or raises a vimconnNotFoundException
1164 flavor_dict: contains the required ram, vcpus, disk
1165 If 'use_existing_flavors' is set to True at config, the closer flavor that provides same or more ram, vcpus
1166 and disk is returned. Otherwise a flavor with exactly same ram, vcpus and disk is returned or a
1167 vimconnNotFoundException is raised
1169 exact_match
= False if self
.config
.get("use_existing_flavors") else True
1172 self
._reload
_connection
()
1173 flavor_candidate_id
= None
1174 flavor_candidate_data
= (10000, 10000, 10000)
1177 flavor_dict
["vcpus"],
1178 flavor_dict
["disk"],
1179 flavor_dict
.get("ephemeral", 0),
1180 flavor_dict
.get("swap", 0),
1183 extended
= flavor_dict
.get("extended", {})
1186 raise vimconn
.VimConnNotFoundException(
1187 "Flavor with EPA still not implemented"
1189 # if len(numas) > 1:
1190 # raise vimconn.VimConnNotFoundException("Cannot find any flavor with more than one numa")
1192 # numas = extended.get("numas")
1193 for flavor
in self
.nova
.flavors
.list():
1194 epa
= flavor
.get_keys()
1205 flavor
.swap
if isinstance(flavor
.swap
, int) else 0,
1207 if flavor_data
== flavor_target
:
1211 and flavor_target
< flavor_data
< flavor_candidate_data
1213 flavor_candidate_id
= flavor
.id
1214 flavor_candidate_data
= flavor_data
1216 if not exact_match
and flavor_candidate_id
:
1217 return flavor_candidate_id
1219 raise vimconn
.VimConnNotFoundException(
1220 "Cannot find any flavor matching '{}'".format(flavor_dict
)
1223 nvExceptions
.NotFound
,
1224 nvExceptions
.ClientException
,
1225 ksExceptions
.ClientException
,
1228 self
._format
_exception
(e
)
def process_resource_quota(quota: dict, prefix: str, extra_specs: dict) -> None:
    """Translate a resource quota description into flavor extra_specs entries.

    Args:
        quota (dict): Quota description; may carry "limit", "reserve" and/or "shares"
        prefix (str): Resource prefix used to build the extra_specs keys (e.g. "cpu")
        extra_specs (dict): Dict filled in place, used later during flavor creation
    """
    base_key = "quota:" + prefix

    if "limit" in quota:
        extra_specs[base_key + "_limit"] = quota["limit"]

    if "reserve" in quota:
        extra_specs[base_key + "_reservation"] = quota["reserve"]

    if "shares" in quota:
        # the "custom" level is required so that the share value is honoured
        extra_specs[base_key + "_shares_level"] = "custom"
        extra_specs[base_key + "_shares_share"] = quota["shares"]
def process_numa_memory(
    numa: dict, node_id: Optional[int], extra_specs: dict
) -> None:
    """Set the memory of one NUMA node in extra_specs.

    Args:
        numa (dict): A dictionary which includes numa information
        node_id (int): ID of numa node
        extra_specs (dict): To be filled in place.
    """
    amount_gb = numa.get("memory")
    if not amount_gb:
        return

    # descriptor expresses memory in GB; OpenStack expects MB per NUMA node
    extra_specs["hw:numa_mem.{}".format(node_id)] = int(amount_gb * 1024)
def process_numa_vcpu(numa: dict, node_id: int, extra_specs: dict) -> None:
    """Set the vcpus of one NUMA node in extra_specs.

    Args:
        numa (dict): A dictionary which includes numa information
        node_id (int): ID of numa node
        extra_specs (dict): To be filled in place.
    """
    vcpu_list = numa.get("vcpu")
    if not vcpu_list:
        return

    # OpenStack expects the vcpu ids as a comma-separated string
    extra_specs["hw:numa_cpus.{}".format(node_id)] = ",".join(
        str(v) for v in vcpu_list
    )
def process_numa_paired_threads(numa: dict, extra_specs: dict) -> Optional[int]:
    """Fill up extra_specs if numa has paired-threads.

    Args:
        numa (dict): A dictionary which includes numa information
        extra_specs (dict): To be filled in place.

    Returns:
        threads (int): Number of virtual cpus, or None if not applicable
    """
    pairs = numa.get("paired-threads")
    if not pairs:
        return None

    # cpu_thread_policy "require" implies that the compute node must have an
    # SMT architecture
    extra_specs["hw:cpu_thread_policy"] = "require"
    extra_specs["hw:cpu_policy"] = "dedicated"

    # each pair accounts for two virtual cpus
    return pairs * 2
def process_numa_cores(numa: dict, extra_specs: dict) -> Optional[int]:
    """Fill up extra_specs if numa has cores.

    Args:
        numa (dict): A dictionary which includes numa information
        extra_specs (dict): To be filled in place.

    Returns:
        cores (int): Number of virtual cpus, or None if not applicable
    """
    # cpu_thread_policy "isolate" implies that the host must not have an SMT
    # architecture, or a non-SMT architecture will be emulated
    cores = numa.get("cores")
    if not cores:
        return None

    extra_specs["hw:cpu_thread_policy"] = "isolate"
    extra_specs["hw:cpu_policy"] = "dedicated"

    return cores
def process_numa_threads(numa: dict, extra_specs: dict) -> Optional[int]:
    """Fill up extra_specs if numa has threads.

    Args:
        numa (dict): A dictionary which includes numa information
        extra_specs (dict): To be filled in place.

    Returns:
        threads (int): Number of virtual cpus, or None if not applicable
    """
    # cpu_thread_policy "prefer" implies that the host may or may not have an
    # SMT architecture
    threads = numa.get("threads")
    if not threads:
        return None

    extra_specs["hw:cpu_thread_policy"] = "prefer"
    extra_specs["hw:cpu_policy"] = "dedicated"

    return threads
1341 def _process_numa_parameters_of_flavor(
1342 self
, numas
: List
, extra_specs
: Dict
1344 """Process numa parameters and fill up extra_specs.
1347 numas (list): List of dictionary which includes numa information
1348 extra_specs (dict): To be filled.
1351 numa_nodes
= len(numas
)
1352 extra_specs
["hw:numa_nodes"] = str(numa_nodes
)
1353 cpu_cores
, cpu_threads
= 0, 0
1355 if self
.vim_type
== "VIO":
1356 self
.process_vio_numa_nodes(numa_nodes
, extra_specs
)
1360 node_id
= numa
["id"]
1361 # overwrite ram and vcpus
1362 # check if key "memory" is present in numa else use ram value at flavor
1363 self
.process_numa_memory(numa
, node_id
, extra_specs
)
1364 self
.process_numa_vcpu(numa
, node_id
, extra_specs
)
1366 # See for reference: https://specs.openstack.org/openstack/nova-specs/specs/mitaka/implemented/virt-driver-cpu-thread-pinning.html
1367 extra_specs
["hw:cpu_sockets"] = str(numa_nodes
)
1369 if "paired-threads" in numa
:
1370 threads
= self
.process_numa_paired_threads(numa
, extra_specs
)
1371 cpu_threads
+= threads
1373 elif "cores" in numa
:
1374 cores
= self
.process_numa_cores(numa
, extra_specs
)
1377 elif "threads" in numa
:
1378 threads
= self
.process_numa_threads(numa
, extra_specs
)
1379 cpu_threads
+= threads
1382 extra_specs
["hw:cpu_cores"] = str(cpu_cores
)
1384 extra_specs
["hw:cpu_threads"] = str(cpu_threads
)
1387 def process_vio_numa_nodes(numa_nodes
: int, extra_specs
: Dict
) -> None:
1388 """According to number of numa nodes, updates the extra_specs for VIO.
1392 numa_nodes (int): List keeps the numa node numbers
1393 extra_specs (dict): Extra specs dict to be updated
1396 # If there is not any numa, numas_nodes equals to 0.
1398 extra_specs
["vmware:extra_config"] = '{"numa.nodeAffinity":"0"}'
1400 # If there are several numas, we do not define specific affinity.
1401 extra_specs
["vmware:latency_sensitivity_level"] = "high"
1403 def _change_flavor_name(
1404 self
, name
: str, name_suffix
: int, flavor_data
: dict
1406 """Change the flavor name if the name already exists.
1409 name (str): Flavor name to be checked
1410 name_suffix (int): Suffix to be appended to name
1411 flavor_data (dict): Flavor dict
1414 name (str): New flavor name to be used
1418 fl
= self
.nova
.flavors
.list()
1419 fl_names
= [f
.name
for f
in fl
]
1421 while name
in fl_names
:
1423 name
= flavor_data
["name"] + "-" + str(name_suffix
)
1427 def _process_extended_config_of_flavor(
1428 self
, extended
: dict, extra_specs
: dict
1430 """Process the extended dict to fill up extra_specs.
1433 extended (dict): Keeping the extra specification of flavor
1434 extra_specs (dict) Dict to be filled to be used during flavor creation
1439 "mem-quota": "memory",
1441 "disk-io-quota": "disk_io",
1449 "PREFER_LARGE": "any",
1453 "cpu-pinning-policy": "hw:cpu_policy",
1454 "cpu-thread-pinning-policy": "hw:cpu_thread_policy",
1455 "mem-policy": "hw:numa_mempolicy",
1458 numas
= extended
.get("numas")
1460 self
._process
_numa
_parameters
_of
_flavor
(numas
, extra_specs
)
1462 for quota
, item
in quotas
.items():
1463 if quota
in extended
.keys():
1464 self
.process_resource_quota(extended
.get(quota
), item
, extra_specs
)
1466 # Set the mempage size as specified in the descriptor
1467 if extended
.get("mempage-size"):
1468 if extended
["mempage-size"] in page_sizes
.keys():
1469 extra_specs
["hw:mem_page_size"] = page_sizes
[extended
["mempage-size"]]
1471 # Normally, validations in NBI should not allow to this condition.
1473 "Invalid mempage-size %s. Will be ignored",
1474 extended
.get("mempage-size"),
1477 for policy
, hw_policy
in policies
.items():
1478 if extended
.get(policy
):
1479 extra_specs
[hw_policy
] = extended
[policy
].lower()
1482 def _get_flavor_details(flavor_data
: dict) -> Tuple
:
1483 """Returns the details of flavor
1485 flavor_data (dict): Dictionary that includes required flavor details
1488 ram, vcpus, extra_specs, extended (tuple): Main items of required flavor
1492 flavor_data
.get("ram", 64),
1493 flavor_data
.get("vcpus", 1),
1495 flavor_data
.get("extended"),
1498 def new_flavor(self
, flavor_data
: dict, change_name_if_used
: bool = True) -> str:
1499 """Adds a tenant flavor to openstack VIM.
1500 if change_name_if_used is True, it will change name in case of conflict,
1501 because it is not supported name repetition.
1504 flavor_data (dict): Flavor details to be processed
1505 change_name_if_used (bool): Change name in case of conflict
1508 flavor_id (str): flavor identifier
1511 self
.logger
.debug("Adding flavor '%s'", str(flavor_data
))
1517 name
= flavor_data
["name"]
1518 while retry
< max_retries
:
1521 self
._reload
_connection
()
1523 if change_name_if_used
:
1524 name
= self
._change
_flavor
_name
(name
, name_suffix
, flavor_data
)
1526 ram
, vcpus
, extra_specs
, extended
= self
._get
_flavor
_details
(
1530 self
._process
_extended
_config
_of
_flavor
(extended
, extra_specs
)
1534 new_flavor
= self
.nova
.flavors
.create(
1538 disk
=flavor_data
.get("disk", 0),
1539 ephemeral
=flavor_data
.get("ephemeral", 0),
1540 swap
=flavor_data
.get("swap", 0),
1541 is_public
=flavor_data
.get("is_public", True),
1546 new_flavor
.set_keys(extra_specs
)
1548 return new_flavor
.id
1550 except nvExceptions
.Conflict
as e
:
1552 if change_name_if_used
and retry
< max_retries
:
1555 self
._format
_exception
(e
)
1558 ksExceptions
.ClientException
,
1559 nvExceptions
.ClientException
,
1563 self
._format
_exception
(e
)
1565 def delete_flavor(self
, flavor_id
):
1566 """Deletes a tenant flavor from openstack VIM. Returns the old flavor_id"""
1568 self
._reload
_connection
()
1569 self
.nova
.flavors
.delete(flavor_id
)
1572 # except nvExceptions.BadRequest as e:
1574 nvExceptions
.NotFound
,
1575 ksExceptions
.ClientException
,
1576 nvExceptions
.ClientException
,
1579 self
._format
_exception
(e
)
1581 def new_image(self
, image_dict
):
1583 Adds a tenant image to VIM. imge_dict is a dictionary with:
1585 disk_format: qcow2, vhd, vmdk, raw (by default), ...
1586 location: path or URI
1587 public: "yes" or "no"
1588 metadata: metadata of the image
1589 Returns the image_id
1594 while retry
< max_retries
:
1597 self
._reload
_connection
()
1599 # determine format http://docs.openstack.org/developer/glance/formats.html
1600 if "disk_format" in image_dict
:
1601 disk_format
= image_dict
["disk_format"]
1602 else: # autodiscover based on extension
1603 if image_dict
["location"].endswith(".qcow2"):
1604 disk_format
= "qcow2"
1605 elif image_dict
["location"].endswith(".vhd"):
1607 elif image_dict
["location"].endswith(".vmdk"):
1608 disk_format
= "vmdk"
1609 elif image_dict
["location"].endswith(".vdi"):
1611 elif image_dict
["location"].endswith(".iso"):
1613 elif image_dict
["location"].endswith(".aki"):
1615 elif image_dict
["location"].endswith(".ari"):
1617 elif image_dict
["location"].endswith(".ami"):
1623 "new_image: '%s' loading from '%s'",
1625 image_dict
["location"],
1627 if self
.vim_type
== "VIO":
1628 container_format
= "bare"
1629 if "container_format" in image_dict
:
1630 container_format
= image_dict
["container_format"]
1632 new_image
= self
.glance
.images
.create(
1633 name
=image_dict
["name"],
1634 container_format
=container_format
,
1635 disk_format
=disk_format
,
1638 new_image
= self
.glance
.images
.create(name
=image_dict
["name"])
1640 if image_dict
["location"].startswith("http"):
1641 # TODO there is not a method to direct download. It must be downloaded locally with requests
1642 raise vimconn
.VimConnNotImplemented("Cannot create image from URL")
1644 with
open(image_dict
["location"]) as fimage
:
1645 self
.glance
.images
.upload(new_image
.id, fimage
)
1646 # new_image = self.glancev1.images.create(name=image_dict["name"], is_public=
1647 # image_dict.get("public","yes")=="yes",
1648 # container_format="bare", data=fimage, disk_format=disk_format)
1650 metadata_to_load
= image_dict
.get("metadata")
1652 # TODO location is a reserved word for current openstack versions. fixed for VIO please check
1654 if self
.vim_type
== "VIO":
1655 metadata_to_load
["upload_location"] = image_dict
["location"]
1657 metadata_to_load
["location"] = image_dict
["location"]
1659 self
.glance
.images
.update(new_image
.id, **metadata_to_load
)
1663 nvExceptions
.Conflict
,
1664 ksExceptions
.ClientException
,
1665 nvExceptions
.ClientException
,
1667 self
._format
_exception
(e
)
1670 gl1Exceptions
.HTTPException
,
1671 gl1Exceptions
.CommunicationError
,
1674 if retry
== max_retries
:
1677 self
._format
_exception
(e
)
1678 except IOError as e
: # can not open the file
1679 raise vimconn
.VimConnConnectionException(
1680 "{}: {} for {}".format(type(e
).__name
__, e
, image_dict
["location"]),
1681 http_code
=vimconn
.HTTP_Bad_Request
,
1684 def delete_image(self
, image_id
):
1685 """Deletes a tenant image from openstack VIM. Returns the old id"""
1687 self
._reload
_connection
()
1688 self
.glance
.images
.delete(image_id
)
1692 nvExceptions
.NotFound
,
1693 ksExceptions
.ClientException
,
1694 nvExceptions
.ClientException
,
1695 gl1Exceptions
.CommunicationError
,
1696 gl1Exceptions
.HTTPNotFound
,
1698 ) as e
: # TODO remove
1699 self
._format
_exception
(e
)
1701 def get_image_id_from_path(self
, path
):
1702 """Get the image id from image path in the VIM database. Returns the image_id"""
1704 self
._reload
_connection
()
1705 images
= self
.glance
.images
.list()
1707 for image
in images
:
1708 if image
.metadata
.get("location") == path
:
1711 raise vimconn
.VimConnNotFoundException(
1712 "image with location '{}' not found".format(path
)
1715 ksExceptions
.ClientException
,
1716 nvExceptions
.ClientException
,
1717 gl1Exceptions
.CommunicationError
,
1720 self
._format
_exception
(e
)
1722 def get_image_list(self
, filter_dict
={}):
1723 """Obtain tenant images from VIM
1727 checksum: image checksum
1728 Returns the image list of dictionaries:
1729 [{<the fields at Filter_dict plus some VIM specific>}, ...]
1732 self
.logger
.debug("Getting image list from VIM filter: '%s'", str(filter_dict
))
1735 self
._reload
_connection
()
1736 # filter_dict_os = filter_dict.copy()
1737 # First we filter by the available filter fields: name, id. The others are removed.
1738 image_list
= self
.glance
.images
.list()
1741 for image
in image_list
:
1743 if filter_dict
.get("name") and image
["name"] != filter_dict
["name"]:
1746 if filter_dict
.get("id") and image
["id"] != filter_dict
["id"]:
1750 filter_dict
.get("checksum")
1751 and image
["checksum"] != filter_dict
["checksum"]
1755 filtered_list
.append(image
.copy())
1756 except gl1Exceptions
.HTTPNotFound
:
1759 return filtered_list
1761 ksExceptions
.ClientException
,
1762 nvExceptions
.ClientException
,
1763 gl1Exceptions
.CommunicationError
,
1766 self
._format
_exception
(e
)
1768 def __wait_for_vm(self
, vm_id
, status
):
1769 """wait until vm is in the desired status and return True.
1770 If the VM gets in ERROR status, return false.
1771 If the timeout is reached generate an exception"""
1773 while elapsed_time
< server_timeout
:
1774 vm_status
= self
.nova
.servers
.get(vm_id
).status
1776 if vm_status
== status
:
1779 if vm_status
== "ERROR":
1785 # if we exceeded the timeout rollback
1786 if elapsed_time
>= server_timeout
:
1787 raise vimconn
.VimConnException(
1788 "Timeout waiting for instance " + vm_id
+ " to get " + status
,
1789 http_code
=vimconn
.HTTP_Request_Timeout
,
1792 def _get_openstack_availablity_zones(self
):
1794 Get from openstack availability zones available
1798 openstack_availability_zone
= self
.nova
.availability_zones
.list()
1799 openstack_availability_zone
= [
1801 for zone
in openstack_availability_zone
1802 if zone
.zoneName
!= "internal"
1805 return openstack_availability_zone
1809 def _set_availablity_zones(self
):
1811 Set vim availablity zone
1814 if "availability_zone" in self
.config
:
1815 vim_availability_zones
= self
.config
.get("availability_zone")
1817 if isinstance(vim_availability_zones
, str):
1818 self
.availability_zone
= [vim_availability_zones
]
1819 elif isinstance(vim_availability_zones
, list):
1820 self
.availability_zone
= vim_availability_zones
1822 self
.availability_zone
= self
._get
_openstack
_availablity
_zones
()
1824 def _get_vm_availability_zone(
1825 self
, availability_zone_index
, availability_zone_list
1828 Return thge availability zone to be used by the created VM.
1829 :return: The VIM availability zone to be used or None
1831 if availability_zone_index
is None:
1832 if not self
.config
.get("availability_zone"):
1834 elif isinstance(self
.config
.get("availability_zone"), str):
1835 return self
.config
["availability_zone"]
1837 # TODO consider using a different parameter at config for default AV and AV list match
1838 return self
.config
["availability_zone"][0]
1840 vim_availability_zones
= self
.availability_zone
1841 # check if VIM offer enough availability zones describe in the VNFD
1842 if vim_availability_zones
and len(availability_zone_list
) <= len(
1843 vim_availability_zones
1845 # check if all the names of NFV AV match VIM AV names
1846 match_by_index
= False
1847 for av
in availability_zone_list
:
1848 if av
not in vim_availability_zones
:
1849 match_by_index
= True
1853 return vim_availability_zones
[availability_zone_index
]
1855 return availability_zone_list
[availability_zone_index
]
1857 raise vimconn
.VimConnConflictException(
1858 "No enough availability zones at VIM for this deployment"
1861 def _prepare_port_dict_security_groups(self
, net
: dict, port_dict
: dict) -> None:
1862 """Fill up the security_groups in the port_dict.
1865 net (dict): Network details
1866 port_dict (dict): Port details
1870 self
.config
.get("security_groups")
1871 and net
.get("port_security") is not False
1872 and not self
.config
.get("no_port_security_extension")
1874 if not self
.security_groups_id
:
1875 self
._get
_ids
_from
_name
()
1877 port_dict
["security_groups"] = self
.security_groups_id
1879 def _prepare_port_dict_binding(self
, net
: dict, port_dict
: dict) -> None:
1880 """Fill up the network binding depending on network type in the port_dict.
1883 net (dict): Network details
1884 port_dict (dict): Port details
1887 if not net
.get("type"):
1888 raise vimconn
.VimConnException("Type is missing in the network details.")
1890 if net
["type"] == "virtual":
1894 elif net
["type"] == "VF" or net
["type"] == "SR-IOV":
1896 port_dict
["binding:vnic_type"] = "direct"
1898 # VIO specific Changes
1899 if self
.vim_type
== "VIO":
1900 # Need to create port with port_security_enabled = False and no-security-groups
1901 port_dict
["port_security_enabled"] = False
1902 port_dict
["provider_security_groups"] = []
1903 port_dict
["security_groups"] = []
1906 # For PT PCI-PASSTHROUGH
1907 port_dict
["binding:vnic_type"] = "direct-physical"
1910 def _set_fixed_ip(new_port
: dict, net
: dict) -> None:
1911 """Set the "ip" parameter in net dictionary.
1914 new_port (dict): New created port
1915 net (dict): Network details
1918 fixed_ips
= new_port
["port"].get("fixed_ips")
1921 net
["ip"] = fixed_ips
[0].get("ip_address")
1926 def _prepare_port_dict_mac_ip_addr(net
: dict, port_dict
: dict) -> None:
1927 """Fill up the mac_address and fixed_ips in port_dict.
1930 net (dict): Network details
1931 port_dict (dict): Port details
1934 if net
.get("mac_address"):
1935 port_dict
["mac_address"] = net
["mac_address"]
1937 if net
.get("ip_address"):
1938 port_dict
["fixed_ips"] = [{"ip_address": net
["ip_address"]}]
1939 # TODO add "subnet_id": <subnet_id>
1941 def _create_new_port(self
, port_dict
: dict, created_items
: dict, net
: dict) -> Dict
:
1942 """Create new port using neutron.
1945 port_dict (dict): Port details
1946 created_items (dict): All created items
1947 net (dict): Network details
1950 new_port (dict): New created port
1953 new_port
= self
.neutron
.create_port({"port": port_dict
})
1954 created_items
["port:" + str(new_port
["port"]["id"])] = True
1955 net
["mac_adress"] = new_port
["port"]["mac_address"]
1956 net
["vim_id"] = new_port
["port"]["id"]
1961 self
, net
: dict, name
: str, created_items
: dict
1962 ) -> Tuple
[dict, dict]:
1963 """Create port using net details.
1966 net (dict): Network details
1967 name (str): Name to be used as network name if net dict does not include name
1968 created_items (dict): All created items
1971 new_port, port New created port, port dictionary
1976 "network_id": net
["net_id"],
1977 "name": net
.get("name"),
1978 "admin_state_up": True,
1981 if not port_dict
["name"]:
1982 port_dict
["name"] = name
1984 self
._prepare
_port
_dict
_security
_groups
(net
, port_dict
)
1986 self
._prepare
_port
_dict
_binding
(net
, port_dict
)
1988 vimconnector
._prepare
_port
_dict
_mac
_ip
_addr
(net
, port_dict
)
1990 new_port
= self
._create
_new
_port
(port_dict
, created_items
, net
)
1992 vimconnector
._set
_fixed
_ip
(new_port
, net
)
1994 port
= {"port-id": new_port
["port"]["id"]}
1996 if float(self
.nova
.api_version
.get_string()) >= 2.32:
1997 port
["tag"] = new_port
["port"]["name"]
1999 return new_port
, port
2001 def _prepare_network_for_vminstance(
2005 created_items
: dict,
2007 external_network
: list,
2008 no_secured_ports
: list,
2010 """Create port and fill up net dictionary for new VM instance creation.
2013 name (str): Name of network
2014 net_list (list): List of networks
2015 created_items (dict): All created items belongs to a VM
2016 net_list_vim (list): List of ports
2017 external_network (list): List of external-networks
2018 no_secured_ports (list): Port security disabled ports
2021 self
._reload
_connection
()
2023 for net
in net_list
:
2024 # Skip non-connected iface
2025 if not net
.get("net_id"):
2028 new_port
, port
= self
._create
_port
(net
, name
, created_items
)
2030 net_list_vim
.append(port
)
2032 if net
.get("floating_ip", False):
2033 net
["exit_on_floating_ip_error"] = True
2034 external_network
.append(net
)
2036 elif net
["use"] == "mgmt" and self
.config
.get("use_floating_ip"):
2037 net
["exit_on_floating_ip_error"] = False
2038 external_network
.append(net
)
2039 net
["floating_ip"] = self
.config
.get("use_floating_ip")
2041 # If port security is disabled when the port has not yet been attached to the VM, then all vm traffic
2042 # is dropped. As a workaround we wait until the VM is active and then disable the port-security
2043 if net
.get("port_security") is False and not self
.config
.get(
2044 "no_port_security_extension"
2046 no_secured_ports
.append(
2048 new_port
["port"]["id"],
2049 net
.get("port_security_disable_strategy"),
2053 def _prepare_persistent_root_volumes(
2058 base_disk_index
: int,
2059 block_device_mapping
: dict,
2060 existing_vim_volumes
: list,
2061 created_items
: dict,
2063 """Prepare persistent root volumes for new VM instance.
2066 name (str): Name of VM instance
2067 vm_av_zone (list): List of availability zones
2068 disk (dict): Disk details
2069 base_disk_index (int): Disk index
2070 block_device_mapping (dict): Block device details
2071 existing_vim_volumes (list): Existing disk details
2072 created_items (dict): All created items belongs to VM
2075 boot_volume_id (str): ID of boot volume
2078 # Disk may include only vim_volume_id or only vim_id."
2079 # Use existing persistent root volume finding with volume_id or vim_id
2080 key_id
= "vim_volume_id" if "vim_volume_id" in disk
.keys() else "vim_id"
2082 if disk
.get(key_id
):
2084 block_device_mapping
["vd" + chr(base_disk_index
)] = disk
[key_id
]
2085 existing_vim_volumes
.append({"id": disk
[key_id
]})
2088 # Create persistent root volume
2089 volume
= self
.cinder
.volumes
.create(
2091 name
=name
+ "vd" + chr(base_disk_index
),
2092 imageRef
=disk
["image_id"],
2093 # Make sure volume is in the same AZ as the VM to be attached to
2094 availability_zone
=vm_av_zone
,
2096 boot_volume_id
= volume
.id
2097 self
.update_block_device_mapping(
2099 block_device_mapping
=block_device_mapping
,
2100 base_disk_index
=base_disk_index
,
2102 created_items
=created_items
,
2105 return boot_volume_id
def update_block_device_mapping(
    self,
    volume,
    block_device_mapping: dict,
    base_disk_index: int,
    disk: dict,
    created_items: dict,
) -> None:
    """Add volume information to block device mapping dict.

    Args:
        volume (object): Created volume object
        block_device_mapping (dict): Block device details, updated in place
        base_disk_index (int): Disk index (ord of the device letter)
        disk (dict): Disk details
        created_items (dict): All created items belongs to VM, updated in place

    Raises:
        VimConnException: if the volume is empty or has no id attribute
    """
    if not volume:
        raise vimconn.VimConnException("Volume is empty.")

    if not hasattr(volume, "id"):
        raise vimconn.VimConnException(
            "Created volume is not valid, does not have id attribute."
        )

    created_key = "volume:" + str(volume.id)
    if disk.get("keep"):
        # ":keep" marks volumes that must survive VM deletion
        created_key += ":keep"
    created_items[created_key] = True

    # map the volume to the next device name (vda, vdb, ...)
    block_device_mapping["vd" + chr(base_disk_index)] = volume.id
2137 def _prepare_non_root_persistent_volumes(
2142 block_device_mapping
: dict,
2143 base_disk_index
: int,
2144 existing_vim_volumes
: list,
2145 created_items
: dict,
2147 """Prepare persistent volumes for new VM instance.
2150 name (str): Name of VM instance
2151 disk (dict): Disk details
2152 vm_av_zone (list): List of availability zones
2153 block_device_mapping (dict): Block device details
2154 base_disk_index (int): Disk index
2155 existing_vim_volumes (list): Existing disk details
2156 created_items (dict): All created items belongs to VM
2158 # Non-root persistent volumes
2159 # Disk may include only vim_volume_id or only vim_id."
2160 key_id
= "vim_volume_id" if "vim_volume_id" in disk
.keys() else "vim_id"
2162 if disk
.get(key_id
):
2164 # Use existing persistent volume
2165 block_device_mapping
["vd" + chr(base_disk_index
)] = disk
[key_id
]
2166 existing_vim_volumes
.append({"id": disk
[key_id
]})
2169 # Create persistent volume
2170 volume
= self
.cinder
.volumes
.create(
2172 name
=name
+ "vd" + chr(base_disk_index
),
2173 # Make sure volume is in the same AZ as the VM to be attached to
2174 availability_zone
=vm_av_zone
,
2176 self
.update_block_device_mapping(
2178 block_device_mapping
=block_device_mapping
,
2179 base_disk_index
=base_disk_index
,
2181 created_items
=created_items
,
2184 def _wait_for_created_volumes_availability(
2185 self
, elapsed_time
: int, created_items
: dict
2187 """Wait till created volumes become available.
2190 elapsed_time (int): Passed time while waiting
2191 created_items (dict): All created items belongs to VM
2194 elapsed_time (int): Time spent while waiting
2198 while elapsed_time
< volume_timeout
:
2199 for created_item
in created_items
:
2201 created_item
.split(":")[0],
2202 created_item
.split(":")[1],
2205 if self
.cinder
.volumes
.get(volume_id
).status
!= "available":
2208 # All ready: break from while
2216 def _wait_for_existing_volumes_availability(
2217 self
, elapsed_time
: int, existing_vim_volumes
: list
2219 """Wait till existing volumes become available.
2222 elapsed_time (int): Passed time while waiting
2223 existing_vim_volumes (list): Existing volume details
2226 elapsed_time (int): Time spent while waiting
2230 while elapsed_time
< volume_timeout
:
2231 for volume
in existing_vim_volumes
:
2232 if self
.cinder
.volumes
.get(volume
["id"]).status
!= "available":
2234 else: # all ready: break from while
2242 def _prepare_disk_for_vminstance(
2245 existing_vim_volumes
: list,
2246 created_items
: dict,
2248 block_device_mapping
: dict,
2249 disk_list
: list = None,
2251 """Prepare all volumes for new VM instance.
2254 name (str): Name of Instance
2255 existing_vim_volumes (list): List of existing volumes
2256 created_items (dict): All created items belongs to VM
2257 vm_av_zone (list): VM availability zone
2258 block_device_mapping (dict): Block devices to be attached to VM
2259 disk_list (list): List of disks
2262 # Create additional volumes in case these are present in disk_list
2263 base_disk_index
= ord("b")
2264 boot_volume_id
= None
2267 for disk
in disk_list
:
2268 if "image_id" in disk
:
2269 # Root persistent volume
2270 base_disk_index
= ord("a")
2271 boot_volume_id
= self
._prepare
_persistent
_root
_volumes
(
2273 vm_av_zone
=vm_av_zone
,
2275 base_disk_index
=base_disk_index
,
2276 block_device_mapping
=block_device_mapping
,
2277 existing_vim_volumes
=existing_vim_volumes
,
2278 created_items
=created_items
,
2281 # Non-root persistent volume
2282 self
._prepare
_non
_root
_persistent
_volumes
(
2285 vm_av_zone
=vm_av_zone
,
2286 block_device_mapping
=block_device_mapping
,
2287 base_disk_index
=base_disk_index
,
2288 existing_vim_volumes
=existing_vim_volumes
,
2289 created_items
=created_items
,
2291 base_disk_index
+= 1
2293 # Wait until created volumes are with status available
2294 elapsed_time
= self
._wait
_for
_created
_volumes
_availability
(
2295 elapsed_time
, created_items
2297 # Wait until existing volumes in vim are with status available
2298 elapsed_time
= self
._wait
_for
_existing
_volumes
_availability
(
2299 elapsed_time
, existing_vim_volumes
2301 # If we exceeded the timeout rollback
2302 if elapsed_time
>= volume_timeout
:
2303 raise vimconn
.VimConnException(
2304 "Timeout creating volumes for instance " + name
,
2305 http_code
=vimconn
.HTTP_Request_Timeout
,
2308 self
.cinder
.volumes
.set_bootable(boot_volume_id
, True)
2310 def _find_the_external_network_for_floating_ip(self
):
2311 """Get the external network ip in order to create floating IP.
2314 pool_id (str): External network pool ID
2318 # Find the external network
2319 external_nets
= list()
2321 for net
in self
.neutron
.list_networks()["networks"]:
2322 if net
["router:external"]:
2323 external_nets
.append(net
)
2325 if len(external_nets
) == 0:
2326 raise vimconn
.VimConnException(
2327 "Cannot create floating_ip automatically since "
2328 "no external network is present",
2329 http_code
=vimconn
.HTTP_Conflict
,
2332 if len(external_nets
) > 1:
2333 raise vimconn
.VimConnException(
2334 "Cannot create floating_ip automatically since "
2335 "multiple external networks are present",
2336 http_code
=vimconn
.HTTP_Conflict
,
2340 return external_nets
[0].get("id")
2342 def _neutron_create_float_ip(self
, param
: dict, created_items
: dict) -> None:
2343 """Trigger neutron to create a new floating IP using external network ID.
2346 param (dict): Input parameters to create a floating IP
2347 created_items (dict): All created items belongs to new VM instance
2354 self
.logger
.debug("Creating floating IP")
2355 new_floating_ip
= self
.neutron
.create_floatingip(param
)
2356 free_floating_ip
= new_floating_ip
["floatingip"]["id"]
2357 created_items
["floating_ip:" + str(free_floating_ip
)] = True
2359 except Exception as e
:
2360 raise vimconn
.VimConnException(
2361 type(e
).__name
__ + ": Cannot create new floating_ip " + str(e
),
2362 http_code
=vimconn
.HTTP_Conflict
,
2365 def _create_floating_ip(
2366 self
, floating_network
: dict, server
: object, created_items
: dict
2368 """Get the available Pool ID and create a new floating IP.
2371 floating_network (dict): Dict including external network ID
2372 server (object): Server object
2373 created_items (dict): All created items belongs to new VM instance
2377 # Pool_id is available
2379 isinstance(floating_network
["floating_ip"], str)
2380 and floating_network
["floating_ip"].lower() != "true"
2382 pool_id
= floating_network
["floating_ip"]
2386 pool_id
= self
._find
_the
_external
_network
_for
_floating
_ip
()
2390 "floating_network_id": pool_id
,
2391 "tenant_id": server
.tenant_id
,
2395 self
._neutron
_create
_float
_ip
(param
, created_items
)
2397 def _find_floating_ip(
2401 floating_network
: dict,
2403 """Find the available free floating IPs if there are.
2406 server (object): Server object
2407 floating_ips (list): List of floating IPs
2408 floating_network (dict): Details of floating network such as ID
2411 free_floating_ip (str): Free floating ip address
2414 for fip
in floating_ips
:
2415 if fip
.get("port_id") or fip
.get("tenant_id") != server
.tenant_id
:
2418 if isinstance(floating_network
["floating_ip"], str):
2419 if fip
.get("floating_network_id") != floating_network
["floating_ip"]:
def _assign_floating_ip(self, free_floating_ip: str, floating_network: dict) -> dict:
    """Assign the free floating ip address to port.

    Args:
        free_floating_ip (str): Floating IP to be assigned
        floating_network (dict): ID of floating network

    Returns:
        fip (dict) (dict): Floating ip details
    """
    # The vim_id key contains the neutron.port_id
    self.neutron.update_floatingip(
        free_floating_ip,
        {"floatingip": {"port_id": floating_network["vim_id"]}},
    )
    # For race condition ensure not re-assigned to other VM after 5 seconds
    time.sleep(5)

    # Re-read the floating IP so the caller can verify the binding stuck.
    return self.neutron.show_floatingip(free_floating_ip)
2447 def _get_free_floating_ip(
2448 self
, server
: object, floating_network
: dict
2450 """Get the free floating IP address.
2453 server (object): Server Object
2454 floating_network (dict): Floating network details
2457 free_floating_ip (str): Free floating ip addr
2461 floating_ips
= self
.neutron
.list_floatingips().get("floatingips", ())
2464 random
.shuffle(floating_ips
)
2466 return self
._find
_floating
_ip
(server
, floating_ips
, floating_network
)
def _prepare_external_network_for_vminstance(
    self,
    external_network: list,
    server: object,
    created_items: dict,
    vm_start_time: float,
) -> None:
    """Assign floating IP address for VM instance.

    Args:
        external_network (list): ID of External network
        server (object): Server Object
        created_items (dict): All created items belongs to new VM instance
        vm_start_time (float): Time as a floating point number expressed in seconds since the epoch, in UTC

    Raises:
        VimConnException
    """
    for floating_network in external_network:
        try:
            assigned = False
            floating_ip_retries = 3
            # In case of RO in HA there can be conflicts, two RO trying to assign same floating IP, so retry
            # several times
            while not assigned:
                free_floating_ip = self._get_free_floating_ip(
                    server, floating_network
                )

                if not free_floating_ip:
                    self._create_floating_ip(
                        floating_network, server, created_items
                    )

                try:
                    # For race condition ensure not already assigned
                    fip = self.neutron.show_floatingip(free_floating_ip)

                    if fip["floatingip"].get("port_id"):
                        # Taken by someone else in the meantime: pick another.
                        continue

                    # Assign floating ip
                    fip = self._assign_floating_ip(
                        free_floating_ip, floating_network
                    )

                    if fip["floatingip"]["port_id"] != floating_network["vim_id"]:
                        self.logger.warning(
                            "floating_ip {} re-assigned to other port".format(
                                free_floating_ip
                            )
                        )
                        continue

                    self.logger.debug(
                        "Assigned floating_ip {} to VM {}".format(
                            free_floating_ip, server.id
                        )
                    )
                    assigned = True
                except Exception as e:
                    # Openstack need some time after VM creation to assign an IP. So retry if fails
                    vm_status = self.nova.servers.get(server.id).status

                    if vm_status not in ("ACTIVE", "ERROR"):
                        # VM still building: wait (bounded by server_timeout).
                        if time.time() - vm_start_time < server_timeout:
                            time.sleep(5)
                            continue
                    elif floating_ip_retries > 0:
                        floating_ip_retries -= 1
                        continue

                    raise vimconn.VimConnException(
                        "Cannot create floating_ip: {} {}".format(
                            type(e).__name__, e
                        ),
                        http_code=vimconn.HTTP_Conflict,
                    )
        except Exception as e:
            # Ignore the failure unless the descriptor demands a floating IP.
            if not floating_network["exit_on_floating_ip_error"]:
                self.logger.error("Cannot create floating_ip. %s", str(e))
                continue

            raise
def _update_port_security_for_vminstance(
    self,
    no_secured_ports: list,
    server: object,
) -> None:
    """Updates the port security according to no_secured_ports list.

    Args:
        no_secured_ports (list): List of ports that security will be disabled
        server (object): Server Object

    Raises:
        VimConnException
    """
    # Wait until the VM is active and then disable the port-security
    if no_secured_ports:
        self.__wait_for_vm(server.id, "ACTIVE")

    for port in no_secured_ports:
        # Each entry is (port_id, mode); the default mode disables port
        # security and detaches security groups entirely.
        port_update = {
            "port": {"port_security_enabled": False, "security_groups": None}
        }

        if port[1] == "allow-address-pairs":
            # Softer variant: keep security on, but allow any source address.
            port_update = {
                "port": {"allowed_address_pairs": [{"ip_address": "0.0.0.0/0"}]}
            }

        try:
            self.neutron.update_port(port[0], port_update)
        except Exception:
            raise vimconn.VimConnException(
                "It was not possible to disable port security for port {}".format(
                    port[0]
                )
            )
def new_vminstance(
    self,
    name: str,
    description: str,
    start: bool,
    image_id: str,
    flavor_id: str,
    affinity_group_list: list,
    net_list: list,
    cloud_config=None,
    disk_list=None,
    availability_zone_index=None,
    availability_zone_list=None,
) -> tuple:
    """Adds a VM instance to VIM.

    Args:
        name (str): name of VM
        description (str): description
        start (bool): indicates if VM must start or boot in pause mode. Ignored
        image_id (str) image uuid
        flavor_id (str) flavor uuid
        affinity_group_list (list): list of affinity groups, each one is a dictionary.Ignore if empty.
        net_list (list): list of interfaces, each one is a dictionary with:
            name: name of network
            net_id: network uuid to connect
            vpci: virtual vcpi to assign, ignored because openstack lack #TODO
            model: interface model, ignored #TODO
            mac_address: used for SR-IOV ifaces #TODO for other types
            use: 'data', 'bridge',  'mgmt'
            type: 'virtual', 'PCI-PASSTHROUGH'('PF'), 'SR-IOV'('VF'), 'VFnotShared'
            vim_id: filled/added by this function
            floating_ip: True/False (or it can be None)
            port_security: True/False
        cloud_config (dict): (optional) dictionary with:
            key-pairs: (optional) list of strings with the public key to be inserted to the default user
            users: (optional) list of users to be inserted, each item is a dict with:
                name: (mandatory) user name,
                key-pairs: (optional) list of strings with the public key to be inserted to the user
            user-data: (optional) string is a text script to be passed directly to cloud-init
            config-files: (optional). List of files to be transferred. Each item is a dict with:
                dest: (mandatory) string with the destination absolute path
                encoding: (optional, by default text). Can be one of:
                    'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
                content : (mandatory) string with the content of the file
                permissions: (optional) string with file permissions, typically octal notation '0644'
                owner: (optional) file owner, string with the format 'owner:group'
            boot-data-drive: boolean to indicate if user-data must be passed using a boot drive (hard disk)
        disk_list: (optional) list with additional disks to the VM. Each item is a dict with:
            image_id: (optional). VIM id of an existing image. If not provided an empty disk must be mounted
            size: (mandatory) string with the size of the disk in GB
            vim_id: (optional) should use this existing volume id
        availability_zone_index: Index of availability_zone_list to use for this this VM. None if not AV required
        availability_zone_list: list of availability zones given by user in the VNFD descriptor.  Ignore if
            availability_zone_index is None
            #TODO ip, security groups

    Returns:
        A tuple with the instance identifier and created_items or raises an exception on error
        created_items can be None or a dictionary where this method can include key-values that will be passed to
        the method delete_vminstance and action_vminstance. Can be used to store created ports, volumes, etc.
        Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
        as not present.
    """
    self.logger.debug(
        "new_vminstance input: image='%s' flavor='%s' nics='%s'",
        image_id,
        flavor_id,
        str(net_list),
    )

    try:
        server = None
        created_items = {}
        net_list_vim = []
        # list of external networks to be connected to instance, later on used to create floating_ip
        external_network = []
        # List of ports with port-security disabled
        no_secured_ports = []
        block_device_mapping = {}
        existing_vim_volumes = []
        server_group_id = None
        scheduller_hints = {}

        # Check the Openstack Connection
        self._reload_connection()

        # Prepare network list
        self._prepare_network_for_vminstance(
            name=name,
            net_list=net_list,
            created_items=created_items,
            net_list_vim=net_list_vim,
            external_network=external_network,
            no_secured_ports=no_secured_ports,
        )

        # Cloud config
        config_drive, userdata = self._create_user_data(cloud_config)

        # Get availability Zone
        vm_av_zone = self._get_vm_availability_zone(
            availability_zone_index, availability_zone_list
        )

        if disk_list:
            # Prepare disks
            self._prepare_disk_for_vminstance(
                name=name,
                existing_vim_volumes=existing_vim_volumes,
                created_items=created_items,
                vm_av_zone=vm_av_zone,
                block_device_mapping=block_device_mapping,
                disk_list=disk_list,
            )

        if affinity_group_list:
            # Only first id on the list will be used. Openstack restriction
            server_group_id = affinity_group_list[0]["affinity_group_id"]
            scheduller_hints["group"] = server_group_id

        self.logger.debug(
            "nova.servers.create({}, {}, {}, nics={}, security_groups={}, "
            "availability_zone={}, key_name={}, userdata={}, config_drive={}, "
            "block_device_mapping={}, server_group={})".format(
                name,
                image_id,
                flavor_id,
                net_list_vim,
                self.config.get("security_groups"),
                vm_av_zone,
                self.config.get("keypair"),
                userdata,
                config_drive,
                block_device_mapping,
                server_group_id,
            )
        )

        # Create VM
        server = self.nova.servers.create(
            name=name,
            image=image_id,
            flavor=flavor_id,
            nics=net_list_vim,
            security_groups=self.config.get("security_groups"),
            # TODO remove security_groups in future versions. Already at neutron port
            availability_zone=vm_av_zone,
            key_name=self.config.get("keypair"),
            userdata=userdata,
            config_drive=config_drive,
            block_device_mapping=block_device_mapping,
            scheduler_hints=scheduller_hints,
        )

        vm_start_time = time.time()

        self._update_port_security_for_vminstance(no_secured_ports, server)

        self._prepare_external_network_for_vminstance(
            external_network=external_network,
            server=server,
            created_items=created_items,
            vm_start_time=vm_start_time,
        )

        return server.id, created_items

    except Exception as e:
        # Rollback: delete whatever was created before re-raising through
        # _format_exception. Keep-tagged volumes are untagged first so the
        # rollback really removes them.
        server_id = None
        if server:
            server_id = server.id

        try:
            created_items = self.remove_keep_tag_from_persistent_volumes(
                created_items
            )

            self.delete_vminstance(server_id, created_items)
        except Exception as e2:
            self.logger.error("new_vminstance rollback fail {}".format(e2))

        self._format_exception(e)
def remove_keep_tag_from_persistent_volumes(created_items: Dict) -> Dict:
    """Removes the keep flag from persistent volumes. So, those volumes could be removed.

    Args:
        created_items (dict): All created items belongs to VM

    Returns:
        updated_created_items (dict): Dict which does not include keep flag for volumes.
    """
    # A keep-tagged key looks like "volume:<id>:keep"; dropping the suffix
    # turns it back into an ordinary deletable item.
    updated_created_items = {}
    for key, value in created_items.items():
        updated_created_items[key.replace(":keep", "")] = value
    return updated_created_items
def get_vminstance(self, vm_id):
    """Returns the VM instance information from VIM"""
    # self.logger.debug("Getting VM from VIM")
    try:
        self._reload_connection()
        instance = self.nova.servers.find(id=vm_id)

        # TODO parse input and translate to VIM format (openmano_schemas.new_vminstance_response_schema)
        return instance.to_dict()
    except (
        ksExceptions.ClientException,
        nvExceptions.ClientException,
        nvExceptions.NotFound,
        ConnectionError,
    ) as e:
        # Translate the client-library error into a vimconn exception.
        self._format_exception(e)
def get_vminstance_console(self, vm_id, console_type="vnc"):
    """
    Get a console for the virtual machine
    Params:
        vm_id: uuid of the VM
        console_type, can be:
            "novnc" (by default), "xvpvnc" for VNC types,
            "rdp-html5" for RDP types, "spice-html5" for SPICE types
    Returns dict with the console parameters:
        protocol: ssh, ftp, http, https, ...
        server: usually ip address
        port: the http, ssh, ... port
        suffix: extra text, e.g. the http path and query string
    """
    self.logger.debug("Getting VM CONSOLE from VIM")

    try:
        self._reload_connection()
        server = self.nova.servers.find(id=vm_id)

        # Dispatch on the requested console type.
        if console_type is None or console_type == "novnc":
            console_dict = server.get_vnc_console("novnc")
        elif console_type == "xvpvnc":
            console_dict = server.get_vnc_console(console_type)
        elif console_type == "rdp-html5":
            console_dict = server.get_rdp_console(console_type)
        elif console_type == "spice-html5":
            console_dict = server.get_spice_console(console_type)
        else:
            raise vimconn.VimConnException(
                "console type '{}' not allowed".format(console_type),
                http_code=vimconn.HTTP_Bad_Request,
            )

        console_dict1 = console_dict.get("console")

        if console_dict1:
            console_url = console_dict1.get("url")

            if console_url:
                # parse console_url of the form <protocol>://<server>:<port>/<suffix>
                protocol_index = console_url.find("//")
                suffix_index = (
                    console_url[protocol_index + 2 :].find("/") + protocol_index + 2
                )
                port_index = (
                    console_url[protocol_index + 2 : suffix_index].find(":")
                    + protocol_index
                    + 2
                )

                if protocol_index < 0 or port_index < 0 or suffix_index < 0:
                    return (
                        -vimconn.HTTP_Internal_Server_Error,
                        "Unexpected response from VIM",
                    )

                console_dict = {
                    "protocol": console_url[0:protocol_index],
                    "server": console_url[protocol_index + 2 : port_index],
                    # NOTE(review): this slice starts at the ":" separator, so
                    # "port" keeps the leading colon as a string, unlike
                    # action_vminstance which returns
                    # int(console_url[port_index + 1 : suffix_index]).
                    # Confirm which format callers expect before changing.
                    "port": console_url[port_index:suffix_index],
                    "suffix": console_url[suffix_index + 1 :],
                }

                return console_dict
        raise vimconn.VimConnUnexpectedResponse("Unexpected response from VIM")
    except (
        nvExceptions.NotFound,
        ksExceptions.ClientException,
        nvExceptions.ClientException,
        nvExceptions.BadRequest,
        ConnectionError,
    ) as e:
        self._format_exception(e)
2892 def _delete_ports_by_id_wth_neutron(self
, k_id
: str) -> None:
2893 """Neutron delete ports by id.
2895 k_id (str): Port id in the VIM
2899 port_dict
= self
.neutron
.list_ports()
2900 existing_ports
= [port
["id"] for port
in port_dict
["ports"] if port_dict
]
2902 if k_id
in existing_ports
:
2903 self
.neutron
.delete_port(k_id
)
2905 except Exception as e
:
2907 self
.logger
.error("Error deleting port: {}: {}".format(type(e
).__name
__, e
))
def _delete_volumes_by_id_wth_cinder(
    self, k: str, k_id: str, volumes_to_hold: list, created_items: dict
) -> bool:
    """Cinder delete volume by id.

    Args:
        k (str): Full item name in created_items
        k_id (str): ID of floating ip in VIM
        volumes_to_hold (list): Volumes not to delete
        created_items (dict): All created items belongs to VM

    Returns:
        True when the volume is not yet "available" so the caller must keep
        waiting; a falsy value otherwise.
    """
    try:
        if k_id in volumes_to_hold:
            # Volume explicitly preserved: never delete it.
            return False

        if self.cinder.volumes.get(k_id).status != "available":
            # Still detaching; signal the caller to retry later.
            return True
        else:
            self.cinder.volumes.delete(k_id)
            # Mark the entry as cleaned up so later passes skip it.
            created_items[k] = None
    except Exception as e:
        self.logger.error(
            "Error deleting volume: {}: {}".format(type(e).__name__, e)
        )
2935 def _delete_floating_ip_by_id(self
, k
: str, k_id
: str, created_items
: dict) -> None:
2936 """Neutron delete floating ip by id.
2938 k (str): Full item name in created_items
2939 k_id (str): ID of floating ip in VIM
2940 created_items (dict): All created items belongs to VM
2943 self
.neutron
.delete_floatingip(k_id
)
2944 created_items
[k
] = None
2946 except Exception as e
:
2948 "Error deleting floating ip: {}: {}".format(type(e
).__name
__, e
)
2952 def _get_item_name_id(k
: str) -> Tuple
[str, str]:
2953 k_item
, _
, k_id
= k
.partition(":")
2956 def _delete_vm_ports_attached_to_network(self
, created_items
: dict) -> None:
2957 """Delete VM ports attached to the networks before deleting virtual machine.
2959 created_items (dict): All created items belongs to VM
2962 for k
, v
in created_items
.items():
2963 if not v
: # skip already deleted
2967 k_item
, k_id
= self
._get
_item
_name
_id
(k
)
2968 if k_item
== "port":
2969 self
._delete
_ports
_by
_id
_wth
_neutron
(k_id
)
2971 except Exception as e
:
2973 "Error deleting port: {}: {}".format(type(e
).__name
__, e
)
def _delete_created_items(
    self, created_items: dict, volumes_to_hold: list, keep_waiting: bool
) -> bool:
    """Delete Volumes and floating ip if they exist in created_items.

    Args:
        created_items (dict): All created items belongs to VM
        volumes_to_hold (list): Volumes that must NOT be deleted
        keep_waiting (bool): Current retry flag from the caller's loop

    Returns:
        keep_waiting (bool): True when some volume is not yet available and
            the caller should poll again.
    """
    for k, v in created_items.items():
        if not v:  # skip already deleted
            continue

        try:
            k_item, k_id = self._get_item_name_id(k)

            if k_item == "volume":
                unavailable_vol = self._delete_volumes_by_id_wth_cinder(
                    k, k_id, volumes_to_hold, created_items
                )

                if unavailable_vol:
                    # Volume still detaching: ask the caller to retry.
                    keep_waiting = True
            elif k_item == "floating_ip":
                self._delete_floating_ip_by_id(k, k_id, created_items)
        except Exception as e:
            # One failing item must not block deletion of the others.
            self.logger.error("Error deleting {}: {}".format(k, e))

    return keep_waiting
3006 def _extract_items_wth_keep_flag_from_created_items(created_items
: dict) -> dict:
3007 """Remove the volumes which has key flag from created_items
3010 created_items (dict): All created items belongs to VM
3013 created_items (dict): Persistent volumes eliminated created_items
3017 for (key
, value
) in created_items
.items()
3018 if len(key
.split(":")) == 2
def delete_vminstance(
    self, vm_id: str, created_items: dict = None, volumes_to_hold: list = None
) -> None:
    """Removes a VM instance from VIM. Returns the old identifier.

    Args:
        vm_id (str): Identifier of VM instance
        created_items (dict): All created items belongs to VM
        volumes_to_hold (list): Volumes_to_hold
    """
    if created_items is None:
        created_items = {}

    if volumes_to_hold is None:
        volumes_to_hold = []

    try:
        # Drop keep-tagged volume entries so they survive the VM deletion.
        created_items = self._extract_items_wth_keep_flag_from_created_items(
            created_items
        )

        self._reload_connection()

        # Delete VM ports attached to the networks before the virtual machine
        if created_items:
            self._delete_vm_ports_attached_to_network(created_items)

        if vm_id:
            self.nova.servers.delete(vm_id)

        # Although having detached, volumes should have in active status before deleting.
        # We ensure in this loop
        keep_waiting = True
        elapsed_time = 0

        while keep_waiting and elapsed_time < volume_timeout:
            keep_waiting = False

            # Delete volumes and floating IP.
            keep_waiting = self._delete_created_items(
                created_items, volumes_to_hold, keep_waiting
            )

            if keep_waiting:
                time.sleep(1)
                elapsed_time += 1
    except (
        nvExceptions.NotFound,
        ksExceptions.ClientException,
        nvExceptions.ClientException,
        ConnectionError,
    ) as e:
        self._format_exception(e)
def refresh_vms_status(self, vm_list):
    """Get the status of the virtual machines and their interfaces/ports
    Params: the list of VM identifiers
    Returns a dictionary with:
        vm_id:          #VIM id of this Virtual Machine
            status:     #Mandatory. Text with one of:
                        #  DELETED (not found at vim)
                        #  VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
                        #  OTHER (Vim reported other status not understood)
                        #  ERROR (VIM indicates an ERROR status)
                        #  ACTIVE, PAUSED, SUSPENDED, INACTIVE (not running),
                        #  CREATING (on building process), ERROR
                        #  ACTIVE:NoMgmtIP (Active but any of its interface has an IP address
            error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR
            vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)
            interfaces:
             -  vim_info:         #Text with plain information obtained from vim (yaml.safe_dump)
                mac_address:      #Text format XX:XX:XX:XX:XX:XX
                vim_net_id:       #network id where this interface is connected
                vim_interface_id: #interface/port VIM id
                ip_address:       #null, or text with IPv4, IPv6 address
                compute_node:     #identification of compute node where PF,VF interface is allocated
                pci:              #PCI address of the NIC that hosts the PF,VF
                vlan:             #physical VLAN used for VF
    """
    vm_dict = {}
    self.logger.debug(
        "refresh_vms status: Getting tenant VM instance information from VIM"
    )

    for vm_id in vm_list:
        vm = {}

        try:
            vm_vim = self.get_vminstance(vm_id)

            if vm_vim["status"] in vmStatus2manoFormat:
                vm["status"] = vmStatus2manoFormat[vm_vim["status"]]
            else:
                vm["status"] = "OTHER"
                vm["error_msg"] = "VIM status reported " + vm_vim["status"]

            # user_data may be large and sensitive; never propagate it.
            vm_vim.pop("OS-EXT-SRV-ATTR:user_data", None)
            vm_vim.pop("user_data", None)
            vm["vim_info"] = self.serialize(vm_vim)

            vm["interfaces"] = []
            if vm_vim.get("fault"):
                vm["error_msg"] = str(vm_vim["fault"])

            # get interfaces
            try:
                self._reload_connection()
                port_dict = self.neutron.list_ports(device_id=vm_id)

                for port in port_dict["ports"]:
                    interface = {}
                    interface["vim_info"] = self.serialize(port)
                    interface["mac_address"] = port.get("mac_address")
                    interface["vim_net_id"] = port["network_id"]
                    interface["vim_interface_id"] = port["id"]
                    # check if OS-EXT-SRV-ATTR:host is there,
                    # in case of non-admin credentials, it will be missing

                    if vm_vim.get("OS-EXT-SRV-ATTR:host"):
                        interface["compute_node"] = vm_vim["OS-EXT-SRV-ATTR:host"]

                    interface["pci"] = None

                    # check if binding:profile is there,
                    # in case of non-admin credentials, it will be missing
                    if port.get("binding:profile"):
                        if port["binding:profile"].get("pci_slot"):
                            # TODO: At the moment sr-iov pci addresses are converted to PF pci addresses by setting
                            # the slot to 0x00
                            # TODO: This is just a workaround valid for niantinc. Find a better way to do so
                            #   CHANGE DDDD:BB:SS.F to DDDD:BB:00.(F%2)   assuming there are 2 ports per nic
                            pci = port["binding:profile"]["pci_slot"]
                            # interface["pci"] = pci[:-4] + "00." + str(int(pci[-1]) % 2)
                            interface["pci"] = pci

                    interface["vlan"] = None

                    if port.get("binding:vif_details"):
                        interface["vlan"] = port["binding:vif_details"].get("vlan")

                    # Get vlan from network in case not present in port for those old openstacks and cases where
                    # it is needed vlan at PT
                    if not interface["vlan"]:
                        # if network is of type vlan and port is of type direct (sr-iov) then set vlan id
                        network = self.neutron.show_network(port["network_id"])

                        if (
                            network["network"].get("provider:network_type")
                            == "vlan"
                        ):
                            # and port.get("binding:vnic_type") in ("direct", "direct-physical"):
                            interface["vlan"] = network["network"].get(
                                "provider:segmentation_id"
                            )

                    ips = []
                    # look for floating ip address
                    try:
                        floating_ip_dict = self.neutron.list_floatingips(
                            port_id=port["id"]
                        )

                        if floating_ip_dict.get("floatingips"):
                            ips.append(
                                floating_ip_dict["floatingips"][0].get(
                                    "floating_ip_address"
                                )
                            )
                    except Exception:
                        # Floating IP lookup is optional; ignore failures.
                        pass

                    for subnet in port["fixed_ips"]:
                        ips.append(subnet["ip_address"])

                    interface["ip_address"] = ";".join(ips)
                    vm["interfaces"].append(interface)
            except Exception as e:
                self.logger.error(
                    "Error getting vm interface information {}: {}".format(
                        type(e).__name__, e
                    ),
                    exc_info=True,
                )
        except vimconn.VimConnNotFoundException as e:
            self.logger.error("Exception getting vm status: %s", str(e))
            vm["status"] = "DELETED"
            vm["error_msg"] = str(e)
        except vimconn.VimConnException as e:
            self.logger.error("Exception getting vm status: %s", str(e))
            vm["status"] = "VIM_ERROR"
            vm["error_msg"] = str(e)

        vm_dict[vm_id] = vm

    return vm_dict
def action_vminstance(self, vm_id, action_dict, created_items={}):
    """Send and action over a VM instance from VIM
    Returns None or the console dict if the action was successfully sent to the VIM"""
    # NOTE(review): `created_items={}` is a mutable default argument; it is
    # not mutated here, but changing the default to None would be safer.
    self.logger.debug("Action over VM '%s': %s", vm_id, str(action_dict))

    try:
        self._reload_connection()
        server = self.nova.servers.find(id=vm_id)

        if "start" in action_dict:
            if action_dict["start"] == "rebuild":
                server.rebuild(server.image["id"])
            else:
                # Resume from whatever stopped-like state the VM is in.
                if server.status == "PAUSED":
                    server.unpause()
                elif server.status == "SUSPENDED":
                    server.resume()
                elif server.status == "SHUTOFF":
                    server.start()
                else:
                    self.logger.debug(
                        "ERROR : Instance is not in SHUTOFF/PAUSE/SUSPEND state"
                    )
                    raise vimconn.VimConnException(
                        "Cannot 'start' instance while it is in active state",
                        http_code=vimconn.HTTP_Bad_Request,
                    )
        elif "pause" in action_dict:
            server.pause()
        elif "resume" in action_dict:
            server.resume()
        elif "shutoff" in action_dict or "shutdown" in action_dict:
            self.logger.debug("server status %s", server.status)

            if server.status == "ACTIVE":
                server.stop()
            else:
                self.logger.debug("ERROR: VM is not in Active state")
                raise vimconn.VimConnException(
                    "VM is not in active state, stop operation is not allowed",
                    http_code=vimconn.HTTP_Bad_Request,
                )
        elif "forceOff" in action_dict:
            server.stop()  # TODO
        elif "terminate" in action_dict:
            server.delete()
        elif "createImage" in action_dict:
            server.create_image()
            # "path":path_schema,
            # "description":description_schema,
            # "name":name_schema,
            # "metadata":metadata_schema,
            # "imageRef": id_schema,
            # "disk": {"oneOf":[{"type": "null"}, {"type":"string"}] },
        elif "rebuild" in action_dict:
            server.rebuild(server.image["id"])
        elif "reboot" in action_dict:
            server.reboot()  # reboot_type="SOFT"
        elif "console" in action_dict:
            console_type = action_dict["console"]

            if console_type is None or console_type == "novnc":
                console_dict = server.get_vnc_console("novnc")
            elif console_type == "xvpvnc":
                console_dict = server.get_vnc_console(console_type)
            elif console_type == "rdp-html5":
                console_dict = server.get_rdp_console(console_type)
            elif console_type == "spice-html5":
                console_dict = server.get_spice_console(console_type)
            else:
                raise vimconn.VimConnException(
                    "console type '{}' not allowed".format(console_type),
                    http_code=vimconn.HTTP_Bad_Request,
                )

            try:
                # parse console_url <protocol>://<server>:<port>/<suffix>
                console_url = console_dict["console"]["url"]
                protocol_index = console_url.find("//")
                suffix_index = (
                    console_url[protocol_index + 2 :].find("/") + protocol_index + 2
                )
                port_index = (
                    console_url[protocol_index + 2 : suffix_index].find(":")
                    + protocol_index
                    + 2
                )

                if protocol_index < 0 or port_index < 0 or suffix_index < 0:
                    raise vimconn.VimConnException(
                        "Unexpected response from VIM " + str(console_dict)
                    )

                console_dict2 = {
                    "protocol": console_url[0:protocol_index],
                    "server": console_url[protocol_index + 2 : port_index],
                    "port": int(console_url[port_index + 1 : suffix_index]),
                    "suffix": console_url[suffix_index + 1 :],
                }

                return console_dict2
            except Exception:
                raise vimconn.VimConnException(
                    "Unexpected response from VIM " + str(console_dict)
                )

        return None
    except (
        ksExceptions.ClientException,
        nvExceptions.ClientException,
        nvExceptions.NotFound,
        ConnectionError,
    ) as e:
        self._format_exception(e)
    # TODO insert exception vimconn.HTTP_Unauthorized
3333 # ###### VIO Specific Changes #########
3334 def _generate_vlanID(self
):
3336 Method to get unused vlanID
3344 networks
= self
.get_network_list()
3346 for net
in networks
:
3347 if net
.get("provider:segmentation_id"):
3348 usedVlanIDs
.append(net
.get("provider:segmentation_id"))
3350 used_vlanIDs
= set(usedVlanIDs
)
3352 # find unused VLAN ID
3353 for vlanID_range
in self
.config
.get("dataplane_net_vlan_range"):
3355 start_vlanid
, end_vlanid
= map(
3356 int, vlanID_range
.replace(" ", "").split("-")
3359 for vlanID
in range(start_vlanid
, end_vlanid
+ 1):
3360 if vlanID
not in used_vlanIDs
:
3362 except Exception as exp
:
3363 raise vimconn
.VimConnException(
3364 "Exception {} occurred while generating VLAN ID.".format(exp
)
3367 raise vimconn
.VimConnConflictException(
3368 "Unable to create the SRIOV VLAN network. All given Vlan IDs {} are in use.".format(
3369 self
.config
.get("dataplane_net_vlan_range")
3373 def _generate_multisegment_vlanID(self
):
3375 Method to get unused vlanID
3383 networks
= self
.get_network_list()
3384 for net
in networks
:
3385 if net
.get("provider:network_type") == "vlan" and net
.get(
3386 "provider:segmentation_id"
3388 usedVlanIDs
.append(net
.get("provider:segmentation_id"))
3389 elif net
.get("segments"):
3390 for segment
in net
.get("segments"):
3391 if segment
.get("provider:network_type") == "vlan" and segment
.get(
3392 "provider:segmentation_id"
3394 usedVlanIDs
.append(segment
.get("provider:segmentation_id"))
3396 used_vlanIDs
= set(usedVlanIDs
)
3398 # find unused VLAN ID
3399 for vlanID_range
in self
.config
.get("multisegment_vlan_range"):
3401 start_vlanid
, end_vlanid
= map(
3402 int, vlanID_range
.replace(" ", "").split("-")
3405 for vlanID
in range(start_vlanid
, end_vlanid
+ 1):
3406 if vlanID
not in used_vlanIDs
:
3408 except Exception as exp
:
3409 raise vimconn
.VimConnException(
3410 "Exception {} occurred while generating VLAN ID.".format(exp
)
3413 raise vimconn
.VimConnConflictException(
3414 "Unable to create the VLAN segment. All VLAN IDs {} are in use.".format(
3415 self
.config
.get("multisegment_vlan_range")
3419 def _validate_vlan_ranges(self
, input_vlan_range
, text_vlan_range
):
3421 Method to validate user given vlanID ranges
3425 for vlanID_range
in input_vlan_range
:
3426 vlan_range
= vlanID_range
.replace(" ", "")
3428 vlanID_pattern
= r
"(\d)*-(\d)*$"
3429 match_obj
= re
.match(vlanID_pattern
, vlan_range
)
3431 raise vimconn
.VimConnConflictException(
3432 "Invalid VLAN range for {}: {}.You must provide "
3433 "'{}' in format [start_ID - end_ID].".format(
3434 text_vlan_range
, vlanID_range
, text_vlan_range
3438 start_vlanid
, end_vlanid
= map(int, vlan_range
.split("-"))
3439 if start_vlanid
<= 0:
3440 raise vimconn
.VimConnConflictException(
3441 "Invalid VLAN range for {}: {}. Start ID can not be zero. For VLAN "
3442 "networks valid IDs are 1 to 4094 ".format(
3443 text_vlan_range
, vlanID_range
3447 if end_vlanid
> 4094:
3448 raise vimconn
.VimConnConflictException(
3449 "Invalid VLAN range for {}: {}. End VLAN ID can not be "
3450 "greater than 4094. For VLAN networks valid IDs are 1 to 4094 ".format(
3451 text_vlan_range
, vlanID_range
3455 if start_vlanid
> end_vlanid
:
3456 raise vimconn
.VimConnConflictException(
3457 "Invalid VLAN range for {}: {}. You must provide '{}'"
3458 " in format start_ID - end_ID and start_ID < end_ID ".format(
3459 text_vlan_range
, vlanID_range
, text_vlan_range
def delete_user(self, user_id):
    """Delete a user from openstack VIM
    Returns the user identifier"""
    if self.debug:
        print("osconnector: Deleting a user from VIM")

    try:
        self._reload_connection()
        self.keystone.users.delete(user_id)

        return 1, user_id
    except ksExceptions.ConnectionError as e:
        error_value = -vimconn.HTTP_Bad_Request
        error_text = (
            type(e).__name__
            + ": "
            + (str(e) if len(e.args) == 0 else str(e.args[0]))
        )
    except ksExceptions.NotFound as e:
        error_value = -vimconn.HTTP_Not_Found
        error_text = (
            type(e).__name__
            + ": "
            + (str(e) if len(e.args) == 0 else str(e.args[0]))
        )
    except ksExceptions.ClientException as e:  # TODO remove
        error_value = -vimconn.HTTP_Bad_Request
        error_text = (
            type(e).__name__
            + ": "
            + (str(e) if len(e.args) == 0 else str(e.args[0]))
        )

    # TODO insert exception vimconn.HTTP_Unauthorized
    # if reaching here is because an exception
    # NOTE(review): the log label "delete_tenant" looks like a copy/paste
    # leftover in this delete_user method — confirm before changing it.
    self.logger.debug("delete_tenant " + error_text)

    return error_value, error_text
def get_hosts_info(self):
    """Get the information of deployed hosts
    Returns the hosts content"""
    if self.debug:
        print("osconnector: Getting Host info from VIM")

    try:
        h_list = []
        self._reload_connection()
        hypervisors = self.nova.hypervisors.list()

        for hype in hypervisors:
            h_list.append(hype.to_dict())

        # Success: (1, payload); failures below return (negative code, text).
        return 1, {"hosts": h_list}
    except nvExceptions.NotFound as e:
        error_value = -vimconn.HTTP_Not_Found
        error_text = str(e) if len(e.args) == 0 else str(e.args[0])
    except (ksExceptions.ClientException, nvExceptions.ClientException) as e:
        error_value = -vimconn.HTTP_Bad_Request
        error_text = (
            type(e).__name__
            + ": "
            + (str(e) if len(e.args) == 0 else str(e.args[0]))
        )

    # TODO insert exception vimconn.HTTP_Unauthorized
    # if reaching here is because an exception
    self.logger.debug("get_hosts_info " + error_text)

    return error_value, error_text
3534 def get_hosts(self
, vim_tenant
):
3535 """Get the hosts and deployed instances
3536 Returns the hosts content"""
3537 r
, hype_dict
= self
.get_hosts_info()
3542 hypervisors
= hype_dict
["hosts"]
3545 servers
= self
.nova
.servers
.list()
3546 for hype
in hypervisors
:
3547 for server
in servers
:
3549 server
.to_dict()["OS-EXT-SRV-ATTR:hypervisor_hostname"]
3550 == hype
["hypervisor_hostname"]
3553 hype
["vm"].append(server
.id)
3555 hype
["vm"] = [server
.id]
3558 except nvExceptions
.NotFound
as e
:
3559 error_value
= -vimconn
.HTTP_Not_Found
3560 error_text
= str(e
) if len(e
.args
) == 0 else str(e
.args
[0])
3561 except (ksExceptions
.ClientException
, nvExceptions
.ClientException
) as e
:
3562 error_value
= -vimconn
.HTTP_Bad_Request
3566 + (str(e
) if len(e
.args
) == 0 else str(e
.args
[0]))
3569 # TODO insert exception vimconn.HTTP_Unauthorized
3570 # if reaching here is because an exception
3571 self
.logger
.debug("get_hosts " + error_text
)
3573 return error_value
, error_text
3575 def new_classification(self
, name
, ctype
, definition
):
3577 "Adding a new (Traffic) Classification to VIM, named %s", name
3582 self
._reload
_connection
()
3584 if ctype
not in supportedClassificationTypes
:
3585 raise vimconn
.VimConnNotSupportedException(
3586 "OpenStack VIM connector does not support provided "
3587 "Classification Type {}, supported ones are: {}".format(
3588 ctype
, supportedClassificationTypes
3592 if not self
._validate
_classification
(ctype
, definition
):
3593 raise vimconn
.VimConnException(
3594 "Incorrect Classification definition for the type specified."
3597 classification_dict
= definition
3598 classification_dict
["name"] = name
3599 new_class
= self
.neutron
.create_sfc_flow_classifier(
3600 {"flow_classifier": classification_dict
}
3603 return new_class
["flow_classifier"]["id"]
3605 neExceptions
.ConnectionFailed
,
3606 ksExceptions
.ClientException
,
3607 neExceptions
.NeutronException
,
3610 self
.logger
.error("Creation of Classification failed.")
3611 self
._format
_exception
(e
)
3613 def get_classification(self
, class_id
):
3614 self
.logger
.debug(" Getting Classification %s from VIM", class_id
)
3615 filter_dict
= {"id": class_id
}
3616 class_list
= self
.get_classification_list(filter_dict
)
3618 if len(class_list
) == 0:
3619 raise vimconn
.VimConnNotFoundException(
3620 "Classification '{}' not found".format(class_id
)
3622 elif len(class_list
) > 1:
3623 raise vimconn
.VimConnConflictException(
3624 "Found more than one Classification with this criteria"
3627 classification
= class_list
[0]
3629 return classification
3631 def get_classification_list(self
, filter_dict
={}):
3633 "Getting Classifications from VIM filter: '%s'", str(filter_dict
)
3637 filter_dict_os
= filter_dict
.copy()
3638 self
._reload
_connection
()
3640 if self
.api_version3
and "tenant_id" in filter_dict_os
:
3641 filter_dict_os
["project_id"] = filter_dict_os
.pop("tenant_id")
3643 classification_dict
= self
.neutron
.list_sfc_flow_classifiers(
3646 classification_list
= classification_dict
["flow_classifiers"]
3647 self
.__classification
_os
2mano
(classification_list
)
3649 return classification_list
3651 neExceptions
.ConnectionFailed
,
3652 ksExceptions
.ClientException
,
3653 neExceptions
.NeutronException
,
3656 self
._format
_exception
(e
)
3658 def delete_classification(self
, class_id
):
3659 self
.logger
.debug("Deleting Classification '%s' from VIM", class_id
)
3662 self
._reload
_connection
()
3663 self
.neutron
.delete_sfc_flow_classifier(class_id
)
3667 neExceptions
.ConnectionFailed
,
3668 neExceptions
.NeutronException
,
3669 ksExceptions
.ClientException
,
3670 neExceptions
.NeutronException
,
3673 self
._format
_exception
(e
)
3675 def new_sfi(self
, name
, ingress_ports
, egress_ports
, sfc_encap
=True):
3677 "Adding a new Service Function Instance to VIM, named '%s'", name
3682 self
._reload
_connection
()
3688 if len(ingress_ports
) != 1:
3689 raise vimconn
.VimConnNotSupportedException(
3690 "OpenStack VIM connector can only have 1 ingress port per SFI"
3693 if len(egress_ports
) != 1:
3694 raise vimconn
.VimConnNotSupportedException(
3695 "OpenStack VIM connector can only have 1 egress port per SFI"
3700 "ingress": ingress_ports
[0],
3701 "egress": egress_ports
[0],
3702 "service_function_parameters": {"correlation": correlation
},
3704 new_sfi
= self
.neutron
.create_sfc_port_pair({"port_pair": sfi_dict
})
3706 return new_sfi
["port_pair"]["id"]
3708 neExceptions
.ConnectionFailed
,
3709 ksExceptions
.ClientException
,
3710 neExceptions
.NeutronException
,
3715 self
.neutron
.delete_sfc_port_pair(new_sfi
["port_pair"]["id"])
3718 "Creation of Service Function Instance failed, with "
3719 "subsequent deletion failure as well."
3722 self
._format
_exception
(e
)
3724 def get_sfi(self
, sfi_id
):
3725 self
.logger
.debug("Getting Service Function Instance %s from VIM", sfi_id
)
3726 filter_dict
= {"id": sfi_id
}
3727 sfi_list
= self
.get_sfi_list(filter_dict
)
3729 if len(sfi_list
) == 0:
3730 raise vimconn
.VimConnNotFoundException(
3731 "Service Function Instance '{}' not found".format(sfi_id
)
3733 elif len(sfi_list
) > 1:
3734 raise vimconn
.VimConnConflictException(
3735 "Found more than one Service Function Instance with this criteria"
3742 def get_sfi_list(self
, filter_dict
={}):
3744 "Getting Service Function Instances from VIM filter: '%s'", str(filter_dict
)
3748 self
._reload
_connection
()
3749 filter_dict_os
= filter_dict
.copy()
3751 if self
.api_version3
and "tenant_id" in filter_dict_os
:
3752 filter_dict_os
["project_id"] = filter_dict_os
.pop("tenant_id")
3754 sfi_dict
= self
.neutron
.list_sfc_port_pairs(**filter_dict_os
)
3755 sfi_list
= sfi_dict
["port_pairs"]
3756 self
.__sfi
_os
2mano
(sfi_list
)
3760 neExceptions
.ConnectionFailed
,
3761 ksExceptions
.ClientException
,
3762 neExceptions
.NeutronException
,
3765 self
._format
_exception
(e
)
3767 def delete_sfi(self
, sfi_id
):
3768 self
.logger
.debug("Deleting Service Function Instance '%s' from VIM", sfi_id
)
3771 self
._reload
_connection
()
3772 self
.neutron
.delete_sfc_port_pair(sfi_id
)
3776 neExceptions
.ConnectionFailed
,
3777 neExceptions
.NeutronException
,
3778 ksExceptions
.ClientException
,
3779 neExceptions
.NeutronException
,
3782 self
._format
_exception
(e
)
3784 def new_sf(self
, name
, sfis
, sfc_encap
=True):
3785 self
.logger
.debug("Adding a new Service Function to VIM, named '%s'", name
)
3789 self
._reload
_connection
()
3790 # correlation = None
3792 # correlation = "nsh"
3794 for instance
in sfis
:
3795 sfi
= self
.get_sfi(instance
)
3797 if sfi
.get("sfc_encap") != sfc_encap
:
3798 raise vimconn
.VimConnNotSupportedException(
3799 "OpenStack VIM connector requires all SFIs of the "
3800 "same SF to share the same SFC Encapsulation"
3803 sf_dict
= {"name": name
, "port_pairs": sfis
}
3804 new_sf
= self
.neutron
.create_sfc_port_pair_group(
3805 {"port_pair_group": sf_dict
}
3808 return new_sf
["port_pair_group"]["id"]
3810 neExceptions
.ConnectionFailed
,
3811 ksExceptions
.ClientException
,
3812 neExceptions
.NeutronException
,
3817 self
.neutron
.delete_sfc_port_pair_group(
3818 new_sf
["port_pair_group"]["id"]
3822 "Creation of Service Function failed, with "
3823 "subsequent deletion failure as well."
3826 self
._format
_exception
(e
)
3828 def get_sf(self
, sf_id
):
3829 self
.logger
.debug("Getting Service Function %s from VIM", sf_id
)
3830 filter_dict
= {"id": sf_id
}
3831 sf_list
= self
.get_sf_list(filter_dict
)
3833 if len(sf_list
) == 0:
3834 raise vimconn
.VimConnNotFoundException(
3835 "Service Function '{}' not found".format(sf_id
)
3837 elif len(sf_list
) > 1:
3838 raise vimconn
.VimConnConflictException(
3839 "Found more than one Service Function with this criteria"
3846 def get_sf_list(self
, filter_dict
={}):
3848 "Getting Service Function from VIM filter: '%s'", str(filter_dict
)
3852 self
._reload
_connection
()
3853 filter_dict_os
= filter_dict
.copy()
3855 if self
.api_version3
and "tenant_id" in filter_dict_os
:
3856 filter_dict_os
["project_id"] = filter_dict_os
.pop("tenant_id")
3858 sf_dict
= self
.neutron
.list_sfc_port_pair_groups(**filter_dict_os
)
3859 sf_list
= sf_dict
["port_pair_groups"]
3860 self
.__sf
_os
2mano
(sf_list
)
3864 neExceptions
.ConnectionFailed
,
3865 ksExceptions
.ClientException
,
3866 neExceptions
.NeutronException
,
3869 self
._format
_exception
(e
)
3871 def delete_sf(self
, sf_id
):
3872 self
.logger
.debug("Deleting Service Function '%s' from VIM", sf_id
)
3875 self
._reload
_connection
()
3876 self
.neutron
.delete_sfc_port_pair_group(sf_id
)
3880 neExceptions
.ConnectionFailed
,
3881 neExceptions
.NeutronException
,
3882 ksExceptions
.ClientException
,
3883 neExceptions
.NeutronException
,
3886 self
._format
_exception
(e
)
3888 def new_sfp(self
, name
, classifications
, sfs
, sfc_encap
=True, spi
=None):
3889 self
.logger
.debug("Adding a new Service Function Path to VIM, named '%s'", name
)
3893 self
._reload
_connection
()
3894 # In networking-sfc the MPLS encapsulation is legacy
3895 # should be used when no full SFC Encapsulation is intended
3896 correlation
= "mpls"
3903 "flow_classifiers": classifications
,
3904 "port_pair_groups": sfs
,
3905 "chain_parameters": {"correlation": correlation
},
3909 sfp_dict
["chain_id"] = spi
3911 new_sfp
= self
.neutron
.create_sfc_port_chain({"port_chain": sfp_dict
})
3913 return new_sfp
["port_chain"]["id"]
3915 neExceptions
.ConnectionFailed
,
3916 ksExceptions
.ClientException
,
3917 neExceptions
.NeutronException
,
3922 self
.neutron
.delete_sfc_port_chain(new_sfp
["port_chain"]["id"])
3925 "Creation of Service Function Path failed, with "
3926 "subsequent deletion failure as well."
3929 self
._format
_exception
(e
)
3931 def get_sfp(self
, sfp_id
):
3932 self
.logger
.debug(" Getting Service Function Path %s from VIM", sfp_id
)
3934 filter_dict
= {"id": sfp_id
}
3935 sfp_list
= self
.get_sfp_list(filter_dict
)
3937 if len(sfp_list
) == 0:
3938 raise vimconn
.VimConnNotFoundException(
3939 "Service Function Path '{}' not found".format(sfp_id
)
3941 elif len(sfp_list
) > 1:
3942 raise vimconn
.VimConnConflictException(
3943 "Found more than one Service Function Path with this criteria"
3950 def get_sfp_list(self
, filter_dict
={}):
3952 "Getting Service Function Paths from VIM filter: '%s'", str(filter_dict
)
3956 self
._reload
_connection
()
3957 filter_dict_os
= filter_dict
.copy()
3959 if self
.api_version3
and "tenant_id" in filter_dict_os
:
3960 filter_dict_os
["project_id"] = filter_dict_os
.pop("tenant_id")
3962 sfp_dict
= self
.neutron
.list_sfc_port_chains(**filter_dict_os
)
3963 sfp_list
= sfp_dict
["port_chains"]
3964 self
.__sfp
_os
2mano
(sfp_list
)
3968 neExceptions
.ConnectionFailed
,
3969 ksExceptions
.ClientException
,
3970 neExceptions
.NeutronException
,
3973 self
._format
_exception
(e
)
3975 def delete_sfp(self
, sfp_id
):
3976 self
.logger
.debug("Deleting Service Function Path '%s' from VIM", sfp_id
)
3979 self
._reload
_connection
()
3980 self
.neutron
.delete_sfc_port_chain(sfp_id
)
3984 neExceptions
.ConnectionFailed
,
3985 neExceptions
.NeutronException
,
3986 ksExceptions
.ClientException
,
3987 neExceptions
.NeutronException
,
3990 self
._format
_exception
(e
)
3992 def refresh_sfps_status(self
, sfp_list
):
3993 """Get the status of the service function path
3994 Params: the list of sfp identifiers
3995 Returns a dictionary with:
3996 vm_id: #VIM id of this service function path
3997 status: #Mandatory. Text with one of:
3998 # DELETED (not found at vim)
3999 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
4000 # OTHER (Vim reported other status not understood)
4001 # ERROR (VIM indicates an ERROR status)
4003 # CREATING (on building process)
4004 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
4005 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)F
4009 "refresh_sfps status: Getting tenant SFP information from VIM"
4012 for sfp_id
in sfp_list
:
4016 sfp_vim
= self
.get_sfp(sfp_id
)
4019 sfp
["status"] = vmStatus2manoFormat
["ACTIVE"]
4021 sfp
["status"] = "OTHER"
4022 sfp
["error_msg"] = "VIM status reported " + sfp
["status"]
4024 sfp
["vim_info"] = self
.serialize(sfp_vim
)
4026 if sfp_vim
.get("fault"):
4027 sfp
["error_msg"] = str(sfp_vim
["fault"])
4028 except vimconn
.VimConnNotFoundException
as e
:
4029 self
.logger
.error("Exception getting sfp status: %s", str(e
))
4030 sfp
["status"] = "DELETED"
4031 sfp
["error_msg"] = str(e
)
4032 except vimconn
.VimConnException
as e
:
4033 self
.logger
.error("Exception getting sfp status: %s", str(e
))
4034 sfp
["status"] = "VIM_ERROR"
4035 sfp
["error_msg"] = str(e
)
4037 sfp_dict
[sfp_id
] = sfp
4041 def refresh_sfis_status(self
, sfi_list
):
4042 """Get the status of the service function instances
4043 Params: the list of sfi identifiers
4044 Returns a dictionary with:
4045 vm_id: #VIM id of this service function instance
4046 status: #Mandatory. Text with one of:
4047 # DELETED (not found at vim)
4048 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
4049 # OTHER (Vim reported other status not understood)
4050 # ERROR (VIM indicates an ERROR status)
4052 # CREATING (on building process)
4053 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
4054 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
4058 "refresh_sfis status: Getting tenant sfi information from VIM"
4061 for sfi_id
in sfi_list
:
4065 sfi_vim
= self
.get_sfi(sfi_id
)
4068 sfi
["status"] = vmStatus2manoFormat
["ACTIVE"]
4070 sfi
["status"] = "OTHER"
4071 sfi
["error_msg"] = "VIM status reported " + sfi
["status"]
4073 sfi
["vim_info"] = self
.serialize(sfi_vim
)
4075 if sfi_vim
.get("fault"):
4076 sfi
["error_msg"] = str(sfi_vim
["fault"])
4077 except vimconn
.VimConnNotFoundException
as e
:
4078 self
.logger
.error("Exception getting sfi status: %s", str(e
))
4079 sfi
["status"] = "DELETED"
4080 sfi
["error_msg"] = str(e
)
4081 except vimconn
.VimConnException
as e
:
4082 self
.logger
.error("Exception getting sfi status: %s", str(e
))
4083 sfi
["status"] = "VIM_ERROR"
4084 sfi
["error_msg"] = str(e
)
4086 sfi_dict
[sfi_id
] = sfi
4090 def refresh_sfs_status(self
, sf_list
):
4091 """Get the status of the service functions
4092 Params: the list of sf identifiers
4093 Returns a dictionary with:
4094 vm_id: #VIM id of this service function
4095 status: #Mandatory. Text with one of:
4096 # DELETED (not found at vim)
4097 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
4098 # OTHER (Vim reported other status not understood)
4099 # ERROR (VIM indicates an ERROR status)
4101 # CREATING (on building process)
4102 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
4103 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
4106 self
.logger
.debug("refresh_sfs status: Getting tenant sf information from VIM")
4108 for sf_id
in sf_list
:
4112 sf_vim
= self
.get_sf(sf_id
)
4115 sf
["status"] = vmStatus2manoFormat
["ACTIVE"]
4117 sf
["status"] = "OTHER"
4118 sf
["error_msg"] = "VIM status reported " + sf_vim
["status"]
4120 sf
["vim_info"] = self
.serialize(sf_vim
)
4122 if sf_vim
.get("fault"):
4123 sf
["error_msg"] = str(sf_vim
["fault"])
4124 except vimconn
.VimConnNotFoundException
as e
:
4125 self
.logger
.error("Exception getting sf status: %s", str(e
))
4126 sf
["status"] = "DELETED"
4127 sf
["error_msg"] = str(e
)
4128 except vimconn
.VimConnException
as e
:
4129 self
.logger
.error("Exception getting sf status: %s", str(e
))
4130 sf
["status"] = "VIM_ERROR"
4131 sf
["error_msg"] = str(e
)
4137 def refresh_classifications_status(self
, classification_list
):
4138 """Get the status of the classifications
4139 Params: the list of classification identifiers
4140 Returns a dictionary with:
4141 vm_id: #VIM id of this classifier
4142 status: #Mandatory. Text with one of:
4143 # DELETED (not found at vim)
4144 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
4145 # OTHER (Vim reported other status not understood)
4146 # ERROR (VIM indicates an ERROR status)
4148 # CREATING (on building process)
4149 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
4150 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
4152 classification_dict
= {}
4154 "refresh_classifications status: Getting tenant classification information from VIM"
4157 for classification_id
in classification_list
:
4161 classification_vim
= self
.get_classification(classification_id
)
4163 if classification_vim
:
4164 classification
["status"] = vmStatus2manoFormat
["ACTIVE"]
4166 classification
["status"] = "OTHER"
4167 classification
["error_msg"] = (
4168 "VIM status reported " + classification
["status"]
4171 classification
["vim_info"] = self
.serialize(classification_vim
)
4173 if classification_vim
.get("fault"):
4174 classification
["error_msg"] = str(classification_vim
["fault"])
4175 except vimconn
.VimConnNotFoundException
as e
:
4176 self
.logger
.error("Exception getting classification status: %s", str(e
))
4177 classification
["status"] = "DELETED"
4178 classification
["error_msg"] = str(e
)
4179 except vimconn
.VimConnException
as e
:
4180 self
.logger
.error("Exception getting classification status: %s", str(e
))
4181 classification
["status"] = "VIM_ERROR"
4182 classification
["error_msg"] = str(e
)
4184 classification_dict
[classification_id
] = classification
4186 return classification_dict
4188 def new_affinity_group(self
, affinity_group_data
):
4189 """Adds a server group to VIM
4190 affinity_group_data contains a dictionary with information, keys:
4191 name: name in VIM for the server group
4192 type: affinity or anti-affinity
4193 scope: Only nfvi-node allowed
4194 Returns the server group identifier"""
4195 self
.logger
.debug("Adding Server Group '%s'", str(affinity_group_data
))
4198 name
= affinity_group_data
["name"]
4199 policy
= affinity_group_data
["type"]
4201 self
._reload
_connection
()
4202 new_server_group
= self
.nova
.server_groups
.create(name
, policy
)
4204 return new_server_group
.id
4206 ksExceptions
.ClientException
,
4207 nvExceptions
.ClientException
,
4211 self
._format
_exception
(e
)
4213 def get_affinity_group(self
, affinity_group_id
):
4214 """Obtain server group details from the VIM. Returns the server group detais as a dict"""
4215 self
.logger
.debug("Getting flavor '%s'", affinity_group_id
)
4217 self
._reload
_connection
()
4218 server_group
= self
.nova
.server_groups
.find(id=affinity_group_id
)
4220 return server_group
.to_dict()
4222 nvExceptions
.NotFound
,
4223 nvExceptions
.ClientException
,
4224 ksExceptions
.ClientException
,
4227 self
._format
_exception
(e
)
4229 def delete_affinity_group(self
, affinity_group_id
):
4230 """Deletes a server group from the VIM. Returns the old affinity_group_id"""
4231 self
.logger
.debug("Getting server group '%s'", affinity_group_id
)
4233 self
._reload
_connection
()
4234 self
.nova
.server_groups
.delete(affinity_group_id
)
4236 return affinity_group_id
4238 nvExceptions
.NotFound
,
4239 ksExceptions
.ClientException
,
4240 nvExceptions
.ClientException
,
4243 self
._format
_exception
(e
)
4245 def get_vdu_state(self
, vm_id
):
4247 Getting the state of a vdu
4249 vm_id: ID of an instance
4251 self
.logger
.debug("Getting the status of VM")
4252 self
.logger
.debug("VIM VM ID %s", vm_id
)
4253 self
._reload
_connection
()
4254 server
= self
.nova
.servers
.find(id=vm_id
)
4255 server_dict
= server
.to_dict()
4257 server_dict
["status"],
4258 server_dict
["flavor"]["id"],
4259 server_dict
["OS-EXT-SRV-ATTR:host"],
4260 server_dict
["OS-EXT-AZ:availability_zone"],
4262 self
.logger
.debug("vdu_data %s", vdu_data
)
4265 def check_compute_availability(self
, host
, server_flavor_details
):
4266 self
._reload
_connection
()
4267 hypervisor_search
= self
.nova
.hypervisors
.search(
4268 hypervisor_match
=host
, servers
=True
4270 for hypervisor
in hypervisor_search
:
4271 hypervisor_id
= hypervisor
.to_dict()["id"]
4272 hypervisor_details
= self
.nova
.hypervisors
.get(hypervisor
=hypervisor_id
)
4273 hypervisor_dict
= hypervisor_details
.to_dict()
4274 hypervisor_temp
= json
.dumps(hypervisor_dict
)
4275 hypervisor_json
= json
.loads(hypervisor_temp
)
4276 resources_available
= [
4277 hypervisor_json
["free_ram_mb"],
4278 hypervisor_json
["disk_available_least"],
4279 hypervisor_json
["vcpus"] - hypervisor_json
["vcpus_used"],
4281 compute_available
= all(
4282 x
> y
for x
, y
in zip(resources_available
, server_flavor_details
)
4284 if compute_available
:
4287 def check_availability_zone(
4288 self
, old_az
, server_flavor_details
, old_host
, host
=None
4290 self
._reload
_connection
()
4291 az_check
= {"zone_check": False, "compute_availability": None}
4292 aggregates_list
= self
.nova
.aggregates
.list()
4293 for aggregate
in aggregates_list
:
4294 aggregate_details
= aggregate
.to_dict()
4295 aggregate_temp
= json
.dumps(aggregate_details
)
4296 aggregate_json
= json
.loads(aggregate_temp
)
4297 if aggregate_json
["availability_zone"] == old_az
:
4298 hosts_list
= aggregate_json
["hosts"]
4299 if host
is not None:
4300 if host
in hosts_list
:
4301 az_check
["zone_check"] = True
4302 available_compute_id
= self
.check_compute_availability(
4303 host
, server_flavor_details
4305 if available_compute_id
is not None:
4306 az_check
["compute_availability"] = available_compute_id
4308 for check_host
in hosts_list
:
4309 if check_host
!= old_host
:
4310 available_compute_id
= self
.check_compute_availability(
4311 check_host
, server_flavor_details
4313 if available_compute_id
is not None:
4314 az_check
["zone_check"] = True
4315 az_check
["compute_availability"] = available_compute_id
4318 az_check
["zone_check"] = True
4321 def migrate_instance(self
, vm_id
, compute_host
=None):
4325 vm_id: ID of an instance
4326 compute_host: Host to migrate the vdu to
4328 self
._reload
_connection
()
4330 instance_state
= self
.get_vdu_state(vm_id
)
4331 server_flavor_id
= instance_state
[1]
4332 server_hypervisor_name
= instance_state
[2]
4333 server_availability_zone
= instance_state
[3]
4335 server_flavor
= self
.nova
.flavors
.find(id=server_flavor_id
).to_dict()
4336 server_flavor_details
= [
4337 server_flavor
["ram"],
4338 server_flavor
["disk"],
4339 server_flavor
["vcpus"],
4341 if compute_host
== server_hypervisor_name
:
4342 raise vimconn
.VimConnException(
4343 "Unable to migrate instance '{}' to the same host '{}'".format(
4346 http_code
=vimconn
.HTTP_Bad_Request
,
4348 az_status
= self
.check_availability_zone(
4349 server_availability_zone
,
4350 server_flavor_details
,
4351 server_hypervisor_name
,
4354 availability_zone_check
= az_status
["zone_check"]
4355 available_compute_id
= az_status
.get("compute_availability")
4357 if availability_zone_check
is False:
4358 raise vimconn
.VimConnException(
4359 "Unable to migrate instance '{}' to a different availability zone".format(
4362 http_code
=vimconn
.HTTP_Bad_Request
,
4364 if available_compute_id
is not None:
4365 self
.nova
.servers
.live_migrate(
4367 host
=available_compute_id
,
4368 block_migration
=True,
4369 disk_over_commit
=False,
4372 changed_compute_host
= ""
4373 if state
== "MIGRATING":
4374 vm_state
= self
.__wait
_for
_vm
(vm_id
, "ACTIVE")
4375 changed_compute_host
= self
.get_vdu_state(vm_id
)[2]
4376 if vm_state
and changed_compute_host
== available_compute_id
:
4378 "Instance '{}' migrated to the new compute host '{}'".format(
4379 vm_id
, changed_compute_host
4382 return state
, available_compute_id
4384 raise vimconn
.VimConnException(
4385 "Migration Failed. Instance '{}' not moved to the new host {}".format(
4386 vm_id
, available_compute_id
4388 http_code
=vimconn
.HTTP_Bad_Request
,
4391 raise vimconn
.VimConnException(
4392 "Compute '{}' not available or does not have enough resources to migrate the instance".format(
4393 available_compute_id
4395 http_code
=vimconn
.HTTP_Bad_Request
,
4398 nvExceptions
.BadRequest
,
4399 nvExceptions
.ClientException
,
4400 nvExceptions
.NotFound
,
4402 self
._format
_exception
(e
)
4404 def resize_instance(self
, vm_id
, new_flavor_id
):
4406 For resizing the vm based on the given
4409 vm_id : ID of an instance
4410 new_flavor_id : Flavor id to be resized
4411 Return the status of a resized instance
4413 self
._reload
_connection
()
4414 self
.logger
.debug("resize the flavor of an instance")
4415 instance_status
, old_flavor_id
, compute_host
, az
= self
.get_vdu_state(vm_id
)
4416 old_flavor_disk
= self
.nova
.flavors
.find(id=old_flavor_id
).to_dict()["disk"]
4417 new_flavor_disk
= self
.nova
.flavors
.find(id=new_flavor_id
).to_dict()["disk"]
4419 if instance_status
== "ACTIVE" or instance_status
== "SHUTOFF":
4420 if old_flavor_disk
> new_flavor_disk
:
4421 raise nvExceptions
.BadRequest(
4423 message
="Server disk resize failed. Resize to lower disk flavor is not allowed",
4426 self
.nova
.servers
.resize(server
=vm_id
, flavor
=new_flavor_id
)
4427 vm_state
= self
.__wait
_for
_vm
(vm_id
, "VERIFY_RESIZE")
4429 instance_resized_status
= self
.confirm_resize(vm_id
)
4430 return instance_resized_status
4432 raise nvExceptions
.BadRequest(
4434 message
="Cannot 'resize' vm_state is in ERROR",
4438 self
.logger
.debug("ERROR : Instance is not in ACTIVE or SHUTOFF state")
4439 raise nvExceptions
.BadRequest(
4441 message
="Cannot 'resize' instance while it is in vm_state resized",
4444 nvExceptions
.BadRequest
,
4445 nvExceptions
.ClientException
,
4446 nvExceptions
.NotFound
,
4448 self
._format
_exception
(e
)
4450 def confirm_resize(self
, vm_id
):
4452 Confirm the resize of an instance
4454 vm_id: ID of an instance
4456 self
._reload
_connection
()
4457 self
.nova
.servers
.confirm_resize(server
=vm_id
)
4458 if self
.get_vdu_state(vm_id
)[0] == "VERIFY_RESIZE":
4459 self
.__wait
_for
_vm
(vm_id
, "ACTIVE")
4460 instance_status
= self
.get_vdu_state(vm_id
)[0]
4461 return instance_status