1 # -*- coding: utf-8 -*-
4 # Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
5 # This file is part of openmano
8 # Licensed under the Apache License, Version 2.0 (the "License"); you may
9 # not use this file except in compliance with the License. You may obtain
10 # a copy of the License at
12 # http://www.apache.org/licenses/LICENSE-2.0
14 # Unless required by applicable law or agreed to in writing, software
15 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
16 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
17 # License for the specific language governing permissions and limitations
22 osconnector implements all the methods to interact with openstack using the python-neutronclient.
24 For the VNF forwarding graph, The OpenStack VIM connector calls the
25 networking-sfc Neutron extension methods, whose resources are mapped
26 to the VIM connector's SFC resources as follows:
27 - Classification (OSM) -> Flow Classifier (Neutron)
28 - Service Function Instance (OSM) -> Port Pair (Neutron)
29 - Service Function (OSM) -> Port Pair Group (Neutron)
30 - Service Function Path (OSM) -> Port Chain (Neutron)
34 from http
.client
import HTTPException
37 from pprint
import pformat
41 from typing
import Dict
, Optional
, Tuple
43 from cinderclient
import client
as cClient
44 from glanceclient
import client
as glClient
45 import glanceclient
.exc
as gl1Exceptions
46 from keystoneauth1
import session
47 from keystoneauth1
.identity
import v2
, v3
48 import keystoneclient
.exceptions
as ksExceptions
49 import keystoneclient
.v2_0
.client
as ksClient_v2
50 import keystoneclient
.v3
.client
as ksClient_v3
52 from neutronclient
.common
import exceptions
as neExceptions
53 from neutronclient
.neutron
import client
as neClient
54 from novaclient
import client
as nClient
, exceptions
as nvExceptions
55 from osm_ro_plugin
import vimconn
56 from requests
.exceptions
import ConnectionError
59 __author__
= "Alfonso Tierno, Gerardo Garcia, Pablo Montes, xFlow Research, Igor D.C., Eduardo Sousa"
60 __date__
= "$22-sep-2017 23:59:59$"
62 """contain the openstack virtual machine status to openmano status"""
63 vmStatus2manoFormat
= {
66 "SUSPENDED": "SUSPENDED",
67 "SHUTOFF": "INACTIVE",
72 netStatus2manoFormat
= {
75 "INACTIVE": "INACTIVE",
81 supportedClassificationTypes
= ["legacy_flow_classifier"]
83 # global var to have a timeout creating and deleting volumes
class SafeDumper(yaml.SafeDumper):
    """YAML safe dumper tolerant of the dict subclasses returned by
    OpenStack client libraries.

    yaml's SafeDumper only represents objects whose type is exactly one
    of the known safe types (see pyyaml issue 142), so dict subclasses
    are downgraded to plain dicts before being represented.
    """

    def represent_data(self, data):
        # OpenStack API responses are often custom dict subclasses;
        # rebuild them as plain dicts so the safe representer accepts them.
        if isinstance(data, dict) and type(data) is not dict:
            data = dict(data.items())

        return super().represent_data(data)
99 class vimconnector(vimconn
.VimConnector
):
114 """using common constructor parameters. In this case
115 'url' is the keystone authorization url,
116 'url_admin' is not use
118 api_version
= config
.get("APIversion")
120 if api_version
and api_version
not in ("v3.3", "v2.0", "2", "3"):
121 raise vimconn
.VimConnException(
122 "Invalid value '{}' for config:APIversion. "
123 "Allowed values are 'v3.3', 'v2.0', '2' or '3'".format(api_version
)
126 vim_type
= config
.get("vim_type")
128 if vim_type
and vim_type
not in ("vio", "VIO"):
129 raise vimconn
.VimConnException(
130 "Invalid value '{}' for config:vim_type."
131 "Allowed values are 'vio' or 'VIO'".format(vim_type
)
134 if config
.get("dataplane_net_vlan_range") is not None:
135 # validate vlan ranges provided by user
136 self
._validate
_vlan
_ranges
(
137 config
.get("dataplane_net_vlan_range"), "dataplane_net_vlan_range"
140 if config
.get("multisegment_vlan_range") is not None:
141 # validate vlan ranges provided by user
142 self
._validate
_vlan
_ranges
(
143 config
.get("multisegment_vlan_range"), "multisegment_vlan_range"
146 vimconn
.VimConnector
.__init
__(
160 if self
.config
.get("insecure") and self
.config
.get("ca_cert"):
161 raise vimconn
.VimConnException(
162 "options insecure and ca_cert are mutually exclusive"
167 if self
.config
.get("insecure"):
170 if self
.config
.get("ca_cert"):
171 self
.verify
= self
.config
.get("ca_cert")
174 raise TypeError("url param can not be NoneType")
176 self
.persistent_info
= persistent_info
177 self
.availability_zone
= persistent_info
.get("availability_zone", None)
178 self
.session
= persistent_info
.get("session", {"reload_client": True})
179 self
.my_tenant_id
= self
.session
.get("my_tenant_id")
180 self
.nova
= self
.session
.get("nova")
181 self
.neutron
= self
.session
.get("neutron")
182 self
.cinder
= self
.session
.get("cinder")
183 self
.glance
= self
.session
.get("glance")
184 # self.glancev1 = self.session.get("glancev1")
185 self
.keystone
= self
.session
.get("keystone")
186 self
.api_version3
= self
.session
.get("api_version3")
187 self
.vim_type
= self
.config
.get("vim_type")
190 self
.vim_type
= self
.vim_type
.upper()
192 if self
.config
.get("use_internal_endpoint"):
193 self
.endpoint_type
= "internalURL"
195 self
.endpoint_type
= None
197 logging
.getLogger("urllib3").setLevel(logging
.WARNING
)
198 logging
.getLogger("keystoneauth").setLevel(logging
.WARNING
)
199 logging
.getLogger("novaclient").setLevel(logging
.WARNING
)
200 self
.logger
= logging
.getLogger("ro.vim.openstack")
202 # allow security_groups to be a list or a single string
203 if isinstance(self
.config
.get("security_groups"), str):
204 self
.config
["security_groups"] = [self
.config
["security_groups"]]
206 self
.security_groups_id
= None
208 # ###### VIO Specific Changes #########
209 if self
.vim_type
== "VIO":
210 self
.logger
= logging
.getLogger("ro.vim.vio")
213 self
.logger
.setLevel(getattr(logging
, log_level
))
215 def __getitem__(self
, index
):
216 """Get individuals parameters.
218 if index
== "project_domain_id":
219 return self
.config
.get("project_domain_id")
220 elif index
== "user_domain_id":
221 return self
.config
.get("user_domain_id")
223 return vimconn
.VimConnector
.__getitem
__(self
, index
)
225 def __setitem__(self
, index
, value
):
226 """Set individuals parameters and it is marked as dirty so to force connection reload.
228 if index
== "project_domain_id":
229 self
.config
["project_domain_id"] = value
230 elif index
== "user_domain_id":
231 self
.config
["user_domain_id"] = value
233 vimconn
.VimConnector
.__setitem
__(self
, index
, value
)
235 self
.session
["reload_client"] = True
237 def serialize(self
, value
):
238 """Serialization of python basic types.
240 In the case value is not serializable a message will be logged and a
241 simple representation of the data that cannot be converted back to
244 if isinstance(value
, str):
249 value
, Dumper
=SafeDumper
, default_flow_style
=True, width
=256
251 except yaml
.representer
.RepresenterError
:
253 "The following entity cannot be serialized in YAML:\n\n%s\n\n",
260 def _reload_connection(self
):
261 """Called before any operation, it check if credentials has changed
262 Throw keystoneclient.apiclient.exceptions.AuthorizationFailure
264 # TODO control the timing and possible token timeout, but it seams that python client does this task for us :-)
265 if self
.session
["reload_client"]:
266 if self
.config
.get("APIversion"):
267 self
.api_version3
= (
268 self
.config
["APIversion"] == "v3.3"
269 or self
.config
["APIversion"] == "3"
271 else: # get from ending auth_url that end with v3 or with v2.0
272 self
.api_version3
= self
.url
.endswith("/v3") or self
.url
.endswith(
276 self
.session
["api_version3"] = self
.api_version3
278 if self
.api_version3
:
279 if self
.config
.get("project_domain_id") or self
.config
.get(
280 "project_domain_name"
282 project_domain_id_default
= None
284 project_domain_id_default
= "default"
286 if self
.config
.get("user_domain_id") or self
.config
.get(
289 user_domain_id_default
= None
291 user_domain_id_default
= "default"
295 password
=self
.passwd
,
296 project_name
=self
.tenant_name
,
297 project_id
=self
.tenant_id
,
298 project_domain_id
=self
.config
.get(
299 "project_domain_id", project_domain_id_default
301 user_domain_id
=self
.config
.get(
302 "user_domain_id", user_domain_id_default
304 project_domain_name
=self
.config
.get("project_domain_name"),
305 user_domain_name
=self
.config
.get("user_domain_name"),
311 password
=self
.passwd
,
312 tenant_name
=self
.tenant_name
,
313 tenant_id
=self
.tenant_id
,
316 sess
= session
.Session(auth
=auth
, verify
=self
.verify
)
317 # addedd region_name to keystone, nova, neutron and cinder to support distributed cloud for Wind River
318 # Titanium cloud and StarlingX
319 region_name
= self
.config
.get("region_name")
321 if self
.api_version3
:
322 self
.keystone
= ksClient_v3
.Client(
324 endpoint_type
=self
.endpoint_type
,
325 region_name
=region_name
,
328 self
.keystone
= ksClient_v2
.Client(
329 session
=sess
, endpoint_type
=self
.endpoint_type
332 self
.session
["keystone"] = self
.keystone
333 # In order to enable microversion functionality an explicit microversion must be specified in "config".
334 # This implementation approach is due to the warning message in
335 # https://developer.openstack.org/api-guide/compute/microversions.html
336 # where it is stated that microversion backwards compatibility is not guaranteed and clients should
337 # always require an specific microversion.
338 # To be able to use "device role tagging" functionality define "microversion: 2.32" in datacenter config
339 version
= self
.config
.get("microversion")
344 # addedd region_name to keystone, nova, neutron and cinder to support distributed cloud for Wind River
345 # Titanium cloud and StarlingX
346 self
.nova
= self
.session
["nova"] = nClient
.Client(
349 endpoint_type
=self
.endpoint_type
,
350 region_name
=region_name
,
352 self
.neutron
= self
.session
["neutron"] = neClient
.Client(
355 endpoint_type
=self
.endpoint_type
,
356 region_name
=region_name
,
358 self
.cinder
= self
.session
["cinder"] = cClient
.Client(
361 endpoint_type
=self
.endpoint_type
,
362 region_name
=region_name
,
366 self
.my_tenant_id
= self
.session
["my_tenant_id"] = sess
.get_project_id()
368 self
.logger
.error("Cannot get project_id from session", exc_info
=True)
370 if self
.endpoint_type
== "internalURL":
371 glance_service_id
= self
.keystone
.services
.list(name
="glance")[0].id
372 glance_endpoint
= self
.keystone
.endpoints
.list(
373 glance_service_id
, interface
="internal"
376 glance_endpoint
= None
378 self
.glance
= self
.session
["glance"] = glClient
.Client(
379 2, session
=sess
, endpoint
=glance_endpoint
381 # using version 1 of glance client in new_image()
382 # self.glancev1 = self.session["glancev1"] = glClient.Client("1", session=sess,
383 # endpoint=glance_endpoint)
384 self
.session
["reload_client"] = False
385 self
.persistent_info
["session"] = self
.session
386 # add availablity zone info inside self.persistent_info
387 self
._set
_availablity
_zones
()
388 self
.persistent_info
["availability_zone"] = self
.availability_zone
389 # force to get again security_groups_ids next time they are needed
390 self
.security_groups_id
= None
392 def __net_os2mano(self
, net_list_dict
):
393 """Transform the net openstack format to mano format
394 net_list_dict can be a list of dict or a single dict"""
395 if type(net_list_dict
) is dict:
396 net_list_
= (net_list_dict
,)
397 elif type(net_list_dict
) is list:
398 net_list_
= net_list_dict
400 raise TypeError("param net_list_dict must be a list or a dictionary")
401 for net
in net_list_
:
402 if net
.get("provider:network_type") == "vlan":
405 net
["type"] = "bridge"
407 def __classification_os2mano(self
, class_list_dict
):
408 """Transform the openstack format (Flow Classifier) to mano format
409 (Classification) class_list_dict can be a list of dict or a single dict
411 if isinstance(class_list_dict
, dict):
412 class_list_
= [class_list_dict
]
413 elif isinstance(class_list_dict
, list):
414 class_list_
= class_list_dict
416 raise TypeError("param class_list_dict must be a list or a dictionary")
417 for classification
in class_list_
:
418 id = classification
.pop("id")
419 name
= classification
.pop("name")
420 description
= classification
.pop("description")
421 project_id
= classification
.pop("project_id")
422 tenant_id
= classification
.pop("tenant_id")
423 original_classification
= copy
.deepcopy(classification
)
424 classification
.clear()
425 classification
["ctype"] = "legacy_flow_classifier"
426 classification
["definition"] = original_classification
427 classification
["id"] = id
428 classification
["name"] = name
429 classification
["description"] = description
430 classification
["project_id"] = project_id
431 classification
["tenant_id"] = tenant_id
433 def __sfi_os2mano(self
, sfi_list_dict
):
434 """Transform the openstack format (Port Pair) to mano format (SFI)
435 sfi_list_dict can be a list of dict or a single dict
437 if isinstance(sfi_list_dict
, dict):
438 sfi_list_
= [sfi_list_dict
]
439 elif isinstance(sfi_list_dict
, list):
440 sfi_list_
= sfi_list_dict
442 raise TypeError("param sfi_list_dict must be a list or a dictionary")
444 for sfi
in sfi_list_
:
445 sfi
["ingress_ports"] = []
446 sfi
["egress_ports"] = []
448 if sfi
.get("ingress"):
449 sfi
["ingress_ports"].append(sfi
["ingress"])
451 if sfi
.get("egress"):
452 sfi
["egress_ports"].append(sfi
["egress"])
456 params
= sfi
.get("service_function_parameters")
460 correlation
= params
.get("correlation")
465 sfi
["sfc_encap"] = sfc_encap
466 del sfi
["service_function_parameters"]
468 def __sf_os2mano(self
, sf_list_dict
):
469 """Transform the openstack format (Port Pair Group) to mano format (SF)
470 sf_list_dict can be a list of dict or a single dict
472 if isinstance(sf_list_dict
, dict):
473 sf_list_
= [sf_list_dict
]
474 elif isinstance(sf_list_dict
, list):
475 sf_list_
= sf_list_dict
477 raise TypeError("param sf_list_dict must be a list or a dictionary")
480 del sf
["port_pair_group_parameters"]
481 sf
["sfis"] = sf
["port_pairs"]
484 def __sfp_os2mano(self
, sfp_list_dict
):
485 """Transform the openstack format (Port Chain) to mano format (SFP)
486 sfp_list_dict can be a list of dict or a single dict
488 if isinstance(sfp_list_dict
, dict):
489 sfp_list_
= [sfp_list_dict
]
490 elif isinstance(sfp_list_dict
, list):
491 sfp_list_
= sfp_list_dict
493 raise TypeError("param sfp_list_dict must be a list or a dictionary")
495 for sfp
in sfp_list_
:
496 params
= sfp
.pop("chain_parameters")
500 correlation
= params
.get("correlation")
505 sfp
["sfc_encap"] = sfc_encap
506 sfp
["spi"] = sfp
.pop("chain_id")
507 sfp
["classifications"] = sfp
.pop("flow_classifiers")
508 sfp
["service_functions"] = sfp
.pop("port_pair_groups")
510 # placeholder for now; read TODO note below
511 def _validate_classification(self
, type, definition
):
512 # only legacy_flow_classifier Type is supported at this point
514 # TODO(igordcard): this method should be an abstract method of an
515 # abstract Classification class to be implemented by the specific
516 # Types. Also, abstract vimconnector should call the validation
517 # method before the implemented VIM connectors are called.
519 def _format_exception(self
, exception
):
520 """Transform a keystone, nova, neutron exception into a vimconn exception discovering the cause"""
521 message_error
= str(exception
)
527 neExceptions
.NetworkNotFoundClient
,
528 nvExceptions
.NotFound
,
529 ksExceptions
.NotFound
,
530 gl1Exceptions
.HTTPNotFound
,
533 raise vimconn
.VimConnNotFoundException(
534 type(exception
).__name
__ + ": " + message_error
540 gl1Exceptions
.HTTPException
,
541 gl1Exceptions
.CommunicationError
,
543 ksExceptions
.ConnectionError
,
544 neExceptions
.ConnectionFailed
,
547 if type(exception
).__name
__ == "SSLError":
548 tip
= " (maybe option 'insecure' must be added to the VIM)"
550 raise vimconn
.VimConnConnectionException(
551 "Invalid URL or credentials{}: {}".format(tip
, message_error
)
557 nvExceptions
.BadRequest
,
558 ksExceptions
.BadRequest
,
561 raise vimconn
.VimConnException(
562 type(exception
).__name
__ + ": " + message_error
567 nvExceptions
.ClientException
,
568 ksExceptions
.ClientException
,
569 neExceptions
.NeutronException
,
572 raise vimconn
.VimConnUnexpectedResponse(
573 type(exception
).__name
__ + ": " + message_error
575 elif isinstance(exception
, nvExceptions
.Conflict
):
576 raise vimconn
.VimConnConflictException(
577 type(exception
).__name
__ + ": " + message_error
579 elif isinstance(exception
, vimconn
.VimConnException
):
582 self
.logger
.error("General Exception " + message_error
, exc_info
=True)
584 raise vimconn
.VimConnConnectionException(
585 type(exception
).__name
__ + ": " + message_error
588 def _get_ids_from_name(self
):
590 Obtain ids from name of tenant and security_groups. Store at self .security_groups_id"
593 # get tenant_id if only tenant_name is supplied
594 self
._reload
_connection
()
596 if not self
.my_tenant_id
:
597 raise vimconn
.VimConnConnectionException(
598 "Error getting tenant information from name={} id={}".format(
599 self
.tenant_name
, self
.tenant_id
603 if self
.config
.get("security_groups") and not self
.security_groups_id
:
604 # convert from name to id
605 neutron_sg_list
= self
.neutron
.list_security_groups(
606 tenant_id
=self
.my_tenant_id
609 self
.security_groups_id
= []
610 for sg
in self
.config
.get("security_groups"):
611 for neutron_sg
in neutron_sg_list
:
612 if sg
in (neutron_sg
["id"], neutron_sg
["name"]):
613 self
.security_groups_id
.append(neutron_sg
["id"])
616 self
.security_groups_id
= None
618 raise vimconn
.VimConnConnectionException(
619 "Not found security group {} for this tenant".format(sg
)
622 def check_vim_connectivity(self
):
623 # just get network list to check connectivity and credentials
624 self
.get_network_list(filter_dict
={})
626 def get_tenant_list(self
, filter_dict
={}):
627 """Obtain tenants of VIM
628 filter_dict can contain the following keys:
629 name: filter by tenant name
630 id: filter by tenant uuid/id
632 Returns the tenant list of dictionaries: [{'name':'<name>, 'id':'<id>, ...}, ...]
634 self
.logger
.debug("Getting tenants from VIM filter: '%s'", str(filter_dict
))
637 self
._reload
_connection
()
639 if self
.api_version3
:
640 project_class_list
= self
.keystone
.projects
.list(
641 name
=filter_dict
.get("name")
644 project_class_list
= self
.keystone
.tenants
.findall(**filter_dict
)
648 for project
in project_class_list
:
649 if filter_dict
.get("id") and filter_dict
["id"] != project
.id:
652 project_list
.append(project
.to_dict())
656 ksExceptions
.ConnectionError
,
657 ksExceptions
.ClientException
,
660 self
._format
_exception
(e
)
662 def new_tenant(self
, tenant_name
, tenant_description
):
663 """Adds a new tenant to openstack VIM. Returns the tenant identifier"""
664 self
.logger
.debug("Adding a new tenant name: %s", tenant_name
)
667 self
._reload
_connection
()
669 if self
.api_version3
:
670 project
= self
.keystone
.projects
.create(
672 self
.config
.get("project_domain_id", "default"),
673 description
=tenant_description
,
677 project
= self
.keystone
.tenants
.create(tenant_name
, tenant_description
)
681 ksExceptions
.ConnectionError
,
682 ksExceptions
.ClientException
,
683 ksExceptions
.BadRequest
,
686 self
._format
_exception
(e
)
688 def delete_tenant(self
, tenant_id
):
689 """Delete a tenant from openstack VIM. Returns the old tenant identifier"""
690 self
.logger
.debug("Deleting tenant %s from VIM", tenant_id
)
693 self
._reload
_connection
()
695 if self
.api_version3
:
696 self
.keystone
.projects
.delete(tenant_id
)
698 self
.keystone
.tenants
.delete(tenant_id
)
702 ksExceptions
.ConnectionError
,
703 ksExceptions
.ClientException
,
704 ksExceptions
.NotFound
,
707 self
._format
_exception
(e
)
715 provider_network_profile
=None,
717 """Adds a tenant network to VIM
719 'net_name': name of the network
721 'bridge': overlay isolated network
722 'data': underlay E-LAN network for Passthrough and SRIOV interfaces
723 'ptp': underlay E-LINE network for Passthrough and SRIOV interfaces.
724 'ip_profile': is a dict containing the IP parameters of the network
725 'ip_version': can be "IPv4" or "IPv6" (Currently only IPv4 is implemented)
726 'subnet_address': ip_prefix_schema, that is X.X.X.X/Y
727 'gateway_address': (Optional) ip_schema, that is X.X.X.X
728 'dns_address': (Optional) comma separated list of ip_schema, e.g. X.X.X.X[,X,X,X,X]
729 'dhcp_enabled': True or False
730 'dhcp_start_address': ip_schema, first IP to grant
731 'dhcp_count': number of IPs to grant.
732 'shared': if this network can be seen/use by other tenants/organization
733 'provider_network_profile': (optional) contains {segmentation-id: vlan, network-type: vlan|vxlan,
734 physical-network: physnet-label}
735 Returns a tuple with the network identifier and created_items, or raises an exception on error
736 created_items can be None or a dictionary where this method can include key-values that will be passed to
737 the method delete_network. Can be used to store created segments, created l2gw connections, etc.
738 Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
742 "Adding a new network to VIM name '%s', type '%s'", net_name
, net_type
744 # self.logger.debug(">>>>>>>>>>>>>>>>>> IP profile %s", str(ip_profile))
749 if provider_network_profile
:
750 vlan
= provider_network_profile
.get("segmentation-id")
754 self
._reload
_connection
()
755 network_dict
= {"name": net_name
, "admin_state_up": True}
757 if net_type
in ("data", "ptp") or provider_network_profile
:
758 provider_physical_network
= None
760 if provider_network_profile
and provider_network_profile
.get(
763 provider_physical_network
= provider_network_profile
.get(
767 # provider-network must be one of the dataplane_physcial_netowrk if this is a list. If it is string
768 # or not declared, just ignore the checking
771 self
.config
.get("dataplane_physical_net"), (tuple, list)
773 and provider_physical_network
774 not in self
.config
["dataplane_physical_net"]
776 raise vimconn
.VimConnConflictException(
777 "Invalid parameter 'provider-network:physical-network' "
778 "for network creation. '{}' is not one of the declared "
779 "list at VIM_config:dataplane_physical_net".format(
780 provider_physical_network
784 # use the default dataplane_physical_net
785 if not provider_physical_network
:
786 provider_physical_network
= self
.config
.get(
787 "dataplane_physical_net"
790 # if it is non empty list, use the first value. If it is a string use the value directly
792 isinstance(provider_physical_network
, (tuple, list))
793 and provider_physical_network
795 provider_physical_network
= provider_physical_network
[0]
797 if not provider_physical_network
:
798 raise vimconn
.VimConnConflictException(
799 "missing information needed for underlay networks. Provide "
800 "'dataplane_physical_net' configuration at VIM or use the NS "
801 "instantiation parameter 'provider-network.physical-network'"
805 if not self
.config
.get("multisegment_support"):
807 "provider:physical_network"
808 ] = provider_physical_network
811 provider_network_profile
812 and "network-type" in provider_network_profile
815 "provider:network_type"
816 ] = provider_network_profile
["network-type"]
818 network_dict
["provider:network_type"] = self
.config
.get(
819 "dataplane_network_type", "vlan"
823 network_dict
["provider:segmentation_id"] = vlan
828 "provider:physical_network": "",
829 "provider:network_type": "vxlan",
831 segment_list
.append(segment1_dict
)
833 "provider:physical_network": provider_physical_network
,
834 "provider:network_type": "vlan",
838 segment2_dict
["provider:segmentation_id"] = vlan
839 elif self
.config
.get("multisegment_vlan_range"):
840 vlanID
= self
._generate
_multisegment
_vlanID
()
841 segment2_dict
["provider:segmentation_id"] = vlanID
844 # raise vimconn.VimConnConflictException(
845 # "You must provide "multisegment_vlan_range" at config dict before creating a multisegment
847 segment_list
.append(segment2_dict
)
848 network_dict
["segments"] = segment_list
850 # VIO Specific Changes. It needs a concrete VLAN
851 if self
.vim_type
== "VIO" and vlan
is None:
852 if self
.config
.get("dataplane_net_vlan_range") is None:
853 raise vimconn
.VimConnConflictException(
854 "You must provide 'dataplane_net_vlan_range' in format "
855 "[start_ID - end_ID] at VIM_config for creating underlay "
859 network_dict
["provider:segmentation_id"] = self
._generate
_vlanID
()
861 network_dict
["shared"] = shared
863 if self
.config
.get("disable_network_port_security"):
864 network_dict
["port_security_enabled"] = False
866 if self
.config
.get("neutron_availability_zone_hints"):
867 hints
= self
.config
.get("neutron_availability_zone_hints")
869 if isinstance(hints
, str):
872 network_dict
["availability_zone_hints"] = hints
874 new_net
= self
.neutron
.create_network({"network": network_dict
})
876 # create subnetwork, even if there is no profile
881 if not ip_profile
.get("subnet_address"):
882 # Fake subnet is required
883 subnet_rand
= random
.randint(0, 255)
884 ip_profile
["subnet_address"] = "192.168.{}.0/24".format(subnet_rand
)
886 if "ip_version" not in ip_profile
:
887 ip_profile
["ip_version"] = "IPv4"
890 "name": net_name
+ "-subnet",
891 "network_id": new_net
["network"]["id"],
892 "ip_version": 4 if ip_profile
["ip_version"] == "IPv4" else 6,
893 "cidr": ip_profile
["subnet_address"],
896 # Gateway should be set to None if not needed. Otherwise openstack assigns one by default
897 if ip_profile
.get("gateway_address"):
898 subnet
["gateway_ip"] = ip_profile
["gateway_address"]
900 subnet
["gateway_ip"] = None
902 if ip_profile
.get("dns_address"):
903 subnet
["dns_nameservers"] = ip_profile
["dns_address"].split(";")
905 if "dhcp_enabled" in ip_profile
:
906 subnet
["enable_dhcp"] = (
908 if ip_profile
["dhcp_enabled"] == "false"
909 or ip_profile
["dhcp_enabled"] is False
913 if ip_profile
.get("dhcp_start_address"):
914 subnet
["allocation_pools"] = []
915 subnet
["allocation_pools"].append(dict())
916 subnet
["allocation_pools"][0]["start"] = ip_profile
[
920 if ip_profile
.get("dhcp_count"):
921 # parts = ip_profile["dhcp_start_address"].split(".")
922 # ip_int = (int(parts[0]) << 24) + (int(parts[1]) << 16) + (int(parts[2]) << 8) + int(parts[3])
923 ip_int
= int(netaddr
.IPAddress(ip_profile
["dhcp_start_address"]))
924 ip_int
+= ip_profile
["dhcp_count"] - 1
925 ip_str
= str(netaddr
.IPAddress(ip_int
))
926 subnet
["allocation_pools"][0]["end"] = ip_str
928 # self.logger.debug(">>>>>>>>>>>>>>>>>> Subnet: %s", str(subnet))
929 self
.neutron
.create_subnet({"subnet": subnet
})
931 if net_type
== "data" and self
.config
.get("multisegment_support"):
932 if self
.config
.get("l2gw_support"):
933 l2gw_list
= self
.neutron
.list_l2_gateways().get("l2_gateways", ())
934 for l2gw
in l2gw_list
:
936 "l2_gateway_id": l2gw
["id"],
937 "network_id": new_net
["network"]["id"],
938 "segmentation_id": str(vlanID
),
940 new_l2gw_conn
= self
.neutron
.create_l2_gateway_connection(
941 {"l2_gateway_connection": l2gw_conn
}
945 + str(new_l2gw_conn
["l2_gateway_connection"]["id"])
948 return new_net
["network"]["id"], created_items
949 except Exception as e
:
950 # delete l2gw connections (if any) before deleting the network
951 for k
, v
in created_items
.items():
952 if not v
: # skip already deleted
956 k_item
, _
, k_id
= k
.partition(":")
958 if k_item
== "l2gwconn":
959 self
.neutron
.delete_l2_gateway_connection(k_id
)
960 except Exception as e2
:
962 "Error deleting l2 gateway connection: {}: {}".format(
963 type(e2
).__name
__, e2
968 self
.neutron
.delete_network(new_net
["network"]["id"])
970 self
._format
_exception
(e
)
972 def get_network_list(self
, filter_dict
={}):
973 """Obtain tenant networks of VIM
979 admin_state_up: boolean
981 Returns the network list of dictionaries
983 self
.logger
.debug("Getting network from VIM filter: '%s'", str(filter_dict
))
986 self
._reload
_connection
()
987 filter_dict_os
= filter_dict
.copy()
989 if self
.api_version3
and "tenant_id" in filter_dict_os
:
991 filter_dict_os
["project_id"] = filter_dict_os
.pop("tenant_id")
993 net_dict
= self
.neutron
.list_networks(**filter_dict_os
)
994 net_list
= net_dict
["networks"]
995 self
.__net
_os
2mano
(net_list
)
999 neExceptions
.ConnectionFailed
,
1000 ksExceptions
.ClientException
,
1001 neExceptions
.NeutronException
,
1004 self
._format
_exception
(e
)
1006 def get_network(self
, net_id
):
1007 """Obtain details of network from VIM
1008 Returns the network information from a network id"""
1009 self
.logger
.debug(" Getting tenant network %s from VIM", net_id
)
1010 filter_dict
= {"id": net_id
}
1011 net_list
= self
.get_network_list(filter_dict
)
1013 if len(net_list
) == 0:
1014 raise vimconn
.VimConnNotFoundException(
1015 "Network '{}' not found".format(net_id
)
1017 elif len(net_list
) > 1:
1018 raise vimconn
.VimConnConflictException(
1019 "Found more than one network with this criteria"
1024 for subnet_id
in net
.get("subnets", ()):
1026 subnet
= self
.neutron
.show_subnet(subnet_id
)
1027 except Exception as e
:
1029 "osconnector.get_network(): Error getting subnet %s %s"
1032 subnet
= {"id": subnet_id
, "fault": str(e
)}
1034 subnets
.append(subnet
)
1036 net
["subnets"] = subnets
1037 net
["encapsulation"] = net
.get("provider:network_type")
1038 net
["encapsulation_type"] = net
.get("provider:network_type")
1039 net
["segmentation_id"] = net
.get("provider:segmentation_id")
1040 net
["encapsulation_id"] = net
.get("provider:segmentation_id")
1044 def delete_network(self
, net_id
, created_items
=None):
1046 Removes a tenant network from VIM and its associated elements
1047 :param net_id: VIM identifier of the network, provided by method new_network
1048 :param created_items: dictionary with extra items to be deleted. provided by method new_network
1049 Returns the network identifier or raises an exception upon error or when network is not found
1051 self
.logger
.debug("Deleting network '%s' from VIM", net_id
)
1053 if created_items
is None:
1057 self
._reload
_connection
()
1058 # delete l2gw connections (if any) before deleting the network
1059 for k
, v
in created_items
.items():
1060 if not v
: # skip already deleted
1064 k_item
, _
, k_id
= k
.partition(":")
1065 if k_item
== "l2gwconn":
1066 self
.neutron
.delete_l2_gateway_connection(k_id
)
1067 except Exception as e
:
1069 "Error deleting l2 gateway connection: {}: {}".format(
1074 # delete VM ports attached to this networks before the network
1075 ports
= self
.neutron
.list_ports(network_id
=net_id
)
1076 for p
in ports
["ports"]:
1078 self
.neutron
.delete_port(p
["id"])
1079 except Exception as e
:
1080 self
.logger
.error("Error deleting port %s: %s", p
["id"], str(e
))
1082 self
.neutron
.delete_network(net_id
)
1086 neExceptions
.ConnectionFailed
,
1087 neExceptions
.NetworkNotFoundClient
,
1088 neExceptions
.NeutronException
,
1089 ksExceptions
.ClientException
,
1090 neExceptions
.NeutronException
,
1093 self
._format
_exception
(e
)
    def refresh_nets_status(self, net_list):
        """Get the status of the networks
        Params: the list of network identifiers
        Returns a dictionary with:
            net_id:         #VIM id of this network
                status:     #Mandatory. Text with one of:
                            # DELETED (not found at vim)
                            # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
                            # OTHER (Vim reported other status not understood)
                            # ERROR (VIM indicates an ERROR status)
                            # ACTIVE, INACTIVE, DOWN (admin down),
                            # BUILD (on building process)
                error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR
                vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)
        """
        net_dict = {}

        for net_id in net_list:
            # One entry per requested network; errors are reported per network
            # instead of aborting the whole refresh.
            net = {}

            try:
                net_vim = self.get_network(net_id)

                # Map the OpenStack status to the MANO status vocabulary
                if net_vim["status"] in netStatus2manoFormat:
                    net["status"] = netStatus2manoFormat[net_vim["status"]]
                else:
                    net["status"] = "OTHER"
                    net["error_msg"] = "VIM status reported " + net_vim["status"]

                # An ACTIVE network that is administratively down is reported as DOWN
                if net["status"] == "ACTIVE" and not net_vim["admin_state_up"]:
                    net["status"] = "DOWN"

                net["vim_info"] = self.serialize(net_vim)

                if net_vim.get("fault"):  # TODO
                    net["error_msg"] = str(net_vim["fault"])
            except vimconn.VimConnNotFoundException as e:
                self.logger.error("Exception getting net status: %s", str(e))
                net["status"] = "DELETED"
                net["error_msg"] = str(e)
            except vimconn.VimConnException as e:
                # Any other VIM problem (connectivity, API error) is VIM_ERROR
                self.logger.error("Exception getting net status: %s", str(e))
                net["status"] = "VIM_ERROR"
                net["error_msg"] = str(e)

            net_dict[net_id] = net

        return net_dict
    def get_flavor(self, flavor_id):
        """Obtain flavor details from the VIM. Returns the flavor dict details"""
        self.logger.debug("Getting flavor '%s'", flavor_id)

        try:
            self._reload_connection()
            flavor = self.nova.flavors.find(id=flavor_id)

            # TODO parse input and translate to VIM format (openmano_schemas.new_vminstance_response_schema)
            return flavor.to_dict()
        except (
            nvExceptions.NotFound,
            nvExceptions.ClientException,
            ksExceptions.ClientException,
            ConnectionError,
        ) as e:
            # Translate client exceptions into vimconn exceptions (raises)
            self._format_exception(e)
    def get_flavor_id_from_data(self, flavor_dict):
        """Obtain flavor id that match the flavor description
        Returns the flavor_id or raises a vimconnNotFoundException
        flavor_dict: contains the required ram, vcpus, disk
        If 'use_existing_flavors' is set to True at config, the closer flavor that provides same or more ram, vcpus
            and disk is returned. Otherwise a flavor with exactly same ram, vcpus and disk is returned or a
            vimconnNotFoundException is raised
        """
        # exact_match is the default; config "use_existing_flavors" relaxes it to
        # "smallest flavor that offers at least the requested resources"
        exact_match = False if self.config.get("use_existing_flavors") else True

        try:
            self._reload_connection()
            flavor_candidate_id = None
            # Sentinel "worst candidate": any real flavor compares smaller
            flavor_candidate_data = (10000, 10000, 10000)
            flavor_target = (
                flavor_dict["ram"],
                flavor_dict["vcpus"],
                flavor_dict["disk"],
                flavor_dict.get("ephemeral", 0),
                flavor_dict.get("swap", 0),
            )
            # numa=None
            extended = flavor_dict.get("extended", {})

            if extended:
                # TODO
                raise vimconn.VimConnNotFoundException(
                    "Flavor with EPA still not implemented"
                )
                # if len(numas) > 1:
                #     raise vimconn.VimConnNotFoundException("Cannot find any flavor with more than one numa")
                # numa=numas[0]
                # numas = extended.get("numas")

            for flavor in self.nova.flavors.list():
                epa = flavor.get_keys()

                if epa:
                    # Skip flavors carrying EPA extra-specs; matching them is TODO
                    continue
                    # TODO

                flavor_data = (
                    flavor.ram,
                    flavor.vcpus,
                    flavor.disk,
                    flavor.ephemeral,
                    # swap may be "" when unset; normalize to 0
                    flavor.swap if isinstance(flavor.swap, int) else 0,
                )

                if flavor_data == flavor_target:
                    return flavor.id
                elif (
                    not exact_match
                    # Tuple comparison: keep the smallest flavor still >= target
                    and flavor_target < flavor_data < flavor_candidate_data
                ):
                    flavor_candidate_id = flavor.id
                    flavor_candidate_data = flavor_data

            if not exact_match and flavor_candidate_id:
                return flavor_candidate_id

            raise vimconn.VimConnNotFoundException(
                "Cannot find any flavor matching '{}'".format(flavor_dict)
            )
        except (
            nvExceptions.NotFound,
            nvExceptions.ClientException,
            ksExceptions.ClientException,
            ConnectionError,
        ) as e:
            self._format_exception(e)
1230 def process_resource_quota(self
, quota
, prefix
, extra_specs
):
1236 if "limit" in quota
:
1237 extra_specs
["quota:" + prefix
+ "_limit"] = quota
["limit"]
1239 if "reserve" in quota
:
1240 extra_specs
["quota:" + prefix
+ "_reservation"] = quota
["reserve"]
1242 if "shares" in quota
:
1243 extra_specs
["quota:" + prefix
+ "_shares_level"] = "custom"
1244 extra_specs
["quota:" + prefix
+ "_shares_share"] = quota
["shares"]
    def new_flavor(self, flavor_data, change_name_if_used=True):
        """Adds a tenant flavor to openstack VIM
        if change_name_if_used is True, it will change name in case of conflict, because it is not supported name
            repetition
        Returns the flavor identifier
        """
        self.logger.debug("Adding flavor '%s'", str(flavor_data))
        retry = 0
        max_retries = 3
        name_suffix = 0

        try:
            name = flavor_data["name"]
            while retry < max_retries:
                retry += 1
                try:
                    self._reload_connection()

                    if change_name_if_used:
                        # Get used names, to pick a free "<name>-<n>" variant on conflict
                        fl_names = []
                        fl = self.nova.flavors.list()

                        for f in fl:
                            fl_names.append(f.name)

                        while name in fl_names:
                            name_suffix += 1
                            name = flavor_data["name"] + "-" + str(name_suffix)

                    ram = flavor_data.get("ram", 64)
                    vcpus = flavor_data.get("vcpus", 1)
                    extra_specs = {}
                    extended = flavor_data.get("extended")

                    if extended:
                        numas = extended.get("numas")

                        if numas:
                            numa_nodes = len(numas)

                            extra_specs["hw:numa_nodes"] = str(numa_nodes)

                            if self.vim_type == "VIO":
                                # VIO needs explicit NUMA affinity and latency hints
                                extra_specs[
                                    "vmware:extra_config"
                                ] = '{"numa.nodeAffinity":"0"}'
                                extra_specs["vmware:latency_sensitivity_level"] = "high"

                            for numa in numas:
                                if "id" in numa:
                                    node_id = numa["id"]

                                    if "memory" in numa:
                                        # Descriptor memory is in GiB; Nova expects MiB
                                        memory_mb = numa["memory"] * 1024
                                        memory = "hw:numa_mem.{}".format(node_id)
                                        extra_specs[memory] = int(memory_mb)

                                    if "vcpu" in numa:
                                        vcpu = numa["vcpu"]
                                        cpu = "hw:numa_cpus.{}".format(node_id)
                                        vcpu = ",".join(map(str, vcpu))
                                        extra_specs[cpu] = vcpu

                                # overwrite ram and vcpus
                                # check if key "memory" is present in numa else use ram value at flavor
                                # See for reference: https://specs.openstack.org/openstack/nova-specs/specs/mitaka/
                                # implemented/virt-driver-cpu-thread-pinning.html
                                extra_specs["hw:cpu_sockets"] = str(numa_nodes)

                                if "paired-threads" in numa:
                                    vcpus = numa["paired-threads"] * 2
                                    # cpu_thread_policy "require" implies that the compute node must have an
                                    # SMT architecture
                                    extra_specs["hw:cpu_thread_policy"] = "require"
                                    extra_specs["hw:cpu_policy"] = "dedicated"
                                elif "cores" in numa:
                                    vcpus = numa["cores"]
                                    # cpu_thread_policy "prefer" implies that the host must not have an SMT
                                    # architecture, or a non-SMT architecture will be emulated
                                    extra_specs["hw:cpu_thread_policy"] = "isolate"
                                    extra_specs["hw:cpu_policy"] = "dedicated"
                                elif "threads" in numa:
                                    vcpus = numa["threads"]
                                    # cpu_thread_policy "prefer" implies that the host may or may not have an SMT
                                    # architecture
                                    extra_specs["hw:cpu_thread_policy"] = "prefer"
                                    extra_specs["hw:cpu_policy"] = "dedicated"
                                # for interface in numa.get("interfaces",() ):
                                #     if interface["dedicated"]=="yes":
                                #         raise vimconn.VimConnException("Passthrough interfaces are not supported
                                #         for the openstack connector", http_code=vimconn.HTTP_Service_Unavailable)
                                #     #TODO, add the key 'pci_passthrough:alias"="<label at config>:<number ifaces>"'
                                #      when a way to connect it is available
                        elif extended.get("cpu-quota"):
                            self.process_resource_quota(
                                extended.get("cpu-quota"), "cpu", extra_specs
                            )

                        if extended.get("mem-quota"):
                            self.process_resource_quota(
                                extended.get("mem-quota"), "memory", extra_specs
                            )

                        if extended.get("vif-quota"):
                            self.process_resource_quota(
                                extended.get("vif-quota"), "vif", extra_specs
                            )

                        if extended.get("disk-io-quota"):
                            self.process_resource_quota(
                                extended.get("disk-io-quota"), "disk_io", extra_specs
                            )

                        # Set the mempage size as specified in the descriptor
                        if extended.get("mempage-size"):
                            if extended.get("mempage-size") == "LARGE":
                                extra_specs["hw:mem_page_size"] = "large"
                            elif extended.get("mempage-size") == "SMALL":
                                extra_specs["hw:mem_page_size"] = "small"
                            elif extended.get("mempage-size") == "SIZE_2MB":
                                extra_specs["hw:mem_page_size"] = "2MB"
                            elif extended.get("mempage-size") == "SIZE_1GB":
                                extra_specs["hw:mem_page_size"] = "1GB"
                            elif extended.get("mempage-size") == "PREFER_LARGE":
                                extra_specs["hw:mem_page_size"] = "any"
                            else:
                                # The validations in NBI should make reaching here not possible.
                                # If this message is shown, check validations
                                self.logger.debug(
                                    "Invalid mempage-size %s. Will be ignored",
                                    extended.get("mempage-size"),
                                )

                        if extended.get("cpu-pinning-policy"):
                            extra_specs["hw:cpu_policy"] = extended.get(
                                "cpu-pinning-policy"
                            ).lower()

                        # Set the cpu thread pinning policy as specified in the descriptor
                        if extended.get("cpu-thread-pinning-policy"):
                            extra_specs["hw:cpu_thread_policy"] = extended.get(
                                "cpu-thread-pinning-policy"
                            ).lower()

                        # Set the mem policy as specified in the descriptor
                        if extended.get("mem-policy"):
                            extra_specs["hw:numa_mempolicy"] = extended.get(
                                "mem-policy"
                            ).lower()

                    # Create flavor
                    new_flavor = self.nova.flavors.create(
                        name=name,
                        ram=ram,
                        vcpus=vcpus,
                        disk=flavor_data.get("disk", 0),
                        ephemeral=flavor_data.get("ephemeral", 0),
                        swap=flavor_data.get("swap", 0),
                        is_public=flavor_data.get("is_public", True),
                    )

                    # Add flavor key
                    new_flavor.set_keys(extra_specs)

                    return new_flavor.id
                except nvExceptions.Conflict as e:
                    # Name conflict: retry with a new name unless retries exhausted
                    if change_name_if_used and retry < max_retries:
                        continue

                    self._format_exception(e)
        # except nvExceptions.BadRequest as e:
        except (
            ksExceptions.ClientException,
            nvExceptions.ClientException,
            ConnectionError,
            KeyError,
        ) as e:
            self._format_exception(e)
    def delete_flavor(self, flavor_id):
        """Deletes a tenant flavor from openstack VIM. Returns the old flavor_id"""
        try:
            self._reload_connection()
            self.nova.flavors.delete(flavor_id)

            return flavor_id
        # except nvExceptions.BadRequest as e:
        except (
            nvExceptions.NotFound,
            ksExceptions.ClientException,
            nvExceptions.ClientException,
            ConnectionError,
        ) as e:
            # Translate client exceptions into vimconn exceptions (raises)
            self._format_exception(e)
    def new_image(self, image_dict):
        """
        Adds a tenant image to VIM. imge_dict is a dictionary with:
            name: name
            disk_format: qcow2, vhd, vmdk, raw (by default), ...
            location: path or URI
            public: "yes" or "no"
            metadata: metadata of the image
        Returns the image_id
        """
        retry = 0
        max_retries = 3

        while retry < max_retries:
            retry += 1

            try:
                self._reload_connection()

                # determine format  http://docs.openstack.org/developer/glance/formats.html
                if "disk_format" in image_dict:
                    disk_format = image_dict["disk_format"]
                else:  # autodiscover based on extension
                    if image_dict["location"].endswith(".qcow2"):
                        disk_format = "qcow2"
                    elif image_dict["location"].endswith(".vhd"):
                        disk_format = "vhd"
                    elif image_dict["location"].endswith(".vmdk"):
                        disk_format = "vmdk"
                    elif image_dict["location"].endswith(".vdi"):
                        disk_format = "vdi"
                    elif image_dict["location"].endswith(".iso"):
                        disk_format = "iso"
                    elif image_dict["location"].endswith(".aki"):
                        disk_format = "aki"
                    elif image_dict["location"].endswith(".ari"):
                        disk_format = "ari"
                    elif image_dict["location"].endswith(".ami"):
                        disk_format = "ami"
                    else:
                        disk_format = "raw"

                self.logger.debug(
                    "new_image: '%s' loading from '%s'",
                    image_dict["name"],
                    image_dict["location"],
                )
                if self.vim_type == "VIO":
                    container_format = "bare"
                    if "container_format" in image_dict:
                        container_format = image_dict["container_format"]

                    new_image = self.glance.images.create(
                        name=image_dict["name"],
                        container_format=container_format,
                        disk_format=disk_format,
                    )
                else:
                    new_image = self.glance.images.create(name=image_dict["name"])

                if image_dict["location"].startswith("http"):
                    # TODO there is not a method to direct download. It must be downloaded locally with requests
                    raise vimconn.VimConnNotImplemented("Cannot create image from URL")
                else:  # local path
                    with open(image_dict["location"]) as fimage:
                        self.glance.images.upload(new_image.id, fimage)
                        # new_image = self.glancev1.images.create(name=image_dict["name"], is_public=
                        #  image_dict.get("public","yes")=="yes",
                        #  container_format="bare", data=fimage, disk_format=disk_format)

                metadata_to_load = image_dict.get("metadata")

                # TODO location is a reserved word for current openstack versions. fixed for VIO please check
                #  for openstack
                if self.vim_type == "VIO":
                    metadata_to_load["upload_location"] = image_dict["location"]
                else:
                    metadata_to_load["location"] = image_dict["location"]

                self.glance.images.update(new_image.id, **metadata_to_load)

                return new_image.id
            except (
                nvExceptions.Conflict,
                ksExceptions.ClientException,
                nvExceptions.ClientException,
            ) as e:
                self._format_exception(e)
            except (
                HTTPException,
                gl1Exceptions.HTTPException,
                gl1Exceptions.CommunicationError,
                ConnectionError,
            ) as e:
                # NOTE(review): this looks inverted — on the LAST retry it
                # "continue"s out of the loop (returning None silently) and on
                # earlier attempts it raises immediately, defeating the retry
                # loop. Confirm intended behavior before changing.
                if retry == max_retries:
                    continue

                self._format_exception(e)
            except IOError as e:  # can not open the file
                raise vimconn.VimConnConnectionException(
                    "{}: {} for {}".format(type(e).__name__, e, image_dict["location"]),
                    http_code=vimconn.HTTP_Bad_Request,
                )
    def delete_image(self, image_id):
        """Deletes a tenant image from openstack VIM. Returns the old id"""
        try:
            self._reload_connection()
            self.glance.images.delete(image_id)

            return image_id
        except (
            nvExceptions.NotFound,
            ksExceptions.ClientException,
            nvExceptions.ClientException,
            gl1Exceptions.CommunicationError,
            gl1Exceptions.HTTPNotFound,
            ConnectionError,
        ) as e:  # TODO remove
            self._format_exception(e)
    def get_image_id_from_path(self, path):
        """Get the image id from image path in the VIM database. Returns the image_id"""
        try:
            self._reload_connection()
            images = self.glance.images.list()

            # Linear scan: glance does not index by the custom "location" metadata
            for image in images:
                if image.metadata.get("location") == path:
                    return image.id

            raise vimconn.VimConnNotFoundException(
                "image with location '{}' not found".format(path)
            )
        except (
            ksExceptions.ClientException,
            nvExceptions.ClientException,
            gl1Exceptions.CommunicationError,
            ConnectionError,
        ) as e:
            self._format_exception(e)
1582 def get_image_list(self
, filter_dict
={}):
1583 """Obtain tenant images from VIM
1587 checksum: image checksum
1588 Returns the image list of dictionaries:
1589 [{<the fields at Filter_dict plus some VIM specific>}, ...]
1592 self
.logger
.debug("Getting image list from VIM filter: '%s'", str(filter_dict
))
1595 self
._reload
_connection
()
1596 # filter_dict_os = filter_dict.copy()
1597 # First we filter by the available filter fields: name, id. The others are removed.
1598 image_list
= self
.glance
.images
.list()
1601 for image
in image_list
:
1603 if filter_dict
.get("name") and image
["name"] != filter_dict
["name"]:
1606 if filter_dict
.get("id") and image
["id"] != filter_dict
["id"]:
1610 filter_dict
.get("checksum")
1611 and image
["checksum"] != filter_dict
["checksum"]
1615 filtered_list
.append(image
.copy())
1616 except gl1Exceptions
.HTTPNotFound
:
1619 return filtered_list
1621 ksExceptions
.ClientException
,
1622 nvExceptions
.ClientException
,
1623 gl1Exceptions
.CommunicationError
,
1626 self
._format
_exception
(e
)
    def __wait_for_vm(self, vm_id, status):
        """wait until vm is in the desired status and return True.
        If the VM gets in ERROR status, return false.
        If the timeout is reached generate an exception"""
        elapsed_time = 0

        # server_timeout is a module-level constant (seconds)
        while elapsed_time < server_timeout:
            vm_status = self.nova.servers.get(vm_id).status

            if vm_status == status:
                return True

            if vm_status == "ERROR":
                return False

            # Poll every 5 seconds
            time.sleep(5)
            elapsed_time += 5

        # if we exceeded the timeout rollback
        if elapsed_time >= server_timeout:
            raise vimconn.VimConnException(
                "Timeout waiting for instance " + vm_id + " to get " + status,
                http_code=vimconn.HTTP_Request_Timeout,
            )
    def _get_openstack_availablity_zones(self):
        """
        Get from openstack availability zones available
        :return: list of zone names (excluding "internal"), or None on error
        """
        try:
            openstack_availability_zone = self.nova.availability_zones.list()
            openstack_availability_zone = [
                str(zone.zoneName)
                for zone in openstack_availability_zone
                # "internal" is a nova-internal zone, not usable for placement
                if zone.zoneName != "internal"
            ]

            return openstack_availability_zone
        except Exception:
            # Best effort: caller treats None as "no AZ info available"
            return None
    def _set_availablity_zones(self):
        """
        Set vim availablity zone
        :return:
        """
        if "availability_zone" in self.config:
            # AZs configured explicitly: accept a single name or a list of names
            vim_availability_zones = self.config.get("availability_zone")

            if isinstance(vim_availability_zones, str):
                self.availability_zone = [vim_availability_zones]
            elif isinstance(vim_availability_zones, list):
                self.availability_zone = vim_availability_zones
        else:
            # Not configured: discover the zones from OpenStack
            self.availability_zone = self._get_openstack_availablity_zones()
    def _get_vm_availability_zone(
        self, availability_zone_index, availability_zone_list
    ):
        """
        Return the availability zone to be used by the created VM.
        :return: The VIM availability zone to be used or None
        """
        if availability_zone_index is None:
            # No AZ requested by the descriptor: fall back to the configured default
            if not self.config.get("availability_zone"):
                return None
            elif isinstance(self.config.get("availability_zone"), str):
                return self.config["availability_zone"]
            else:
                # TODO consider using a different parameter at config for default AV and AV list match
                return self.config["availability_zone"][0]

        vim_availability_zones = self.availability_zone

        # check if VIM offer enough availability zones describe in the VNFD
        if vim_availability_zones and len(availability_zone_list) <= len(
            vim_availability_zones
        ):
            # check if all the names of NFV AV match VIM AV names
            match_by_index = False

            for av in availability_zone_list:
                if av not in vim_availability_zones:
                    # At least one VNFD zone name is unknown: select by index instead
                    match_by_index = True
                    break

            if match_by_index:
                return vim_availability_zones[availability_zone_index]
            else:
                return availability_zone_list[availability_zone_index]
        else:
            raise vimconn.VimConnConflictException(
                "No enough availability zones at VIM for this deployment"
            )
    def _prepare_port_dict_security_groups(self, net: dict, port_dict: dict) -> None:
        """Fill up the security_groups in the port_dict.

        Args:
            net (dict): Network details
            port_dict (dict): Port details
        """
        # Apply configured security groups only when port security is not
        # explicitly disabled for this net and the extension is available
        if (
            self.config.get("security_groups")
            and net.get("port_security") is not False
            and not self.config.get("no_port_security_extension")
        ):
            if not self.security_groups_id:
                # Lazily resolve group names to their IDs
                self._get_ids_from_name()

            port_dict["security_groups"] = self.security_groups_id
    def _prepare_port_dict_binding(self, net: dict, port_dict: dict) -> None:
        """Fill up the network binding depending on network type in the port_dict.

        Args:
            net (dict): Network details
            port_dict (dict): Port details
        """
        if not net.get("type"):
            raise vimconn.VimConnException("Type is missing in the network details.")

        if net["type"] == "virtual":
            # Plain virtual port: default binding, nothing to add
            pass

        # For VF
        elif net["type"] == "VF" or net["type"] == "SR-IOV":
            port_dict["binding:vnic_type"] = "direct"

            # VIO specific Changes
            if self.vim_type == "VIO":
                # Need to create port with port_security_enabled = False and no-security-groups
                port_dict["port_security_enabled"] = False
                port_dict["provider_security_groups"] = []
                port_dict["security_groups"] = []
        else:
            # For PT PCI-PASSTHROUGH
            port_dict["binding:vnic_type"] = "direct-physical"
1770 def _set_fixed_ip(new_port
: dict, net
: dict) -> None:
1771 """Set the "ip" parameter in net dictionary.
1774 new_port (dict): New created port
1775 net (dict): Network details
1778 fixed_ips
= new_port
["port"].get("fixed_ips")
1781 net
["ip"] = fixed_ips
[0].get("ip_address")
1786 def _prepare_port_dict_mac_ip_addr(net
: dict, port_dict
: dict) -> None:
1787 """Fill up the mac_address and fixed_ips in port_dict.
1790 net (dict): Network details
1791 port_dict (dict): Port details
1794 if net
.get("mac_address"):
1795 port_dict
["mac_address"] = net
["mac_address"]
1797 if net
.get("ip_address"):
1798 port_dict
["fixed_ips"] = [{"ip_address": net
["ip_address"]}]
1799 # TODO add "subnet_id": <subnet_id>
    def _create_new_port(self, port_dict: dict, created_items: dict, net: dict) -> Dict:
        """Create new port using neutron.

        Args:
            port_dict (dict): Port details
            created_items (dict): All created items
            net (dict): Network details

        Returns:
            new_port (dict): New created port
        """
        new_port = self.neutron.create_port({"port": port_dict})
        # Track the port for later rollback/cleanup
        created_items["port:" + str(new_port["port"]["id"])] = True
        # NOTE: "mac_adress" (sic) is the key the rest of the code expects; keep it
        net["mac_adress"] = new_port["port"]["mac_address"]
        net["vim_id"] = new_port["port"]["id"]

        return new_port
1821 self
, net
: dict, name
: str, created_items
: dict
1822 ) -> Tuple
[dict, dict]:
1823 """Create port using net details.
1826 net (dict): Network details
1827 name (str): Name to be used as network name if net dict does not include name
1828 created_items (dict): All created items
1831 new_port, port New created port, port dictionary
1836 "network_id": net
["net_id"],
1837 "name": net
.get("name"),
1838 "admin_state_up": True,
1841 if not port_dict
["name"]:
1842 port_dict
["name"] = name
1844 self
._prepare
_port
_dict
_security
_groups
(net
, port_dict
)
1846 self
._prepare
_port
_dict
_binding
(net
, port_dict
)
1848 vimconnector
._prepare
_port
_dict
_mac
_ip
_addr
(net
, port_dict
)
1850 new_port
= self
._create
_new
_port
(port_dict
, created_items
, net
)
1852 vimconnector
._set
_fixed
_ip
(new_port
, net
)
1854 port
= {"port-id": new_port
["port"]["id"]}
1856 if float(self
.nova
.api_version
.get_string()) >= 2.32:
1857 port
["tag"] = new_port
["port"]["name"]
1859 return new_port
, port
    def _prepare_network_for_vminstance(
        self,
        name: str,
        net_list: list,
        created_items: dict,
        net_list_vim: list,
        external_network: list,
        no_secured_ports: list,
    ) -> None:
        """Create port and fill up net dictionary for new VM instance creation.

        Args:
            name (str): Name of network
            net_list (list): List of networks
            created_items (dict): All created items belongs to a VM
            net_list_vim (list): List of ports
            external_network (list): List of external-networks
            no_secured_ports (list): Port security disabled ports
        """
        self._reload_connection()

        for net in net_list:
            # Skip non-connected iface
            if not net.get("net_id"):
                continue

            new_port, port = self._create_port(net, name, created_items)

            net_list_vim.append(port)

            if net.get("floating_ip", False):
                # Floating IP explicitly requested: failure to get one is fatal
                net["exit_on_floating_ip_error"] = True
                external_network.append(net)

            elif net["use"] == "mgmt" and self.config.get("use_floating_ip"):
                # Management net with config-driven floating IP: best effort
                net["exit_on_floating_ip_error"] = False
                external_network.append(net)
                net["floating_ip"] = self.config.get("use_floating_ip")

            # If port security is disabled when the port has not yet been attached to the VM, then all vm traffic
            # is dropped. As a workaround we wait until the VM is active and then disable the port-security
            if net.get("port_security") is False and not self.config.get(
                "no_port_security_extension"
            ):
                no_secured_ports.append(
                    (
                        new_port["port"]["id"],
                        net.get("port_security_disable_strategy"),
                    )
                )
    def _prepare_persistent_root_volumes(
        self,
        name: str,
        vm_av_zone: list,
        disk: dict,
        base_disk_index: int,
        block_device_mapping: dict,
        existing_vim_volumes: list,
        created_items: dict,
    ) -> Optional[str]:
        """Prepare persistent root volumes for new VM instance.

        Args:
            name (str): Name of VM instance
            vm_av_zone (list): List of availability zones
            disk (dict): Disk details
            base_disk_index (int): Disk index
            block_device_mapping (dict): Block device details
            existing_vim_volumes (list): Existing disk details
            created_items (dict): All created items belongs to VM

        Returns:
            boot_volume_id (str): ID of boot volume, or None when reusing an existing one
        """
        # Disk may include only vim_volume_id or only vim_id."
        # Use existing persistent root volume finding with volume_id or vim_id
        key_id = "vim_volume_id" if "vim_volume_id" in disk.keys() else "vim_id"

        if disk.get(key_id):
            # Reuse the pre-existing volume: only register it in the mapping
            block_device_mapping["vd" + chr(base_disk_index)] = disk[key_id]
            existing_vim_volumes.append({"id": disk[key_id]})
        else:
            # Create persistent root volume
            volume = self.cinder.volumes.create(
                size=disk["size"],
                name=name + "vd" + chr(base_disk_index),
                imageRef=disk["image_id"],
                # Make sure volume is in the same AZ as the VM to be attached to
                availability_zone=vm_av_zone,
            )
            boot_volume_id = volume.id
            self.update_block_device_mapping(
                volume=volume,
                block_device_mapping=block_device_mapping,
                base_disk_index=base_disk_index,
                disk=disk,
                created_items=created_items,
            )

            return boot_volume_id
1968 def update_block_device_mapping(
1970 block_device_mapping
: dict,
1971 base_disk_index
: int,
1973 created_items
: dict,
1975 """Add volume information to block device mapping dict.
1977 volume (object): Created volume object
1978 block_device_mapping (dict): Block device details
1979 base_disk_index (int): Disk index
1980 disk (dict): Disk details
1981 created_items (dict): All created items belongs to VM
1984 raise vimconn
.VimConnException("Volume is empty.")
1986 if not hasattr(volume
, "id"):
1987 raise vimconn
.VimConnException(
1988 "Created volume is not valid, does not have id attribute."
1991 volume_txt
= "volume:" + str(volume
.id)
1992 if disk
.get("keep"):
1993 volume_txt
+= ":keep"
1994 created_items
[volume_txt
] = True
1995 block_device_mapping
["vd" + chr(base_disk_index
)] = volume
.id
    def _prepare_non_root_persistent_volumes(
        self,
        name: str,
        disk: dict,
        vm_av_zone: list,
        block_device_mapping: dict,
        base_disk_index: int,
        existing_vim_volumes: list,
        created_items: dict,
    ) -> None:
        """Prepare persistent volumes for new VM instance.

        Args:
            name (str): Name of VM instance
            disk (dict): Disk details
            vm_av_zone (list): List of availability zones
            block_device_mapping (dict): Block device details
            base_disk_index (int): Disk index
            existing_vim_volumes (list): Existing disk details
            created_items (dict): All created items belongs to VM
        """
        # Non-root persistent volumes
        # Disk may include only vim_volume_id or only vim_id."
        key_id = "vim_volume_id" if "vim_volume_id" in disk.keys() else "vim_id"

        if disk.get(key_id):
            # Use existing persistent volume
            block_device_mapping["vd" + chr(base_disk_index)] = disk[key_id]
            existing_vim_volumes.append({"id": disk[key_id]})
        else:
            # Create persistent volume
            volume = self.cinder.volumes.create(
                size=disk["size"],
                name=name + "vd" + chr(base_disk_index),
                # Make sure volume is in the same AZ as the VM to be attached to
                availability_zone=vm_av_zone,
            )
            self.update_block_device_mapping(
                volume=volume,
                block_device_mapping=block_device_mapping,
                base_disk_index=base_disk_index,
                disk=disk,
                created_items=created_items,
            )
    def _wait_for_created_volumes_availability(
        self, elapsed_time: int, created_items: dict
    ) -> Optional[int]:
        """Wait till created volumes become available.

        Args:
            elapsed_time (int): Passed time while waiting
            created_items (dict): All created items belongs to VM

        Returns:
            elapsed_time (int): Time spent while waiting
        """
        # volume_timeout is a module-level constant (seconds)
        while elapsed_time < volume_timeout:
            for created_item in created_items:
                # created_items keys look like "<kind>:<id>[...]"
                v, volume_id = (
                    created_item.split(":")[0],
                    created_item.split(":")[1],
                )

                if v == "volume":
                    if self.cinder.volumes.get(volume_id).status != "available":
                        break
            else:
                # All ready: break from while
                break

            time.sleep(5)
            elapsed_time += 5

        return elapsed_time
    def _wait_for_existing_volumes_availability(
        self, elapsed_time: int, existing_vim_volumes: list
    ) -> Optional[int]:
        """Wait till existing volumes become available.

        Args:
            elapsed_time (int): Passed time while waiting
            existing_vim_volumes (list): Existing volume details

        Returns:
            elapsed_time (int): Time spent while waiting
        """
        while elapsed_time < volume_timeout:
            for volume in existing_vim_volumes:
                if self.cinder.volumes.get(volume["id"]).status != "available":
                    break
            else:  # all ready: break from while
                break

            time.sleep(5)
            elapsed_time += 5

        return elapsed_time
    def _prepare_disk_for_vminstance(
        self,
        name: str,
        existing_vim_volumes: list,
        created_items: dict,
        vm_av_zone: list,
        block_device_mapping: dict,
        disk_list: list = None,
    ) -> None:
        """Prepare all volumes for new VM instance.

        Args:
            name (str): Name of Instance
            existing_vim_volumes (list): List of existing volumes
            created_items (dict): All created items belongs to VM
            vm_av_zone (list): VM availability zone
            block_device_mapping (dict): Block devices to be attached to VM
            disk_list (list): List of disks
        """
        # Create additional volumes in case these are present in disk_list
        base_disk_index = ord("b")
        boot_volume_id = None
        elapsed_time = 0

        for disk in disk_list:
            if "image_id" in disk:
                # Root persistent volume (named "vda")
                base_disk_index = ord("a")
                boot_volume_id = self._prepare_persistent_root_volumes(
                    name=name,
                    vm_av_zone=vm_av_zone,
                    disk=disk,
                    base_disk_index=base_disk_index,
                    block_device_mapping=block_device_mapping,
                    existing_vim_volumes=existing_vim_volumes,
                    created_items=created_items,
                )
            else:
                # Non-root persistent volume
                self._prepare_non_root_persistent_volumes(
                    name=name,
                    disk=disk,
                    vm_av_zone=vm_av_zone,
                    block_device_mapping=block_device_mapping,
                    base_disk_index=base_disk_index,
                    existing_vim_volumes=existing_vim_volumes,
                    created_items=created_items,
                )
            base_disk_index += 1

        # Wait until created volumes are with status available
        elapsed_time = self._wait_for_created_volumes_availability(
            elapsed_time, created_items
        )
        # Wait until existing volumes in vim are with status available
        elapsed_time = self._wait_for_existing_volumes_availability(
            elapsed_time, existing_vim_volumes
        )
        # If we exceeded the timeout rollback
        if elapsed_time >= volume_timeout:
            raise vimconn.VimConnException(
                "Timeout creating volumes for instance " + name,
                http_code=vimconn.HTTP_Request_Timeout,
            )
        if boot_volume_id:
            self.cinder.volumes.set_bootable(boot_volume_id, True)
    def _find_the_external_network_for_floating_ip(self):
        """Get the external network ip in order to create floating IP.

        Returns:
            pool_id (str): External network pool ID

        Raises:
            VimConnException when zero or more than one external network exists
        """
        # Find the external network
        external_nets = list()

        for net in self.neutron.list_networks()["networks"]:
            if net["router:external"]:
                external_nets.append(net)

        if len(external_nets) == 0:
            raise vimconn.VimConnException(
                "Cannot create floating_ip automatically since "
                "no external network is present",
                http_code=vimconn.HTTP_Conflict,
            )

        if len(external_nets) > 1:
            # Ambiguous: cannot choose a pool automatically
            raise vimconn.VimConnException(
                "Cannot create floating_ip automatically since "
                "multiple external networks are present",
                http_code=vimconn.HTTP_Conflict,
            )

        # Pool ID
        return external_nets[0].get("id")
    def _neutron_create_float_ip(self, param: dict, created_items: dict) -> None:
        """Trigger neutron to create a new floating IP using external network ID.

        Args:
            param (dict): Input parameters to create a floating IP
            created_items (dict): All created items belongs to new VM instance

        Raises:
            VimConnException
        """
        try:
            self.logger.debug("Creating floating IP")
            new_floating_ip = self.neutron.create_floatingip(param)
            free_floating_ip = new_floating_ip["floatingip"]["id"]
            # Track the new floating IP for rollback/cleanup
            created_items["floating_ip:" + str(free_floating_ip)] = True

        except Exception as e:
            raise vimconn.VimConnException(
                type(e).__name__ + ": Cannot create new floating_ip " + str(e),
                http_code=vimconn.HTTP_Conflict,
            )
    def _create_floating_ip(
        self, floating_network: dict, server: object, created_items: dict
    ) -> None:
        """Get the available Pool ID and create a new floating IP.

        Args:
            floating_network (dict): Dict including external network ID
            server (object): Server object
            created_items (dict): All created items belongs to new VM instance
        """
        # Pool_id is available
        if (
            isinstance(floating_network["floating_ip"], str)
            and floating_network["floating_ip"].lower() != "true"
        ):
            # "floating_ip" carries the pool ID directly (any string but "true")
            pool_id = floating_network["floating_ip"]
        else:
            # Find the external network
            pool_id = self._find_the_external_network_for_floating_ip()

        param = {
            "floatingip": {
                "floating_network_id": pool_id,
                "tenant_id": server.tenant_id,
            }
        }

        self._neutron_create_float_ip(param, created_items)
    def _find_floating_ip(
        self,
        server: object,
        floating_ips: list,
        floating_network: dict,
    ) -> Optional[str]:
        """Find the available free floating IPs if there are.

        Args:
            server (object): Server object
            floating_ips (list): List of floating IPs
            floating_network (dict): Details of floating network such as ID

        Returns:
            free_floating_ip (str): Free floating ip address, or None when none is free
        """
        for fip in floating_ips:
            # Skip IPs already attached to a port or owned by another tenant
            if fip.get("port_id") or fip.get("tenant_id") != server.tenant_id:
                continue

            if isinstance(floating_network["floating_ip"], str):
                # A specific pool was requested: only accept IPs from that pool
                if fip.get("floating_network_id") != floating_network["floating_ip"]:
                    continue

            return fip["id"]
    def _assign_floating_ip(
        self, free_floating_ip: str, floating_network: dict
    ) -> Dict:
        """Assign the free floating ip address to port.

        Args:
            free_floating_ip (str): Floating IP to be assigned
            floating_network (dict): ID of floating network

        Returns:
            fip (dict): Floating ip details
        """
        # The vim_id key contains the neutron.port_id
        self.neutron.update_floatingip(
            free_floating_ip,
            {"floatingip": {"port_id": floating_network["vim_id"]}},
        )
        # For race condition ensure not re-assigned to other VM after 5 seconds
        time.sleep(5)

        return self.neutron.show_floatingip(free_floating_ip)
def _get_free_floating_ip(
    self, server: object, floating_network: dict
) -> Optional[str]:
    """Get the free floating IP address.

    Args:
        server (object): Server Object
        floating_network (dict): Floating network details

    Returns:
        free_floating_ip (str): Free floating ip addr
    """
    # All floating IPs known to neutron (empty tuple if key missing)
    floating_ips = self.neutron.list_floatingips().get("floatingips", ())

    # Randomize the list so concurrent RO instances are less likely to
    # pick the same free IP (see race-condition handling in the caller)
    random.shuffle(floating_ips)

    return self._find_floating_ip(server, floating_ips, floating_network)
2328 def _prepare_external_network_for_vminstance(
2330 external_network
: list,
2332 created_items
: dict,
2333 vm_start_time
: float,
2335 """Assign floating IP address for VM instance.
2338 external_network (list): ID of External network
2339 server (object): Server Object
2340 created_items (dict): All created items belongs to new VM instance
2341 vm_start_time (float): Time as a floating point number expressed in seconds since the epoch, in UTC
2347 for floating_network
in external_network
:
2350 floating_ip_retries
= 3
2351 # In case of RO in HA there can be conflicts, two RO trying to assign same floating IP, so retry
2355 free_floating_ip
= self
._get
_free
_floating
_ip
(
2356 server
, floating_network
2359 if not free_floating_ip
:
2360 self
._create
_floating
_ip
(
2361 floating_network
, server
, created_items
2365 # For race condition ensure not already assigned
2366 fip
= self
.neutron
.show_floatingip(free_floating_ip
)
2368 if fip
["floatingip"].get("port_id"):
2371 # Assign floating ip
2372 fip
= self
._assign
_floating
_ip
(
2373 free_floating_ip
, floating_network
2376 if fip
["floatingip"]["port_id"] != floating_network
["vim_id"]:
2377 self
.logger
.warning(
2378 "floating_ip {} re-assigned to other port".format(
2385 "Assigned floating_ip {} to VM {}".format(
2386 free_floating_ip
, server
.id
2392 except Exception as e
:
2393 # Openstack need some time after VM creation to assign an IP. So retry if fails
2394 vm_status
= self
.nova
.servers
.get(server
.id).status
2396 if vm_status
not in ("ACTIVE", "ERROR"):
2397 if time
.time() - vm_start_time
< server_timeout
:
2400 elif floating_ip_retries
> 0:
2401 floating_ip_retries
-= 1
2404 raise vimconn
.VimConnException(
2405 "Cannot create floating_ip: {} {}".format(
2408 http_code
=vimconn
.HTTP_Conflict
,
2411 except Exception as e
:
2412 if not floating_network
["exit_on_floating_ip_error"]:
2413 self
.logger
.error("Cannot create floating_ip. %s", str(e
))
def _update_port_security_for_vminstance(
    self,
    no_secured_ports: list,
    server: object,
) -> None:
    """Updates the port security according to no_secured_ports list.

    Args:
        no_secured_ports (list): List of (port_id, security_mode) tuples whose
            security will be disabled
        server (object): Server Object

    Raises:
        VimConnException: if a port could not be updated
    """
    # Wait until the VM is active and then disable the port-security
    if no_secured_ports:
        self.__wait_for_vm(server.id, "ACTIVE")

    for port in no_secured_ports:
        port_update = {
            "port": {"port_security_enabled": False, "security_groups": None}
        }

        if port[1] == "allow-address-pairs":
            port_update = {
                "port": {"allowed_address_pairs": [{"ip_address": "0.0.0.0/0"}]}
            }

        try:
            self.neutron.update_port(port[0], port_update)
        except Exception:
            raise vimconn.VimConnException(
                "It was not possible to disable port security for port {}".format(
                    port[0]
                )
            )
2465 affinity_group_list
: list,
2469 availability_zone_index
=None,
2470 availability_zone_list
=None,
2472 """Adds a VM instance to VIM.
2475 name (str): name of VM
2476 description (str): description
2477 start (bool): indicates if VM must start or boot in pause mode. Ignored
2478 image_id (str) image uuid
2479 flavor_id (str) flavor uuid
2480 affinity_group_list (list): list of affinity groups, each one is a dictionary.Ignore if empty.
2481 net_list (list): list of interfaces, each one is a dictionary with:
2482 name: name of network
2483 net_id: network uuid to connect
2484 vpci: virtual vcpi to assign, ignored because openstack lack #TODO
2485 model: interface model, ignored #TODO
2486 mac_address: used for SR-IOV ifaces #TODO for other types
2487 use: 'data', 'bridge', 'mgmt'
2488 type: 'virtual', 'PCI-PASSTHROUGH'('PF'), 'SR-IOV'('VF'), 'VFnotShared'
2489 vim_id: filled/added by this function
2490 floating_ip: True/False (or it can be None)
2491 port_security: True/False
2492 cloud_config (dict): (optional) dictionary with:
2493 key-pairs: (optional) list of strings with the public key to be inserted to the default user
2494 users: (optional) list of users to be inserted, each item is a dict with:
2495 name: (mandatory) user name,
2496 key-pairs: (optional) list of strings with the public key to be inserted to the user
2497 user-data: (optional) string is a text script to be passed directly to cloud-init
2498 config-files: (optional). List of files to be transferred. Each item is a dict with:
2499 dest: (mandatory) string with the destination absolute path
2500 encoding: (optional, by default text). Can be one of:
2501 'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
2502 content : (mandatory) string with the content of the file
2503 permissions: (optional) string with file permissions, typically octal notation '0644'
2504 owner: (optional) file owner, string with the format 'owner:group'
2505 boot-data-drive: boolean to indicate if user-data must be passed using a boot drive (hard disk)
2506 disk_list: (optional) list with additional disks to the VM. Each item is a dict with:
2507 image_id: (optional). VIM id of an existing image. If not provided an empty disk must be mounted
2508 size: (mandatory) string with the size of the disk in GB
2509 vim_id: (optional) should use this existing volume id
2510 availability_zone_index: Index of availability_zone_list to use for this this VM. None if not AV required
2511 availability_zone_list: list of availability zones given by user in the VNFD descriptor. Ignore if
2512 availability_zone_index is None
2513 #TODO ip, security groups
2516 A tuple with the instance identifier and created_items or raises an exception on error
2517 created_items can be None or a dictionary where this method can include key-values that will be passed to
2518 the method delete_vminstance and action_vminstance. Can be used to store created ports, volumes, etc.
2519 Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
2524 "new_vminstance input: image='%s' flavor='%s' nics='%s'",
2534 # list of external networks to be connected to instance, later on used to create floating_ip
2535 external_network
= []
2536 # List of ports with port-security disabled
2537 no_secured_ports
= []
2538 block_device_mapping
= {}
2539 existing_vim_volumes
= []
2540 server_group_id
= None
2541 scheduller_hints
= {}
2543 # Check the Openstack Connection
2544 self
._reload
_connection
()
2546 # Prepare network list
2547 self
._prepare
_network
_for
_vminstance
(
2550 created_items
=created_items
,
2551 net_list_vim
=net_list_vim
,
2552 external_network
=external_network
,
2553 no_secured_ports
=no_secured_ports
,
2557 config_drive
, userdata
= self
._create
_user
_data
(cloud_config
)
2559 # Get availability Zone
2560 vm_av_zone
= self
._get
_vm
_availability
_zone
(
2561 availability_zone_index
, availability_zone_list
2566 self
._prepare
_disk
_for
_vminstance
(
2568 existing_vim_volumes
=existing_vim_volumes
,
2569 created_items
=created_items
,
2570 vm_av_zone
=vm_av_zone
,
2571 block_device_mapping
=block_device_mapping
,
2572 disk_list
=disk_list
,
2575 if affinity_group_list
:
2576 # Only first id on the list will be used. Openstack restriction
2577 server_group_id
= affinity_group_list
[0]["affinity_group_id"]
2578 scheduller_hints
["group"] = server_group_id
2581 "nova.servers.create({}, {}, {}, nics={}, security_groups={}, "
2582 "availability_zone={}, key_name={}, userdata={}, config_drive={}, "
2583 "block_device_mapping={}, server_group={})".format(
2588 self
.config
.get("security_groups"),
2590 self
.config
.get("keypair"),
2593 block_device_mapping
,
2599 server
= self
.nova
.servers
.create(
2604 security_groups
=self
.config
.get("security_groups"),
2605 # TODO remove security_groups in future versions. Already at neutron port
2606 availability_zone
=vm_av_zone
,
2607 key_name
=self
.config
.get("keypair"),
2609 config_drive
=config_drive
,
2610 block_device_mapping
=block_device_mapping
,
2611 scheduler_hints
=scheduller_hints
,
2614 vm_start_time
= time
.time()
2616 self
._update
_port
_security
_for
_vminstance
(no_secured_ports
, server
)
2618 self
._prepare
_external
_network
_for
_vminstance
(
2619 external_network
=external_network
,
2621 created_items
=created_items
,
2622 vm_start_time
=vm_start_time
,
2625 return server
.id, created_items
2627 except Exception as e
:
2630 server_id
= server
.id
2633 created_items
= self
.remove_keep_tag_from_persistent_volumes(
2637 self
.delete_vminstance(server_id
, created_items
)
2639 except Exception as e2
:
2640 self
.logger
.error("new_vminstance rollback fail {}".format(e2
))
2642 self
._format
_exception
(e
)
@staticmethod
def remove_keep_tag_from_persistent_volumes(created_items: Dict) -> Dict:
    """Removes the keep flag from persistent volumes. So, those volumes could be removed.

    Args:
        created_items (dict): All created items belongs to VM

    Returns:
        updated_created_items (dict): Dict which does not include keep flag for volumes.
    """
    # "volume:<id>:keep" -> "volume:<id>"; other keys are untouched
    return {
        key.replace(":keep", ""): value
        for (key, value) in created_items.items()
    }
def get_vminstance(self, vm_id):
    """Returns the VM instance information from VIM"""
    # self.logger.debug("Getting VM from VIM")
    try:
        self._reload_connection()
        server = self.nova.servers.find(id=vm_id)
        # TODO parse input and translate to VIM format (openmano_schemas.new_vminstance_response_schema)
        return server.to_dict()
    except (
        ksExceptions.ClientException,
        nvExceptions.ClientException,
        nvExceptions.NotFound,
        ConnectionError,
    ) as e:
        self._format_exception(e)
2676 def get_vminstance_console(self
, vm_id
, console_type
="vnc"):
2678 Get a console for the virtual machine
2680 vm_id: uuid of the VM
2681 console_type, can be:
2682 "novnc" (by default), "xvpvnc" for VNC types,
2683 "rdp-html5" for RDP types, "spice-html5" for SPICE types
2684 Returns dict with the console parameters:
2685 protocol: ssh, ftp, http, https, ...
2686 server: usually ip address
2687 port: the http, ssh, ... port
2688 suffix: extra text, e.g. the http path and query string
2690 self
.logger
.debug("Getting VM CONSOLE from VIM")
2693 self
._reload
_connection
()
2694 server
= self
.nova
.servers
.find(id=vm_id
)
2696 if console_type
is None or console_type
== "novnc":
2697 console_dict
= server
.get_vnc_console("novnc")
2698 elif console_type
== "xvpvnc":
2699 console_dict
= server
.get_vnc_console(console_type
)
2700 elif console_type
== "rdp-html5":
2701 console_dict
= server
.get_rdp_console(console_type
)
2702 elif console_type
== "spice-html5":
2703 console_dict
= server
.get_spice_console(console_type
)
2705 raise vimconn
.VimConnException(
2706 "console type '{}' not allowed".format(console_type
),
2707 http_code
=vimconn
.HTTP_Bad_Request
,
2710 console_dict1
= console_dict
.get("console")
2713 console_url
= console_dict1
.get("url")
2717 protocol_index
= console_url
.find("//")
2719 console_url
[protocol_index
+ 2 :].find("/") + protocol_index
+ 2
2722 console_url
[protocol_index
+ 2 : suffix_index
].find(":")
2727 if protocol_index
< 0 or port_index
< 0 or suffix_index
< 0:
2729 -vimconn
.HTTP_Internal_Server_Error
,
2730 "Unexpected response from VIM",
2734 "protocol": console_url
[0:protocol_index
],
2735 "server": console_url
[protocol_index
+ 2 : port_index
],
2736 "port": console_url
[port_index
:suffix_index
],
2737 "suffix": console_url
[suffix_index
+ 1 :],
2742 raise vimconn
.VimConnUnexpectedResponse("Unexpected response from VIM")
2744 nvExceptions
.NotFound
,
2745 ksExceptions
.ClientException
,
2746 nvExceptions
.ClientException
,
2747 nvExceptions
.BadRequest
,
2750 self
._format
_exception
(e
)
def _delete_ports_by_id_wth_neutron(self, k_id: str) -> None:
    """Neutron delete ports by id.

    Args:
        k_id (str): Port id in the VIM
    """
    try:
        # Only delete when the id is still listed, so a port already removed
        # by someone else is not treated as an error.
        port_dict = self.neutron.list_ports()
        # FIX: original comprehension filtered on `if port_dict` (truthiness of
        # the whole response, always true here) — a meaningless guard; dropped
        # and replaced by a safe .get() on the "ports" key.
        existing_ports = [port["id"] for port in port_dict.get("ports", ())]

        if k_id in existing_ports:
            self.neutron.delete_port(k_id)
    except Exception as e:
        # Best-effort cleanup: log and let the caller continue with other items
        self.logger.error("Error deleting port: {}: {}".format(type(e).__name__, e))
def _delete_volumes_by_id_wth_cinder(
    self, k: str, k_id: str, volumes_to_hold: list, created_items: dict
) -> bool:
    """Cinder delete volume by id.

    Args:
        k (str): Full item name in created_items
        k_id (str): ID of volume in VIM
        volumes_to_hold (list): Volumes not to delete
        created_items (dict): All created items belongs to VM

    Returns:
        True when the volume is not yet "available" (caller must keep
        waiting), False when the volume is held, None after deletion.
    """
    try:
        if k_id in volumes_to_hold:
            return False

        if self.cinder.volumes.get(k_id).status != "available":
            # Volume still detaching: signal the caller to poll again
            return True
        else:
            self.cinder.volumes.delete(k_id)
            created_items[k] = None
    except Exception as e:
        self.logger.error(
            "Error deleting volume: {}: {}".format(type(e).__name__, e)
        )
def _delete_floating_ip_by_id(self, k: str, k_id: str, created_items: dict) -> None:
    """Neutron delete floating ip by id.

    Args:
        k (str): Full item name in created_items
        k_id (str): ID of floating ip in VIM
        created_items (dict): All created items belongs to VM
    """
    try:
        self.neutron.delete_floatingip(k_id)
        # Mark as deleted so a retry loop skips it
        created_items[k] = None
    except Exception as e:
        self.logger.error(
            "Error deleting floating ip: {}: {}".format(type(e).__name__, e)
        )
@staticmethod
def _get_item_name_id(k: str) -> Tuple[str, str]:
    """Split a created_items key "ITEM_TYPE:ID" into (item_type, id)."""
    k_item, _, k_id = k.partition(":")
    return k_item, k_id
def _delete_vm_ports_attached_to_network(self, created_items: dict) -> None:
    """Delete VM ports attached to the networks before deleting virtual machine.

    Args:
        created_items (dict): All created items belongs to VM
    """
    for k, v in created_items.items():
        if not v:  # skip already deleted
            continue

        try:
            k_item, k_id = self._get_item_name_id(k)
            if k_item == "port":
                self._delete_ports_by_id_wth_neutron(k_id)
        except Exception as e:
            # Best-effort: log and keep processing the remaining items
            self.logger.error(
                "Error deleting port: {}: {}".format(type(e).__name__, e)
            )
def _delete_created_items(
    self, created_items: dict, volumes_to_hold: list, keep_waiting: bool
) -> bool:
    """Delete Volumes and floating ip if they exist in created_items.

    Args:
        created_items (dict): All created items belongs to VM
        volumes_to_hold (list): Volumes not to delete
        keep_waiting (bool): Incoming wait flag

    Returns:
        keep_waiting (bool): True when at least one volume is not yet
            deletable and the caller should poll again.
    """
    for k, v in created_items.items():
        if not v:  # skip already deleted
            continue

        try:
            k_item, k_id = self._get_item_name_id(k)

            if k_item == "volume":
                unavailable_vol = self._delete_volumes_by_id_wth_cinder(
                    k, k_id, volumes_to_hold, created_items
                )

                if unavailable_vol:
                    # Volume not "available" yet: ask the caller to keep polling
                    keep_waiting = True
            elif k_item == "floating_ip":
                self._delete_floating_ip_by_id(k, k_id, created_items)
        except Exception as e:
            self.logger.error("Error deleting {}: {}".format(k, e))

    return keep_waiting
@staticmethod
def _extract_items_wth_keep_flag_from_created_items(created_items: dict) -> dict:
    """Remove the volumes which has keep flag from created_items

    Args:
        created_items (dict): All created items belongs to VM

    Returns:
        created_items (dict): Persistent volumes eliminated created_items
    """
    # Keys are "ITEM_TYPE:ID" (2 fields); "volume:ID:keep" (3 fields) is
    # a persistent volume to be preserved and is filtered out here.
    return {
        key: value
        for (key, value) in created_items.items()
        if len(key.split(":")) == 2
    }
def delete_vminstance(
    self, vm_id: str, created_items: dict = None, volumes_to_hold: list = None
) -> None:
    """Removes a VM instance from VIM. Returns the old identifier.

    Args:
        vm_id (str): Identifier of VM instance
        created_items (dict): All created items belongs to VM
        volumes_to_hold (list): Volumes_to_hold
    """
    if created_items is None:
        created_items = {}
    if volumes_to_hold is None:
        volumes_to_hold = []

    try:
        # Drop ":keep"-flagged volumes so they are preserved during cleanup
        created_items = self._extract_items_wth_keep_flag_from_created_items(
            created_items
        )

        self._reload_connection()

        # Delete VM ports attached to the networks before the virtual machine
        if created_items:
            self._delete_vm_ports_attached_to_network(created_items)

        if vm_id:
            self.nova.servers.delete(vm_id)

        # Although having detached, volumes should have in active status before deleting.
        # We ensure in this loop
        keep_waiting = True
        elapsed_time = 0

        while keep_waiting and elapsed_time < volume_timeout:
            keep_waiting = False

            # Delete volumes and floating IP.
            keep_waiting = self._delete_created_items(
                created_items, volumes_to_hold, keep_waiting
            )

            if keep_waiting:
                time.sleep(1)
                elapsed_time += 1
    except (
        nvExceptions.NotFound,
        ksExceptions.ClientException,
        nvExceptions.ClientException,
        ConnectionError,
    ) as e:
        self._format_exception(e)
2934 def refresh_vms_status(self
, vm_list
):
2935 """Get the status of the virtual machines and their interfaces/ports
2936 Params: the list of VM identifiers
2937 Returns a dictionary with:
2938 vm_id: #VIM id of this Virtual Machine
2939 status: #Mandatory. Text with one of:
2940 # DELETED (not found at vim)
2941 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
2942 # OTHER (Vim reported other status not understood)
2943 # ERROR (VIM indicates an ERROR status)
2944 # ACTIVE, PAUSED, SUSPENDED, INACTIVE (not running),
2945 # CREATING (on building process), ERROR
2946 # ACTIVE:NoMgmtIP (Active but any of its interface has an IP address
2948 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
2949 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
2951 - vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
2952 mac_address: #Text format XX:XX:XX:XX:XX:XX
2953 vim_net_id: #network id where this interface is connected
2954 vim_interface_id: #interface/port VIM id
2955 ip_address: #null, or text with IPv4, IPv6 address
2956 compute_node: #identification of compute node where PF,VF interface is allocated
2957 pci: #PCI address of the NIC that hosts the PF,VF
2958 vlan: #physical VLAN used for VF
2962 "refresh_vms status: Getting tenant VM instance information from VIM"
2965 for vm_id
in vm_list
:
2969 vm_vim
= self
.get_vminstance(vm_id
)
2971 if vm_vim
["status"] in vmStatus2manoFormat
:
2972 vm
["status"] = vmStatus2manoFormat
[vm_vim
["status"]]
2974 vm
["status"] = "OTHER"
2975 vm
["error_msg"] = "VIM status reported " + vm_vim
["status"]
2977 vm_vim
.pop("OS-EXT-SRV-ATTR:user_data", None)
2978 vm_vim
.pop("user_data", None)
2979 vm
["vim_info"] = self
.serialize(vm_vim
)
2981 vm
["interfaces"] = []
2982 if vm_vim
.get("fault"):
2983 vm
["error_msg"] = str(vm_vim
["fault"])
2987 self
._reload
_connection
()
2988 port_dict
= self
.neutron
.list_ports(device_id
=vm_id
)
2990 for port
in port_dict
["ports"]:
2992 interface
["vim_info"] = self
.serialize(port
)
2993 interface
["mac_address"] = port
.get("mac_address")
2994 interface
["vim_net_id"] = port
["network_id"]
2995 interface
["vim_interface_id"] = port
["id"]
2996 # check if OS-EXT-SRV-ATTR:host is there,
2997 # in case of non-admin credentials, it will be missing
2999 if vm_vim
.get("OS-EXT-SRV-ATTR:host"):
3000 interface
["compute_node"] = vm_vim
["OS-EXT-SRV-ATTR:host"]
3002 interface
["pci"] = None
3004 # check if binding:profile is there,
3005 # in case of non-admin credentials, it will be missing
3006 if port
.get("binding:profile"):
3007 if port
["binding:profile"].get("pci_slot"):
3008 # TODO: At the moment sr-iov pci addresses are converted to PF pci addresses by setting
3010 # TODO: This is just a workaround valid for niantinc. Find a better way to do so
3011 # CHANGE DDDD:BB:SS.F to DDDD:BB:00.(F%2) assuming there are 2 ports per nic
3012 pci
= port
["binding:profile"]["pci_slot"]
3013 # interface["pci"] = pci[:-4] + "00." + str(int(pci[-1]) % 2)
3014 interface
["pci"] = pci
3016 interface
["vlan"] = None
3018 if port
.get("binding:vif_details"):
3019 interface
["vlan"] = port
["binding:vif_details"].get("vlan")
3021 # Get vlan from network in case not present in port for those old openstacks and cases where
3022 # it is needed vlan at PT
3023 if not interface
["vlan"]:
3024 # if network is of type vlan and port is of type direct (sr-iov) then set vlan id
3025 network
= self
.neutron
.show_network(port
["network_id"])
3028 network
["network"].get("provider:network_type")
3031 # and port.get("binding:vnic_type") in ("direct", "direct-physical"):
3032 interface
["vlan"] = network
["network"].get(
3033 "provider:segmentation_id"
3037 # look for floating ip address
3039 floating_ip_dict
= self
.neutron
.list_floatingips(
3043 if floating_ip_dict
.get("floatingips"):
3045 floating_ip_dict
["floatingips"][0].get(
3046 "floating_ip_address"
3052 for subnet
in port
["fixed_ips"]:
3053 ips
.append(subnet
["ip_address"])
3055 interface
["ip_address"] = ";".join(ips
)
3056 vm
["interfaces"].append(interface
)
3057 except Exception as e
:
3059 "Error getting vm interface information {}: {}".format(
3064 except vimconn
.VimConnNotFoundException
as e
:
3065 self
.logger
.error("Exception getting vm status: %s", str(e
))
3066 vm
["status"] = "DELETED"
3067 vm
["error_msg"] = str(e
)
3068 except vimconn
.VimConnException
as e
:
3069 self
.logger
.error("Exception getting vm status: %s", str(e
))
3070 vm
["status"] = "VIM_ERROR"
3071 vm
["error_msg"] = str(e
)
def action_vminstance(self, vm_id, action_dict, created_items=None):
    """Send and action over a VM instance from VIM
    Returns None or the console dict if the action was successfully sent to the VIM
    """
    # FIX: default was the mutable literal `{}` shared across calls;
    # replaced by the None sentinel (backward-compatible, arg is optional).
    if created_items is None:
        created_items = {}

    self.logger.debug("Action over VM '%s': %s", vm_id, str(action_dict))

    try:
        self._reload_connection()
        server = self.nova.servers.find(id=vm_id)

        if "start" in action_dict:
            if action_dict["start"] == "rebuild":
                server.rebuild(server.image["id"])
            else:
                if server.status == "PAUSED":
                    server.unpause()
                elif server.status == "SUSPENDED":
                    server.resume()
                elif server.status == "SHUTOFF":
                    server.start()
                else:
                    self.logger.debug(
                        "ERROR : Instance is not in SHUTOFF/PAUSE/SUSPEND state"
                    )
                    raise vimconn.VimConnException(
                        "Cannot 'start' instance while it is in active state",
                        http_code=vimconn.HTTP_Bad_Request,
                    )
        elif "pause" in action_dict:
            server.pause()
        elif "resume" in action_dict:
            server.resume()
        elif "shutoff" in action_dict or "shutdown" in action_dict:
            self.logger.debug("server status %s", server.status)

            if server.status == "ACTIVE":
                server.stop()
            else:
                self.logger.debug("ERROR: VM is not in Active state")
                raise vimconn.VimConnException(
                    "VM is not in active state, stop operation is not allowed",
                    http_code=vimconn.HTTP_Bad_Request,
                )
        elif "forceOff" in action_dict:
            server.stop()  # TODO
        elif "terminate" in action_dict:
            server.delete()
        elif "createImage" in action_dict:
            server.create_image()
            # "path":path_schema,
            # "description":description_schema,
            # "name":name_schema,
            # "metadata":metadata_schema,
            # "imageRef": id_schema,
            # "disk": {"oneOf":[{"type": "null"}, {"type":"string"}] },
        elif "rebuild" in action_dict:
            server.rebuild(server.image["id"])
        elif "reboot" in action_dict:
            server.reboot()  # reboot_type="SOFT"
        elif "console" in action_dict:
            console_type = action_dict["console"]

            if console_type is None or console_type == "novnc":
                console_dict = server.get_vnc_console("novnc")
            elif console_type == "xvpvnc":
                console_dict = server.get_vnc_console(console_type)
            elif console_type == "rdp-html5":
                console_dict = server.get_rdp_console(console_type)
            elif console_type == "spice-html5":
                console_dict = server.get_spice_console(console_type)
            else:
                raise vimconn.VimConnException(
                    "console type '{}' not allowed".format(console_type),
                    http_code=vimconn.HTTP_Bad_Request,
                )

            try:
                console_url = console_dict["console"]["url"]
                # parse console_url as PROTOCOL://SERVER:PORT/SUFFIX
                protocol_index = console_url.find("//")
                suffix_index = (
                    console_url[protocol_index + 2 :].find("/") + protocol_index + 2
                )
                port_index = (
                    console_url[protocol_index + 2 : suffix_index].find(":")
                    + protocol_index
                    + 2
                )

                if protocol_index < 0 or port_index < 0 or suffix_index < 0:
                    raise vimconn.VimConnException(
                        "Unexpected response from VIM " + str(console_dict)
                    )

                console_dict2 = {
                    "protocol": console_url[0:protocol_index],
                    "server": console_url[protocol_index + 2 : port_index],
                    "port": int(console_url[port_index + 1 : suffix_index]),
                    "suffix": console_url[suffix_index + 1 :],
                }

                return console_dict2
            except Exception:
                raise vimconn.VimConnException(
                    "Unexpected response from VIM " + str(console_dict)
                )

        return None
    except (
        ksExceptions.ClientException,
        nvExceptions.ClientException,
        nvExceptions.NotFound,
        ConnectionError,
    ) as e:
        self._format_exception(e)
    # TODO insert exception vimconn.HTTP_Unauthorized
3193 # ###### VIO Specific Changes #########
def _generate_vlanID(self):
    """
    Method to get unused vlanID
    Args:
        None
    Returns:
        vlanID
    """
    # Get used VLAN IDs
    usedVlanIDs = []
    networks = self.get_network_list()

    for net in networks:
        if net.get("provider:segmentation_id"):
            usedVlanIDs.append(net.get("provider:segmentation_id"))

    used_vlanIDs = set(usedVlanIDs)

    # find unused VLAN ID
    for vlanID_range in self.config.get("dataplane_net_vlan_range"):
        try:
            start_vlanid, end_vlanid = map(
                int, vlanID_range.replace(" ", "").split("-")
            )

            for vlanID in range(start_vlanid, end_vlanid + 1):
                if vlanID not in used_vlanIDs:
                    return vlanID
        except Exception as exp:
            raise vimconn.VimConnException(
                "Exception {} occurred while generating VLAN ID.".format(exp)
            )
    else:
        raise vimconn.VimConnConflictException(
            "Unable to create the SRIOV VLAN network. All given Vlan IDs {} are in use.".format(
                self.config.get("dataplane_net_vlan_range")
            )
        )
def _generate_multisegment_vlanID(self):
    """
    Method to get unused vlanID
    Args:
        None
    Returns:
        vlanID
    """
    # Get used VLAN IDs (top-level networks and their segments)
    usedVlanIDs = []
    networks = self.get_network_list()

    for net in networks:
        if net.get("provider:network_type") == "vlan" and net.get(
            "provider:segmentation_id"
        ):
            usedVlanIDs.append(net.get("provider:segmentation_id"))
        elif net.get("segments"):
            for segment in net.get("segments"):
                if segment.get("provider:network_type") == "vlan" and segment.get(
                    "provider:segmentation_id"
                ):
                    usedVlanIDs.append(segment.get("provider:segmentation_id"))

    used_vlanIDs = set(usedVlanIDs)

    # find unused VLAN ID
    for vlanID_range in self.config.get("multisegment_vlan_range"):
        try:
            start_vlanid, end_vlanid = map(
                int, vlanID_range.replace(" ", "").split("-")
            )

            for vlanID in range(start_vlanid, end_vlanid + 1):
                if vlanID not in used_vlanIDs:
                    return vlanID
        except Exception as exp:
            raise vimconn.VimConnException(
                "Exception {} occurred while generating VLAN ID.".format(exp)
            )
    else:
        raise vimconn.VimConnConflictException(
            "Unable to create the VLAN segment. All VLAN IDs {} are in use.".format(
                self.config.get("multisegment_vlan_range")
            )
        )
def _validate_vlan_ranges(self, input_vlan_range, text_vlan_range):
    """
    Method to validate user given vlanID ranges
    Args:  None
    Returns: None
    """
    for vlanID_range in input_vlan_range:
        vlan_range = vlanID_range.replace(" ", "")
        # validate format "<start>-<end>"
        vlanID_pattern = r"(\d)*-(\d)*$"
        match_obj = re.match(vlanID_pattern, vlan_range)
        if not match_obj:
            raise vimconn.VimConnConflictException(
                "Invalid VLAN range for {}: {}.You must provide "
                "'{}' in format [start_ID - end_ID].".format(
                    text_vlan_range, vlanID_range, text_vlan_range
                )
            )

        start_vlanid, end_vlanid = map(int, vlan_range.split("-"))

        if start_vlanid <= 0:
            raise vimconn.VimConnConflictException(
                "Invalid VLAN range for {}: {}. Start ID can not be zero. For VLAN "
                "networks valid IDs are 1 to 4094 ".format(
                    text_vlan_range, vlanID_range
                )
            )

        if end_vlanid > 4094:
            raise vimconn.VimConnConflictException(
                "Invalid VLAN range for {}: {}. End VLAN ID can not be "
                "greater than 4094. For VLAN networks valid IDs are 1 to 4094 ".format(
                    text_vlan_range, vlanID_range
                )
            )

        if start_vlanid > end_vlanid:
            raise vimconn.VimConnConflictException(
                "Invalid VLAN range for {}: {}. You must provide '{}'"
                " in format start_ID - end_ID and start_ID < end_ID ".format(
                    text_vlan_range, vlanID_range, text_vlan_range
                )
            )
3323 # NOT USED FUNCTIONS
def new_external_port(self, port_data):
    """Adds a external port to VIM
    Returns the port identifier"""
    # TODO openstack if needed
    return (
        -vimconn.HTTP_Internal_Server_Error,
        "osconnector.new_external_port() not implemented",
    )
def connect_port_network(self, port_id, network_id, admin=False):
    """Connects a external port to a network
    Returns status code of the VIM response"""
    # TODO openstack if needed
    return (
        -vimconn.HTTP_Internal_Server_Error,
        "osconnector.connect_port_network() not implemented",
    )
3343 def new_user(self
, user_name
, user_passwd
, tenant_id
=None):
3344 """Adds a new user to openstack VIM
3345 Returns the user identifier"""
3346 self
.logger
.debug("osconnector: Adding a new user to VIM")
3349 self
._reload
_connection
()
3350 user
= self
.keystone
.users
.create(
3351 user_name
, password
=user_passwd
, default_project
=tenant_id
3353 # self.keystone.tenants.add_user(self.k_creds["username"], #role)
3356 except ksExceptions
.ConnectionError
as e
:
3357 error_value
= -vimconn
.HTTP_Bad_Request
3361 + (str(e
) if len(e
.args
) == 0 else str(e
.args
[0]))
3363 except ksExceptions
.ClientException
as e
: # TODO remove
3364 error_value
= -vimconn
.HTTP_Bad_Request
3368 + (str(e
) if len(e
.args
) == 0 else str(e
.args
[0]))
3371 # TODO insert exception vimconn.HTTP_Unauthorized
3372 # if reaching here is because an exception
3373 self
.logger
.debug("new_user " + error_text
)
3375 return error_value
, error_text
3377 def delete_user(self
, user_id
):
3378 """Delete a user from openstack VIM
3379 Returns the user identifier"""
3381 print("osconnector: Deleting a user from VIM")
3384 self
._reload
_connection
()
3385 self
.keystone
.users
.delete(user_id
)
3388 except ksExceptions
.ConnectionError
as e
:
3389 error_value
= -vimconn
.HTTP_Bad_Request
3393 + (str(e
) if len(e
.args
) == 0 else str(e
.args
[0]))
3395 except ksExceptions
.NotFound
as e
:
3396 error_value
= -vimconn
.HTTP_Not_Found
3400 + (str(e
) if len(e
.args
) == 0 else str(e
.args
[0]))
3402 except ksExceptions
.ClientException
as e
: # TODO remove
3403 error_value
= -vimconn
.HTTP_Bad_Request
3407 + (str(e
) if len(e
.args
) == 0 else str(e
.args
[0]))
3410 # TODO insert exception vimconn.HTTP_Unauthorized
3411 # if reaching here is because an exception
3412 self
.logger
.debug("delete_tenant " + error_text
)
3414 return error_value
, error_text
3416 def get_hosts_info(self
):
3417 """Get the information of deployed hosts
3418 Returns the hosts content"""
3420 print("osconnector: Getting Host info from VIM")
3424 self
._reload
_connection
()
3425 hypervisors
= self
.nova
.hypervisors
.list()
3427 for hype
in hypervisors
:
3428 h_list
.append(hype
.to_dict())
3430 return 1, {"hosts": h_list
}
3431 except nvExceptions
.NotFound
as e
:
3432 error_value
= -vimconn
.HTTP_Not_Found
3433 error_text
= str(e
) if len(e
.args
) == 0 else str(e
.args
[0])
3434 except (ksExceptions
.ClientException
, nvExceptions
.ClientException
) as e
:
3435 error_value
= -vimconn
.HTTP_Bad_Request
3439 + (str(e
) if len(e
.args
) == 0 else str(e
.args
[0]))
3442 # TODO insert exception vimconn.HTTP_Unauthorized
3443 # if reaching here is because an exception
3444 self
.logger
.debug("get_hosts_info " + error_text
)
3446 return error_value
, error_text
    def get_hosts(self, vim_tenant):
        """Get the hosts and deployed instances
        Returns the hosts content"""
        # Reuse get_hosts_info(); a negative first element means it failed.
        r, hype_dict = self.get_hosts_info()

        if r < 0:
            return r, hype_dict

        hypervisors = hype_dict["hosts"]

        try:
            servers = self.nova.servers.list()

            # Attach to each hypervisor the list of VM ids running on it.
            for hype in hypervisors:
                for server in servers:
                    if (
                        server.to_dict()["OS-EXT-SRV-ATTR:hypervisor_hostname"]
                        == hype["hypervisor_hostname"]
                    ):
                        if "vm" in hype:
                            hype["vm"].append(server.id)
                        else:
                            hype["vm"] = [server.id]

            return 1, hype_dict
        except nvExceptions.NotFound as e:
            error_value = -vimconn.HTTP_Not_Found
            error_text = str(e) if len(e.args) == 0 else str(e.args[0])
        except (ksExceptions.ClientException, nvExceptions.ClientException) as e:
            error_value = -vimconn.HTTP_Bad_Request
            error_text = (
                type(e).__name__
                + ": "
                + (str(e) if len(e.args) == 0 else str(e.args[0]))
            )

        # TODO insert exception vimconn.HTTP_Unauthorized
        # if reaching here is because an exception
        self.logger.debug("get_hosts " + error_text)

        return error_value, error_text
    def new_classification(self, name, ctype, definition):
        """Create a (Traffic) Classification in the VIM.

        Maps to a networking-sfc "flow classifier".
        :param name: name to give the classification in the VIM
        :param ctype: classification type; must be in supportedClassificationTypes
        :param definition: dict with the classification match fields
        :returns: the VIM id of the created flow classifier
        """
        self.logger.debug(
            "Adding a new (Traffic) Classification to VIM, named %s", name
        )

        try:
            new_class = None
            self._reload_connection()

            if ctype not in supportedClassificationTypes:
                raise vimconn.VimConnNotSupportedException(
                    "OpenStack VIM connector does not support provided "
                    "Classification Type {}, supported ones are: {}".format(
                        ctype, supportedClassificationTypes
                    )
                )

            if not self._validate_classification(ctype, definition):
                raise vimconn.VimConnException(
                    "Incorrect Classification definition for the type specified."
                )

            # The Neutron flow-classifier body is the definition plus the name.
            classification_dict = definition
            classification_dict["name"] = name
            new_class = self.neutron.create_sfc_flow_classifier(
                {"flow_classifier": classification_dict}
            )

            return new_class["flow_classifier"]["id"]
        except (
            neExceptions.ConnectionFailed,
            ksExceptions.ClientException,
            neExceptions.NeutronException,
            ConnectionError,
        ) as e:
            self.logger.error("Creation of Classification failed.")
            self._format_exception(e)
3527 def get_classification(self
, class_id
):
3528 self
.logger
.debug(" Getting Classification %s from VIM", class_id
)
3529 filter_dict
= {"id": class_id
}
3530 class_list
= self
.get_classification_list(filter_dict
)
3532 if len(class_list
) == 0:
3533 raise vimconn
.VimConnNotFoundException(
3534 "Classification '{}' not found".format(class_id
)
3536 elif len(class_list
) > 1:
3537 raise vimconn
.VimConnConflictException(
3538 "Found more than one Classification with this criteria"
3541 classification
= class_list
[0]
3543 return classification
3545 def get_classification_list(self
, filter_dict
={}):
3547 "Getting Classifications from VIM filter: '%s'", str(filter_dict
)
3551 filter_dict_os
= filter_dict
.copy()
3552 self
._reload
_connection
()
3554 if self
.api_version3
and "tenant_id" in filter_dict_os
:
3555 filter_dict_os
["project_id"] = filter_dict_os
.pop("tenant_id")
3557 classification_dict
= self
.neutron
.list_sfc_flow_classifiers(
3560 classification_list
= classification_dict
["flow_classifiers"]
3561 self
.__classification
_os
2mano
(classification_list
)
3563 return classification_list
3565 neExceptions
.ConnectionFailed
,
3566 ksExceptions
.ClientException
,
3567 neExceptions
.NeutronException
,
3570 self
._format
_exception
(e
)
3572 def delete_classification(self
, class_id
):
3573 self
.logger
.debug("Deleting Classification '%s' from VIM", class_id
)
3576 self
._reload
_connection
()
3577 self
.neutron
.delete_sfc_flow_classifier(class_id
)
3581 neExceptions
.ConnectionFailed
,
3582 neExceptions
.NeutronException
,
3583 ksExceptions
.ClientException
,
3584 neExceptions
.NeutronException
,
3587 self
._format
_exception
(e
)
    def new_sfi(self, name, ingress_ports, egress_ports, sfc_encap=True):
        """Create a Service Function Instance (Neutron port pair) in the VIM.

        :param name: name for the port pair
        :param ingress_ports: list with exactly one ingress port id
        :param egress_ports: list with exactly one egress port id
        :param sfc_encap: whether SFC encapsulation (NSH) is requested
        :returns: the VIM id of the created port pair
        """
        self.logger.debug(
            "Adding a new Service Function Instance to VIM, named '%s'", name
        )

        try:
            new_sfi = None
            self._reload_connection()
            correlation = None

            if sfc_encap:
                # NOTE(review): reconstructed from upstream — confirm "nsh"
                # is the correlation used for port pairs in this version.
                correlation = "nsh"

            if len(ingress_ports) != 1:
                raise vimconn.VimConnNotSupportedException(
                    "OpenStack VIM connector can only have 1 ingress port per SFI"
                )

            if len(egress_ports) != 1:
                raise vimconn.VimConnNotSupportedException(
                    "OpenStack VIM connector can only have 1 egress port per SFI"
                )

            sfi_dict = {
                "name": name,
                "ingress": ingress_ports[0],
                "egress": egress_ports[0],
                "service_function_parameters": {"correlation": correlation},
            }
            new_sfi = self.neutron.create_sfc_port_pair({"port_pair": sfi_dict})

            return new_sfi["port_pair"]["id"]
        except (
            neExceptions.ConnectionFailed,
            ksExceptions.ClientException,
            neExceptions.NeutronException,
            ConnectionError,
        ) as e:
            # Best-effort rollback if the port pair was actually created.
            if new_sfi:
                try:
                    self.neutron.delete_sfc_port_pair(new_sfi["port_pair"]["id"])
                except Exception:
                    self.logger.error(
                        "Creation of Service Function Instance failed, with "
                        "subsequent deletion failure as well."
                    )

            self._format_exception(e)
3638 def get_sfi(self
, sfi_id
):
3639 self
.logger
.debug("Getting Service Function Instance %s from VIM", sfi_id
)
3640 filter_dict
= {"id": sfi_id
}
3641 sfi_list
= self
.get_sfi_list(filter_dict
)
3643 if len(sfi_list
) == 0:
3644 raise vimconn
.VimConnNotFoundException(
3645 "Service Function Instance '{}' not found".format(sfi_id
)
3647 elif len(sfi_list
) > 1:
3648 raise vimconn
.VimConnConflictException(
3649 "Found more than one Service Function Instance with this criteria"
3656 def get_sfi_list(self
, filter_dict
={}):
3658 "Getting Service Function Instances from VIM filter: '%s'", str(filter_dict
)
3662 self
._reload
_connection
()
3663 filter_dict_os
= filter_dict
.copy()
3665 if self
.api_version3
and "tenant_id" in filter_dict_os
:
3666 filter_dict_os
["project_id"] = filter_dict_os
.pop("tenant_id")
3668 sfi_dict
= self
.neutron
.list_sfc_port_pairs(**filter_dict_os
)
3669 sfi_list
= sfi_dict
["port_pairs"]
3670 self
.__sfi
_os
2mano
(sfi_list
)
3674 neExceptions
.ConnectionFailed
,
3675 ksExceptions
.ClientException
,
3676 neExceptions
.NeutronException
,
3679 self
._format
_exception
(e
)
3681 def delete_sfi(self
, sfi_id
):
3682 self
.logger
.debug("Deleting Service Function Instance '%s' from VIM", sfi_id
)
3685 self
._reload
_connection
()
3686 self
.neutron
.delete_sfc_port_pair(sfi_id
)
3690 neExceptions
.ConnectionFailed
,
3691 neExceptions
.NeutronException
,
3692 ksExceptions
.ClientException
,
3693 neExceptions
.NeutronException
,
3696 self
._format
_exception
(e
)
    def new_sf(self, name, sfis, sfc_encap=True):
        """Create a Service Function (Neutron port pair group) in the VIM.

        :param name: name for the port pair group
        :param sfis: list of SFI (port pair) VIM ids to group
        :param sfc_encap: required SFC encapsulation; all SFIs must agree
        :returns: the VIM id of the created port pair group
        """
        self.logger.debug("Adding a new Service Function to VIM, named '%s'", name)

        try:
            new_sf = None
            self._reload_connection()
            # correlation = None
            # if sfc_encap:
            #     correlation = "nsh"

            # All member SFIs must share the same SFC encapsulation setting.
            for instance in sfis:
                sfi = self.get_sfi(instance)

                if sfi.get("sfc_encap") != sfc_encap:
                    raise vimconn.VimConnNotSupportedException(
                        "OpenStack VIM connector requires all SFIs of the "
                        "same SF to share the same SFC Encapsulation"
                    )

            sf_dict = {"name": name, "port_pairs": sfis}
            new_sf = self.neutron.create_sfc_port_pair_group(
                {"port_pair_group": sf_dict}
            )

            return new_sf["port_pair_group"]["id"]
        except (
            neExceptions.ConnectionFailed,
            ksExceptions.ClientException,
            neExceptions.NeutronException,
            ConnectionError,
        ) as e:
            # Best-effort rollback if the port pair group was actually created.
            if new_sf:
                try:
                    self.neutron.delete_sfc_port_pair_group(
                        new_sf["port_pair_group"]["id"]
                    )
                except Exception:
                    self.logger.error(
                        "Creation of Service Function failed, with "
                        "subsequent deletion failure as well."
                    )

            self._format_exception(e)
3742 def get_sf(self
, sf_id
):
3743 self
.logger
.debug("Getting Service Function %s from VIM", sf_id
)
3744 filter_dict
= {"id": sf_id
}
3745 sf_list
= self
.get_sf_list(filter_dict
)
3747 if len(sf_list
) == 0:
3748 raise vimconn
.VimConnNotFoundException(
3749 "Service Function '{}' not found".format(sf_id
)
3751 elif len(sf_list
) > 1:
3752 raise vimconn
.VimConnConflictException(
3753 "Found more than one Service Function with this criteria"
3760 def get_sf_list(self
, filter_dict
={}):
3762 "Getting Service Function from VIM filter: '%s'", str(filter_dict
)
3766 self
._reload
_connection
()
3767 filter_dict_os
= filter_dict
.copy()
3769 if self
.api_version3
and "tenant_id" in filter_dict_os
:
3770 filter_dict_os
["project_id"] = filter_dict_os
.pop("tenant_id")
3772 sf_dict
= self
.neutron
.list_sfc_port_pair_groups(**filter_dict_os
)
3773 sf_list
= sf_dict
["port_pair_groups"]
3774 self
.__sf
_os
2mano
(sf_list
)
3778 neExceptions
.ConnectionFailed
,
3779 ksExceptions
.ClientException
,
3780 neExceptions
.NeutronException
,
3783 self
._format
_exception
(e
)
3785 def delete_sf(self
, sf_id
):
3786 self
.logger
.debug("Deleting Service Function '%s' from VIM", sf_id
)
3789 self
._reload
_connection
()
3790 self
.neutron
.delete_sfc_port_pair_group(sf_id
)
3794 neExceptions
.ConnectionFailed
,
3795 neExceptions
.NeutronException
,
3796 ksExceptions
.ClientException
,
3797 neExceptions
.NeutronException
,
3800 self
._format
_exception
(e
)
    def new_sfp(self, name, classifications, sfs, sfc_encap=True, spi=None):
        """Create a Service Function Path (Neutron port chain) in the VIM.

        :param name: name for the port chain
        :param classifications: list of flow classifier VIM ids
        :param sfs: ordered list of SF (port pair group) VIM ids
        :param sfc_encap: True for NSH encapsulation, False for legacy MPLS
        :param spi: optional explicit chain id (service path identifier)
        :returns: the VIM id of the created port chain
        """
        self.logger.debug("Adding a new Service Function Path to VIM, named '%s'", name)

        try:
            new_sfp = None
            self._reload_connection()
            # In networking-sfc the MPLS encapsulation is legacy
            # should be used when no full SFC Encapsulation is intended
            correlation = "mpls"

            if sfc_encap:
                correlation = "nsh"

            sfp_dict = {
                "name": name,
                "flow_classifiers": classifications,
                "port_pair_groups": sfs,
                "chain_parameters": {"correlation": correlation},
            }

            if spi:
                sfp_dict["chain_id"] = spi

            new_sfp = self.neutron.create_sfc_port_chain({"port_chain": sfp_dict})

            return new_sfp["port_chain"]["id"]
        except (
            neExceptions.ConnectionFailed,
            ksExceptions.ClientException,
            neExceptions.NeutronException,
            ConnectionError,
        ) as e:
            # Best-effort rollback if the port chain was actually created.
            if new_sfp:
                try:
                    self.neutron.delete_sfc_port_chain(new_sfp["port_chain"]["id"])
                except Exception:
                    self.logger.error(
                        "Creation of Service Function Path failed, with "
                        "subsequent deletion failure as well."
                    )

            self._format_exception(e)
3845 def get_sfp(self
, sfp_id
):
3846 self
.logger
.debug(" Getting Service Function Path %s from VIM", sfp_id
)
3848 filter_dict
= {"id": sfp_id
}
3849 sfp_list
= self
.get_sfp_list(filter_dict
)
3851 if len(sfp_list
) == 0:
3852 raise vimconn
.VimConnNotFoundException(
3853 "Service Function Path '{}' not found".format(sfp_id
)
3855 elif len(sfp_list
) > 1:
3856 raise vimconn
.VimConnConflictException(
3857 "Found more than one Service Function Path with this criteria"
3864 def get_sfp_list(self
, filter_dict
={}):
3866 "Getting Service Function Paths from VIM filter: '%s'", str(filter_dict
)
3870 self
._reload
_connection
()
3871 filter_dict_os
= filter_dict
.copy()
3873 if self
.api_version3
and "tenant_id" in filter_dict_os
:
3874 filter_dict_os
["project_id"] = filter_dict_os
.pop("tenant_id")
3876 sfp_dict
= self
.neutron
.list_sfc_port_chains(**filter_dict_os
)
3877 sfp_list
= sfp_dict
["port_chains"]
3878 self
.__sfp
_os
2mano
(sfp_list
)
3882 neExceptions
.ConnectionFailed
,
3883 ksExceptions
.ClientException
,
3884 neExceptions
.NeutronException
,
3887 self
._format
_exception
(e
)
3889 def delete_sfp(self
, sfp_id
):
3890 self
.logger
.debug("Deleting Service Function Path '%s' from VIM", sfp_id
)
3893 self
._reload
_connection
()
3894 self
.neutron
.delete_sfc_port_chain(sfp_id
)
3898 neExceptions
.ConnectionFailed
,
3899 neExceptions
.NeutronException
,
3900 ksExceptions
.ClientException
,
3901 neExceptions
.NeutronException
,
3904 self
._format
_exception
(e
)
    def refresh_sfps_status(self, sfp_list):
        """Get the status of the service function path
        Params: the list of sfp identifiers
        Returns a dictionary with:
            vm_id:          #VIM id of this service function path
                status:     #Mandatory. Text with one of:
                            # DELETED (not found at vim)
                            # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
                            # OTHER (Vim reported other status not understood)
                            # ERROR (VIM indicates an ERROR status)
                            # ACTIVE,
                            # CREATING (on building process)
                error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR
                vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)
        """
        sfp_dict = {}
        self.logger.debug(
            "refresh_sfps status: Getting tenant SFP information from VIM"
        )

        for sfp_id in sfp_list:
            sfp = {}

            try:
                sfp_vim = self.get_sfp(sfp_id)

                # A found port chain is mapped straight to ACTIVE;
                # networking-sfc exposes no finer-grained status.
                if sfp_vim:
                    sfp["status"] = vmStatus2manoFormat["ACTIVE"]
                else:
                    sfp["status"] = "OTHER"
                    sfp["error_msg"] = "VIM status reported " + sfp["status"]

                sfp["vim_info"] = self.serialize(sfp_vim)

                if sfp_vim.get("fault"):
                    sfp["error_msg"] = str(sfp_vim["fault"])
            except vimconn.VimConnNotFoundException as e:
                self.logger.error("Exception getting sfp status: %s", str(e))
                sfp["status"] = "DELETED"
                sfp["error_msg"] = str(e)
            except vimconn.VimConnException as e:
                self.logger.error("Exception getting sfp status: %s", str(e))
                sfp["status"] = "VIM_ERROR"
                sfp["error_msg"] = str(e)

            sfp_dict[sfp_id] = sfp

        return sfp_dict
    def refresh_sfis_status(self, sfi_list):
        """Get the status of the service function instances
        Params: the list of sfi identifiers
        Returns a dictionary with:
            vm_id:          #VIM id of this service function instance
                status:     #Mandatory. Text with one of:
                            # DELETED (not found at vim)
                            # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
                            # OTHER (Vim reported other status not understood)
                            # ERROR (VIM indicates an ERROR status)
                            # ACTIVE,
                            # CREATING (on building process)
                error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR
                vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)
        """
        sfi_dict = {}
        self.logger.debug(
            "refresh_sfis status: Getting tenant sfi information from VIM"
        )

        for sfi_id in sfi_list:
            sfi = {}

            try:
                sfi_vim = self.get_sfi(sfi_id)

                # A found port pair is mapped straight to ACTIVE;
                # networking-sfc exposes no finer-grained status.
                if sfi_vim:
                    sfi["status"] = vmStatus2manoFormat["ACTIVE"]
                else:
                    sfi["status"] = "OTHER"
                    sfi["error_msg"] = "VIM status reported " + sfi["status"]

                sfi["vim_info"] = self.serialize(sfi_vim)

                if sfi_vim.get("fault"):
                    sfi["error_msg"] = str(sfi_vim["fault"])
            except vimconn.VimConnNotFoundException as e:
                self.logger.error("Exception getting sfi status: %s", str(e))
                sfi["status"] = "DELETED"
                sfi["error_msg"] = str(e)
            except vimconn.VimConnException as e:
                self.logger.error("Exception getting sfi status: %s", str(e))
                sfi["status"] = "VIM_ERROR"
                sfi["error_msg"] = str(e)

            sfi_dict[sfi_id] = sfi

        return sfi_dict
    def refresh_sfs_status(self, sf_list):
        """Get the status of the service functions
        Params: the list of sf identifiers
        Returns a dictionary with:
            vm_id:          #VIM id of this service function
                status:     #Mandatory. Text with one of:
                            # DELETED (not found at vim)
                            # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
                            # OTHER (Vim reported other status not understood)
                            # ERROR (VIM indicates an ERROR status)
                            # ACTIVE,
                            # CREATING (on building process)
                error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR
                vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)
        """
        sf_dict = {}
        self.logger.debug("refresh_sfs status: Getting tenant sf information from VIM")

        for sf_id in sf_list:
            sf = {}

            try:
                sf_vim = self.get_sf(sf_id)

                if sf_vim:
                    sf["status"] = vmStatus2manoFormat["ACTIVE"]
                else:
                    sf["status"] = "OTHER"
                    # NOTE(review): sibling refresh_* methods use the local
                    # dict's just-set "OTHER" status here, whereas this one
                    # reads sf_vim["status"] — confirm the asymmetry is wanted
                    # (sf_vim may not carry a "status" key at all).
                    sf["error_msg"] = "VIM status reported " + sf_vim["status"]

                sf["vim_info"] = self.serialize(sf_vim)

                if sf_vim.get("fault"):
                    sf["error_msg"] = str(sf_vim["fault"])
            except vimconn.VimConnNotFoundException as e:
                self.logger.error("Exception getting sf status: %s", str(e))
                sf["status"] = "DELETED"
                sf["error_msg"] = str(e)
            except vimconn.VimConnException as e:
                self.logger.error("Exception getting sf status: %s", str(e))
                sf["status"] = "VIM_ERROR"
                sf["error_msg"] = str(e)

            sf_dict[sf_id] = sf

        return sf_dict
    def refresh_classifications_status(self, classification_list):
        """Get the status of the classifications
        Params: the list of classification identifiers
        Returns a dictionary with:
            vm_id:          #VIM id of this classifier
                status:     #Mandatory. Text with one of:
                            # DELETED (not found at vim)
                            # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
                            # OTHER (Vim reported other status not understood)
                            # ERROR (VIM indicates an ERROR status)
                            # ACTIVE,
                            # CREATING (on building process)
                error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR
                vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)
        """
        classification_dict = {}
        self.logger.debug(
            "refresh_classifications status: Getting tenant classification information from VIM"
        )

        for classification_id in classification_list:
            classification = {}

            try:
                classification_vim = self.get_classification(classification_id)

                # A found flow classifier is mapped straight to ACTIVE;
                # networking-sfc exposes no finer-grained status.
                if classification_vim:
                    classification["status"] = vmStatus2manoFormat["ACTIVE"]
                else:
                    classification["status"] = "OTHER"
                    classification["error_msg"] = (
                        "VIM status reported " + classification["status"]
                    )

                classification["vim_info"] = self.serialize(classification_vim)

                if classification_vim.get("fault"):
                    classification["error_msg"] = str(classification_vim["fault"])
            except vimconn.VimConnNotFoundException as e:
                self.logger.error("Exception getting classification status: %s", str(e))
                classification["status"] = "DELETED"
                classification["error_msg"] = str(e)
            except vimconn.VimConnException as e:
                self.logger.error("Exception getting classification status: %s", str(e))
                classification["status"] = "VIM_ERROR"
                classification["error_msg"] = str(e)

            classification_dict[classification_id] = classification

        return classification_dict
    def new_affinity_group(self, affinity_group_data):
        """Adds a server group to VIM
            affinity_group_data contains a dictionary with information, keys:
                name: name in VIM for the server group
                type: affinity or anti-affinity
                scope: Only nfvi-node allowed
        Returns the server group identifier"""
        self.logger.debug("Adding Server Group '%s'", str(affinity_group_data))

        try:
            name = affinity_group_data["name"]
            # OSM "type" maps directly to a Nova server-group policy
            # (affinity / anti-affinity).
            policy = affinity_group_data["type"]

            self._reload_connection()
            new_server_group = self.nova.server_groups.create(name, policy)

            return new_server_group.id
        except (
            ksExceptions.ClientException,
            nvExceptions.ClientException,
            ConnectionError,
            KeyError,
        ) as e:
            self._format_exception(e)
    def get_affinity_group(self, affinity_group_id):
        """Obtain server group details from the VIM. Returns the server group details as a dict"""
        self.logger.debug("Getting flavor '%s'", affinity_group_id)

        try:
            self._reload_connection()
            server_group = self.nova.server_groups.find(id=affinity_group_id)

            return server_group.to_dict()
        except (
            nvExceptions.NotFound,
            nvExceptions.ClientException,
            ksExceptions.ClientException,
            ConnectionError,
        ) as e:
            self._format_exception(e)
    def delete_affinity_group(self, affinity_group_id):
        """Deletes a server group from the VIM. Returns the old affinity_group_id"""
        self.logger.debug("Getting server group '%s'", affinity_group_id)

        try:
            self._reload_connection()
            self.nova.server_groups.delete(affinity_group_id)

            return affinity_group_id
        except (
            nvExceptions.NotFound,
            ksExceptions.ClientException,
            nvExceptions.ClientException,
            ConnectionError,
        ) as e:
            self._format_exception(e)
    def get_vdu_state(self, vm_id):
        """
        Getting the state of a vdu
        param:
            vm_id: ID of an instance
        Returns a list: [status, flavor id, hypervisor host, availability zone]
        """
        self.logger.debug("Getting the status of VM")
        self.logger.debug("VIM VM ID %s", vm_id)
        self._reload_connection()
        server = self.nova.servers.find(id=vm_id)
        server_dict = server.to_dict()
        # Positional result consumed by migrate_instance()/resize_instance().
        vdu_data = [
            server_dict["status"],
            server_dict["flavor"]["id"],
            server_dict["OS-EXT-SRV-ATTR:host"],
            server_dict["OS-EXT-AZ:availability_zone"],
        ]
        self.logger.debug("vdu_data %s", vdu_data)

        return vdu_data
    def check_compute_availability(self, host, server_flavor_details):
        """Check whether *host* has enough free resources for the flavor.

        :param host: hypervisor hostname to check
        :param server_flavor_details: [ram, disk, vcpus] required by the flavor
        :returns: the host name if it has enough free RAM/disk/vCPUs,
            otherwise None (implicit)
        """
        self._reload_connection()
        hypervisor_search = self.nova.hypervisors.search(
            hypervisor_match=host, servers=True
        )

        for hypervisor in hypervisor_search:
            hypervisor_id = hypervisor.to_dict()["id"]
            hypervisor_details = self.nova.hypervisors.get(hypervisor=hypervisor_id)
            hypervisor_dict = hypervisor_details.to_dict()
            # Round-trip through JSON to get a plain, serializable dict.
            hypervisor_temp = json.dumps(hypervisor_dict)
            hypervisor_json = json.loads(hypervisor_temp)
            # Free resources, in the same order as server_flavor_details.
            resources_available = [
                hypervisor_json["free_ram_mb"],
                hypervisor_json["disk_available_least"],
                hypervisor_json["vcpus"] - hypervisor_json["vcpus_used"],
            ]
            compute_available = all(
                x > y for x, y in zip(resources_available, server_flavor_details)
            )

            if compute_available:
                return host
    def check_availability_zone(
        self, old_az, server_flavor_details, old_host, host=None
    ):
        """Check migration feasibility within the original availability zone.

        :param old_az: availability zone the instance currently runs in
        :param server_flavor_details: [ram, disk, vcpus] required by the flavor
        :param old_host: hypervisor the instance currently runs on
        :param host: optional explicit target host; when None, any other host
            in the zone is considered
        :returns: dict with "zone_check" (bool: target stays in old_az) and
            "compute_availability" (chosen host with enough resources, or None)
        """
        self._reload_connection()
        az_check = {"zone_check": False, "compute_availability": None}
        aggregates_list = self.nova.aggregates.list()

        for aggregate in aggregates_list:
            aggregate_details = aggregate.to_dict()
            # Round-trip through JSON to get a plain, serializable dict.
            aggregate_temp = json.dumps(aggregate_details)
            aggregate_json = json.loads(aggregate_temp)

            if aggregate_json["availability_zone"] == old_az:
                hosts_list = aggregate_json["hosts"]

                if host is not None:
                    # Explicit target: it must belong to the same zone.
                    if host in hosts_list:
                        az_check["zone_check"] = True
                        available_compute_id = self.check_compute_availability(
                            host, server_flavor_details
                        )

                        if available_compute_id is not None:
                            az_check["compute_availability"] = available_compute_id
                else:
                    # No target given: pick the first other host in the zone
                    # with enough free resources.
                    for check_host in hosts_list:
                        if check_host != old_host:
                            available_compute_id = self.check_compute_availability(
                                check_host, server_flavor_details
                            )

                            if available_compute_id is not None:
                                az_check["zone_check"] = True
                                az_check["compute_availability"] = available_compute_id
                                break
                    else:
                        az_check["zone_check"] = True

        return az_check
    def migrate_instance(self, vm_id, compute_host=None):
        """
        Migrate a vdu
        param:
            vm_id: ID of an instance
            compute_host: Host to migrate the vdu to
        Returns (state, target_host) on success; raises VimConnException
        (via _format_exception) otherwise.
        """
        self._reload_connection()
        vm_state = False
        instance_state = self.get_vdu_state(vm_id)
        # get_vdu_state() returns [status, flavor id, host, availability zone].
        server_flavor_id = instance_state[1]
        server_hypervisor_name = instance_state[2]
        server_availability_zone = instance_state[3]

        try:
            server_flavor = self.nova.flavors.find(id=server_flavor_id).to_dict()
            server_flavor_details = [
                server_flavor["ram"],
                server_flavor["disk"],
                server_flavor["vcpus"],
            ]

            if compute_host == server_hypervisor_name:
                raise vimconn.VimConnException(
                    "Unable to migrate instance '{}' to the same host '{}'".format(
                        vm_id, compute_host
                    ),
                    http_code=vimconn.HTTP_Bad_Request,
                )

            az_status = self.check_availability_zone(
                server_availability_zone,
                server_flavor_details,
                server_hypervisor_name,
                compute_host,
            )
            availability_zone_check = az_status["zone_check"]
            available_compute_id = az_status.get("compute_availability")

            if availability_zone_check is False:
                raise vimconn.VimConnException(
                    "Unable to migrate instance '{}' to a different availability zone".format(
                        vm_id
                    ),
                    http_code=vimconn.HTTP_Bad_Request,
                )

            if available_compute_id is not None:
                # Live-migrate and then wait for ACTIVE to verify the move.
                self.nova.servers.live_migrate(
                    server=vm_id,
                    host=available_compute_id,
                    block_migration=True,
                    disk_over_commit=False,
                )
                state = "MIGRATING"
                changed_compute_host = ""

                if state == "MIGRATING":
                    vm_state = self.__wait_for_vm(vm_id, "ACTIVE")
                    changed_compute_host = self.get_vdu_state(vm_id)[2]

                if vm_state and changed_compute_host == available_compute_id:
                    self.logger.debug(
                        "Instance '{}' migrated to the new compute host '{}'".format(
                            vm_id, changed_compute_host
                        )
                    )

                    return state, available_compute_id
                else:
                    raise vimconn.VimConnException(
                        "Migration Failed. Instance '{}' not moved to the new host {}".format(
                            vm_id, available_compute_id
                        ),
                        http_code=vimconn.HTTP_Bad_Request,
                    )
            else:
                raise vimconn.VimConnException(
                    "Compute '{}' not available or does not have enough resources to migrate the instance".format(
                        available_compute_id
                    ),
                    http_code=vimconn.HTTP_Bad_Request,
                )
        except (
            nvExceptions.BadRequest,
            nvExceptions.ClientException,
            nvExceptions.NotFound,
        ) as e:
            self._format_exception(e)
    def resize_instance(self, vm_id, new_flavor_id):
        """
        For resizing the vm based on the given
        flavor details
        param:
            vm_id : ID of an instance
            new_flavor_id : Flavor id to be resized
        Return the status of a resized instance
        """
        self._reload_connection()
        self.logger.debug("resize the flavor of an instance")
        instance_status, old_flavor_id, compute_host, az = self.get_vdu_state(vm_id)
        old_flavor_disk = self.nova.flavors.find(id=old_flavor_id).to_dict()["disk"]
        new_flavor_disk = self.nova.flavors.find(id=new_flavor_id).to_dict()["disk"]

        try:
            if instance_status == "ACTIVE" or instance_status == "SHUTOFF":
                # Nova cannot shrink a disk; reject downsizing up front.
                if old_flavor_disk > new_flavor_disk:
                    raise nvExceptions.BadRequest(
                        400,
                        message="Server disk resize failed. Resize to lower disk flavor is not allowed",
                    )
                else:
                    self.nova.servers.resize(server=vm_id, flavor=new_flavor_id)
                    # Resize leaves the server in VERIFY_RESIZE until confirmed.
                    vm_state = self.__wait_for_vm(vm_id, "VERIFY_RESIZE")

                    if vm_state:
                        instance_resized_status = self.confirm_resize(vm_id)

                        return instance_resized_status
                    else:
                        raise nvExceptions.BadRequest(
                            409,
                            message="Cannot 'resize' vm_state is in ERROR",
                        )
            else:
                self.logger.debug("ERROR : Instance is not in ACTIVE or SHUTOFF state")
                raise nvExceptions.BadRequest(
                    409,
                    message="Cannot 'resize' instance while it is in vm_state resized",
                )
        except (
            nvExceptions.BadRequest,
            nvExceptions.ClientException,
            nvExceptions.NotFound,
        ) as e:
            self._format_exception(e)
    def confirm_resize(self, vm_id):
        """
        Confirm the resize of an instance
        param:
            vm_id: ID of an instance
        Returns the instance status after confirmation.
        """
        self._reload_connection()
        self.nova.servers.confirm_resize(server=vm_id)

        # If Nova still reports VERIFY_RESIZE, wait until it becomes ACTIVE.
        if self.get_vdu_state(vm_id)[0] == "VERIFY_RESIZE":
            self.__wait_for_vm(vm_id, "ACTIVE")

        instance_status = self.get_vdu_state(vm_id)[0]

        return instance_status