1 # -*- coding: utf-8 -*-
4 # Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
5 # This file is part of openmano
8 # Licensed under the Apache License, Version 2.0 (the "License"); you may
9 # not use this file except in compliance with the License. You may obtain
10 # a copy of the License at
12 # http://www.apache.org/licenses/LICENSE-2.0
14 # Unless required by applicable law or agreed to in writing, software
15 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
16 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
17 # License for the specific language governing permissions and limitations
22 osconnector implements all the methods to interact with openstack using the python-neutronclient.
24 For the VNF forwarding graph, the OpenStack VIM connector calls the
25 networking-sfc Neutron extension methods, whose resources are mapped
26 to the VIM connector's SFC resources as follows:
27 - Classification (OSM) -> Flow Classifier (Neutron)
28 - Service Function Instance (OSM) -> Port Pair (Neutron)
29 - Service Function (OSM) -> Port Pair Group (Neutron)
30 - Service Function Path (OSM) -> Port Chain (Neutron)
34 from http
.client
import HTTPException
37 from pprint
import pformat
41 from typing
import Dict
, List
, Optional
, Tuple
43 from cinderclient
import client
as cClient
44 from glanceclient
import client
as glClient
45 import glanceclient
.exc
as gl1Exceptions
46 from keystoneauth1
import session
47 from keystoneauth1
.identity
import v2
, v3
48 import keystoneclient
.exceptions
as ksExceptions
49 import keystoneclient
.v2_0
.client
as ksClient_v2
50 import keystoneclient
.v3
.client
as ksClient_v3
52 from neutronclient
.common
import exceptions
as neExceptions
53 from neutronclient
.neutron
import client
as neClient
54 from novaclient
import client
as nClient
, exceptions
as nvExceptions
55 from osm_ro_plugin
import vimconn
56 from requests
.exceptions
import ConnectionError
59 __author__
= "Alfonso Tierno, Gerardo Garcia, Pablo Montes, xFlow Research, Igor D.C., Eduardo Sousa"
60 __date__
= "$22-sep-2017 23:59:59$"
62 """contain the openstack virtual machine status to openmano status"""
63 vmStatus2manoFormat
= {
66 "SUSPENDED": "SUSPENDED",
67 "SHUTOFF": "INACTIVE",
72 netStatus2manoFormat
= {
75 "INACTIVE": "INACTIVE",
81 supportedClassificationTypes
= ["legacy_flow_classifier"]
83 # global var to have a timeout creating and deleting volumes
class SafeDumper(yaml.SafeDumper):
    """YAML safe dumper that tolerates dict subclasses.

    Openstack APIs return custom subclasses of dict, which pyyaml's safe
    dumper is designed not to handle (see pyyaml issue 142); such values
    are downcast to plain dicts before being represented.
    """

    def represent_data(self, data):
        # A plain ``dict(...)`` copy is enough to make SafeDumper accept
        # values whose concrete class is a dict subclass.
        is_dict_subclass = isinstance(data, dict) and data.__class__ != dict

        if is_dict_subclass:
            data = dict(data.items())

        return super().represent_data(data)
99 class vimconnector(vimconn
.VimConnector
):
114 """using common constructor parameters. In this case
115 'url' is the keystone authorization url,
116 'url_admin' is not use
118 api_version
= config
.get("APIversion")
120 if api_version
and api_version
not in ("v3.3", "v2.0", "2", "3"):
121 raise vimconn
.VimConnException(
122 "Invalid value '{}' for config:APIversion. "
123 "Allowed values are 'v3.3', 'v2.0', '2' or '3'".format(api_version
)
126 vim_type
= config
.get("vim_type")
128 if vim_type
and vim_type
not in ("vio", "VIO"):
129 raise vimconn
.VimConnException(
130 "Invalid value '{}' for config:vim_type."
131 "Allowed values are 'vio' or 'VIO'".format(vim_type
)
134 if config
.get("dataplane_net_vlan_range") is not None:
135 # validate vlan ranges provided by user
136 self
._validate
_vlan
_ranges
(
137 config
.get("dataplane_net_vlan_range"), "dataplane_net_vlan_range"
140 if config
.get("multisegment_vlan_range") is not None:
141 # validate vlan ranges provided by user
142 self
._validate
_vlan
_ranges
(
143 config
.get("multisegment_vlan_range"), "multisegment_vlan_range"
146 vimconn
.VimConnector
.__init
__(
160 if self
.config
.get("insecure") and self
.config
.get("ca_cert"):
161 raise vimconn
.VimConnException(
162 "options insecure and ca_cert are mutually exclusive"
167 if self
.config
.get("insecure"):
170 if self
.config
.get("ca_cert"):
171 self
.verify
= self
.config
.get("ca_cert")
174 raise TypeError("url param can not be NoneType")
176 self
.persistent_info
= persistent_info
177 self
.availability_zone
= persistent_info
.get("availability_zone", None)
178 self
.session
= persistent_info
.get("session", {"reload_client": True})
179 self
.my_tenant_id
= self
.session
.get("my_tenant_id")
180 self
.nova
= self
.session
.get("nova")
181 self
.neutron
= self
.session
.get("neutron")
182 self
.cinder
= self
.session
.get("cinder")
183 self
.glance
= self
.session
.get("glance")
184 # self.glancev1 = self.session.get("glancev1")
185 self
.keystone
= self
.session
.get("keystone")
186 self
.api_version3
= self
.session
.get("api_version3")
187 self
.vim_type
= self
.config
.get("vim_type")
190 self
.vim_type
= self
.vim_type
.upper()
192 if self
.config
.get("use_internal_endpoint"):
193 self
.endpoint_type
= "internalURL"
195 self
.endpoint_type
= None
197 logging
.getLogger("urllib3").setLevel(logging
.WARNING
)
198 logging
.getLogger("keystoneauth").setLevel(logging
.WARNING
)
199 logging
.getLogger("novaclient").setLevel(logging
.WARNING
)
200 self
.logger
= logging
.getLogger("ro.vim.openstack")
202 # allow security_groups to be a list or a single string
203 if isinstance(self
.config
.get("security_groups"), str):
204 self
.config
["security_groups"] = [self
.config
["security_groups"]]
206 self
.security_groups_id
= None
208 # ###### VIO Specific Changes #########
209 if self
.vim_type
== "VIO":
210 self
.logger
= logging
.getLogger("ro.vim.vio")
213 self
.logger
.setLevel(getattr(logging
, log_level
))
215 def __getitem__(self
, index
):
216 """Get individuals parameters.
218 if index
== "project_domain_id":
219 return self
.config
.get("project_domain_id")
220 elif index
== "user_domain_id":
221 return self
.config
.get("user_domain_id")
223 return vimconn
.VimConnector
.__getitem
__(self
, index
)
225 def __setitem__(self
, index
, value
):
226 """Set individuals parameters and it is marked as dirty so to force connection reload.
228 if index
== "project_domain_id":
229 self
.config
["project_domain_id"] = value
230 elif index
== "user_domain_id":
231 self
.config
["user_domain_id"] = value
233 vimconn
.VimConnector
.__setitem
__(self
, index
, value
)
235 self
.session
["reload_client"] = True
237 def serialize(self
, value
):
238 """Serialization of python basic types.
240 In the case value is not serializable a message will be logged and a
241 simple representation of the data that cannot be converted back to
244 if isinstance(value
, str):
249 value
, Dumper
=SafeDumper
, default_flow_style
=True, width
=256
251 except yaml
.representer
.RepresenterError
:
253 "The following entity cannot be serialized in YAML:\n\n%s\n\n",
260 def _reload_connection(self
):
261 """Called before any operation, it check if credentials has changed
262 Throw keystoneclient.apiclient.exceptions.AuthorizationFailure
264 # TODO control the timing and possible token timeout, but it seams that python client does this task for us :-)
265 if self
.session
["reload_client"]:
266 if self
.config
.get("APIversion"):
267 self
.api_version3
= (
268 self
.config
["APIversion"] == "v3.3"
269 or self
.config
["APIversion"] == "3"
271 else: # get from ending auth_url that end with v3 or with v2.0
272 self
.api_version3
= self
.url
.endswith("/v3") or self
.url
.endswith(
276 self
.session
["api_version3"] = self
.api_version3
278 if self
.api_version3
:
279 if self
.config
.get("project_domain_id") or self
.config
.get(
280 "project_domain_name"
282 project_domain_id_default
= None
284 project_domain_id_default
= "default"
286 if self
.config
.get("user_domain_id") or self
.config
.get(
289 user_domain_id_default
= None
291 user_domain_id_default
= "default"
295 password
=self
.passwd
,
296 project_name
=self
.tenant_name
,
297 project_id
=self
.tenant_id
,
298 project_domain_id
=self
.config
.get(
299 "project_domain_id", project_domain_id_default
301 user_domain_id
=self
.config
.get(
302 "user_domain_id", user_domain_id_default
304 project_domain_name
=self
.config
.get("project_domain_name"),
305 user_domain_name
=self
.config
.get("user_domain_name"),
311 password
=self
.passwd
,
312 tenant_name
=self
.tenant_name
,
313 tenant_id
=self
.tenant_id
,
316 sess
= session
.Session(auth
=auth
, verify
=self
.verify
)
317 # addedd region_name to keystone, nova, neutron and cinder to support distributed cloud for Wind River
318 # Titanium cloud and StarlingX
319 region_name
= self
.config
.get("region_name")
321 if self
.api_version3
:
322 self
.keystone
= ksClient_v3
.Client(
324 endpoint_type
=self
.endpoint_type
,
325 region_name
=region_name
,
328 self
.keystone
= ksClient_v2
.Client(
329 session
=sess
, endpoint_type
=self
.endpoint_type
332 self
.session
["keystone"] = self
.keystone
333 # In order to enable microversion functionality an explicit microversion must be specified in "config".
334 # This implementation approach is due to the warning message in
335 # https://developer.openstack.org/api-guide/compute/microversions.html
336 # where it is stated that microversion backwards compatibility is not guaranteed and clients should
337 # always require an specific microversion.
338 # To be able to use "device role tagging" functionality define "microversion: 2.32" in datacenter config
339 version
= self
.config
.get("microversion")
344 # addedd region_name to keystone, nova, neutron and cinder to support distributed cloud for Wind River
345 # Titanium cloud and StarlingX
346 self
.nova
= self
.session
["nova"] = nClient
.Client(
349 endpoint_type
=self
.endpoint_type
,
350 region_name
=region_name
,
352 self
.neutron
= self
.session
["neutron"] = neClient
.Client(
355 endpoint_type
=self
.endpoint_type
,
356 region_name
=region_name
,
358 self
.cinder
= self
.session
["cinder"] = cClient
.Client(
361 endpoint_type
=self
.endpoint_type
,
362 region_name
=region_name
,
366 self
.my_tenant_id
= self
.session
["my_tenant_id"] = sess
.get_project_id()
368 self
.logger
.error("Cannot get project_id from session", exc_info
=True)
370 if self
.endpoint_type
== "internalURL":
371 glance_service_id
= self
.keystone
.services
.list(name
="glance")[0].id
372 glance_endpoint
= self
.keystone
.endpoints
.list(
373 glance_service_id
, interface
="internal"
376 glance_endpoint
= None
378 self
.glance
= self
.session
["glance"] = glClient
.Client(
379 2, session
=sess
, endpoint
=glance_endpoint
381 # using version 1 of glance client in new_image()
382 # self.glancev1 = self.session["glancev1"] = glClient.Client("1", session=sess,
383 # endpoint=glance_endpoint)
384 self
.session
["reload_client"] = False
385 self
.persistent_info
["session"] = self
.session
386 # add availablity zone info inside self.persistent_info
387 self
._set
_availablity
_zones
()
388 self
.persistent_info
["availability_zone"] = self
.availability_zone
389 # force to get again security_groups_ids next time they are needed
390 self
.security_groups_id
= None
392 def __net_os2mano(self
, net_list_dict
):
393 """Transform the net openstack format to mano format
394 net_list_dict can be a list of dict or a single dict"""
395 if type(net_list_dict
) is dict:
396 net_list_
= (net_list_dict
,)
397 elif type(net_list_dict
) is list:
398 net_list_
= net_list_dict
400 raise TypeError("param net_list_dict must be a list or a dictionary")
401 for net
in net_list_
:
402 if net
.get("provider:network_type") == "vlan":
405 net
["type"] = "bridge"
407 def __classification_os2mano(self
, class_list_dict
):
408 """Transform the openstack format (Flow Classifier) to mano format
409 (Classification) class_list_dict can be a list of dict or a single dict
411 if isinstance(class_list_dict
, dict):
412 class_list_
= [class_list_dict
]
413 elif isinstance(class_list_dict
, list):
414 class_list_
= class_list_dict
416 raise TypeError("param class_list_dict must be a list or a dictionary")
417 for classification
in class_list_
:
418 id = classification
.pop("id")
419 name
= classification
.pop("name")
420 description
= classification
.pop("description")
421 project_id
= classification
.pop("project_id")
422 tenant_id
= classification
.pop("tenant_id")
423 original_classification
= copy
.deepcopy(classification
)
424 classification
.clear()
425 classification
["ctype"] = "legacy_flow_classifier"
426 classification
["definition"] = original_classification
427 classification
["id"] = id
428 classification
["name"] = name
429 classification
["description"] = description
430 classification
["project_id"] = project_id
431 classification
["tenant_id"] = tenant_id
433 def __sfi_os2mano(self
, sfi_list_dict
):
434 """Transform the openstack format (Port Pair) to mano format (SFI)
435 sfi_list_dict can be a list of dict or a single dict
437 if isinstance(sfi_list_dict
, dict):
438 sfi_list_
= [sfi_list_dict
]
439 elif isinstance(sfi_list_dict
, list):
440 sfi_list_
= sfi_list_dict
442 raise TypeError("param sfi_list_dict must be a list or a dictionary")
444 for sfi
in sfi_list_
:
445 sfi
["ingress_ports"] = []
446 sfi
["egress_ports"] = []
448 if sfi
.get("ingress"):
449 sfi
["ingress_ports"].append(sfi
["ingress"])
451 if sfi
.get("egress"):
452 sfi
["egress_ports"].append(sfi
["egress"])
456 params
= sfi
.get("service_function_parameters")
460 correlation
= params
.get("correlation")
465 sfi
["sfc_encap"] = sfc_encap
466 del sfi
["service_function_parameters"]
468 def __sf_os2mano(self
, sf_list_dict
):
469 """Transform the openstack format (Port Pair Group) to mano format (SF)
470 sf_list_dict can be a list of dict or a single dict
472 if isinstance(sf_list_dict
, dict):
473 sf_list_
= [sf_list_dict
]
474 elif isinstance(sf_list_dict
, list):
475 sf_list_
= sf_list_dict
477 raise TypeError("param sf_list_dict must be a list or a dictionary")
480 del sf
["port_pair_group_parameters"]
481 sf
["sfis"] = sf
["port_pairs"]
484 def __sfp_os2mano(self
, sfp_list_dict
):
485 """Transform the openstack format (Port Chain) to mano format (SFP)
486 sfp_list_dict can be a list of dict or a single dict
488 if isinstance(sfp_list_dict
, dict):
489 sfp_list_
= [sfp_list_dict
]
490 elif isinstance(sfp_list_dict
, list):
491 sfp_list_
= sfp_list_dict
493 raise TypeError("param sfp_list_dict must be a list or a dictionary")
495 for sfp
in sfp_list_
:
496 params
= sfp
.pop("chain_parameters")
500 correlation
= params
.get("correlation")
505 sfp
["sfc_encap"] = sfc_encap
506 sfp
["spi"] = sfp
.pop("chain_id")
507 sfp
["classifications"] = sfp
.pop("flow_classifiers")
508 sfp
["service_functions"] = sfp
.pop("port_pair_groups")
510 # placeholder for now; read TODO note below
511 def _validate_classification(self
, type, definition
):
512 # only legacy_flow_classifier Type is supported at this point
514 # TODO(igordcard): this method should be an abstract method of an
515 # abstract Classification class to be implemented by the specific
516 # Types. Also, abstract vimconnector should call the validation
517 # method before the implemented VIM connectors are called.
519 def _format_exception(self
, exception
):
520 """Transform a keystone, nova, neutron exception into a vimconn exception discovering the cause"""
521 message_error
= str(exception
)
527 neExceptions
.NetworkNotFoundClient
,
528 nvExceptions
.NotFound
,
529 ksExceptions
.NotFound
,
530 gl1Exceptions
.HTTPNotFound
,
533 raise vimconn
.VimConnNotFoundException(
534 type(exception
).__name
__ + ": " + message_error
540 gl1Exceptions
.HTTPException
,
541 gl1Exceptions
.CommunicationError
,
543 ksExceptions
.ConnectionError
,
544 neExceptions
.ConnectionFailed
,
547 if type(exception
).__name
__ == "SSLError":
548 tip
= " (maybe option 'insecure' must be added to the VIM)"
550 raise vimconn
.VimConnConnectionException(
551 "Invalid URL or credentials{}: {}".format(tip
, message_error
)
557 nvExceptions
.BadRequest
,
558 ksExceptions
.BadRequest
,
561 raise vimconn
.VimConnException(
562 type(exception
).__name
__ + ": " + message_error
567 nvExceptions
.ClientException
,
568 ksExceptions
.ClientException
,
569 neExceptions
.NeutronException
,
572 raise vimconn
.VimConnUnexpectedResponse(
573 type(exception
).__name
__ + ": " + message_error
575 elif isinstance(exception
, nvExceptions
.Conflict
):
576 raise vimconn
.VimConnConflictException(
577 type(exception
).__name
__ + ": " + message_error
579 elif isinstance(exception
, vimconn
.VimConnException
):
582 self
.logger
.error("General Exception " + message_error
, exc_info
=True)
584 raise vimconn
.VimConnConnectionException(
585 type(exception
).__name
__ + ": " + message_error
588 def _get_ids_from_name(self
):
590 Obtain ids from name of tenant and security_groups. Store at self .security_groups_id"
593 # get tenant_id if only tenant_name is supplied
594 self
._reload
_connection
()
596 if not self
.my_tenant_id
:
597 raise vimconn
.VimConnConnectionException(
598 "Error getting tenant information from name={} id={}".format(
599 self
.tenant_name
, self
.tenant_id
603 if self
.config
.get("security_groups") and not self
.security_groups_id
:
604 # convert from name to id
605 neutron_sg_list
= self
.neutron
.list_security_groups(
606 tenant_id
=self
.my_tenant_id
609 self
.security_groups_id
= []
610 for sg
in self
.config
.get("security_groups"):
611 for neutron_sg
in neutron_sg_list
:
612 if sg
in (neutron_sg
["id"], neutron_sg
["name"]):
613 self
.security_groups_id
.append(neutron_sg
["id"])
616 self
.security_groups_id
= None
618 raise vimconn
.VimConnConnectionException(
619 "Not found security group {} for this tenant".format(sg
)
622 def check_vim_connectivity(self
):
623 # just get network list to check connectivity and credentials
624 self
.get_network_list(filter_dict
={})
626 def get_tenant_list(self
, filter_dict
={}):
627 """Obtain tenants of VIM
628 filter_dict can contain the following keys:
629 name: filter by tenant name
630 id: filter by tenant uuid/id
632 Returns the tenant list of dictionaries: [{'name':'<name>, 'id':'<id>, ...}, ...]
634 self
.logger
.debug("Getting tenants from VIM filter: '%s'", str(filter_dict
))
637 self
._reload
_connection
()
639 if self
.api_version3
:
640 project_class_list
= self
.keystone
.projects
.list(
641 name
=filter_dict
.get("name")
644 project_class_list
= self
.keystone
.tenants
.findall(**filter_dict
)
648 for project
in project_class_list
:
649 if filter_dict
.get("id") and filter_dict
["id"] != project
.id:
652 project_list
.append(project
.to_dict())
656 ksExceptions
.ConnectionError
,
657 ksExceptions
.ClientException
,
660 self
._format
_exception
(e
)
662 def new_tenant(self
, tenant_name
, tenant_description
):
663 """Adds a new tenant to openstack VIM. Returns the tenant identifier"""
664 self
.logger
.debug("Adding a new tenant name: %s", tenant_name
)
667 self
._reload
_connection
()
669 if self
.api_version3
:
670 project
= self
.keystone
.projects
.create(
672 self
.config
.get("project_domain_id", "default"),
673 description
=tenant_description
,
677 project
= self
.keystone
.tenants
.create(tenant_name
, tenant_description
)
681 ksExceptions
.ConnectionError
,
682 ksExceptions
.ClientException
,
683 ksExceptions
.BadRequest
,
686 self
._format
_exception
(e
)
688 def delete_tenant(self
, tenant_id
):
689 """Delete a tenant from openstack VIM. Returns the old tenant identifier"""
690 self
.logger
.debug("Deleting tenant %s from VIM", tenant_id
)
693 self
._reload
_connection
()
695 if self
.api_version3
:
696 self
.keystone
.projects
.delete(tenant_id
)
698 self
.keystone
.tenants
.delete(tenant_id
)
702 ksExceptions
.ConnectionError
,
703 ksExceptions
.ClientException
,
704 ksExceptions
.NotFound
,
707 self
._format
_exception
(e
)
715 provider_network_profile
=None,
717 """Adds a tenant network to VIM
719 'net_name': name of the network
721 'bridge': overlay isolated network
722 'data': underlay E-LAN network for Passthrough and SRIOV interfaces
723 'ptp': underlay E-LINE network for Passthrough and SRIOV interfaces.
724 'ip_profile': is a dict containing the IP parameters of the network
725 'ip_version': can be "IPv4" or "IPv6" (Currently only IPv4 is implemented)
726 'subnet_address': ip_prefix_schema, that is X.X.X.X/Y
727 'gateway_address': (Optional) ip_schema, that is X.X.X.X
728 'dns_address': (Optional) comma separated list of ip_schema, e.g. X.X.X.X[,X,X,X,X]
729 'dhcp_enabled': True or False
730 'dhcp_start_address': ip_schema, first IP to grant
731 'dhcp_count': number of IPs to grant.
732 'shared': if this network can be seen/use by other tenants/organization
733 'provider_network_profile': (optional) contains {segmentation-id: vlan, network-type: vlan|vxlan,
734 physical-network: physnet-label}
735 Returns a tuple with the network identifier and created_items, or raises an exception on error
736 created_items can be None or a dictionary where this method can include key-values that will be passed to
737 the method delete_network. Can be used to store created segments, created l2gw connections, etc.
738 Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
742 "Adding a new network to VIM name '%s', type '%s'", net_name
, net_type
744 # self.logger.debug(">>>>>>>>>>>>>>>>>> IP profile %s", str(ip_profile))
749 if provider_network_profile
:
750 vlan
= provider_network_profile
.get("segmentation-id")
754 self
._reload
_connection
()
755 network_dict
= {"name": net_name
, "admin_state_up": True}
757 if net_type
in ("data", "ptp") or provider_network_profile
:
758 provider_physical_network
= None
760 if provider_network_profile
and provider_network_profile
.get(
763 provider_physical_network
= provider_network_profile
.get(
767 # provider-network must be one of the dataplane_physcial_netowrk if this is a list. If it is string
768 # or not declared, just ignore the checking
771 self
.config
.get("dataplane_physical_net"), (tuple, list)
773 and provider_physical_network
774 not in self
.config
["dataplane_physical_net"]
776 raise vimconn
.VimConnConflictException(
777 "Invalid parameter 'provider-network:physical-network' "
778 "for network creation. '{}' is not one of the declared "
779 "list at VIM_config:dataplane_physical_net".format(
780 provider_physical_network
784 # use the default dataplane_physical_net
785 if not provider_physical_network
:
786 provider_physical_network
= self
.config
.get(
787 "dataplane_physical_net"
790 # if it is non empty list, use the first value. If it is a string use the value directly
792 isinstance(provider_physical_network
, (tuple, list))
793 and provider_physical_network
795 provider_physical_network
= provider_physical_network
[0]
797 if not provider_physical_network
:
798 raise vimconn
.VimConnConflictException(
799 "missing information needed for underlay networks. Provide "
800 "'dataplane_physical_net' configuration at VIM or use the NS "
801 "instantiation parameter 'provider-network.physical-network'"
805 if not self
.config
.get("multisegment_support"):
807 "provider:physical_network"
808 ] = provider_physical_network
811 provider_network_profile
812 and "network-type" in provider_network_profile
815 "provider:network_type"
816 ] = provider_network_profile
["network-type"]
818 network_dict
["provider:network_type"] = self
.config
.get(
819 "dataplane_network_type", "vlan"
823 network_dict
["provider:segmentation_id"] = vlan
828 "provider:physical_network": "",
829 "provider:network_type": "vxlan",
831 segment_list
.append(segment1_dict
)
833 "provider:physical_network": provider_physical_network
,
834 "provider:network_type": "vlan",
838 segment2_dict
["provider:segmentation_id"] = vlan
839 elif self
.config
.get("multisegment_vlan_range"):
840 vlanID
= self
._generate
_multisegment
_vlanID
()
841 segment2_dict
["provider:segmentation_id"] = vlanID
844 # raise vimconn.VimConnConflictException(
845 # "You must provide "multisegment_vlan_range" at config dict before creating a multisegment
847 segment_list
.append(segment2_dict
)
848 network_dict
["segments"] = segment_list
850 # VIO Specific Changes. It needs a concrete VLAN
851 if self
.vim_type
== "VIO" and vlan
is None:
852 if self
.config
.get("dataplane_net_vlan_range") is None:
853 raise vimconn
.VimConnConflictException(
854 "You must provide 'dataplane_net_vlan_range' in format "
855 "[start_ID - end_ID] at VIM_config for creating underlay "
859 network_dict
["provider:segmentation_id"] = self
._generate
_vlanID
()
861 network_dict
["shared"] = shared
863 if self
.config
.get("disable_network_port_security"):
864 network_dict
["port_security_enabled"] = False
866 if self
.config
.get("neutron_availability_zone_hints"):
867 hints
= self
.config
.get("neutron_availability_zone_hints")
869 if isinstance(hints
, str):
872 network_dict
["availability_zone_hints"] = hints
874 new_net
= self
.neutron
.create_network({"network": network_dict
})
876 # create subnetwork, even if there is no profile
881 if not ip_profile
.get("subnet_address"):
882 # Fake subnet is required
883 subnet_rand
= random
.randint(0, 255)
884 ip_profile
["subnet_address"] = "192.168.{}.0/24".format(subnet_rand
)
886 if "ip_version" not in ip_profile
:
887 ip_profile
["ip_version"] = "IPv4"
890 "name": net_name
+ "-subnet",
891 "network_id": new_net
["network"]["id"],
892 "ip_version": 4 if ip_profile
["ip_version"] == "IPv4" else 6,
893 "cidr": ip_profile
["subnet_address"],
896 # Gateway should be set to None if not needed. Otherwise openstack assigns one by default
897 if ip_profile
.get("gateway_address"):
898 subnet
["gateway_ip"] = ip_profile
["gateway_address"]
900 subnet
["gateway_ip"] = None
902 if ip_profile
.get("dns_address"):
903 subnet
["dns_nameservers"] = ip_profile
["dns_address"].split(";")
905 if "dhcp_enabled" in ip_profile
:
906 subnet
["enable_dhcp"] = (
908 if ip_profile
["dhcp_enabled"] == "false"
909 or ip_profile
["dhcp_enabled"] is False
913 if ip_profile
.get("dhcp_start_address"):
914 subnet
["allocation_pools"] = []
915 subnet
["allocation_pools"].append(dict())
916 subnet
["allocation_pools"][0]["start"] = ip_profile
[
920 if ip_profile
.get("dhcp_count"):
921 # parts = ip_profile["dhcp_start_address"].split(".")
922 # ip_int = (int(parts[0]) << 24) + (int(parts[1]) << 16) + (int(parts[2]) << 8) + int(parts[3])
923 ip_int
= int(netaddr
.IPAddress(ip_profile
["dhcp_start_address"]))
924 ip_int
+= ip_profile
["dhcp_count"] - 1
925 ip_str
= str(netaddr
.IPAddress(ip_int
))
926 subnet
["allocation_pools"][0]["end"] = ip_str
928 # self.logger.debug(">>>>>>>>>>>>>>>>>> Subnet: %s", str(subnet))
929 self
.neutron
.create_subnet({"subnet": subnet
})
931 if net_type
== "data" and self
.config
.get("multisegment_support"):
932 if self
.config
.get("l2gw_support"):
933 l2gw_list
= self
.neutron
.list_l2_gateways().get("l2_gateways", ())
934 for l2gw
in l2gw_list
:
936 "l2_gateway_id": l2gw
["id"],
937 "network_id": new_net
["network"]["id"],
938 "segmentation_id": str(vlanID
),
940 new_l2gw_conn
= self
.neutron
.create_l2_gateway_connection(
941 {"l2_gateway_connection": l2gw_conn
}
945 + str(new_l2gw_conn
["l2_gateway_connection"]["id"])
948 return new_net
["network"]["id"], created_items
949 except Exception as e
:
950 # delete l2gw connections (if any) before deleting the network
951 for k
, v
in created_items
.items():
952 if not v
: # skip already deleted
956 k_item
, _
, k_id
= k
.partition(":")
958 if k_item
== "l2gwconn":
959 self
.neutron
.delete_l2_gateway_connection(k_id
)
960 except Exception as e2
:
962 "Error deleting l2 gateway connection: {}: {}".format(
963 type(e2
).__name
__, e2
968 self
.neutron
.delete_network(new_net
["network"]["id"])
970 self
._format
_exception
(e
)
972 def get_network_list(self
, filter_dict
={}):
973 """Obtain tenant networks of VIM
979 admin_state_up: boolean
981 Returns the network list of dictionaries
983 self
.logger
.debug("Getting network from VIM filter: '%s'", str(filter_dict
))
986 self
._reload
_connection
()
987 filter_dict_os
= filter_dict
.copy()
989 if self
.api_version3
and "tenant_id" in filter_dict_os
:
991 filter_dict_os
["project_id"] = filter_dict_os
.pop("tenant_id")
993 net_dict
= self
.neutron
.list_networks(**filter_dict_os
)
994 net_list
= net_dict
["networks"]
995 self
.__net
_os
2mano
(net_list
)
999 neExceptions
.ConnectionFailed
,
1000 ksExceptions
.ClientException
,
1001 neExceptions
.NeutronException
,
1004 self
._format
_exception
(e
)
1006 def get_network(self
, net_id
):
1007 """Obtain details of network from VIM
1008 Returns the network information from a network id"""
1009 self
.logger
.debug(" Getting tenant network %s from VIM", net_id
)
1010 filter_dict
= {"id": net_id
}
1011 net_list
= self
.get_network_list(filter_dict
)
1013 if len(net_list
) == 0:
1014 raise vimconn
.VimConnNotFoundException(
1015 "Network '{}' not found".format(net_id
)
1017 elif len(net_list
) > 1:
1018 raise vimconn
.VimConnConflictException(
1019 "Found more than one network with this criteria"
1024 for subnet_id
in net
.get("subnets", ()):
1026 subnet
= self
.neutron
.show_subnet(subnet_id
)
1027 except Exception as e
:
1029 "osconnector.get_network(): Error getting subnet %s %s"
1032 subnet
= {"id": subnet_id
, "fault": str(e
)}
1034 subnets
.append(subnet
)
1036 net
["subnets"] = subnets
1037 net
["encapsulation"] = net
.get("provider:network_type")
1038 net
["encapsulation_type"] = net
.get("provider:network_type")
1039 net
["segmentation_id"] = net
.get("provider:segmentation_id")
1040 net
["encapsulation_id"] = net
.get("provider:segmentation_id")
1044 def delete_network(self
, net_id
, created_items
=None):
1046 Removes a tenant network from VIM and its associated elements
1047 :param net_id: VIM identifier of the network, provided by method new_network
1048 :param created_items: dictionary with extra items to be deleted. provided by method new_network
1049 Returns the network identifier or raises an exception upon error or when network is not found
1051 self
.logger
.debug("Deleting network '%s' from VIM", net_id
)
1053 if created_items
is None:
1057 self
._reload
_connection
()
1058 # delete l2gw connections (if any) before deleting the network
1059 for k
, v
in created_items
.items():
1060 if not v
: # skip already deleted
1064 k_item
, _
, k_id
= k
.partition(":")
1065 if k_item
== "l2gwconn":
1066 self
.neutron
.delete_l2_gateway_connection(k_id
)
1067 except Exception as e
:
1069 "Error deleting l2 gateway connection: {}: {}".format(
1074 # delete VM ports attached to this networks before the network
1075 ports
= self
.neutron
.list_ports(network_id
=net_id
)
1076 for p
in ports
["ports"]:
1078 self
.neutron
.delete_port(p
["id"])
1079 except Exception as e
:
1080 self
.logger
.error("Error deleting port %s: %s", p
["id"], str(e
))
1082 self
.neutron
.delete_network(net_id
)
1086 neExceptions
.ConnectionFailed
,
1087 neExceptions
.NetworkNotFoundClient
,
1088 neExceptions
.NeutronException
,
1089 ksExceptions
.ClientException
,
1090 neExceptions
.NeutronException
,
1093 self
._format
_exception
(e
)
def refresh_nets_status(self, net_list):
    """Get the status of the networks
    Params: the list of network identifiers
    Returns a dictionary with:
        net_id: #VIM id of this network
        status: #Mandatory. Text with one of:
            # DELETED (not found at vim)
            # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
            # OTHER (Vim reported other status not understood)
            # ERROR (VIM indicates an ERROR status)
            # ACTIVE, INACTIVE, DOWN (admin down),
            # BUILD (on building process)
        error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
        vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
    """
    results = {}

    for net_id in net_list:
        entry = {}

        try:
            vim_net = self.get_network(net_id)

            # Translate the VIM vocabulary into the MANO one; unknown states become OTHER.
            if vim_net["status"] in netStatus2manoFormat:
                entry["status"] = netStatus2manoFormat[vim_net["status"]]
            else:
                entry["status"] = "OTHER"
                entry["error_msg"] = "VIM status reported " + vim_net["status"]

            # Administratively-disabled networks are reported as DOWN.
            if entry["status"] == "ACTIVE" and not vim_net["admin_state_up"]:
                entry["status"] = "DOWN"

            entry["vim_info"] = self.serialize(vim_net)

            if vim_net.get("fault"):  # TODO
                entry["error_msg"] = str(vim_net["fault"])
        except vimconn.VimConnNotFoundException as e:
            self.logger.error("Exception getting net status: %s", str(e))
            entry["status"] = "DELETED"
            entry["error_msg"] = str(e)
        except vimconn.VimConnException as e:
            self.logger.error("Exception getting net status: %s", str(e))
            entry["status"] = "VIM_ERROR"
            entry["error_msg"] = str(e)

        results[net_id] = entry

    return results
def get_flavor(self, flavor_id):
    """Obtain flavor details from the VIM. Returns the flavor dict details"""
    self.logger.debug("Getting flavor '%s'", flavor_id)

    try:
        self._reload_connection()
        # Look the flavor up by its VIM identifier.
        matched = self.nova.flavors.find(id=flavor_id)

        # TODO parse input and translate to VIM format (openmano_schemas.new_vminstance_response_schema)
        return matched.to_dict()
    except (
        nvExceptions.NotFound,
        nvExceptions.ClientException,
        ksExceptions.ClientException,
        ConnectionError,
    ) as e:
        self._format_exception(e)
def get_flavor_id_from_data(self, flavor_dict):
    """Obtain flavor id that match the flavor description
    Returns the flavor_id or raises a vimconnNotFoundException
    flavor_dict: contains the required ram, vcpus, disk
    If 'use_existing_flavors' is set to True at config, the closer flavor that provides same or more ram, vcpus
    and disk is returned. Otherwise a flavor with exactly same ram, vcpus and disk is returned or a
    vimconnNotFoundException is raised
    """
    exact_match = False if self.config.get("use_existing_flavors") else True

    try:
        self._reload_connection()
        flavor_candidate_id = None
        # FIX: the sentinel must have the same arity as flavor_target/flavor_data
        # (ram, vcpus, disk, ephemeral, swap); a shorter tuple skews the
        # lexicographic "closest flavor" comparison below.
        flavor_candidate_data = (10000, 10000, 10000, 10000, 10000)
        flavor_target = (
            flavor_dict["ram"],
            flavor_dict["vcpus"],
            flavor_dict["disk"],
            flavor_dict.get("ephemeral", 0),
            flavor_dict.get("swap", 0),
        )
        # numa=None
        extended = flavor_dict.get("extended", {})

        if extended:
            # TODO
            raise vimconn.VimConnNotFoundException(
                "Flavor with EPA still not implemented"
            )
            # if len(numas) > 1:
            #     raise vimconn.VimConnNotFoundException("Cannot find any flavor with more than one numa")
            # numas = extended.get("numas")

        for flavor in self.nova.flavors.list():
            epa = flavor.get_keys()

            if epa:
                # Flavors with EPA extra-specs are not considered by plain matching.
                continue
                # TODO

            flavor_data = (
                flavor.ram,
                flavor.vcpus,
                flavor.disk,
                flavor.ephemeral,
                flavor.swap if isinstance(flavor.swap, int) else 0,
            )

            if flavor_data == flavor_target:
                return flavor.id
            elif (
                not exact_match
                and flavor_target < flavor_data < flavor_candidate_data
            ):
                # Remember the smallest flavor that still covers the request.
                flavor_candidate_id = flavor.id
                flavor_candidate_data = flavor_data

        if not exact_match and flavor_candidate_id:
            return flavor_candidate_id

        raise vimconn.VimConnNotFoundException(
            "Cannot find any flavor matching '{}'".format(flavor_dict)
        )
    except (
        nvExceptions.NotFound,
        nvExceptions.ClientException,
        ksExceptions.ClientException,
        ConnectionError,
    ) as e:
        self._format_exception(e)
1231 def process_resource_quota(quota
: dict, prefix
: str, extra_specs
: dict) -> None:
1232 """Process resource quota and fill up extra_specs.
1234 quota (dict): Keeping the quota of resurces
1236 extra_specs (dict) Dict to be filled to be used during flavor creation
1239 if "limit" in quota
:
1240 extra_specs
["quota:" + prefix
+ "_limit"] = quota
["limit"]
1242 if "reserve" in quota
:
1243 extra_specs
["quota:" + prefix
+ "_reservation"] = quota
["reserve"]
1245 if "shares" in quota
:
1246 extra_specs
["quota:" + prefix
+ "_shares_level"] = "custom"
1247 extra_specs
["quota:" + prefix
+ "_shares_share"] = quota
["shares"]
1250 def process_numa_memory(
1251 numa
: dict, node_id
: Optional
[int], extra_specs
: dict
1253 """Set the memory in extra_specs.
1255 numa (dict): A dictionary which includes numa information
1256 node_id (int): ID of numa node
1257 extra_specs (dict): To be filled.
1260 if not numa
.get("memory"):
1262 memory_mb
= numa
["memory"] * 1024
1263 memory
= "hw:numa_mem.{}".format(node_id
)
1264 extra_specs
[memory
] = int(memory_mb
)
1267 def process_numa_vcpu(numa
: dict, node_id
: int, extra_specs
: dict) -> None:
1268 """Set the cpu in extra_specs.
1270 numa (dict): A dictionary which includes numa information
1271 node_id (int): ID of numa node
1272 extra_specs (dict): To be filled.
1275 if not numa
.get("vcpu"):
1278 cpu
= "hw:numa_cpus.{}".format(node_id
)
1279 vcpu
= ",".join(map(str, vcpu
))
1280 extra_specs
[cpu
] = vcpu
1283 def process_numa_paired_threads(numa
: dict, extra_specs
: dict) -> Optional
[int]:
1284 """Fill up extra_specs if numa has paired-threads.
1286 numa (dict): A dictionary which includes numa information
1287 extra_specs (dict): To be filled.
1290 threads (int) Number of virtual cpus
1293 if not numa
.get("paired-threads"):
1296 # cpu_thread_policy "require" implies that compute node must have an STM architecture
1297 threads
= numa
["paired-threads"] * 2
1298 extra_specs
["hw:cpu_thread_policy"] = "require"
1299 extra_specs
["hw:cpu_policy"] = "dedicated"
1303 def process_numa_cores(numa
: dict, extra_specs
: dict) -> Optional
[int]:
1304 """Fill up extra_specs if numa has cores.
1306 numa (dict): A dictionary which includes numa information
1307 extra_specs (dict): To be filled.
1310 cores (int) Number of virtual cpus
1313 # cpu_thread_policy "isolate" implies that the host must not have an SMT
1314 # architecture, or a non-SMT architecture will be emulated
1315 if not numa
.get("cores"):
1317 cores
= numa
["cores"]
1318 extra_specs
["hw:cpu_thread_policy"] = "isolate"
1319 extra_specs
["hw:cpu_policy"] = "dedicated"
1323 def process_numa_threads(numa
: dict, extra_specs
: dict) -> Optional
[int]:
1324 """Fill up extra_specs if numa has threads.
1326 numa (dict): A dictionary which includes numa information
1327 extra_specs (dict): To be filled.
1330 threads (int) Number of virtual cpus
1333 # cpu_thread_policy "prefer" implies that the host may or may not have an SMT architecture
1334 if not numa
.get("threads"):
1336 threads
= numa
["threads"]
1337 extra_specs
["hw:cpu_thread_policy"] = "prefer"
1338 extra_specs
["hw:cpu_policy"] = "dedicated"
def _process_numa_parameters_of_flavor(
    self, numas: List, extra_specs: Dict
) -> None:
    """Process numa parameters and fill up extra_specs.

    Args:
        numas (list): List of dictionary which includes numa information
        extra_specs (dict): To be filled.
    """
    numa_nodes = len(numas)
    extra_specs["hw:numa_nodes"] = str(numa_nodes)
    cpu_cores, cpu_threads = 0, 0

    # VIO needs extra vmware-specific hints besides the standard extra-specs.
    if self.vim_type == "VIO":
        extra_specs["vmware:extra_config"] = '{"numa.nodeAffinity":"0"}'
        extra_specs["vmware:latency_sensitivity_level"] = "high"

    for numa in numas:
        if "id" in numa:
            node_id = numa["id"]
            # overwrite ram and vcpus
            # check if key "memory" is present in numa else use ram value at flavor
            self.process_numa_memory(numa, node_id, extra_specs)
            self.process_numa_vcpu(numa, node_id, extra_specs)

        # See for reference: https://specs.openstack.org/openstack/nova-specs/specs/mitaka/implemented/virt-driver-cpu-thread-pinning.html
        extra_specs["hw:cpu_sockets"] = str(numa_nodes)

        # Exactly one of paired-threads / cores / threads applies per numa entry.
        if "paired-threads" in numa:
            threads = self.process_numa_paired_threads(numa, extra_specs)
            cpu_threads += threads

        elif "cores" in numa:
            cores = self.process_numa_cores(numa, extra_specs)
            cpu_cores += cores

        elif "threads" in numa:
            threads = self.process_numa_threads(numa, extra_specs)
            cpu_threads += threads

    if cpu_cores:
        extra_specs["hw:cpu_cores"] = str(cpu_cores)

    if cpu_threads:
        extra_specs["hw:cpu_threads"] = str(cpu_threads)
def _change_flavor_name(
    self, name: str, name_suffix: int, flavor_data: dict
) -> str:
    """Change the flavor name if the name already exists.

    Args:
        name (str): Flavor name to be checked
        name_suffix (int): Suffix to be appended to name
        flavor_data (dict): Flavor dict

    Returns:
        name (str): New flavor name to be used
    """
    # Openstack rejects duplicate flavor names, so keep bumping the suffix
    # until the candidate name is unused.
    existing_names = [f.name for f in self.nova.flavors.list()]

    while name in existing_names:
        name_suffix += 1
        name = flavor_data["name"] + "-" + str(name_suffix)

    return name
def _process_extended_config_of_flavor(
    self, extended: dict, extra_specs: dict
) -> None:
    """Process the extended dict to fill up extra_specs.

    Args:
        extended (dict): Keeping the extra specification of flavor
        extra_specs (dict) Dict to be filled to be used during flavor creation
    """
    # Descriptor quota keys -> extra-spec resource prefixes.
    quotas = {
        "cpu-quota": "cpu",
        "mem-quota": "memory",
        "vif-quota": "vif",
        "disk-io-quota": "disk_io",
    }

    # Descriptor mempage-size values -> nova hw:mem_page_size values.
    page_sizes = {
        "LARGE": "large",
        "SMALL": "small",
        "SIZE_2MB": "2MB",
        "SIZE_1GB": "1GB",
        "PREFER_LARGE": "any",
    }

    # Descriptor policy keys -> nova extra-spec keys.
    policies = {
        "cpu-pinning-policy": "hw:cpu_policy",
        "cpu-thread-pinning-policy": "hw:cpu_thread_policy",
        "mem-policy": "hw:numa_mempolicy",
    }

    numas = extended.get("numas")
    if numas:
        self._process_numa_parameters_of_flavor(numas, extra_specs)

    for quota, item in quotas.items():
        if quota in extended.keys():
            self.process_resource_quota(extended.get(quota), item, extra_specs)

    # Set the mempage size as specified in the descriptor
    if extended.get("mempage-size"):
        if extended["mempage-size"] in page_sizes.keys():
            extra_specs["hw:mem_page_size"] = page_sizes[extended["mempage-size"]]
        else:
            # Normally, validations in NBI should not allow to this condition.
            self.logger.debug(
                "Invalid mempage-size %s. Will be ignored",
                extended.get("mempage-size"),
            )

    for policy, hw_policy in policies.items():
        if extended.get(policy):
            extra_specs[hw_policy] = extended[policy].lower()
1466 def _get_flavor_details(flavor_data
: dict) -> Tuple
:
1467 """Returns the details of flavor
1469 flavor_data (dict): Dictionary that includes required flavor details
1472 ram, vcpus, extra_specs, extended (tuple): Main items of required flavor
1476 flavor_data
.get("ram", 64),
1477 flavor_data
.get("vcpus", 1),
1479 flavor_data
.get("extended"),
def new_flavor(self, flavor_data: dict, change_name_if_used: bool = True) -> str:
    """Adds a tenant flavor to openstack VIM.
    if change_name_if_used is True, it will change name in case of conflict,
    because it is not supported name repetition.

    Args:
        flavor_data (dict): Flavor details to be processed
        change_name_if_used (bool): Change name in case of conflict

    Returns:
        flavor_id (str): flavor identifier
    """
    self.logger.debug("Adding flavor '%s'", str(flavor_data))
    retry = 0
    max_retries = 3
    name_suffix = 0
    name = flavor_data["name"]

    while retry < max_retries:
        retry += 1
        try:
            self._reload_connection()

            if change_name_if_used:
                name = self._change_flavor_name(name, name_suffix, flavor_data)

            ram, vcpus, extra_specs, extended = self._get_flavor_details(
                flavor_data
            )
            if extended:
                self._process_extended_config_of_flavor(extended, extra_specs)

            # Create the flavor in nova with the basic dimensions.
            new_flavor = self.nova.flavors.create(
                name=name,
                ram=ram,
                vcpus=vcpus,
                disk=flavor_data.get("disk", 0),
                ephemeral=flavor_data.get("ephemeral", 0),
                swap=flavor_data.get("swap", 0),
                is_public=flavor_data.get("is_public", True),
            )

            # Attach the EPA/quota extra-specs, if any were produced.
            if extra_specs:
                new_flavor.set_keys(extra_specs)

            return new_flavor.id

        except nvExceptions.Conflict as e:
            # Name collision: retry with a new suffixed name if allowed.
            if change_name_if_used and retry < max_retries:
                continue

            self._format_exception(e)
        except (
            ksExceptions.ClientException,
            nvExceptions.ClientException,
            ConnectionError,
            KeyError,
        ) as e:
            self._format_exception(e)
def delete_flavor(self, flavor_id):
    """Deletes a tenant flavor from openstack VIM. Returns the old flavor_id"""
    try:
        self._reload_connection()
        self.nova.flavors.delete(flavor_id)

        return flavor_id
    # except nvExceptions.BadRequest as e:
    except (
        nvExceptions.NotFound,
        ksExceptions.ClientException,
        nvExceptions.ClientException,
        ConnectionError,
    ) as e:
        self._format_exception(e)
def new_image(self, image_dict):
    """
    Adds a tenant image to VIM. imge_dict is a dictionary with:
        name: name
        disk_format: qcow2, vhd, vmdk, raw (by default), ...
        location: path or URI
        public: "yes" or "no"
        metadata: metadata of the image
    Returns the image_id
    """
    retry = 0
    max_retries = 3

    while retry < max_retries:
        retry += 1
        try:
            self._reload_connection()

            # determine format http://docs.openstack.org/developer/glance/formats.html
            if "disk_format" in image_dict:
                disk_format = image_dict["disk_format"]
            else:  # autodiscover based on extension
                if image_dict["location"].endswith(".qcow2"):
                    disk_format = "qcow2"
                elif image_dict["location"].endswith(".vhd"):
                    disk_format = "vhd"
                elif image_dict["location"].endswith(".vmdk"):
                    disk_format = "vmdk"
                elif image_dict["location"].endswith(".vdi"):
                    disk_format = "vdi"
                elif image_dict["location"].endswith(".iso"):
                    disk_format = "iso"
                elif image_dict["location"].endswith(".aki"):
                    disk_format = "aki"
                elif image_dict["location"].endswith(".ari"):
                    disk_format = "ari"
                elif image_dict["location"].endswith(".ami"):
                    disk_format = "ami"
                else:
                    disk_format = "raw"

            self.logger.debug(
                "new_image: '%s' loading from '%s'",
                image_dict["name"],
                image_dict["location"],
            )
            if self.vim_type == "VIO":
                container_format = "bare"
                if "container_format" in image_dict:
                    container_format = image_dict["container_format"]

                new_image = self.glance.images.create(
                    name=image_dict["name"],
                    container_format=container_format,
                    disk_format=disk_format,
                )
            else:
                new_image = self.glance.images.create(name=image_dict["name"])

            if image_dict["location"].startswith("http"):
                # TODO there is not a method to direct download. It must be downloaded locally with requests
                raise vimconn.VimConnNotImplemented("Cannot create image from URL")
            else:  # local path
                with open(image_dict["location"]) as fimage:
                    self.glance.images.upload(new_image.id, fimage)
                    # new_image = self.glancev1.images.create(name=image_dict["name"], is_public=
                    # image_dict.get("public","yes")=="yes",
                    #    container_format="bare", data=fimage, disk_format=disk_format)

            # FIX: "metadata" may be absent; default to an empty dict so the
            # location keys below do not fail with TypeError on None.
            metadata_to_load = image_dict.get("metadata") or {}

            # TODO location is a reserved word for current openstack versions. fixed for VIO please check
            # for openstack versions
            if self.vim_type == "VIO":
                metadata_to_load["upload_location"] = image_dict["location"]
            else:
                metadata_to_load["location"] = image_dict["location"]

            self.glance.images.update(new_image.id, **metadata_to_load)

            return new_image.id
        except (
            nvExceptions.Conflict,
            ksExceptions.ClientException,
            nvExceptions.ClientException,
        ) as e:
            self._format_exception(e)
        except (
            HTTPException,
            gl1Exceptions.HTTPException,
            gl1Exceptions.CommunicationError,
            ConnectionError,
        ) as e:
            # Transient connectivity error: retry until the limit is reached.
            if retry == max_retries:
                continue

            self._format_exception(e)
        except IOError as e:  # can not open the file
            raise vimconn.VimConnConnectionException(
                "{}: {} for {}".format(type(e).__name__, e, image_dict["location"]),
                http_code=vimconn.HTTP_Bad_Request,
            )
def delete_image(self, image_id):
    """Deletes a tenant image from openstack VIM. Returns the old id"""
    try:
        self._reload_connection()
        self.glance.images.delete(image_id)

        return image_id
    except (
        nvExceptions.NotFound,
        ksExceptions.ClientException,
        nvExceptions.ClientException,
        gl1Exceptions.CommunicationError,
        gl1Exceptions.HTTPNotFound,
        ConnectionError,
    ) as e:  # TODO remove
        self._format_exception(e)
def get_image_id_from_path(self, path):
    """Get the image id from image path in the VIM database. Returns the image_id"""
    try:
        self._reload_connection()

        # Scan all glance images for one whose stored location matches.
        for image in self.glance.images.list():
            if image.metadata.get("location") == path:
                return image.id

        raise vimconn.VimConnNotFoundException(
            "image with location '{}' not found".format(path)
        )
    except (
        ksExceptions.ClientException,
        nvExceptions.ClientException,
        gl1Exceptions.CommunicationError,
        ConnectionError,
    ) as e:
        self._format_exception(e)
def get_image_list(self, filter_dict=None):
    """Obtain tenant images from VIM
    Filter_dict can be:
        id: image id
        name: image name
        checksum: image checksum
    Returns the image list of dictionaries:
        [{<the fields at Filter_dict plus some VIM specific>}, ...]
    """
    # FIX: avoid the mutable-default-argument pitfall; a shared {} default
    # would be the same object across all calls. None keeps the call
    # signature backward-compatible.
    if filter_dict is None:
        filter_dict = {}

    self.logger.debug("Getting image list from VIM filter: '%s'", str(filter_dict))

    try:
        self._reload_connection()
        # filter_dict_os = filter_dict.copy()
        # First we filter by the available filter fields: name, id. The others are removed.
        image_list = self.glance.images.list()
        filtered_list = []

        for image in image_list:
            try:
                if filter_dict.get("name") and image["name"] != filter_dict["name"]:
                    continue

                if filter_dict.get("id") and image["id"] != filter_dict["id"]:
                    continue

                if (
                    filter_dict.get("checksum")
                    and image["checksum"] != filter_dict["checksum"]
                ):
                    continue

                filtered_list.append(image.copy())
            except gl1Exceptions.HTTPNotFound:
                pass

        return filtered_list
    except (
        ksExceptions.ClientException,
        nvExceptions.ClientException,
        gl1Exceptions.CommunicationError,
        ConnectionError,
    ) as e:
        self._format_exception(e)
def __wait_for_vm(self, vm_id, status):
    """wait until vm is in the desired status and return True.
    If the VM gets in ERROR status, return false.
    If the timeout is reached generate an exception"""
    elapsed_time = 0

    # Poll nova every 5 seconds until the target status, ERROR, or timeout.
    while elapsed_time < server_timeout:
        vm_status = self.nova.servers.get(vm_id).status

        if vm_status == status:
            return True

        if vm_status == "ERROR":
            return False

        time.sleep(5)
        elapsed_time += 5

    # if we exceeded the timeout rollback
    if elapsed_time >= server_timeout:
        raise vimconn.VimConnException(
            "Timeout waiting for instance " + vm_id + " to get " + status,
            http_code=vimconn.HTTP_Request_Timeout,
        )
def _get_openstack_availablity_zones(self):
    """
    Get from openstack availability zones available
    :return:
    """
    try:
        # The "internal" zone is infrastructure-only and is filtered out.
        zones = self.nova.availability_zones.list()

        return [
            str(zone.zoneName)
            for zone in zones
            if zone.zoneName != "internal"
        ]
    except Exception:
        return None
def _set_availablity_zones(self):
    """
    Set vim availablity zone
    :return:
    """
    if "availability_zone" in self.config:
        # The config may give either one zone name or a list of them.
        configured = self.config.get("availability_zone")

        if isinstance(configured, str):
            self.availability_zone = [configured]
        elif isinstance(configured, list):
            self.availability_zone = configured
    else:
        # No configured zones: discover them from openstack.
        self.availability_zone = self._get_openstack_availablity_zones()
def _get_vm_availability_zone(
    self, availability_zone_index, availability_zone_list
):
    """
    Return thge availability zone to be used by the created VM.
    :return: The VIM availability zone to be used or None
    """
    if availability_zone_index is None:
        if not self.config.get("availability_zone"):
            return None
        elif isinstance(self.config.get("availability_zone"), str):
            return self.config["availability_zone"]
        else:
            # TODO consider using a different parameter at config for default AV and AV list match
            return self.config["availability_zone"][0]

    vim_availability_zones = self.availability_zone

    # check if VIM offer enough availability zones describe in the VNFD
    if vim_availability_zones and len(availability_zone_list) <= len(
        vim_availability_zones
    ):
        # check if all the names of NFV AV match VIM AV names
        match_by_index = False

        for av in availability_zone_list:
            if av not in vim_availability_zones:
                match_by_index = True
                break

        if match_by_index:
            return vim_availability_zones[availability_zone_index]
        else:
            return availability_zone_list[availability_zone_index]
    else:
        raise vimconn.VimConnConflictException(
            "No enough availability zones at VIM for this deployment"
        )
def _prepare_port_dict_security_groups(self, net: dict, port_dict: dict) -> None:
    """Fill up the security_groups in the port_dict.

    Args:
        net (dict): Network details
        port_dict (dict): Port details
    """
    # Security groups only apply when configured, when the network does not
    # explicitly disable port security, and the extension is not disabled.
    if (
        self.config.get("security_groups")
        and net.get("port_security") is not False
        and not self.config.get("no_port_security_extension")
    ):
        if not self.security_groups_id:
            self._get_ids_from_name()

        port_dict["security_groups"] = self.security_groups_id
def _prepare_port_dict_binding(self, net: dict, port_dict: dict) -> None:
    """Fill up the network binding depending on network type in the port_dict.

    Args:
        net (dict): Network details
        port_dict (dict): Port details
    """
    if not net.get("type"):
        raise vimconn.VimConnException("Type is missing in the network details.")

    if net["type"] == "virtual":
        pass

    # For VF
    elif net["type"] == "VF" or net["type"] == "SR-IOV":
        port_dict["binding:vnic_type"] = "direct"

        # VIO specific Changes
        if self.vim_type == "VIO":
            # Need to create port with port_security_enabled = False and no-security-groups
            port_dict["port_security_enabled"] = False
            port_dict["provider_security_groups"] = []
            port_dict["security_groups"] = []
    else:
        # For PT PCI-PASSTHROUGH
        port_dict["binding:vnic_type"] = "direct-physical"
1892 def _set_fixed_ip(new_port
: dict, net
: dict) -> None:
1893 """Set the "ip" parameter in net dictionary.
1896 new_port (dict): New created port
1897 net (dict): Network details
1900 fixed_ips
= new_port
["port"].get("fixed_ips")
1903 net
["ip"] = fixed_ips
[0].get("ip_address")
1908 def _prepare_port_dict_mac_ip_addr(net
: dict, port_dict
: dict) -> None:
1909 """Fill up the mac_address and fixed_ips in port_dict.
1912 net (dict): Network details
1913 port_dict (dict): Port details
1916 if net
.get("mac_address"):
1917 port_dict
["mac_address"] = net
["mac_address"]
1919 if net
.get("ip_address"):
1920 port_dict
["fixed_ips"] = [{"ip_address": net
["ip_address"]}]
1921 # TODO add "subnet_id": <subnet_id>
def _create_new_port(self, port_dict: dict, created_items: dict, net: dict) -> Dict:
    """Create new port using neutron.

    Args:
        port_dict (dict): Port details
        created_items (dict): All created items
        net (dict): Network details

    Returns:
        new_port (dict): New created port
    """
    created = self.neutron.create_port({"port": port_dict})
    # Record the port for later rollback/cleanup.
    created_items["port:" + str(created["port"]["id"])] = True
    # NOTE: "mac_adress" (sic) is the key consumed elsewhere; kept as-is.
    net["mac_adress"] = created["port"]["mac_address"]
    net["vim_id"] = created["port"]["id"]

    return created
1943 self
, net
: dict, name
: str, created_items
: dict
1944 ) -> Tuple
[dict, dict]:
1945 """Create port using net details.
1948 net (dict): Network details
1949 name (str): Name to be used as network name if net dict does not include name
1950 created_items (dict): All created items
1953 new_port, port New created port, port dictionary
1958 "network_id": net
["net_id"],
1959 "name": net
.get("name"),
1960 "admin_state_up": True,
1963 if not port_dict
["name"]:
1964 port_dict
["name"] = name
1966 self
._prepare
_port
_dict
_security
_groups
(net
, port_dict
)
1968 self
._prepare
_port
_dict
_binding
(net
, port_dict
)
1970 vimconnector
._prepare
_port
_dict
_mac
_ip
_addr
(net
, port_dict
)
1972 new_port
= self
._create
_new
_port
(port_dict
, created_items
, net
)
1974 vimconnector
._set
_fixed
_ip
(new_port
, net
)
1976 port
= {"port-id": new_port
["port"]["id"]}
1978 if float(self
.nova
.api_version
.get_string()) >= 2.32:
1979 port
["tag"] = new_port
["port"]["name"]
1981 return new_port
, port
def _prepare_network_for_vminstance(
    self,
    name: str,
    net_list: list,
    created_items: dict,
    net_list_vim: list,
    external_network: list,
    no_secured_ports: list,
) -> None:
    """Create port and fill up net dictionary for new VM instance creation.

    Args:
        name (str): Name of network
        net_list (list): List of networks
        created_items (dict): All created items belongs to a VM
        net_list_vim (list): List of ports
        external_network (list): List of external-networks
        no_secured_ports (list): Port security disabled ports
    """
    self._reload_connection()

    for net in net_list:
        # Skip non-connected iface
        if not net.get("net_id"):
            continue

        new_port, port = self._create_port(net, name, created_items)

        net_list_vim.append(port)

        if net.get("floating_ip", False):
            net["exit_on_floating_ip_error"] = True
            external_network.append(net)

        elif net["use"] == "mgmt" and self.config.get("use_floating_ip"):
            net["exit_on_floating_ip_error"] = False
            external_network.append(net)
            net["floating_ip"] = self.config.get("use_floating_ip")

        # If port security is disabled when the port has not yet been attached to the VM, then all vm traffic
        # is dropped. As a workaround we wait until the VM is active and then disable the port-security
        if net.get("port_security") is False and not self.config.get(
            "no_port_security_extension"
        ):
            no_secured_ports.append(
                (
                    new_port["port"]["id"],
                    net.get("port_security_disable_strategy"),
                )
            )
def _prepare_persistent_root_volumes(
    self,
    name: str,
    vm_av_zone: list,
    disk: dict,
    base_disk_index: int,
    block_device_mapping: dict,
    existing_vim_volumes: list,
    created_items: dict,
) -> Optional[str]:
    """Prepare persistent root volumes for new VM instance.

    Args:
        name (str): Name of VM instance
        vm_av_zone (list): List of availability zones
        disk (dict): Disk details
        base_disk_index (int): Disk index
        block_device_mapping (dict): Block device details
        existing_vim_volumes (list): Existing disk details
        created_items (dict): All created items belongs to VM

    Returns:
        boot_volume_id (str): ID of boot volume
    """
    # Disk may include only vim_volume_id or only vim_id."
    # Use existing persistent root volume finding with volume_id or vim_id
    key_id = "vim_volume_id" if "vim_volume_id" in disk.keys() else "vim_id"

    if disk.get(key_id):
        # Reuse the volume that already exists at the VIM.
        block_device_mapping["vd" + chr(base_disk_index)] = disk[key_id]
        existing_vim_volumes.append({"id": disk[key_id]})
    else:
        # Create persistent root volume
        volume = self.cinder.volumes.create(
            size=disk["size"],
            name=name + "vd" + chr(base_disk_index),
            imageRef=disk["image_id"],
            # Make sure volume is in the same AZ as the VM to be attached to
            availability_zone=vm_av_zone,
        )
        boot_volume_id = volume.id
        created_items["volume:" + str(volume.id)] = True
        block_device_mapping["vd" + chr(base_disk_index)] = volume.id

        return boot_volume_id
def _prepare_non_root_persistent_volumes(
    self,
    name: str,
    disk: dict,
    vm_av_zone: list,
    block_device_mapping: dict,
    base_disk_index: int,
    existing_vim_volumes: list,
    created_items: dict,
) -> None:
    """Prepare persistent volumes for new VM instance.

    Args:
        name (str): Name of VM instance
        disk (dict): Disk details
        vm_av_zone (list): List of availability zones
        block_device_mapping (dict): Block device details
        base_disk_index (int): Disk index
        existing_vim_volumes (list): Existing disk details
        created_items (dict): All created items belongs to VM
    """
    # Non-root persistent volumes
    # Disk may include only vim_volume_id or only vim_id."
    key_id = "vim_volume_id" if "vim_volume_id" in disk.keys() else "vim_id"

    if disk.get(key_id):
        # Use existing persistent volume
        block_device_mapping["vd" + chr(base_disk_index)] = disk[key_id]
        existing_vim_volumes.append({"id": disk[key_id]})
    else:
        # Create persistent volume
        volume = self.cinder.volumes.create(
            size=disk["size"],
            name=name + "vd" + chr(base_disk_index),
            # Make sure volume is in the same AZ as the VM to be attached to
            availability_zone=vm_av_zone,
        )
        created_items["volume:" + str(volume.id)] = True
        block_device_mapping["vd" + chr(base_disk_index)] = volume.id
def _wait_for_created_volumes_availability(
    self, elapsed_time: int, created_items: dict
) -> Optional[int]:
    """Wait till created volumes become available.

    Args:
        elapsed_time (int): Passed time while waiting
        created_items (dict): All created items belongs to VM

    Returns:
        elapsed_time (int): Time spent while waiting
    """
    while elapsed_time < volume_timeout:
        # Inspect only the "volume:<id>" entries among the created items.
        for created_item in created_items:
            v, _, volume_id = created_item.partition(":")

            if v == "volume":
                if self.cinder.volumes.get(volume_id).status != "available":
                    break
        else:
            # All ready: break from while
            break

        time.sleep(5)
        elapsed_time += 5

    return elapsed_time
def _wait_for_existing_volumes_availability(
    self, elapsed_time: int, existing_vim_volumes: list
) -> Optional[int]:
    """Wait till existing volumes become available.

    Args:
        elapsed_time (int): Passed time while waiting
        existing_vim_volumes (list): Existing volume details

    Returns:
        elapsed_time (int): Time spent while waiting
    """
    while elapsed_time < volume_timeout:
        for volume in existing_vim_volumes:
            if self.cinder.volumes.get(volume["id"]).status != "available":
                break
        else:  # all ready: break from while
            break

        time.sleep(5)
        elapsed_time += 5

    return elapsed_time
def _prepare_disk_for_vminstance(
    self,
    name: str,
    existing_vim_volumes: list,
    created_items: dict,
    vm_av_zone: list,
    disk_list: list = None,
) -> None:
    """Prepare all volumes for new VM instance.

    Args:
        name (str): Name of Instance
        existing_vim_volumes (list): List of existing volumes
        created_items (dict): All created items belongs to VM
        vm_av_zone (list): VM availability zone
        disk_list (list): List of disks
    """
    # Create additional volumes in case these are present in disk_list
    base_disk_index = ord("b")
    boot_volume_id = None
    elapsed_time = 0
    block_device_mapping = {}

    for disk in disk_list:
        if "image_id" in disk:
            # Root persistent volume
            base_disk_index = ord("a")
            boot_volume_id = self._prepare_persistent_root_volumes(
                name=name,
                vm_av_zone=vm_av_zone,
                disk=disk,
                base_disk_index=base_disk_index,
                block_device_mapping=block_device_mapping,
                existing_vim_volumes=existing_vim_volumes,
                created_items=created_items,
            )
        else:
            # Non-root persistent volume
            self._prepare_non_root_persistent_volumes(
                name=name,
                disk=disk,
                vm_av_zone=vm_av_zone,
                block_device_mapping=block_device_mapping,
                base_disk_index=base_disk_index,
                existing_vim_volumes=existing_vim_volumes,
                created_items=created_items,
            )

        base_disk_index += 1

    # Wait until created volumes are with status available
    elapsed_time = self._wait_for_created_volumes_availability(
        elapsed_time, created_items
    )
    # Wait until existing volumes in vim are with status available
    elapsed_time = self._wait_for_existing_volumes_availability(
        elapsed_time, existing_vim_volumes
    )
    # If we exceeded the timeout rollback
    if elapsed_time >= volume_timeout:
        raise vimconn.VimConnException(
            "Timeout creating volumes for instance " + name,
            http_code=vimconn.HTTP_Request_Timeout,
        )

    if boot_volume_id:
        self.cinder.volumes.set_bootable(boot_volume_id, True)
def _find_the_external_network_for_floating_ip(self):
    """Get the external network ip in order to create floating IP.

    Returns:
        pool_id (str): External network pool ID
    """
    # Find the external network
    external_nets = list()

    for net in self.neutron.list_networks()["networks"]:
        if net["router:external"]:
            external_nets.append(net)

    # Exactly one external network must exist to pick the pool automatically.
    if len(external_nets) == 0:
        raise vimconn.VimConnException(
            "Cannot create floating_ip automatically since "
            "no external network is present",
            http_code=vimconn.HTTP_Conflict,
        )

    if len(external_nets) > 1:
        raise vimconn.VimConnException(
            "Cannot create floating_ip automatically since "
            "multiple external networks are present",
            http_code=vimconn.HTTP_Conflict,
        )

    # Pool ID
    return external_nets[0].get("id")
def _neutron_create_float_ip(self, param: dict, created_items: dict) -> None:
    """Trigger neutron to create a new floating IP using external network ID.

    Args:
        param (dict): Input parameters to create a floating IP
        created_items (dict): All created items belongs to new VM instance

    Raises:
        VimConnException
    """
    try:
        self.logger.debug("Creating floating IP")
        new_floating_ip = self.neutron.create_floatingip(param)
        free_floating_ip = new_floating_ip["floatingip"]["id"]
        # Track the floating IP so it can be rolled back with the VM.
        created_items["floating_ip:" + str(free_floating_ip)] = True

    except Exception as e:
        raise vimconn.VimConnException(
            type(e).__name__ + ": Cannot create new floating_ip " + str(e),
            http_code=vimconn.HTTP_Conflict,
        )
def _create_floating_ip(
    self, floating_network: dict, server: object, created_items: dict
) -> None:
    """Get the available Pool ID and create a new floating IP.

    Args:
        floating_network (dict): Dict including external network ID
        server (object): Server object
        created_items (dict): All created items belongs to new VM instance
    """
    # Pool_id is available
    if (
        isinstance(floating_network["floating_ip"], str)
        and floating_network["floating_ip"].lower() != "true"
    ):
        # A concrete pool ID was given in the descriptor.
        pool_id = floating_network["floating_ip"]
    else:
        # Discover the single external network to use as the pool.
        pool_id = self._find_the_external_network_for_floating_ip()

    param = {
        "floatingip": {
            "floating_network_id": pool_id,
            "tenant_id": server.tenant_id,
        }
    }

    self._neutron_create_float_ip(param, created_items)
2333 def _find_floating_ip(
2337 floating_network
: dict,
2339 """Find the available free floating IPs if there are.
2342 server (object): Server object
2343 floating_ips (list): List of floating IPs
2344 floating_network (dict): Details of floating network such as ID
2347 free_floating_ip (str): Free floating ip address
2350 for fip
in floating_ips
:
2351 if fip
.get("port_id") or fip
.get("tenant_id") != server
.tenant_id
:
2354 if isinstance(floating_network
["floating_ip"], str):
2355 if fip
.get("floating_network_id") != floating_network
["floating_ip"]:
2360 def _assign_floating_ip(
2361 self
, free_floating_ip
: str, floating_network
: dict
2363 """Assign the free floating ip address to port.
2366 free_floating_ip (str): Floating IP to be assigned
2367 floating_network (dict): ID of floating network
2370 fip (dict) (dict): Floating ip details
2373 # The vim_id key contains the neutron.port_id
2374 self
.neutron
.update_floatingip(
2376 {"floatingip": {"port_id": floating_network
["vim_id"]}},
2378 # For race condition ensure not re-assigned to other VM after 5 seconds
2381 return self
.neutron
.show_floatingip(free_floating_ip
)
2383 def _get_free_floating_ip(
2384 self
, server
: object, floating_network
: dict
2386 """Get the free floating IP address.
2389 server (object): Server Object
2390 floating_network (dict): Floating network details
2393 free_floating_ip (str): Free floating ip addr
2397 floating_ips
= self
.neutron
.list_floatingips().get("floatingips", ())
2400 random
.shuffle(floating_ips
)
2402 return self
._find
_floating
_ip
(server
, floating_ips
, floating_network
)
    def _prepare_external_network_for_vminstance(
        self,
        external_network: list,
        server: object,
        created_items: dict,
        vm_start_time: float,
    ) -> None:
        """Assign floating IP address for VM instance.

        Args:
            external_network (list): ID of External network
            server (object): Server Object
            created_items (dict): All created items belongs to new VM instance
            vm_start_time (float): Time as a floating point number expressed in seconds since the epoch, in UTC

        Raises:
            VimConnException
        """
        for floating_network in external_network:
            try:
                assigned = False
                floating_ip_retries = 3
                # In case of RO in HA there can be conflicts, two RO trying to assign same floating IP, so retry
                # several times
                while not assigned:
                    free_floating_ip = self._get_free_floating_ip(
                        server, floating_network
                    )

                    if not free_floating_ip:
                        self._create_floating_ip(
                            floating_network, server, created_items
                        )

                    try:
                        # For race condition ensure not already assigned
                        fip = self.neutron.show_floatingip(free_floating_ip)

                        if fip["floatingip"].get("port_id"):
                            # Already taken by someone else: pick another one
                            continue

                        # Assign floating ip
                        fip = self._assign_floating_ip(
                            free_floating_ip, floating_network
                        )

                        if fip["floatingip"]["port_id"] != floating_network["vim_id"]:
                            # Lost the race: another VM grabbed it meanwhile
                            self.logger.warning(
                                "floating_ip {} re-assigned to other port".format(
                                    free_floating_ip
                                )
                            )
                            continue

                        self.logger.debug(
                            "Assigned floating_ip {} to VM {}".format(
                                free_floating_ip, server.id
                            )
                        )

                        assigned = True
                    except Exception as e:
                        # Openstack need some time after VM creation to assign an IP. So retry if fails
                        vm_status = self.nova.servers.get(server.id).status

                        if vm_status not in ("ACTIVE", "ERROR"):
                            # VM still building: wait and retry within the global timeout
                            # NOTE(review): server_timeout is assumed to be a module-level
                            # constant (seconds) — confirm against the file header.
                            if time.time() - vm_start_time < server_timeout:
                                time.sleep(5)
                                continue
                        elif floating_ip_retries > 0:
                            floating_ip_retries -= 1
                            continue

                        raise vimconn.VimConnException(
                            "Cannot create floating_ip: {} {}".format(
                                type(e).__name__, e
                            ),
                            http_code=vimconn.HTTP_Conflict,
                        )
            except Exception as e:
                # Only abort the whole instance creation if the descriptor demands it
                if not floating_network["exit_on_floating_ip_error"]:
                    self.logger.error("Cannot create floating_ip. %s", str(e))
                    continue

                raise
2493 def _update_port_security_for_vminstance(
2495 no_secured_ports
: list,
2498 """Updates the port security according to no_secured_ports list.
2501 no_secured_ports (list): List of ports that security will be disabled
2502 server (object): Server Object
2508 # Wait until the VM is active and then disable the port-security
2509 if no_secured_ports
:
2510 self
.__wait
_for
_vm
(server
.id, "ACTIVE")
2512 for port
in no_secured_ports
:
2514 "port": {"port_security_enabled": False, "security_groups": None}
2517 if port
[1] == "allow-address-pairs":
2519 "port": {"allowed_address_pairs": [{"ip_address": "0.0.0.0/0"}]}
2523 self
.neutron
.update_port(port
[0], port_update
)
2526 raise vimconn
.VimConnException(
2527 "It was not possible to disable port security for port {}".format(
    def new_vminstance(
        self,
        name: str,
        description: str,
        start: bool,
        image_id: str,
        flavor_id: str,
        affinity_group_list: list,
        net_list: list,
        cloud_config=None,
        disk_list=None,
        availability_zone_index=None,
        availability_zone_list=None,
    ) -> tuple:
        """Adds a VM instance to VIM.

        Args:
            name    (str):          name of VM
            description (str):      description
            start   (bool):         indicates if VM must start or boot in pause mode. Ignored
            image_id    (str)       image uuid
            flavor_id   (str)       flavor uuid
            affinity_group_list (list):     list of affinity groups, each one is a dictionary.Ignore if empty.
            net_list    (list):         list of interfaces, each one is a dictionary with:
                name:   name of network
                net_id:     network uuid to connect
                vpci:   virtual vcpi to assign, ignored because openstack lack #TODO
                model:  interface model, ignored #TODO
                mac_address:    used for  SR-IOV ifaces #TODO for other types
                use:    'data', 'bridge',  'mgmt'
                type:   'virtual', 'PCI-PASSTHROUGH'('PF'), 'SR-IOV'('VF'), 'VFnotShared'
                vim_id:     filled/added by this function
                floating_ip:    True/False (or it can be None)
                port_security:  True/False
            cloud_config    (dict): (optional) dictionary with:
                key-pairs: (optional) list of strings with the public key to be inserted to the default user
                users: (optional) list of users to be inserted, each item is a dict with:
                    name: (mandatory) user name,
                    key-pairs: (optional) list of strings with the public key to be inserted to the user
                user-data: (optional) string is a text script to be passed directly to cloud-init
                config-files: (optional). List of files to be transferred. Each item is a dict with:
                    dest: (mandatory) string with the destination absolute path
                    encoding: (optional, by default text). Can be one of:
                        'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
                    content : (mandatory) string with the content of the file
                    permissions: (optional) string with file permissions, typically octal notation '0644'
                    owner: (optional) file owner, string with the format 'owner:group'
                boot-data-drive: boolean to indicate if user-data must be passed using a boot drive (hard disk)
            disk_list:  (optional) list with additional disks to the VM. Each item is a dict with:
                image_id:   (optional). VIM id of an existing image. If not provided an empty disk must be mounted
                size:   (mandatory) string with the size of the disk in GB
                vim_id:  (optional) should use this existing volume id
            availability_zone_index:    Index of availability_zone_list to use for this this VM. None if not AV required
            availability_zone_list:     list of availability zones given by user in the VNFD descriptor.  Ignore if
                availability_zone_index is None
                #TODO ip, security groups

        Returns:
            A tuple with the instance identifier and created_items or raises an exception on error
            created_items can be None or a dictionary where this method can include key-values that will be passed to
            the method delete_vminstance and action_vminstance. Can be used to store created ports, volumes, etc.
            Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
            as not present.
        """
        self.logger.debug(
            "new_vminstance input: image='%s' flavor='%s' nics='%s'",
            image_id,
            flavor_id,
            str(net_list),
        )

        try:
            server = None
            created_items = {}
            net_list_vim = []
            # list of external networks to be connected to instance, later on used to create floating_ip
            external_network = []
            # List of ports with port-security disabled
            no_secured_ports = []
            block_device_mapping = None
            existing_vim_volumes = []
            server_group_id = None
            scheduller_hints = {}

            # Check the Openstack Connection
            self._reload_connection()

            # Prepare network list
            self._prepare_network_for_vminstance(
                name=name,
                net_list=net_list,
                created_items=created_items,
                net_list_vim=net_list_vim,
                external_network=external_network,
                no_secured_ports=no_secured_ports,
            )

            # Cloud config: build user-data / config-drive from cloud_config
            config_drive, userdata = self._create_user_data(cloud_config)

            # Get availability Zone
            vm_av_zone = self._get_vm_availability_zone(
                availability_zone_index, availability_zone_list
            )

            if disk_list:
                # Prepare disks: creates/attaches volumes, fills block_device_mapping
                self._prepare_disk_for_vminstance(
                    name=name,
                    existing_vim_volumes=existing_vim_volumes,
                    created_items=created_items,
                    vm_av_zone=vm_av_zone,
                    disk_list=disk_list,
                )

            if affinity_group_list:
                # Only first id on the list will be used. Openstack restriction
                server_group_id = affinity_group_list[0]["affinity_group_id"]
                scheduller_hints["group"] = server_group_id

            self.logger.debug(
                "nova.servers.create({}, {}, {}, nics={}, security_groups={}, "
                "availability_zone={}, key_name={}, userdata={}, config_drive={}, "
                "block_device_mapping={}, server_group={})".format(
                    name,
                    image_id,
                    flavor_id,
                    net_list_vim,
                    self.config.get("security_groups"),
                    vm_av_zone,
                    self.config.get("keypair"),
                    userdata,
                    config_drive,
                    block_device_mapping,
                    server_group_id,
                )
            )

            # Create VM
            server = self.nova.servers.create(
                name=name,
                image=image_id,
                flavor=flavor_id,
                nics=net_list_vim,
                security_groups=self.config.get("security_groups"),
                # TODO remove security_groups in future versions. Already at neutron port
                availability_zone=vm_av_zone,
                key_name=self.config.get("keypair"),
                userdata=userdata,
                config_drive=config_drive,
                block_device_mapping=block_device_mapping,
                scheduler_hints=scheduller_hints,
            )

            vm_start_time = time.time()

            self._update_port_security_for_vminstance(no_secured_ports, server)

            self._prepare_external_network_for_vminstance(
                external_network=external_network,
                server=server,
                created_items=created_items,
                vm_start_time=vm_start_time,
            )

            return server.id, created_items
        except Exception as e:
            server_id = None
            if server:
                server_id = server.id

            try:
                # Rollback: remove the half-created instance and its items
                self.delete_vminstance(server_id, created_items)
            except Exception as e2:
                self.logger.error("new_vminstance rollback fail {}".format(e2))

            self._format_exception(e)
2712 def get_vminstance(self
, vm_id
):
2713 """Returns the VM instance information from VIM"""
2714 # self.logger.debug("Getting VM from VIM")
2716 self
._reload
_connection
()
2717 server
= self
.nova
.servers
.find(id=vm_id
)
2718 # TODO parse input and translate to VIM format (openmano_schemas.new_vminstance_response_schema)
2720 return server
.to_dict()
2722 ksExceptions
.ClientException
,
2723 nvExceptions
.ClientException
,
2724 nvExceptions
.NotFound
,
2727 self
._format
_exception
(e
)
    def get_vminstance_console(self, vm_id, console_type="vnc"):
        """
        Get a console for the virtual machine
        Params:
            vm_id: uuid of the VM
            console_type, can be:
                "novnc" (by default), "xvpvnc" for VNC types,
                "rdp-html5" for RDP types, "spice-html5" for SPICE types
        Returns dict with the console parameters:
                protocol: ssh, ftp, http, https, ...
                server:   usually ip address
                port:     the http, ssh, ... port
                suffix:   extra text, e.g. the http path and query string
        """
        self.logger.debug("Getting VM CONSOLE from VIM")

        try:
            self._reload_connection()
            server = self.nova.servers.find(id=vm_id)

            # Dispatch on requested console flavor
            if console_type is None or console_type == "novnc":
                console_dict = server.get_vnc_console("novnc")
            elif console_type == "xvpvnc":
                console_dict = server.get_vnc_console(console_type)
            elif console_type == "rdp-html5":
                console_dict = server.get_rdp_console(console_type)
            elif console_type == "spice-html5":
                console_dict = server.get_spice_console(console_type)
            else:
                raise vimconn.VimConnException(
                    "console type '{}' not allowed".format(console_type),
                    http_code=vimconn.HTTP_Bad_Request,
                )

            console_dict1 = console_dict.get("console")

            if console_dict1:
                console_url = console_dict1.get("url")

                if console_url:
                    # parse console_url into protocol://server:port/suffix
                    protocol_index = console_url.find("//")
                    suffix_index = (
                        console_url[protocol_index + 2 :].find("/") + protocol_index + 2
                    )
                    port_index = (
                        console_url[protocol_index + 2 : suffix_index].find(":")
                        + protocol_index
                        + 2
                    )

                    if protocol_index < 0 or port_index < 0 or suffix_index < 0:
                        return (
                            -vimconn.HTTP_Internal_Server_Error,
                            "Unexpected response from VIM",
                        )

                    # NOTE(review): "port" keeps the leading ":" (the slice starts
                    # at the colon), unlike action_vminstance which uses
                    # port_index + 1 and int() — confirm which form callers expect.
                    console_dict = {
                        "protocol": console_url[0:protocol_index],
                        "server": console_url[protocol_index + 2 : port_index],
                        "port": console_url[port_index:suffix_index],
                        "suffix": console_url[suffix_index + 1 :],
                    }

                    return console_dict
            raise vimconn.VimConnUnexpectedResponse("Unexpected response from VIM")
        except (
            nvExceptions.NotFound,
            ksExceptions.ClientException,
            nvExceptions.ClientException,
            nvExceptions.BadRequest,
            ConnectionError,
        ) as e:
            self._format_exception(e)
2805 def _delete_ports_by_id_wth_neutron(self
, k_id
: str) -> None:
2806 """Neutron delete ports by id.
2808 k_id (str): Port id in the VIM
2811 port_dict
= self
.neutron
.list_ports()
2812 existing_ports
= [port
["id"] for port
in port_dict
["ports"] if port_dict
]
2814 if k_id
in existing_ports
:
2815 self
.neutron
.delete_port(k_id
)
2817 except Exception as e
:
2818 self
.logger
.error("Error deleting port: {}: {}".format(type(e
).__name
__, e
))
2820 def _delete_volumes_by_id_wth_cinder(
2821 self
, k
: str, k_id
: str, volumes_to_hold
: list, created_items
: dict
2823 """Cinder delete volume by id.
2825 k (str): Full item name in created_items
2826 k_id (str): ID of floating ip in VIM
2827 volumes_to_hold (list): Volumes not to delete
2828 created_items (dict): All created items belongs to VM
2831 if k_id
in volumes_to_hold
:
2834 if self
.cinder
.volumes
.get(k_id
).status
!= "available":
2838 self
.cinder
.volumes
.delete(k_id
)
2839 created_items
[k
] = None
2841 except Exception as e
:
2843 "Error deleting volume: {}: {}".format(type(e
).__name
__, e
)
2846 def _delete_floating_ip_by_id(self
, k
: str, k_id
: str, created_items
: dict) -> None:
2847 """Neutron delete floating ip by id.
2849 k (str): Full item name in created_items
2850 k_id (str): ID of floating ip in VIM
2851 created_items (dict): All created items belongs to VM
2854 self
.neutron
.delete_floatingip(k_id
)
2855 created_items
[k
] = None
2857 except Exception as e
:
2859 "Error deleting floating ip: {}: {}".format(type(e
).__name
__, e
)
2863 def _get_item_name_id(k
: str) -> Tuple
[str, str]:
2864 k_item
, _
, k_id
= k
.partition(":")
2867 def _delete_vm_ports_attached_to_network(self
, created_items
: dict) -> None:
2868 """Delete VM ports attached to the networks before deleting virtual machine.
2870 created_items (dict): All created items belongs to VM
2873 for k
, v
in created_items
.items():
2874 if not v
: # skip already deleted
2878 k_item
, k_id
= self
._get
_item
_name
_id
(k
)
2879 if k_item
== "port":
2880 self
._delete
_ports
_by
_id
_wth
_neutron
(k_id
)
2882 except Exception as e
:
2884 "Error deleting port: {}: {}".format(type(e
).__name
__, e
)
2887 def _delete_created_items(
2888 self
, created_items
: dict, volumes_to_hold
: list, keep_waiting
: bool
2890 """Delete Volumes and floating ip if they exist in created_items."""
2891 for k
, v
in created_items
.items():
2892 if not v
: # skip already deleted
2896 k_item
, k_id
= self
._get
_item
_name
_id
(k
)
2898 if k_item
== "volume":
2899 unavailable_vol
= self
._delete
_volumes
_by
_id
_wth
_cinder
(
2900 k
, k_id
, volumes_to_hold
, created_items
2906 elif k_item
== "floating_ip":
2907 self
._delete
_floating
_ip
_by
_id
(k
, k_id
, created_items
)
2909 except Exception as e
:
2910 self
.logger
.error("Error deleting {}: {}".format(k
, e
))
    def delete_vminstance(
        self, vm_id: str, created_items: dict = None, volumes_to_hold: list = None
    ) -> None:
        """Removes a VM instance from VIM. Returns the old identifier.

        Args:
            vm_id (str): Identifier of VM instance
            created_items (dict): All created items belongs to VM
            volumes_to_hold (list): Volumes_to_hold
        """
        if created_items is None:
            created_items = {}

        if volumes_to_hold is None:
            volumes_to_hold = []

        try:
            self._reload_connection()

            # Delete VM ports attached to the networks before the virtual machine
            if created_items:
                self._delete_vm_ports_attached_to_network(created_items)

            if vm_id:
                self.nova.servers.delete(vm_id)

            # Although having detached, volumes should have in active status before deleting.
            # We ensure in this loop
            # NOTE(review): volume_timeout is assumed to be a module-level
            # constant (seconds) — confirm against the file header.
            keep_waiting = True
            elapsed_time = 0

            while keep_waiting and elapsed_time < volume_timeout:
                keep_waiting = False

                # Delete volumes and floating IP.
                keep_waiting = self._delete_created_items(
                    created_items, volumes_to_hold, keep_waiting
                )

                if keep_waiting:
                    time.sleep(1)
                    elapsed_time += 1
        except (
            nvExceptions.NotFound,
            ksExceptions.ClientException,
            nvExceptions.ClientException,
            ConnectionError,
        ) as e:
            self._format_exception(e)
    def refresh_vms_status(self, vm_list):
        """Get the status of the virtual machines and their interfaces/ports
        Params: the list of VM identifiers
        Returns a dictionary with:
            vm_id:          #VIM id of this Virtual Machine
                status:     #Mandatory. Text with one of:
                            #  DELETED (not found at vim)
                            #  VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
                            #  OTHER (Vim reported other status not understood)
                            #  ERROR (VIM indicates an ERROR status)
                            #  ACTIVE, PAUSED, SUSPENDED, INACTIVE (not running),
                            #  CREATING (on building process), ERROR
                            #  ACTIVE:NoMgmtIP (Active but any of its interface has an IP address
                error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR
                vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)
                interfaces:
                 -  vim_info:         #Text with plain information obtained from vim (yaml.safe_dump)
                    mac_address:      #Text format XX:XX:XX:XX:XX:XX
                    vim_net_id:       #network id where this interface is connected
                    vim_interface_id: #interface/port VIM id
                    ip_address:       #null, or text with IPv4, IPv6 address
                    compute_node:     #identification of compute node where PF,VF interface is allocated
                    pci:              #PCI address of the NIC that hosts the PF,VF
                    vlan:             #physical VLAN used for VF
        """
        vm_dict = {}
        self.logger.debug(
            "refresh_vms status: Getting tenant VM instance information from VIM"
        )

        for vm_id in vm_list:
            vm = {}

            try:
                vm_vim = self.get_vminstance(vm_id)

                if vm_vim["status"] in vmStatus2manoFormat:
                    vm["status"] = vmStatus2manoFormat[vm_vim["status"]]
                else:
                    vm["status"] = "OTHER"
                    vm["error_msg"] = "VIM status reported " + vm_vim["status"]

                # Strip bulky/sensitive cloud-init payloads before serializing
                vm_vim.pop("OS-EXT-SRV-ATTR:user_data", None)
                vm_vim.pop("user_data", None)
                vm["vim_info"] = self.serialize(vm_vim)

                vm["interfaces"] = []
                if vm_vim.get("fault"):
                    vm["error_msg"] = str(vm_vim["fault"])

                # Interface enumeration is best-effort: failures only log
                try:
                    self._reload_connection()
                    port_dict = self.neutron.list_ports(device_id=vm_id)

                    for port in port_dict["ports"]:
                        interface = {}
                        interface["vim_info"] = self.serialize(port)
                        interface["mac_address"] = port.get("mac_address")
                        interface["vim_net_id"] = port["network_id"]
                        interface["vim_interface_id"] = port["id"]
                        # check if OS-EXT-SRV-ATTR:host is there,
                        # in case of non-admin credentials, it will be missing
                        if vm_vim.get("OS-EXT-SRV-ATTR:host"):
                            interface["compute_node"] = vm_vim["OS-EXT-SRV-ATTR:host"]

                        interface["pci"] = None

                        # check if binding:profile is there,
                        # in case of non-admin credentials, it will be missing
                        if port.get("binding:profile"):
                            if port["binding:profile"].get("pci_slot"):
                                # TODO: At the moment sr-iov pci addresses are converted to PF pci addresses by setting
                                # the slot to 0x00
                                # TODO: This is just a workaround valid for niantinc. Find a better way to do so
                                # CHANGE DDDD:BB:SS.F to DDDD:BB:00.(F%2) assuming there are 2 ports per nic
                                pci = port["binding:profile"]["pci_slot"]
                                # interface["pci"] = pci[:-4] + "00." + str(int(pci[-1]) % 2)
                                interface["pci"] = pci

                        interface["vlan"] = None

                        if port.get("binding:vif_details"):
                            interface["vlan"] = port["binding:vif_details"].get("vlan")

                        # Get vlan from network in case not present in port for those old openstacks and cases where
                        # it is needed vlan at PT
                        if not interface["vlan"]:
                            # if network is of type vlan and port is of type direct (sr-iov) then set vlan id
                            network = self.neutron.show_network(port["network_id"])

                            if (
                                network["network"].get("provider:network_type")
                                == "vlan"
                            ):
                                # and port.get("binding:vnic_type") in ("direct", "direct-physical"):
                                interface["vlan"] = network["network"].get(
                                    "provider:segmentation_id"
                                )

                        ips = []
                        # look for floating ip address
                        try:
                            floating_ip_dict = self.neutron.list_floatingips(
                                port_id=port["id"]
                            )

                            if floating_ip_dict.get("floatingips"):
                                ips.append(
                                    floating_ip_dict["floatingips"][0].get(
                                        "floating_ip_address"
                                    )
                                )
                        except Exception:
                            pass

                        for subnet in port["fixed_ips"]:
                            ips.append(subnet["ip_address"])

                        interface["ip_address"] = ";".join(ips)
                        vm["interfaces"].append(interface)
                except Exception as e:
                    self.logger.error(
                        "Error getting vm interface information {}: {}".format(
                            type(e).__name__, e
                        ),
                        exc_info=True,
                    )
            except vimconn.VimConnNotFoundException as e:
                self.logger.error("Exception getting vm status: %s", str(e))
                vm["status"] = "DELETED"
                vm["error_msg"] = str(e)
            except vimconn.VimConnException as e:
                self.logger.error("Exception getting vm status: %s", str(e))
                vm["status"] = "VIM_ERROR"
                vm["error_msg"] = str(e)

            vm_dict[vm_id] = vm

        return vm_dict
    # NOTE(review): mutable default argument (created_items={}); harmless as the
    # dict is never mutated here, but consider created_items=None — confirm no
    # caller relies on the shared default object.
    def action_vminstance(self, vm_id, action_dict, created_items={}):
        """Send and action over a VM instance from VIM
        Returns None or the console dict if the action was successfully sent to the VIM
        """
        self.logger.debug("Action over VM '%s': %s", vm_id, str(action_dict))

        try:
            self._reload_connection()
            server = self.nova.servers.find(id=vm_id)

            if "start" in action_dict:
                if action_dict["start"] == "rebuild":
                    server.rebuild()
                else:
                    # A "start" is only valid from a non-active state
                    if server.status == "PAUSED":
                        server.unpause()
                    elif server.status == "SUSPENDED":
                        server.resume()
                    elif server.status == "SHUTOFF":
                        server.start()
                    else:
                        self.logger.debug(
                            "ERROR : Instance is not in SHUTOFF/PAUSE/SUSPEND state"
                        )
                        raise vimconn.VimConnException(
                            "Cannot 'start' instance while it is in active state",
                            http_code=vimconn.HTTP_Bad_Request,
                        )
            elif "pause" in action_dict:
                server.pause()
            elif "resume" in action_dict:
                server.resume()
            elif "shutoff" in action_dict or "shutdown" in action_dict:
                self.logger.debug("server status %s", server.status)

                if server.status == "ACTIVE":
                    server.stop()
                else:
                    self.logger.debug("ERROR: VM is not in Active state")
                    raise vimconn.VimConnException(
                        "VM is not in active state, stop operation is not allowed",
                        http_code=vimconn.HTTP_Bad_Request,
                    )
            elif "forceOff" in action_dict:
                server.stop()  # TODO
            elif "terminate" in action_dict:
                server.delete()
            elif "createImage" in action_dict:
                server.create_image()
                # "path":path_schema,
                # "description":description_schema,
                # "name":name_schema,
                # "metadata":metadata_schema,
                # "imageRef": id_schema,
                # "disk": {"oneOf":[{"type": "null"}, {"type":"string"}] },
            elif "rebuild" in action_dict:
                server.rebuild(server.image["id"])
            elif "reboot" in action_dict:
                server.reboot()  # reboot_type="SOFT"
            elif "console" in action_dict:
                console_type = action_dict["console"]

                if console_type is None or console_type == "novnc":
                    console_dict = server.get_vnc_console("novnc")
                elif console_type == "xvpvnc":
                    console_dict = server.get_vnc_console(console_type)
                elif console_type == "rdp-html5":
                    console_dict = server.get_rdp_console(console_type)
                elif console_type == "spice-html5":
                    console_dict = server.get_spice_console(console_type)
                else:
                    raise vimconn.VimConnException(
                        "console type '{}' not allowed".format(console_type),
                        http_code=vimconn.HTTP_Bad_Request,
                    )

                try:
                    # parse console url into protocol://server:port/suffix
                    console_url = console_dict["console"]["url"]
                    protocol_index = console_url.find("//")
                    suffix_index = (
                        console_url[protocol_index + 2 :].find("/")
                        + protocol_index
                        + 2
                    )
                    port_index = (
                        console_url[protocol_index + 2 : suffix_index].find(":")
                        + protocol_index
                        + 2
                    )

                    if protocol_index < 0 or port_index < 0 or suffix_index < 0:
                        raise vimconn.VimConnException(
                            "Unexpected response from VIM " + str(console_dict)
                        )

                    console_dict2 = {
                        "protocol": console_url[0:protocol_index],
                        "server": console_url[protocol_index + 2 : port_index],
                        "port": int(console_url[port_index + 1 : suffix_index]),
                        "suffix": console_url[suffix_index + 1 :],
                    }

                    return console_dict2
                except Exception:
                    raise vimconn.VimConnException(
                        "Unexpected response from VIM " + str(console_dict)
                    )

            return None
        except (
            ksExceptions.ClientException,
            nvExceptions.ClientException,
            nvExceptions.NotFound,
            ConnectionError,
        ) as e:
            self._format_exception(e)
        # TODO insert exception vimconn.HTTP_Unauthorized
3223 # ###### VIO Specific Changes #########
3224 def _generate_vlanID(self
):
3226 Method to get unused vlanID
3234 networks
= self
.get_network_list()
3236 for net
in networks
:
3237 if net
.get("provider:segmentation_id"):
3238 usedVlanIDs
.append(net
.get("provider:segmentation_id"))
3240 used_vlanIDs
= set(usedVlanIDs
)
3242 # find unused VLAN ID
3243 for vlanID_range
in self
.config
.get("dataplane_net_vlan_range"):
3245 start_vlanid
, end_vlanid
= map(
3246 int, vlanID_range
.replace(" ", "").split("-")
3249 for vlanID
in range(start_vlanid
, end_vlanid
+ 1):
3250 if vlanID
not in used_vlanIDs
:
3252 except Exception as exp
:
3253 raise vimconn
.VimConnException(
3254 "Exception {} occurred while generating VLAN ID.".format(exp
)
3257 raise vimconn
.VimConnConflictException(
3258 "Unable to create the SRIOV VLAN network. All given Vlan IDs {} are in use.".format(
3259 self
.config
.get("dataplane_net_vlan_range")
3263 def _generate_multisegment_vlanID(self
):
3265 Method to get unused vlanID
3273 networks
= self
.get_network_list()
3274 for net
in networks
:
3275 if net
.get("provider:network_type") == "vlan" and net
.get(
3276 "provider:segmentation_id"
3278 usedVlanIDs
.append(net
.get("provider:segmentation_id"))
3279 elif net
.get("segments"):
3280 for segment
in net
.get("segments"):
3281 if segment
.get("provider:network_type") == "vlan" and segment
.get(
3282 "provider:segmentation_id"
3284 usedVlanIDs
.append(segment
.get("provider:segmentation_id"))
3286 used_vlanIDs
= set(usedVlanIDs
)
3288 # find unused VLAN ID
3289 for vlanID_range
in self
.config
.get("multisegment_vlan_range"):
3291 start_vlanid
, end_vlanid
= map(
3292 int, vlanID_range
.replace(" ", "").split("-")
3295 for vlanID
in range(start_vlanid
, end_vlanid
+ 1):
3296 if vlanID
not in used_vlanIDs
:
3298 except Exception as exp
:
3299 raise vimconn
.VimConnException(
3300 "Exception {} occurred while generating VLAN ID.".format(exp
)
3303 raise vimconn
.VimConnConflictException(
3304 "Unable to create the VLAN segment. All VLAN IDs {} are in use.".format(
3305 self
.config
.get("multisegment_vlan_range")
3309 def _validate_vlan_ranges(self
, input_vlan_range
, text_vlan_range
):
3311 Method to validate user given vlanID ranges
3315 for vlanID_range
in input_vlan_range
:
3316 vlan_range
= vlanID_range
.replace(" ", "")
3318 vlanID_pattern
= r
"(\d)*-(\d)*$"
3319 match_obj
= re
.match(vlanID_pattern
, vlan_range
)
3321 raise vimconn
.VimConnConflictException(
3322 "Invalid VLAN range for {}: {}.You must provide "
3323 "'{}' in format [start_ID - end_ID].".format(
3324 text_vlan_range
, vlanID_range
, text_vlan_range
3328 start_vlanid
, end_vlanid
= map(int, vlan_range
.split("-"))
3329 if start_vlanid
<= 0:
3330 raise vimconn
.VimConnConflictException(
3331 "Invalid VLAN range for {}: {}. Start ID can not be zero. For VLAN "
3332 "networks valid IDs are 1 to 4094 ".format(
3333 text_vlan_range
, vlanID_range
3337 if end_vlanid
> 4094:
3338 raise vimconn
.VimConnConflictException(
3339 "Invalid VLAN range for {}: {}. End VLAN ID can not be "
3340 "greater than 4094. For VLAN networks valid IDs are 1 to 4094 ".format(
3341 text_vlan_range
, vlanID_range
3345 if start_vlanid
> end_vlanid
:
3346 raise vimconn
.VimConnConflictException(
3347 "Invalid VLAN range for {}: {}. You must provide '{}'"
3348 " in format start_ID - end_ID and start_ID < end_ID ".format(
3349 text_vlan_range
, vlanID_range
, text_vlan_range
3353 # NOT USED FUNCTIONS
3355 def new_external_port(self
, port_data
):
3356 """Adds a external port to VIM
3357 Returns the port identifier"""
3358 # TODO openstack if needed
3360 -vimconn
.HTTP_Internal_Server_Error
,
3361 "osconnector.new_external_port() not implemented",
3364 def connect_port_network(self
, port_id
, network_id
, admin
=False):
3365 """Connects a external port to a network
3366 Returns status code of the VIM response"""
3367 # TODO openstack if needed
3369 -vimconn
.HTTP_Internal_Server_Error
,
3370 "osconnector.connect_port_network() not implemented",
    def new_user(self, user_name, user_passwd, tenant_id=None):
        """Adds a new user to openstack VIM
        Returns the user identifier"""
        self.logger.debug("osconnector: Adding a new user to VIM")

        try:
            self._reload_connection()
            user = self.keystone.users.create(
                user_name, password=user_passwd, default_project=tenant_id
            )
            # self.keystone.tenants.add_user(self.k_creds["username"], #role)

            return user.id
        except ksExceptions.ConnectionError as e:
            error_value = -vimconn.HTTP_Bad_Request
            # Prefer the first exception arg when present; str(e) otherwise
            error_text = (
                type(e).__name__
                + ": "
                + (str(e) if len(e.args) == 0 else str(e.args[0]))
            )
        except ksExceptions.ClientException as e:  # TODO remove
            error_value = -vimconn.HTTP_Bad_Request
            error_text = (
                type(e).__name__
                + ": "
                + (str(e) if len(e.args) == 0 else str(e.args[0]))
            )

        # TODO insert exception vimconn.HTTP_Unauthorized
        # if reaching here is because an exception
        self.logger.debug("new_user " + error_text)

        return error_value, error_text
    def delete_user(self, user_id):
        """Delete a user from openstack VIM
        Returns the user identifier"""
        if self.debug:
            print("osconnector: Deleting a user from VIM")

        try:
            self._reload_connection()
            self.keystone.users.delete(user_id)

            return 1, user_id
        except ksExceptions.ConnectionError as e:
            error_value = -vimconn.HTTP_Bad_Request
            # Prefer the first exception arg when present; str(e) otherwise
            error_text = (
                type(e).__name__
                + ": "
                + (str(e) if len(e.args) == 0 else str(e.args[0]))
            )
        except ksExceptions.NotFound as e:
            error_value = -vimconn.HTTP_Not_Found
            error_text = (
                type(e).__name__
                + ": "
                + (str(e) if len(e.args) == 0 else str(e.args[0]))
            )
        except ksExceptions.ClientException as e:  # TODO remove
            error_value = -vimconn.HTTP_Bad_Request
            error_text = (
                type(e).__name__
                + ": "
                + (str(e) if len(e.args) == 0 else str(e.args[0]))
            )

        # TODO insert exception vimconn.HTTP_Unauthorized
        # if reaching here is because an exception
        self.logger.debug("delete_tenant " + error_text)

        return error_value, error_text
    def get_hosts_info(self):
        """Get the information of deployed hosts
        Returns the hosts content"""
        if self.debug:
            print("osconnector: Getting Host info from VIM")

        try:
            h_list = []
            self._reload_connection()
            hypervisors = self.nova.hypervisors.list()

            for hype in hypervisors:
                h_list.append(hype.to_dict())

            return 1, {"hosts": h_list}
        except nvExceptions.NotFound as e:
            error_value = -vimconn.HTTP_Not_Found
            error_text = str(e) if len(e.args) == 0 else str(e.args[0])
        except (ksExceptions.ClientException, nvExceptions.ClientException) as e:
            error_value = -vimconn.HTTP_Bad_Request
            # Prefer the first exception arg when present; str(e) otherwise
            error_text = (
                type(e).__name__
                + ": "
                + (str(e) if len(e.args) == 0 else str(e.args[0]))
            )

        # TODO insert exception vimconn.HTTP_Unauthorized
        # if reaching here is because an exception
        self.logger.debug("get_hosts_info " + error_text)

        return error_value, error_text
    def get_hosts(self, vim_tenant):
        """Get the hosts and deployed instances
        Returns the hosts content"""
        r, hype_dict = self.get_hosts_info()

        if r < 0:
            # Propagate the (error_value, error_text) pair untouched
            return r, hype_dict

        hypervisors = hype_dict["hosts"]

        try:
            servers = self.nova.servers.list()

            # Attach to every hypervisor the list of VM ids it hosts
            for hype in hypervisors:
                for server in servers:
                    if (
                        server.to_dict()["OS-EXT-SRV-ATTR:hypervisor_hostname"]
                        == hype["hypervisor_hostname"]
                    ):
                        if "vm" in hype:
                            hype["vm"].append(server.id)
                        else:
                            hype["vm"] = [server.id]

            return 1, hype_dict
        except nvExceptions.NotFound as e:
            error_value = -vimconn.HTTP_Not_Found
            error_text = str(e) if len(e.args) == 0 else str(e.args[0])
        except (ksExceptions.ClientException, nvExceptions.ClientException) as e:
            error_value = -vimconn.HTTP_Bad_Request
            # Prefer the first exception arg when present; str(e) otherwise
            error_text = (
                type(e).__name__
                + ": "
                + (str(e) if len(e.args) == 0 else str(e.args[0]))
            )

        # TODO insert exception vimconn.HTTP_Unauthorized
        # if reaching here is because an exception
        self.logger.debug("get_hosts " + error_text)

        return error_value, error_text
def new_classification(self, name, ctype, definition):
    """Add a new (Traffic) Classification (Neutron flow classifier) to the VIM.

    :param name: name for the classification
    :param ctype: classification type; must be in supportedClassificationTypes
    :param definition: dict with the flow classifier fields
    :return: VIM id of the created flow classifier
    """
    self.logger.debug(
        "Adding a new (Traffic) Classification to VIM, named %s", name
    )

    try:
        new_class = None
        self._reload_connection()

        if ctype not in supportedClassificationTypes:
            raise vimconn.VimConnNotSupportedException(
                "OpenStack VIM connector does not support provided "
                "Classification Type {}, supported ones are: {}".format(
                    ctype, supportedClassificationTypes
                )
            )

        if not self._validate_classification(ctype, definition):
            raise vimconn.VimConnException(
                "Incorrect Classification definition for the type specified."
            )

        classification_dict = definition
        classification_dict["name"] = name
        new_class = self.neutron.create_sfc_flow_classifier(
            {"flow_classifier": classification_dict}
        )

        return new_class["flow_classifier"]["id"]
    except (
        neExceptions.ConnectionFailed,
        ksExceptions.ClientException,
        neExceptions.NeutronException,
        ConnectionError,
    ) as e:
        self.logger.error("Creation of Classification failed.")
        self._format_exception(e)
def get_classification(self, class_id):
    """Return the single Classification matching class_id.

    Raises VimConnNotFoundException when absent and
    VimConnConflictException when the id is ambiguous.
    """
    self.logger.debug(" Getting Classification %s from VIM", class_id)
    matches = self.get_classification_list({"id": class_id})

    if not matches:
        raise vimconn.VimConnNotFoundException(
            "Classification '{}' not found".format(class_id)
        )

    if len(matches) > 1:
        raise vimconn.VimConnConflictException(
            "Found more than one Classification with this criteria"
        )

    return matches[0]
def get_classification_list(self, filter_dict={}):
    """List Classifications (Neutron flow classifiers) matching filter_dict."""
    self.logger.debug(
        "Getting Classifications from VIM filter: '%s'", str(filter_dict)
    )

    try:
        filter_dict_os = filter_dict.copy()
        self._reload_connection()

        # keystone v3 uses project_id instead of tenant_id
        if self.api_version3 and "tenant_id" in filter_dict_os:
            filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")

        classification_dict = self.neutron.list_sfc_flow_classifiers(
            **filter_dict_os
        )
        classification_list = classification_dict["flow_classifiers"]
        self.__classification_os2mano(classification_list)

        return classification_list
    except (
        neExceptions.ConnectionFailed,
        ksExceptions.ClientException,
        neExceptions.NeutronException,
        ConnectionError,
    ) as e:
        self._format_exception(e)
def delete_classification(self, class_id):
    """Delete a Classification (Neutron flow classifier) from the VIM.

    :param class_id: VIM id of the flow classifier
    :return: the deleted class_id
    """
    self.logger.debug("Deleting Classification '%s' from VIM", class_id)

    try:
        self._reload_connection()
        self.neutron.delete_sfc_flow_classifier(class_id)

        return class_id
    except (
        # fix: neExceptions.NeutronException was listed twice; duplicate removed
        neExceptions.ConnectionFailed,
        neExceptions.NeutronException,
        ksExceptions.ClientException,
        ConnectionError,
    ) as e:
        self._format_exception(e)
def new_sfi(self, name, ingress_ports, egress_ports, sfc_encap=True):
    """Add a new Service Function Instance (Neutron port pair) to the VIM.

    :param name: name of the SFI
    :param ingress_ports: list with exactly one ingress port id
    :param egress_ports: list with exactly one egress port id
    :param sfc_encap: True -> NSH correlation, False -> no correlation
    :return: VIM id of the created port pair
    """
    self.logger.debug(
        "Adding a new Service Function Instance to VIM, named '%s'", name
    )

    try:
        new_sfi = None
        self._reload_connection()
        correlation = "nsh" if sfc_encap else None

        if len(ingress_ports) != 1:
            raise vimconn.VimConnNotSupportedException(
                "OpenStack VIM connector can only have 1 ingress port per SFI"
            )

        if len(egress_ports) != 1:
            raise vimconn.VimConnNotSupportedException(
                "OpenStack VIM connector can only have 1 egress port per SFI"
            )

        sfi_dict = {
            "name": name,
            "ingress": ingress_ports[0],
            "egress": egress_ports[0],
            "service_function_parameters": {"correlation": correlation},
        }
        new_sfi = self.neutron.create_sfc_port_pair({"port_pair": sfi_dict})

        return new_sfi["port_pair"]["id"]
    except (
        neExceptions.ConnectionFailed,
        ksExceptions.ClientException,
        neExceptions.NeutronException,
        ConnectionError,
    ) as e:
        # best-effort rollback of a half-created port pair
        if new_sfi:
            try:
                self.neutron.delete_sfc_port_pair(new_sfi["port_pair"]["id"])
            except Exception:
                self.logger.error(
                    "Creation of Service Function Instance failed, with "
                    "subsequent deletion failure as well."
                )

        self._format_exception(e)
def get_sfi(self, sfi_id):
    """Return the single Service Function Instance matching sfi_id.

    Raises VimConnNotFoundException / VimConnConflictException otherwise.
    """
    self.logger.debug("Getting Service Function Instance %s from VIM", sfi_id)
    matches = self.get_sfi_list({"id": sfi_id})

    if not matches:
        raise vimconn.VimConnNotFoundException(
            "Service Function Instance '{}' not found".format(sfi_id)
        )

    if len(matches) > 1:
        raise vimconn.VimConnConflictException(
            "Found more than one Service Function Instance with this criteria"
        )

    return matches[0]
def get_sfi_list(self, filter_dict={}):
    """List Service Function Instances (Neutron port pairs) matching filter_dict."""
    self.logger.debug(
        "Getting Service Function Instances from VIM filter: '%s'", str(filter_dict)
    )

    try:
        self._reload_connection()
        filter_dict_os = filter_dict.copy()

        # keystone v3 uses project_id instead of tenant_id
        if self.api_version3 and "tenant_id" in filter_dict_os:
            filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")

        sfi_dict = self.neutron.list_sfc_port_pairs(**filter_dict_os)
        sfi_list = sfi_dict["port_pairs"]
        self.__sfi_os2mano(sfi_list)

        return sfi_list
    except (
        neExceptions.ConnectionFailed,
        ksExceptions.ClientException,
        neExceptions.NeutronException,
        ConnectionError,
    ) as e:
        self._format_exception(e)
def delete_sfi(self, sfi_id):
    """Delete a Service Function Instance (Neutron port pair) from the VIM.

    :param sfi_id: VIM id of the port pair
    :return: the deleted sfi_id
    """
    self.logger.debug("Deleting Service Function Instance '%s' from VIM", sfi_id)

    try:
        self._reload_connection()
        self.neutron.delete_sfc_port_pair(sfi_id)

        return sfi_id
    except (
        # fix: neExceptions.NeutronException was listed twice; duplicate removed
        neExceptions.ConnectionFailed,
        neExceptions.NeutronException,
        ksExceptions.ClientException,
        ConnectionError,
    ) as e:
        self._format_exception(e)
def new_sf(self, name, sfis, sfc_encap=True):
    """Add a new Service Function (Neutron port pair group) to the VIM.

    :param name: name of the SF
    :param sfis: list of SFI (port pair) ids composing the SF
    :param sfc_encap: expected SFC encapsulation of every member SFI
    :return: VIM id of the created port pair group
    """
    self.logger.debug("Adding a new Service Function to VIM, named '%s'", name)

    try:
        new_sf = None
        self._reload_connection()
        # correlation = None
        # correlation = "nsh"

        # all member SFIs must share the same SFC encapsulation
        for instance in sfis:
            sfi = self.get_sfi(instance)

            if sfi.get("sfc_encap") != sfc_encap:
                raise vimconn.VimConnNotSupportedException(
                    "OpenStack VIM connector requires all SFIs of the "
                    "same SF to share the same SFC Encapsulation"
                )

        sf_dict = {"name": name, "port_pairs": sfis}
        new_sf = self.neutron.create_sfc_port_pair_group(
            {"port_pair_group": sf_dict}
        )

        return new_sf["port_pair_group"]["id"]
    except (
        neExceptions.ConnectionFailed,
        ksExceptions.ClientException,
        neExceptions.NeutronException,
        ConnectionError,
    ) as e:
        # best-effort rollback of a half-created port pair group
        if new_sf:
            try:
                self.neutron.delete_sfc_port_pair_group(
                    new_sf["port_pair_group"]["id"]
                )
            except Exception:
                self.logger.error(
                    "Creation of Service Function failed, with "
                    "subsequent deletion failure as well."
                )

        self._format_exception(e)
def get_sf(self, sf_id):
    """Return the single Service Function matching sf_id.

    Raises VimConnNotFoundException / VimConnConflictException otherwise.
    """
    self.logger.debug("Getting Service Function %s from VIM", sf_id)
    matches = self.get_sf_list({"id": sf_id})

    if not matches:
        raise vimconn.VimConnNotFoundException(
            "Service Function '{}' not found".format(sf_id)
        )

    if len(matches) > 1:
        raise vimconn.VimConnConflictException(
            "Found more than one Service Function with this criteria"
        )

    return matches[0]
def get_sf_list(self, filter_dict={}):
    """List Service Functions (Neutron port pair groups) matching filter_dict."""
    self.logger.debug(
        "Getting Service Function from VIM filter: '%s'", str(filter_dict)
    )

    try:
        self._reload_connection()
        filter_dict_os = filter_dict.copy()

        # keystone v3 uses project_id instead of tenant_id
        if self.api_version3 and "tenant_id" in filter_dict_os:
            filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")

        sf_dict = self.neutron.list_sfc_port_pair_groups(**filter_dict_os)
        sf_list = sf_dict["port_pair_groups"]
        self.__sf_os2mano(sf_list)

        return sf_list
    except (
        neExceptions.ConnectionFailed,
        ksExceptions.ClientException,
        neExceptions.NeutronException,
        ConnectionError,
    ) as e:
        self._format_exception(e)
def delete_sf(self, sf_id):
    """Delete a Service Function (Neutron port pair group) from the VIM.

    :param sf_id: VIM id of the port pair group
    :return: the deleted sf_id
    """
    self.logger.debug("Deleting Service Function '%s' from VIM", sf_id)

    try:
        self._reload_connection()
        self.neutron.delete_sfc_port_pair_group(sf_id)

        return sf_id
    except (
        # fix: neExceptions.NeutronException was listed twice; duplicate removed
        neExceptions.ConnectionFailed,
        neExceptions.NeutronException,
        ksExceptions.ClientException,
        ConnectionError,
    ) as e:
        self._format_exception(e)
def new_sfp(self, name, classifications, sfs, sfc_encap=True, spi=None):
    """Add a new Service Function Path (Neutron port chain) to the VIM.

    :param name: name of the SFP
    :param classifications: list of flow classifier ids
    :param sfs: list of port pair group ids
    :param sfc_encap: True -> NSH correlation; False -> legacy MPLS
    :param spi: optional chain id to request
    :return: VIM id of the created port chain
    """
    self.logger.debug("Adding a new Service Function Path to VIM, named '%s'", name)

    try:
        new_sfp = None
        self._reload_connection()
        # In networking-sfc the MPLS encapsulation is legacy
        # should be used when no full SFC Encapsulation is intended
        correlation = "nsh" if sfc_encap else "mpls"

        sfp_dict = {
            "name": name,
            "flow_classifiers": classifications,
            "port_pair_groups": sfs,
            "chain_parameters": {"correlation": correlation},
        }

        if spi:
            sfp_dict["chain_id"] = spi

        new_sfp = self.neutron.create_sfc_port_chain({"port_chain": sfp_dict})

        return new_sfp["port_chain"]["id"]
    except (
        neExceptions.ConnectionFailed,
        ksExceptions.ClientException,
        neExceptions.NeutronException,
        ConnectionError,
    ) as e:
        # best-effort rollback of a half-created port chain
        if new_sfp:
            try:
                self.neutron.delete_sfc_port_chain(new_sfp["port_chain"]["id"])
            except Exception:
                self.logger.error(
                    "Creation of Service Function Path failed, with "
                    "subsequent deletion failure as well."
                )

        self._format_exception(e)
def get_sfp(self, sfp_id):
    """Return the single Service Function Path matching sfp_id.

    Raises VimConnNotFoundException / VimConnConflictException otherwise.
    """
    self.logger.debug(" Getting Service Function Path %s from VIM", sfp_id)
    matches = self.get_sfp_list({"id": sfp_id})

    if not matches:
        raise vimconn.VimConnNotFoundException(
            "Service Function Path '{}' not found".format(sfp_id)
        )

    if len(matches) > 1:
        raise vimconn.VimConnConflictException(
            "Found more than one Service Function Path with this criteria"
        )

    return matches[0]
def get_sfp_list(self, filter_dict={}):
    """List Service Function Paths (Neutron port chains) matching filter_dict."""
    self.logger.debug(
        "Getting Service Function Paths from VIM filter: '%s'", str(filter_dict)
    )

    try:
        self._reload_connection()
        filter_dict_os = filter_dict.copy()

        # keystone v3 uses project_id instead of tenant_id
        if self.api_version3 and "tenant_id" in filter_dict_os:
            filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")

        sfp_dict = self.neutron.list_sfc_port_chains(**filter_dict_os)
        sfp_list = sfp_dict["port_chains"]
        self.__sfp_os2mano(sfp_list)

        return sfp_list
    except (
        neExceptions.ConnectionFailed,
        ksExceptions.ClientException,
        neExceptions.NeutronException,
        ConnectionError,
    ) as e:
        self._format_exception(e)
def delete_sfp(self, sfp_id):
    """Delete a Service Function Path (Neutron port chain) from the VIM.

    :param sfp_id: VIM id of the port chain
    :return: the deleted sfp_id
    """
    self.logger.debug("Deleting Service Function Path '%s' from VIM", sfp_id)

    try:
        self._reload_connection()
        self.neutron.delete_sfc_port_chain(sfp_id)

        return sfp_id
    except (
        # fix: neExceptions.NeutronException was listed twice; duplicate removed
        neExceptions.ConnectionFailed,
        neExceptions.NeutronException,
        ksExceptions.ClientException,
        ConnectionError,
    ) as e:
        self._format_exception(e)
def refresh_sfps_status(self, sfp_list):
    """Get the status of the service function path
    Params: the list of sfp identifiers
    Returns a dictionary with:
        vm_id: #VIM id of this service function path
        status: #Mandatory. Text with one of:
            # DELETED (not found at vim)
            # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
            # OTHER (Vim reported other status not understood)
            # ERROR (VIM indicates an ERROR status)
            # ACTIVE
            # CREATING (on building process)
        error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
        vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
    """
    sfp_dict = {}
    self.logger.debug(
        "refresh_sfps status: Getting tenant SFP information from VIM"
    )

    for sfp_id in sfp_list:
        sfp = {}

        try:
            sfp_vim = self.get_sfp(sfp_id)

            # NOTE(review): condition reconstructed — TODO confirm against upstream
            if sfp_vim:
                sfp["status"] = vmStatus2manoFormat["ACTIVE"]
            else:
                sfp["status"] = "OTHER"
                sfp["error_msg"] = "VIM status reported " + sfp["status"]

            sfp["vim_info"] = self.serialize(sfp_vim)

            if sfp_vim.get("fault"):
                sfp["error_msg"] = str(sfp_vim["fault"])
        except vimconn.VimConnNotFoundException as e:
            self.logger.error("Exception getting sfp status: %s", str(e))
            sfp["status"] = "DELETED"
            sfp["error_msg"] = str(e)
        except vimconn.VimConnException as e:
            self.logger.error("Exception getting sfp status: %s", str(e))
            sfp["status"] = "VIM_ERROR"
            sfp["error_msg"] = str(e)

        sfp_dict[sfp_id] = sfp

    return sfp_dict
def refresh_sfis_status(self, sfi_list):
    """Get the status of the service function instances
    Params: the list of sfi identifiers
    Returns a dictionary with:
        vm_id: #VIM id of this service function instance
        status: #Mandatory. Text with one of:
            # DELETED (not found at vim)
            # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
            # OTHER (Vim reported other status not understood)
            # ERROR (VIM indicates an ERROR status)
            # ACTIVE
            # CREATING (on building process)
        error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
        vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
    """
    sfi_dict = {}
    self.logger.debug(
        "refresh_sfis status: Getting tenant sfi information from VIM"
    )

    for sfi_id in sfi_list:
        sfi = {}

        try:
            sfi_vim = self.get_sfi(sfi_id)

            # NOTE(review): condition reconstructed — TODO confirm against upstream
            if sfi_vim:
                sfi["status"] = vmStatus2manoFormat["ACTIVE"]
            else:
                sfi["status"] = "OTHER"
                sfi["error_msg"] = "VIM status reported " + sfi["status"]

            sfi["vim_info"] = self.serialize(sfi_vim)

            if sfi_vim.get("fault"):
                sfi["error_msg"] = str(sfi_vim["fault"])
        except vimconn.VimConnNotFoundException as e:
            self.logger.error("Exception getting sfi status: %s", str(e))
            sfi["status"] = "DELETED"
            sfi["error_msg"] = str(e)
        except vimconn.VimConnException as e:
            self.logger.error("Exception getting sfi status: %s", str(e))
            sfi["status"] = "VIM_ERROR"
            sfi["error_msg"] = str(e)

        sfi_dict[sfi_id] = sfi

    return sfi_dict
def refresh_sfs_status(self, sf_list):
    """Get the status of the service functions
    Params: the list of sf identifiers
    Returns a dictionary with:
        vm_id: #VIM id of this service function
        status: #Mandatory. Text with one of:
            # DELETED (not found at vim)
            # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
            # OTHER (Vim reported other status not understood)
            # ERROR (VIM indicates an ERROR status)
            # ACTIVE
            # CREATING (on building process)
        error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
        vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
    """
    sf_dict = {}
    self.logger.debug("refresh_sfs status: Getting tenant sf information from VIM")

    for sf_id in sf_list:
        sf = {}

        try:
            sf_vim = self.get_sf(sf_id)

            if sf_vim:
                sf["status"] = vmStatus2manoFormat["ACTIVE"]
            else:
                sf["status"] = "OTHER"
                # fix: report the status just stored in sf ("OTHER"), matching
                # refresh_sfps_status/refresh_sfis_status; the original read
                # sf_vim["status"], which cannot exist when sf_vim is falsy
                sf["error_msg"] = "VIM status reported " + sf["status"]

            sf["vim_info"] = self.serialize(sf_vim)

            if sf_vim.get("fault"):
                sf["error_msg"] = str(sf_vim["fault"])
        except vimconn.VimConnNotFoundException as e:
            self.logger.error("Exception getting sf status: %s", str(e))
            sf["status"] = "DELETED"
            sf["error_msg"] = str(e)
        except vimconn.VimConnException as e:
            self.logger.error("Exception getting sf status: %s", str(e))
            sf["status"] = "VIM_ERROR"
            sf["error_msg"] = str(e)

        sf_dict[sf_id] = sf

    return sf_dict
def refresh_classifications_status(self, classification_list):
    """Get the status of the classifications
    Params: the list of classification identifiers
    Returns a dictionary with:
        vm_id: #VIM id of this classifier
        status: #Mandatory. Text with one of:
            # DELETED (not found at vim)
            # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
            # OTHER (Vim reported other status not understood)
            # ERROR (VIM indicates an ERROR status)
            # ACTIVE
            # CREATING (on building process)
        error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
        vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
    """
    classification_dict = {}
    self.logger.debug(
        "refresh_classifications status: Getting tenant classification information from VIM"
    )

    for classification_id in classification_list:
        classification = {}

        try:
            classification_vim = self.get_classification(classification_id)

            if classification_vim:
                classification["status"] = vmStatus2manoFormat["ACTIVE"]
            else:
                classification["status"] = "OTHER"
                classification["error_msg"] = (
                    "VIM status reported " + classification["status"]
                )

            classification["vim_info"] = self.serialize(classification_vim)

            if classification_vim.get("fault"):
                classification["error_msg"] = str(classification_vim["fault"])
        except vimconn.VimConnNotFoundException as e:
            self.logger.error("Exception getting classification status: %s", str(e))
            classification["status"] = "DELETED"
            classification["error_msg"] = str(e)
        except vimconn.VimConnException as e:
            self.logger.error("Exception getting classification status: %s", str(e))
            classification["status"] = "VIM_ERROR"
            classification["error_msg"] = str(e)

        classification_dict[classification_id] = classification

    return classification_dict
def new_affinity_group(self, affinity_group_data):
    """Adds a server group to VIM
    affinity_group_data contains a dictionary with information, keys:
        name: name in VIM for the server group
        type: affinity or anti-affinity
        scope: Only nfvi-node allowed
    Returns the server group identifier"""
    self.logger.debug("Adding Server Group '%s'", str(affinity_group_data))

    try:
        name = affinity_group_data["name"]
        policy = affinity_group_data["type"]

        self._reload_connection()
        new_server_group = self.nova.server_groups.create(name, policy)

        return new_server_group.id
    except (
        ksExceptions.ClientException,
        nvExceptions.ClientException,
        ConnectionError,
        KeyError,
    ) as e:
        self._format_exception(e)
def get_affinity_group(self, affinity_group_id):
    """Obtain server group details from the VIM. Returns the server group details as a dict"""
    # fix: message said "Getting flavor" although this method fetches a server group
    self.logger.debug("Getting server group '%s'", affinity_group_id)

    try:
        self._reload_connection()
        server_group = self.nova.server_groups.find(id=affinity_group_id)

        return server_group.to_dict()
    except (
        nvExceptions.NotFound,
        nvExceptions.ClientException,
        ksExceptions.ClientException,
        ConnectionError,
    ) as e:
        self._format_exception(e)
def delete_affinity_group(self, affinity_group_id):
    """Deletes a server group from the VIM. Returns the old affinity_group_id"""
    self.logger.debug("Getting server group '%s'", affinity_group_id)

    try:
        self._reload_connection()
        self.nova.server_groups.delete(affinity_group_id)

        return affinity_group_id
    except (
        nvExceptions.NotFound,
        ksExceptions.ClientException,
        nvExceptions.ClientException,
        ConnectionError,
    ) as e:
        self._format_exception(e)
def get_vdu_state(self, vm_id):
    """
    Getting the state of a vdu.

    :param vm_id: ID of an instance
    :return: list [status, flavor id, hypervisor host, availability zone]
    """
    self.logger.debug("Getting the status of VM")
    self.logger.debug("VIM VM ID %s", vm_id)
    self._reload_connection()
    server_dict = self.nova.servers.find(id=vm_id).to_dict()
    vdu_data = [
        server_dict["status"],
        server_dict["flavor"]["id"],
        server_dict["OS-EXT-SRV-ATTR:host"],
        server_dict["OS-EXT-AZ:availability_zone"],
    ]
    self.logger.debug("vdu_data %s", vdu_data)

    return vdu_data
def check_compute_availability(self, host, server_flavor_details):
    """Check whether `host` has enough free resources for the given flavor.

    :param host: hypervisor hostname to check
    :param server_flavor_details: [ram, disk, vcpus] required by the flavor
    :return: the host name when it has strictly more free ram/disk/vcpus
             than required, otherwise None
    """
    self._reload_connection()
    hypervisor_search = self.nova.hypervisors.search(
        hypervisor_match=host, servers=True
    )

    for hypervisor in hypervisor_search:
        hypervisor_id = hypervisor.to_dict()["id"]
        hypervisor_details = self.nova.hypervisors.get(hypervisor=hypervisor_id)
        # fix: removed dead json.dumps/json.loads round-trip; to_dict()
        # already yields a plain dict
        hypervisor_json = hypervisor_details.to_dict()
        resources_available = [
            hypervisor_json["free_ram_mb"],
            hypervisor_json["disk_available_least"],
            hypervisor_json["vcpus"] - hypervisor_json["vcpus_used"],
        ]
        compute_available = all(
            x > y for x, y in zip(resources_available, server_flavor_details)
        )

        if compute_available:
            return host
    # implicit None when no hypervisor has enough free capacity
def check_availability_zone(
    self, old_az, server_flavor_details, old_host, host=None
):
    """Check that a migration target exists inside the availability zone.

    :param old_az: availability zone of the instance
    :param server_flavor_details: [ram, disk, vcpus] required by the flavor
    :param old_host: current hypervisor host of the instance
    :param host: optional explicit target host
    :return: dict {"zone_check": bool, "compute_availability": host-or-None}
    """
    self._reload_connection()
    az_check = {"zone_check": False, "compute_availability": None}

    for aggregate in self.nova.aggregates.list():
        # fix: removed dead json.dumps/json.loads round-trip of to_dict() output
        aggregate_json = aggregate.to_dict()

        if aggregate_json["availability_zone"] == old_az:
            hosts_list = aggregate_json["hosts"]

            if host is not None:
                # explicit target: it must belong to the zone and have capacity
                if host in hosts_list:
                    az_check["zone_check"] = True
                    available_compute_id = self.check_compute_availability(
                        host, server_flavor_details
                    )

                    if available_compute_id is not None:
                        az_check["compute_availability"] = available_compute_id
            else:
                # no explicit target: pick any other host in the zone with capacity
                for check_host in hosts_list:
                    if check_host != old_host:
                        available_compute_id = self.check_compute_availability(
                            check_host, server_flavor_details
                        )

                        if available_compute_id is not None:
                            az_check["zone_check"] = True
                            az_check["compute_availability"] = available_compute_id
                            break
                else:
                    az_check["zone_check"] = True

    return az_check
def migrate_instance(self, vm_id, compute_host=None):
    """
    Migrate a vdu.

    :param vm_id: ID of an instance
    :param compute_host: Host to migrate the vdu to
    :return: (state, target_host) on success
    """
    self._reload_connection()

    try:
        instance_state = self.get_vdu_state(vm_id)
        server_flavor_id = instance_state[1]
        server_hypervisor_name = instance_state[2]
        server_availability_zone = instance_state[3]

        server_flavor = self.nova.flavors.find(id=server_flavor_id).to_dict()
        server_flavor_details = [
            server_flavor["ram"],
            server_flavor["disk"],
            server_flavor["vcpus"],
        ]

        if compute_host == server_hypervisor_name:
            raise vimconn.VimConnException(
                "Unable to migrate instance '{}' to the same host '{}'".format(
                    vm_id, compute_host
                ),
                http_code=vimconn.HTTP_Bad_Request,
            )

        az_status = self.check_availability_zone(
            server_availability_zone,
            server_flavor_details,
            server_hypervisor_name,
            compute_host,
        )
        availability_zone_check = az_status["zone_check"]
        available_compute_id = az_status.get("compute_availability")

        if availability_zone_check is False:
            raise vimconn.VimConnException(
                "Unable to migrate instance '{}' to a different availability zone".format(
                    vm_id
                ),
                http_code=vimconn.HTTP_Bad_Request,
            )

        if available_compute_id is not None:
            self.nova.servers.live_migrate(
                server=vm_id,
                host=available_compute_id,
                block_migration=True,
                disk_over_commit=False,
            )
            state = self.get_vdu_state(vm_id)[0]
            changed_compute_host = ""
            # fix: vm_state was only assigned inside the MIGRATING branch,
            # producing a NameError when the server never reported MIGRATING
            vm_state = False

            if state == "MIGRATING":
                vm_state = self.__wait_for_vm(vm_id, "ACTIVE")
                changed_compute_host = self.get_vdu_state(vm_id)[2]

            if vm_state and changed_compute_host == available_compute_id:
                self.logger.debug(
                    "Instance '{}' migrated to the new compute host '{}'".format(
                        vm_id, changed_compute_host
                    )
                )

                return state, available_compute_id
            else:
                raise vimconn.VimConnException(
                    "Migration Failed. Instance '{}' not moved to the new host {}".format(
                        vm_id, available_compute_id
                    ),
                    http_code=vimconn.HTTP_Bad_Request,
                )
        else:
            raise vimconn.VimConnException(
                "Compute '{}' not available or does not have enough resources to migrate the instance".format(
                    available_compute_id
                ),
                http_code=vimconn.HTTP_Bad_Request,
            )
    except (
        nvExceptions.BadRequest,
        nvExceptions.ClientException,
        nvExceptions.NotFound,
    ) as e:
        self._format_exception(e)
def resize_instance(self, vm_id, new_flavor_id):
    """
    For resizing the vm based on the given flavor details.

    :param vm_id: ID of an instance
    :param new_flavor_id: Flavor id to be resized
    :return: the status of the resized instance
    """
    self._reload_connection()
    self.logger.debug("resize the flavor of an instance")
    instance_status, old_flavor_id, compute_host, az = self.get_vdu_state(vm_id)
    old_flavor_disk = self.nova.flavors.find(id=old_flavor_id).to_dict()["disk"]
    new_flavor_disk = self.nova.flavors.find(id=new_flavor_id).to_dict()["disk"]

    try:
        if instance_status in ("ACTIVE", "SHUTOFF"):
            # shrinking the disk is not supported by nova
            if old_flavor_disk > new_flavor_disk:
                raise nvExceptions.BadRequest(
                    400,
                    message="Server disk resize failed. Resize to lower disk flavor is not allowed",
                )
            else:
                self.nova.servers.resize(server=vm_id, flavor=new_flavor_id)
                vm_state = self.__wait_for_vm(vm_id, "VERIFY_RESIZE")

                if vm_state:
                    instance_resized_status = self.confirm_resize(vm_id)

                    return instance_resized_status
                else:
                    raise nvExceptions.BadRequest(
                        409,
                        message="Cannot 'resize' vm_state is in ERROR",
                    )
        else:
            self.logger.debug("ERROR : Instance is not in ACTIVE or SHUTOFF state")
            raise nvExceptions.BadRequest(
                409,
                message="Cannot 'resize' instance while it is in vm_state resized",
            )
    except (
        nvExceptions.BadRequest,
        nvExceptions.ClientException,
        nvExceptions.NotFound,
    ) as e:
        self._format_exception(e)
4394 def confirm_resize(self
, vm_id
):
4396 Confirm the resize of an instance
4398 vm_id: ID of an instance
4400 self
._reload
_connection
()
4401 self
.nova
.servers
.confirm_resize(server
=vm_id
)
4402 if self
.get_vdu_state(vm_id
)[0] == "VERIFY_RESIZE":
4403 self
.__wait
_for
_vm
(vm_id
, "ACTIVE")
4404 instance_status
= self
.get_vdu_state(vm_id
)[0]
4405 return instance_status