1 # -*- coding: utf-8 -*-
4 # Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
5 # This file is part of openmano
8 # Licensed under the Apache License, Version 2.0 (the "License"); you may
9 # not use this file except in compliance with the License. You may obtain
10 # a copy of the License at
12 # http://www.apache.org/licenses/LICENSE-2.0
14 # Unless required by applicable law or agreed to in writing, software
15 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
16 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
17 # License for the specific language governing permissions and limitations
22 osconnector implements all the methods to interact with openstack using the python-neutronclient.
24 For the VNF forwarding graph, The OpenStack VIM connector calls the
25 networking-sfc Neutron extension methods, whose resources are mapped
26 to the VIM connector's SFC resources as follows:
27 - Classification (OSM) -> Flow Classifier (Neutron)
28 - Service Function Instance (OSM) -> Port Pair (Neutron)
29 - Service Function (OSM) -> Port Pair Group (Neutron)
30 - Service Function Path (OSM) -> Port Chain (Neutron)
34 from http
.client
import HTTPException
37 from pprint
import pformat
41 from typing
import Dict
, List
, Optional
, Tuple
43 from cinderclient
import client
as cClient
44 from glanceclient
import client
as glClient
45 import glanceclient
.exc
as gl1Exceptions
46 from keystoneauth1
import session
47 from keystoneauth1
.identity
import v2
, v3
48 import keystoneclient
.exceptions
as ksExceptions
49 import keystoneclient
.v2_0
.client
as ksClient_v2
50 import keystoneclient
.v3
.client
as ksClient_v3
52 from neutronclient
.common
import exceptions
as neExceptions
53 from neutronclient
.neutron
import client
as neClient
54 from novaclient
import client
as nClient
, exceptions
as nvExceptions
55 from osm_ro_plugin
import vimconn
56 from requests
.exceptions
import ConnectionError
59 __author__
= "Alfonso Tierno, Gerardo Garcia, Pablo Montes, xFlow Research, Igor D.C., Eduardo Sousa"
60 __date__
= "$22-sep-2017 23:59:59$"
62 """contain the openstack virtual machine status to openmano status"""
63 vmStatus2manoFormat
= {
66 "SUSPENDED": "SUSPENDED",
67 "SHUTOFF": "INACTIVE",
72 netStatus2manoFormat
= {
75 "INACTIVE": "INACTIVE",
81 supportedClassificationTypes
= ["legacy_flow_classifier"]
83 # global var to have a timeout creating and deleting volumes
class SafeDumper(yaml.SafeDumper):
    def represent_data(self, data):
        """Represent *data*, downcasting exotic dict subclasses first.

        The OpenStack client libraries hand back custom subclasses of dict,
        which PyYAML's safe dumper deliberately refuses to serialize
        (reference issue 142 of pyyaml). Converting such values back into
        plain dicts lets the stock representer handle them.
        """
        if isinstance(data, dict) and data.__class__ != dict:
            data = {key: value for key, value in data.items()}

        return super().represent_data(data)
99 class vimconnector(vimconn
.VimConnector
):
114 """using common constructor parameters. In this case
115 'url' is the keystone authorization url,
116 'url_admin' is not used
118 api_version
= config
.get("APIversion")
120 if api_version
and api_version
not in ("v3.3", "v2.0", "2", "3"):
121 raise vimconn
.VimConnException(
122 "Invalid value '{}' for config:APIversion. "
123 "Allowed values are 'v3.3', 'v2.0', '2' or '3'".format(api_version
)
126 vim_type
= config
.get("vim_type")
128 if vim_type
and vim_type
not in ("vio", "VIO"):
129 raise vimconn
.VimConnException(
130 "Invalid value '{}' for config:vim_type."
131 "Allowed values are 'vio' or 'VIO'".format(vim_type
)
134 if config
.get("dataplane_net_vlan_range") is not None:
135 # validate vlan ranges provided by user
136 self
._validate
_vlan
_ranges
(
137 config
.get("dataplane_net_vlan_range"), "dataplane_net_vlan_range"
140 if config
.get("multisegment_vlan_range") is not None:
141 # validate vlan ranges provided by user
142 self
._validate
_vlan
_ranges
(
143 config
.get("multisegment_vlan_range"), "multisegment_vlan_range"
146 vimconn
.VimConnector
.__init
__(
160 if self
.config
.get("insecure") and self
.config
.get("ca_cert"):
161 raise vimconn
.VimConnException(
162 "options insecure and ca_cert are mutually exclusive"
167 if self
.config
.get("insecure"):
170 if self
.config
.get("ca_cert"):
171 self
.verify
= self
.config
.get("ca_cert")
174 raise TypeError("url param can not be NoneType")
176 self
.persistent_info
= persistent_info
177 self
.availability_zone
= persistent_info
.get("availability_zone", None)
178 self
.session
= persistent_info
.get("session", {"reload_client": True})
179 self
.my_tenant_id
= self
.session
.get("my_tenant_id")
180 self
.nova
= self
.session
.get("nova")
181 self
.neutron
= self
.session
.get("neutron")
182 self
.cinder
= self
.session
.get("cinder")
183 self
.glance
= self
.session
.get("glance")
184 # self.glancev1 = self.session.get("glancev1")
185 self
.keystone
= self
.session
.get("keystone")
186 self
.api_version3
= self
.session
.get("api_version3")
187 self
.vim_type
= self
.config
.get("vim_type")
190 self
.vim_type
= self
.vim_type
.upper()
192 if self
.config
.get("use_internal_endpoint"):
193 self
.endpoint_type
= "internalURL"
195 self
.endpoint_type
= None
197 logging
.getLogger("urllib3").setLevel(logging
.WARNING
)
198 logging
.getLogger("keystoneauth").setLevel(logging
.WARNING
)
199 logging
.getLogger("novaclient").setLevel(logging
.WARNING
)
200 self
.logger
= logging
.getLogger("ro.vim.openstack")
202 # allow security_groups to be a list or a single string
203 if isinstance(self
.config
.get("security_groups"), str):
204 self
.config
["security_groups"] = [self
.config
["security_groups"]]
206 self
.security_groups_id
= None
208 # ###### VIO Specific Changes #########
209 if self
.vim_type
== "VIO":
210 self
.logger
= logging
.getLogger("ro.vim.vio")
213 self
.logger
.setLevel(getattr(logging
, log_level
))
215 def __getitem__(self
, index
):
216 """Get individuals parameters.
218 if index
== "project_domain_id":
219 return self
.config
.get("project_domain_id")
220 elif index
== "user_domain_id":
221 return self
.config
.get("user_domain_id")
223 return vimconn
.VimConnector
.__getitem
__(self
, index
)
225 def __setitem__(self
, index
, value
):
226 """Set individuals parameters and it is marked as dirty so to force connection reload.
228 if index
== "project_domain_id":
229 self
.config
["project_domain_id"] = value
230 elif index
== "user_domain_id":
231 self
.config
["user_domain_id"] = value
233 vimconn
.VimConnector
.__setitem
__(self
, index
, value
)
235 self
.session
["reload_client"] = True
237 def serialize(self
, value
):
238 """Serialization of python basic types.
240 In the case value is not serializable a message will be logged and a
241 simple representation of the data that cannot be converted back to
244 if isinstance(value
, str):
249 value
, Dumper
=SafeDumper
, default_flow_style
=True, width
=256
251 except yaml
.representer
.RepresenterError
:
253 "The following entity cannot be serialized in YAML:\n\n%s\n\n",
260 def _reload_connection(self
):
261 """Called before any operation, it check if credentials has changed
262 Throw keystoneclient.apiclient.exceptions.AuthorizationFailure
264 # TODO control the timing and possible token timeout, but it seams that python client does this task for us :-)
265 if self
.session
["reload_client"]:
266 if self
.config
.get("APIversion"):
267 self
.api_version3
= (
268 self
.config
["APIversion"] == "v3.3"
269 or self
.config
["APIversion"] == "3"
271 else: # get from ending auth_url that end with v3 or with v2.0
272 self
.api_version3
= self
.url
.endswith("/v3") or self
.url
.endswith(
276 self
.session
["api_version3"] = self
.api_version3
278 if self
.api_version3
:
279 if self
.config
.get("project_domain_id") or self
.config
.get(
280 "project_domain_name"
282 project_domain_id_default
= None
284 project_domain_id_default
= "default"
286 if self
.config
.get("user_domain_id") or self
.config
.get(
289 user_domain_id_default
= None
291 user_domain_id_default
= "default"
295 password
=self
.passwd
,
296 project_name
=self
.tenant_name
,
297 project_id
=self
.tenant_id
,
298 project_domain_id
=self
.config
.get(
299 "project_domain_id", project_domain_id_default
301 user_domain_id
=self
.config
.get(
302 "user_domain_id", user_domain_id_default
304 project_domain_name
=self
.config
.get("project_domain_name"),
305 user_domain_name
=self
.config
.get("user_domain_name"),
311 password
=self
.passwd
,
312 tenant_name
=self
.tenant_name
,
313 tenant_id
=self
.tenant_id
,
316 sess
= session
.Session(auth
=auth
, verify
=self
.verify
)
317 # addedd region_name to keystone, nova, neutron and cinder to support distributed cloud for Wind River
318 # Titanium cloud and StarlingX
319 region_name
= self
.config
.get("region_name")
321 if self
.api_version3
:
322 self
.keystone
= ksClient_v3
.Client(
324 endpoint_type
=self
.endpoint_type
,
325 region_name
=region_name
,
328 self
.keystone
= ksClient_v2
.Client(
329 session
=sess
, endpoint_type
=self
.endpoint_type
332 self
.session
["keystone"] = self
.keystone
333 # In order to enable microversion functionality an explicit microversion must be specified in "config".
334 # This implementation approach is due to the warning message in
335 # https://developer.openstack.org/api-guide/compute/microversions.html
336 # where it is stated that microversion backwards compatibility is not guaranteed and clients should
337 # always require an specific microversion.
338 # To be able to use "device role tagging" functionality define "microversion: 2.32" in datacenter config
339 version
= self
.config
.get("microversion")
344 # addedd region_name to keystone, nova, neutron and cinder to support distributed cloud for Wind River
345 # Titanium cloud and StarlingX
346 self
.nova
= self
.session
["nova"] = nClient
.Client(
349 endpoint_type
=self
.endpoint_type
,
350 region_name
=region_name
,
352 self
.neutron
= self
.session
["neutron"] = neClient
.Client(
355 endpoint_type
=self
.endpoint_type
,
356 region_name
=region_name
,
359 if sess
.get_all_version_data(service_type
="volumev2"):
360 self
.cinder
= self
.session
["cinder"] = cClient
.Client(
363 endpoint_type
=self
.endpoint_type
,
364 region_name
=region_name
,
367 self
.cinder
= self
.session
["cinder"] = cClient
.Client(
370 endpoint_type
=self
.endpoint_type
,
371 region_name
=region_name
,
375 self
.my_tenant_id
= self
.session
["my_tenant_id"] = sess
.get_project_id()
377 self
.logger
.error("Cannot get project_id from session", exc_info
=True)
379 if self
.endpoint_type
== "internalURL":
380 glance_service_id
= self
.keystone
.services
.list(name
="glance")[0].id
381 glance_endpoint
= self
.keystone
.endpoints
.list(
382 glance_service_id
, interface
="internal"
385 glance_endpoint
= None
387 self
.glance
= self
.session
["glance"] = glClient
.Client(
388 2, session
=sess
, endpoint
=glance_endpoint
390 # using version 1 of glance client in new_image()
391 # self.glancev1 = self.session["glancev1"] = glClient.Client("1", session=sess,
392 # endpoint=glance_endpoint)
393 self
.session
["reload_client"] = False
394 self
.persistent_info
["session"] = self
.session
395 # add availablity zone info inside self.persistent_info
396 self
._set
_availablity
_zones
()
397 self
.persistent_info
["availability_zone"] = self
.availability_zone
398 # force to get again security_groups_ids next time they are needed
399 self
.security_groups_id
= None
401 def __net_os2mano(self
, net_list_dict
):
402 """Transform the net openstack format to mano format
403 net_list_dict can be a list of dict or a single dict"""
404 if type(net_list_dict
) is dict:
405 net_list_
= (net_list_dict
,)
406 elif type(net_list_dict
) is list:
407 net_list_
= net_list_dict
409 raise TypeError("param net_list_dict must be a list or a dictionary")
410 for net
in net_list_
:
411 if net
.get("provider:network_type") == "vlan":
414 net
["type"] = "bridge"
416 def __classification_os2mano(self
, class_list_dict
):
417 """Transform the openstack format (Flow Classifier) to mano format
418 (Classification) class_list_dict can be a list of dict or a single dict
420 if isinstance(class_list_dict
, dict):
421 class_list_
= [class_list_dict
]
422 elif isinstance(class_list_dict
, list):
423 class_list_
= class_list_dict
425 raise TypeError("param class_list_dict must be a list or a dictionary")
426 for classification
in class_list_
:
427 id = classification
.pop("id")
428 name
= classification
.pop("name")
429 description
= classification
.pop("description")
430 project_id
= classification
.pop("project_id")
431 tenant_id
= classification
.pop("tenant_id")
432 original_classification
= copy
.deepcopy(classification
)
433 classification
.clear()
434 classification
["ctype"] = "legacy_flow_classifier"
435 classification
["definition"] = original_classification
436 classification
["id"] = id
437 classification
["name"] = name
438 classification
["description"] = description
439 classification
["project_id"] = project_id
440 classification
["tenant_id"] = tenant_id
442 def __sfi_os2mano(self
, sfi_list_dict
):
443 """Transform the openstack format (Port Pair) to mano format (SFI)
444 sfi_list_dict can be a list of dict or a single dict
446 if isinstance(sfi_list_dict
, dict):
447 sfi_list_
= [sfi_list_dict
]
448 elif isinstance(sfi_list_dict
, list):
449 sfi_list_
= sfi_list_dict
451 raise TypeError("param sfi_list_dict must be a list or a dictionary")
453 for sfi
in sfi_list_
:
454 sfi
["ingress_ports"] = []
455 sfi
["egress_ports"] = []
457 if sfi
.get("ingress"):
458 sfi
["ingress_ports"].append(sfi
["ingress"])
460 if sfi
.get("egress"):
461 sfi
["egress_ports"].append(sfi
["egress"])
465 params
= sfi
.get("service_function_parameters")
469 correlation
= params
.get("correlation")
474 sfi
["sfc_encap"] = sfc_encap
475 del sfi
["service_function_parameters"]
477 def __sf_os2mano(self
, sf_list_dict
):
478 """Transform the openstack format (Port Pair Group) to mano format (SF)
479 sf_list_dict can be a list of dict or a single dict
481 if isinstance(sf_list_dict
, dict):
482 sf_list_
= [sf_list_dict
]
483 elif isinstance(sf_list_dict
, list):
484 sf_list_
= sf_list_dict
486 raise TypeError("param sf_list_dict must be a list or a dictionary")
489 del sf
["port_pair_group_parameters"]
490 sf
["sfis"] = sf
["port_pairs"]
493 def __sfp_os2mano(self
, sfp_list_dict
):
494 """Transform the openstack format (Port Chain) to mano format (SFP)
495 sfp_list_dict can be a list of dict or a single dict
497 if isinstance(sfp_list_dict
, dict):
498 sfp_list_
= [sfp_list_dict
]
499 elif isinstance(sfp_list_dict
, list):
500 sfp_list_
= sfp_list_dict
502 raise TypeError("param sfp_list_dict must be a list or a dictionary")
504 for sfp
in sfp_list_
:
505 params
= sfp
.pop("chain_parameters")
509 correlation
= params
.get("correlation")
514 sfp
["sfc_encap"] = sfc_encap
515 sfp
["spi"] = sfp
.pop("chain_id")
516 sfp
["classifications"] = sfp
.pop("flow_classifiers")
517 sfp
["service_functions"] = sfp
.pop("port_pair_groups")
519 # placeholder for now; read TODO note below
520 def _validate_classification(self
, type, definition
):
521 # only legacy_flow_classifier Type is supported at this point
523 # TODO(igordcard): this method should be an abstract method of an
524 # abstract Classification class to be implemented by the specific
525 # Types. Also, abstract vimconnector should call the validation
526 # method before the implemented VIM connectors are called.
528 def _format_exception(self
, exception
):
529 """Transform a keystone, nova, neutron exception into a vimconn exception discovering the cause"""
530 message_error
= str(exception
)
536 neExceptions
.NetworkNotFoundClient
,
537 nvExceptions
.NotFound
,
538 ksExceptions
.NotFound
,
539 gl1Exceptions
.HTTPNotFound
,
542 raise vimconn
.VimConnNotFoundException(
543 type(exception
).__name
__ + ": " + message_error
549 gl1Exceptions
.HTTPException
,
550 gl1Exceptions
.CommunicationError
,
552 ksExceptions
.ConnectionError
,
553 neExceptions
.ConnectionFailed
,
556 if type(exception
).__name
__ == "SSLError":
557 tip
= " (maybe option 'insecure' must be added to the VIM)"
559 raise vimconn
.VimConnConnectionException(
560 "Invalid URL or credentials{}: {}".format(tip
, message_error
)
566 nvExceptions
.BadRequest
,
567 ksExceptions
.BadRequest
,
570 raise vimconn
.VimConnException(
571 type(exception
).__name
__ + ": " + message_error
576 nvExceptions
.ClientException
,
577 ksExceptions
.ClientException
,
578 neExceptions
.NeutronException
,
581 raise vimconn
.VimConnUnexpectedResponse(
582 type(exception
).__name
__ + ": " + message_error
584 elif isinstance(exception
, nvExceptions
.Conflict
):
585 raise vimconn
.VimConnConflictException(
586 type(exception
).__name
__ + ": " + message_error
588 elif isinstance(exception
, vimconn
.VimConnException
):
591 self
.logger
.error("General Exception " + message_error
, exc_info
=True)
593 raise vimconn
.VimConnConnectionException(
594 type(exception
).__name
__ + ": " + message_error
597 def _get_ids_from_name(self
):
599 Obtain ids from name of tenant and security_groups. Store at self.security_groups_id
602 # get tenant_id if only tenant_name is supplied
603 self
._reload
_connection
()
605 if not self
.my_tenant_id
:
606 raise vimconn
.VimConnConnectionException(
607 "Error getting tenant information from name={} id={}".format(
608 self
.tenant_name
, self
.tenant_id
612 if self
.config
.get("security_groups") and not self
.security_groups_id
:
613 # convert from name to id
614 neutron_sg_list
= self
.neutron
.list_security_groups(
615 tenant_id
=self
.my_tenant_id
618 self
.security_groups_id
= []
619 for sg
in self
.config
.get("security_groups"):
620 for neutron_sg
in neutron_sg_list
:
621 if sg
in (neutron_sg
["id"], neutron_sg
["name"]):
622 self
.security_groups_id
.append(neutron_sg
["id"])
625 self
.security_groups_id
= None
627 raise vimconn
.VimConnConnectionException(
628 "Not found security group {} for this tenant".format(sg
)
631 def check_vim_connectivity(self
):
632 # just get network list to check connectivity and credentials
633 self
.get_network_list(filter_dict
={})
635 def get_tenant_list(self
, filter_dict
={}):
636 """Obtain tenants of VIM
637 filter_dict can contain the following keys:
638 name: filter by tenant name
639 id: filter by tenant uuid/id
641 Returns the tenant list of dictionaries: [{'name':'<name>, 'id':'<id>, ...}, ...]
643 self
.logger
.debug("Getting tenants from VIM filter: '%s'", str(filter_dict
))
646 self
._reload
_connection
()
648 if self
.api_version3
:
649 project_class_list
= self
.keystone
.projects
.list(
650 name
=filter_dict
.get("name")
653 project_class_list
= self
.keystone
.tenants
.findall(**filter_dict
)
657 for project
in project_class_list
:
658 if filter_dict
.get("id") and filter_dict
["id"] != project
.id:
661 project_list
.append(project
.to_dict())
665 ksExceptions
.ConnectionError
,
666 ksExceptions
.ClientException
,
669 self
._format
_exception
(e
)
671 def new_tenant(self
, tenant_name
, tenant_description
):
672 """Adds a new tenant to openstack VIM. Returns the tenant identifier"""
673 self
.logger
.debug("Adding a new tenant name: %s", tenant_name
)
676 self
._reload
_connection
()
678 if self
.api_version3
:
679 project
= self
.keystone
.projects
.create(
681 self
.config
.get("project_domain_id", "default"),
682 description
=tenant_description
,
686 project
= self
.keystone
.tenants
.create(tenant_name
, tenant_description
)
690 ksExceptions
.ConnectionError
,
691 ksExceptions
.ClientException
,
692 ksExceptions
.BadRequest
,
695 self
._format
_exception
(e
)
697 def delete_tenant(self
, tenant_id
):
698 """Delete a tenant from openstack VIM. Returns the old tenant identifier"""
699 self
.logger
.debug("Deleting tenant %s from VIM", tenant_id
)
702 self
._reload
_connection
()
704 if self
.api_version3
:
705 self
.keystone
.projects
.delete(tenant_id
)
707 self
.keystone
.tenants
.delete(tenant_id
)
711 ksExceptions
.ConnectionError
,
712 ksExceptions
.ClientException
,
713 ksExceptions
.NotFound
,
716 self
._format
_exception
(e
)
724 provider_network_profile
=None,
726 """Adds a tenant network to VIM
728 'net_name': name of the network
730 'bridge': overlay isolated network
731 'data': underlay E-LAN network for Passthrough and SRIOV interfaces
732 'ptp': underlay E-LINE network for Passthrough and SRIOV interfaces.
733 'ip_profile': is a dict containing the IP parameters of the network
734 'ip_version': can be "IPv4" or "IPv6" (Currently only IPv4 is implemented)
735 'subnet_address': ip_prefix_schema, that is X.X.X.X/Y
736 'gateway_address': (Optional) ip_schema, that is X.X.X.X
737 'dns_address': (Optional) comma separated list of ip_schema, e.g. X.X.X.X[,X,X,X,X]
738 'dhcp_enabled': True or False
739 'dhcp_start_address': ip_schema, first IP to grant
740 'dhcp_count': number of IPs to grant.
741 'shared': if this network can be seen/use by other tenants/organization
742 'provider_network_profile': (optional) contains {segmentation-id: vlan, network-type: vlan|vxlan,
743 physical-network: physnet-label}
744 Returns a tuple with the network identifier and created_items, or raises an exception on error
745 created_items can be None or a dictionary where this method can include key-values that will be passed to
746 the method delete_network. Can be used to store created segments, created l2gw connections, etc.
747 Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
751 "Adding a new network to VIM name '%s', type '%s'", net_name
, net_type
753 # self.logger.debug(">>>>>>>>>>>>>>>>>> IP profile %s", str(ip_profile))
758 if provider_network_profile
:
759 vlan
= provider_network_profile
.get("segmentation-id")
763 self
._reload
_connection
()
764 network_dict
= {"name": net_name
, "admin_state_up": True}
766 if net_type
in ("data", "ptp") or provider_network_profile
:
767 provider_physical_network
= None
769 if provider_network_profile
and provider_network_profile
.get(
772 provider_physical_network
= provider_network_profile
.get(
776 # provider-network must be one of the dataplane_physcial_netowrk if this is a list. If it is string
777 # or not declared, just ignore the checking
780 self
.config
.get("dataplane_physical_net"), (tuple, list)
782 and provider_physical_network
783 not in self
.config
["dataplane_physical_net"]
785 raise vimconn
.VimConnConflictException(
786 "Invalid parameter 'provider-network:physical-network' "
787 "for network creation. '{}' is not one of the declared "
788 "list at VIM_config:dataplane_physical_net".format(
789 provider_physical_network
793 # use the default dataplane_physical_net
794 if not provider_physical_network
:
795 provider_physical_network
= self
.config
.get(
796 "dataplane_physical_net"
799 # if it is non empty list, use the first value. If it is a string use the value directly
801 isinstance(provider_physical_network
, (tuple, list))
802 and provider_physical_network
804 provider_physical_network
= provider_physical_network
[0]
806 if not provider_physical_network
:
807 raise vimconn
.VimConnConflictException(
808 "missing information needed for underlay networks. Provide "
809 "'dataplane_physical_net' configuration at VIM or use the NS "
810 "instantiation parameter 'provider-network.physical-network'"
814 if not self
.config
.get("multisegment_support"):
816 "provider:physical_network"
817 ] = provider_physical_network
820 provider_network_profile
821 and "network-type" in provider_network_profile
824 "provider:network_type"
825 ] = provider_network_profile
["network-type"]
827 network_dict
["provider:network_type"] = self
.config
.get(
828 "dataplane_network_type", "vlan"
832 network_dict
["provider:segmentation_id"] = vlan
837 "provider:physical_network": "",
838 "provider:network_type": "vxlan",
840 segment_list
.append(segment1_dict
)
842 "provider:physical_network": provider_physical_network
,
843 "provider:network_type": "vlan",
847 segment2_dict
["provider:segmentation_id"] = vlan
848 elif self
.config
.get("multisegment_vlan_range"):
849 vlanID
= self
._generate
_multisegment
_vlanID
()
850 segment2_dict
["provider:segmentation_id"] = vlanID
853 # raise vimconn.VimConnConflictException(
854 # "You must provide "multisegment_vlan_range" at config dict before creating a multisegment
856 segment_list
.append(segment2_dict
)
857 network_dict
["segments"] = segment_list
859 # VIO Specific Changes. It needs a concrete VLAN
860 if self
.vim_type
== "VIO" and vlan
is None:
861 if self
.config
.get("dataplane_net_vlan_range") is None:
862 raise vimconn
.VimConnConflictException(
863 "You must provide 'dataplane_net_vlan_range' in format "
864 "[start_ID - end_ID] at VIM_config for creating underlay "
868 network_dict
["provider:segmentation_id"] = self
._generate
_vlanID
()
870 network_dict
["shared"] = shared
872 if self
.config
.get("disable_network_port_security"):
873 network_dict
["port_security_enabled"] = False
875 if self
.config
.get("neutron_availability_zone_hints"):
876 hints
= self
.config
.get("neutron_availability_zone_hints")
878 if isinstance(hints
, str):
881 network_dict
["availability_zone_hints"] = hints
883 new_net
= self
.neutron
.create_network({"network": network_dict
})
885 # create subnetwork, even if there is no profile
890 if not ip_profile
.get("subnet_address"):
891 # Fake subnet is required
892 subnet_rand
= random
.SystemRandom().randint(0, 255)
893 ip_profile
["subnet_address"] = "192.168.{}.0/24".format(subnet_rand
)
895 if "ip_version" not in ip_profile
:
896 ip_profile
["ip_version"] = "IPv4"
899 "name": net_name
+ "-subnet",
900 "network_id": new_net
["network"]["id"],
901 "ip_version": 4 if ip_profile
["ip_version"] == "IPv4" else 6,
902 "cidr": ip_profile
["subnet_address"],
905 # Gateway should be set to None if not needed. Otherwise openstack assigns one by default
906 if ip_profile
.get("gateway_address"):
907 subnet
["gateway_ip"] = ip_profile
["gateway_address"]
909 subnet
["gateway_ip"] = None
911 if ip_profile
.get("dns_address"):
912 subnet
["dns_nameservers"] = ip_profile
["dns_address"].split(";")
914 if "dhcp_enabled" in ip_profile
:
915 subnet
["enable_dhcp"] = (
917 if ip_profile
["dhcp_enabled"] == "false"
918 or ip_profile
["dhcp_enabled"] is False
922 if ip_profile
.get("dhcp_start_address"):
923 subnet
["allocation_pools"] = []
924 subnet
["allocation_pools"].append(dict())
925 subnet
["allocation_pools"][0]["start"] = ip_profile
[
929 if ip_profile
.get("dhcp_count"):
930 # parts = ip_profile["dhcp_start_address"].split(".")
931 # ip_int = (int(parts[0]) << 24) + (int(parts[1]) << 16) + (int(parts[2]) << 8) + int(parts[3])
932 ip_int
= int(netaddr
.IPAddress(ip_profile
["dhcp_start_address"]))
933 ip_int
+= ip_profile
["dhcp_count"] - 1
934 ip_str
= str(netaddr
.IPAddress(ip_int
))
935 subnet
["allocation_pools"][0]["end"] = ip_str
938 ip_profile
.get("ipv6_address_mode")
939 and ip_profile
["ip_version"] != "IPv4"
941 subnet
["ipv6_address_mode"] = ip_profile
["ipv6_address_mode"]
942 # ipv6_ra_mode can be set to the same value for most use cases, see documentation:
943 # https://docs.openstack.org/neutron/latest/admin/config-ipv6.html#ipv6-ra-mode-and-ipv6-address-mode-combinations
944 subnet
["ipv6_ra_mode"] = ip_profile
["ipv6_address_mode"]
946 # self.logger.debug(">>>>>>>>>>>>>>>>>> Subnet: %s", str(subnet))
947 self
.neutron
.create_subnet({"subnet": subnet
})
949 if net_type
== "data" and self
.config
.get("multisegment_support"):
950 if self
.config
.get("l2gw_support"):
951 l2gw_list
= self
.neutron
.list_l2_gateways().get("l2_gateways", ())
952 for l2gw
in l2gw_list
:
954 "l2_gateway_id": l2gw
["id"],
955 "network_id": new_net
["network"]["id"],
956 "segmentation_id": str(vlanID
),
958 new_l2gw_conn
= self
.neutron
.create_l2_gateway_connection(
959 {"l2_gateway_connection": l2gw_conn
}
963 + str(new_l2gw_conn
["l2_gateway_connection"]["id"])
966 return new_net
["network"]["id"], created_items
967 except Exception as e
:
968 # delete l2gw connections (if any) before deleting the network
969 for k
, v
in created_items
.items():
970 if not v
: # skip already deleted
974 k_item
, _
, k_id
= k
.partition(":")
976 if k_item
== "l2gwconn":
977 self
.neutron
.delete_l2_gateway_connection(k_id
)
978 except Exception as e2
:
980 "Error deleting l2 gateway connection: {}: {}".format(
981 type(e2
).__name
__, e2
986 self
.neutron
.delete_network(new_net
["network"]["id"])
988 self
._format
_exception
(e
)
990 def get_network_list(self
, filter_dict
={}):
991 """Obtain tenant networks of VIM
997 admin_state_up: boolean
999 Returns the network list of dictionaries
1001 self
.logger
.debug("Getting network from VIM filter: '%s'", str(filter_dict
))
1004 self
._reload
_connection
()
1005 filter_dict_os
= filter_dict
.copy()
1007 if self
.api_version3
and "tenant_id" in filter_dict_os
:
1009 filter_dict_os
["project_id"] = filter_dict_os
.pop("tenant_id")
1011 net_dict
= self
.neutron
.list_networks(**filter_dict_os
)
1012 net_list
= net_dict
["networks"]
1013 self
.__net
_os
2mano
(net_list
)
1017 neExceptions
.ConnectionFailed
,
1018 ksExceptions
.ClientException
,
1019 neExceptions
.NeutronException
,
1022 self
._format
_exception
(e
)
1024 def get_network(self
, net_id
):
1025 """Obtain details of network from VIM
1026 Returns the network information from a network id"""
1027 self
.logger
.debug(" Getting tenant network %s from VIM", net_id
)
1028 filter_dict
= {"id": net_id
}
1029 net_list
= self
.get_network_list(filter_dict
)
1031 if len(net_list
) == 0:
1032 raise vimconn
.VimConnNotFoundException(
1033 "Network '{}' not found".format(net_id
)
1035 elif len(net_list
) > 1:
1036 raise vimconn
.VimConnConflictException(
1037 "Found more than one network with this criteria"
1042 for subnet_id
in net
.get("subnets", ()):
1044 subnet
= self
.neutron
.show_subnet(subnet_id
)
1045 except Exception as e
:
1047 "osconnector.get_network(): Error getting subnet %s %s"
1050 subnet
= {"id": subnet_id
, "fault": str(e
)}
1052 subnets
.append(subnet
)
1054 net
["subnets"] = subnets
1055 net
["encapsulation"] = net
.get("provider:network_type")
1056 net
["encapsulation_type"] = net
.get("provider:network_type")
1057 net
["segmentation_id"] = net
.get("provider:segmentation_id")
1058 net
["encapsulation_id"] = net
.get("provider:segmentation_id")
1062 def delete_network(self
, net_id
, created_items
=None):
1064 Removes a tenant network from VIM and its associated elements
1065 :param net_id: VIM identifier of the network, provided by method new_network
1066 :param created_items: dictionary with extra items to be deleted. provided by method new_network
1067 Returns the network identifier or raises an exception upon error or when network is not found
1069 self
.logger
.debug("Deleting network '%s' from VIM", net_id
)
1071 if created_items
is None:
1075 self
._reload
_connection
()
1076 # delete l2gw connections (if any) before deleting the network
1077 for k
, v
in created_items
.items():
1078 if not v
: # skip already deleted
1082 k_item
, _
, k_id
= k
.partition(":")
1083 if k_item
== "l2gwconn":
1084 self
.neutron
.delete_l2_gateway_connection(k_id
)
1085 except Exception as e
:
1087 "Error deleting l2 gateway connection: {}: {}".format(
1092 # delete VM ports attached to this networks before the network
1093 ports
= self
.neutron
.list_ports(network_id
=net_id
)
1094 for p
in ports
["ports"]:
1096 self
.neutron
.delete_port(p
["id"])
1097 except Exception as e
:
1098 self
.logger
.error("Error deleting port %s: %s", p
["id"], str(e
))
1100 self
.neutron
.delete_network(net_id
)
1104 neExceptions
.ConnectionFailed
,
1105 neExceptions
.NetworkNotFoundClient
,
1106 neExceptions
.NeutronException
,
1107 ksExceptions
.ClientException
,
1108 neExceptions
.NeutronException
,
1111 self
._format
_exception
(e
)
def refresh_nets_status(self, net_list):
    """Get the status of the networks
    Params: the list of network identifiers
    Returns a dictionary with:
        net_id:         #VIM id of this network
            status:     #Mandatory. Text with one of:
                        # DELETED (not found at vim)
                        # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
                        # OTHER (Vim reported other status not understood)
                        # ERROR (VIM indicates an ERROR status)
                        # ACTIVE, INACTIVE, DOWN (admin down),
                        # BUILD (on building process)
            error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR
            vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)
    """
    net_dict = {}

    for net_id in net_list:
        net = {}

        try:
            net_vim = self.get_network(net_id)

            # map the VIM status to the MANO vocabulary; unknown values become OTHER
            if net_vim["status"] in netStatus2manoFormat:
                net["status"] = netStatus2manoFormat[net_vim["status"]]
            else:
                net["status"] = "OTHER"
                net["error_msg"] = "VIM status reported " + net_vim["status"]

            # an administratively-down network is reported as DOWN even if ACTIVE
            if net["status"] == "ACTIVE" and not net_vim["admin_state_up"]:
                net["status"] = "DOWN"

            net["vim_info"] = self.serialize(net_vim)

            if net_vim.get("fault"):  # TODO
                net["error_msg"] = str(net_vim["fault"])
        except vimconn.VimConnNotFoundException as e:
            self.logger.error("Exception getting net status: %s", str(e))
            net["status"] = "DELETED"
            net["error_msg"] = str(e)
        except vimconn.VimConnException as e:
            self.logger.error("Exception getting net status: %s", str(e))
            net["status"] = "VIM_ERROR"
            net["error_msg"] = str(e)

        net_dict[net_id] = net

    return net_dict
def get_flavor(self, flavor_id):
    """Obtain flavor details from the VIM. Returns the flavor dict details"""
    self.logger.debug("Getting flavor '%s'", flavor_id)

    try:
        self._reload_connection()
        flavor = self.nova.flavors.find(id=flavor_id)

        # TODO parse input and translate to VIM format (openmano_schemas.new_vminstance_response_schema)
        return flavor.to_dict()
    except (
        nvExceptions.NotFound,
        nvExceptions.ClientException,
        ksExceptions.ClientException,
        ConnectionError,
    ) as e:
        self._format_exception(e)
def get_flavor_id_from_data(self, flavor_dict):
    """Obtain flavor id that match the flavor description
    Returns the flavor_id or raises a vimconnNotFoundException
    flavor_dict: contains the required ram, vcpus, disk
    If 'use_existing_flavors' is set to True at config, the closer flavor that provides same or more ram, vcpus
    and disk is returned. Otherwise a flavor with exactly same ram, vcpus and disk is returned or a
    vimconnNotFoundException is raised
    """
    # fixed anti-idiom: `False if x else True` -> `not x`
    exact_match = not self.config.get("use_existing_flavors")

    try:
        self._reload_connection()
        flavor_candidate_id = None
        # sentinel tuple: any real flavor compares smaller than this
        flavor_candidate_data = (10000, 10000, 10000)
        flavor_target = (
            flavor_dict["ram"],
            flavor_dict["vcpus"],
            flavor_dict["disk"],
            flavor_dict.get("ephemeral", 0),
            flavor_dict.get("swap", 0),
        )
        extended = flavor_dict.get("extended", {})

        if extended:
            raise vimconn.VimConnNotFoundException(
                "Flavor with EPA still not implemented"
            )
            # if len(numas) > 1:
            #     raise vimconn.VimConnNotFoundException("Cannot find any flavor with more than one numa")
            # numas = extended.get("numas")

        for flavor in self.nova.flavors.list():
            epa = flavor.get_keys()

            # skip flavors carrying EPA extra-specs; EPA matching not implemented
            if epa:
                continue

            flavor_data = (
                flavor.ram,
                flavor.vcpus,
                flavor.disk,
                flavor.ephemeral,
                flavor.swap if isinstance(flavor.swap, int) else 0,
            )

            if flavor_data == flavor_target:
                return flavor.id
            elif (
                not exact_match
                and flavor_target < flavor_data < flavor_candidate_data
            ):
                # remember the closest flavor that is at least as big as requested
                flavor_candidate_id = flavor.id
                flavor_candidate_data = flavor_data

        if not exact_match and flavor_candidate_id:
            return flavor_candidate_id

        raise vimconn.VimConnNotFoundException(
            "Cannot find any flavor matching '{}'".format(flavor_dict)
        )
    except (
        nvExceptions.NotFound,
        nvExceptions.ClientException,
        ksExceptions.ClientException,
        ConnectionError,
    ) as e:
        self._format_exception(e)
def process_resource_quota(quota: dict, prefix: str, extra_specs: dict) -> None:
    """Process resource quota and fill up extra_specs.

    Args:
        quota (dict): Keeping the quota of resurces
        prefix (str): Prefix
        extra_specs (dict) Dict to be filled to be used during flavor creation
    """
    if "limit" in quota:
        extra_specs["quota:" + prefix + "_limit"] = quota["limit"]

    if "reserve" in quota:
        extra_specs["quota:" + prefix + "_reservation"] = quota["reserve"]

    if "shares" in quota:
        # a custom shares level is required before a share value can be set
        extra_specs["quota:" + prefix + "_shares_level"] = "custom"
        extra_specs["quota:" + prefix + "_shares_share"] = quota["shares"]
def process_numa_memory(
    numa: dict, node_id: Optional[int], extra_specs: dict
) -> None:
    """Set the memory in extra_specs.

    Args:
        numa (dict): A dictionary which includes numa information
        node_id (int): ID of numa node
        extra_specs (dict): To be filled.
    """
    if not numa.get("memory"):
        return

    # descriptor memory is in GiB; nova expects MiB
    memory_mb = numa["memory"] * 1024
    memory = "hw:numa_mem.{}".format(node_id)
    extra_specs[memory] = int(memory_mb)
def process_numa_vcpu(numa: dict, node_id: int, extra_specs: dict) -> None:
    """Set the cpu in extra_specs.

    Args:
        numa (dict): A dictionary which includes numa information
        node_id (int): ID of numa node
        extra_specs (dict): To be filled.
    """
    if not numa.get("vcpu"):
        return

    vcpu = numa["vcpu"]
    cpu = "hw:numa_cpus.{}".format(node_id)
    # nova expects a comma-separated list of vcpu indexes
    vcpu = ",".join(map(str, vcpu))
    extra_specs[cpu] = vcpu
def process_numa_paired_threads(numa: dict, extra_specs: dict) -> Optional[int]:
    """Fill up extra_specs if numa has paired-threads.

    Args:
        numa (dict): A dictionary which includes numa information
        extra_specs (dict): To be filled.

    Returns:
        threads (int) Number of virtual cpus
    """
    if not numa.get("paired-threads"):
        return

    # cpu_thread_policy "require" implies that compute node must have an STM architecture
    threads = numa["paired-threads"] * 2
    extra_specs["hw:cpu_thread_policy"] = "require"
    extra_specs["hw:cpu_policy"] = "dedicated"

    return threads
def process_numa_cores(numa: dict, extra_specs: dict) -> Optional[int]:
    """Fill up extra_specs if numa has cores.

    Args:
        numa (dict): A dictionary which includes numa information
        extra_specs (dict): To be filled.

    Returns:
        cores (int) Number of virtual cpus
    """
    # cpu_thread_policy "isolate" implies that the host must not have an SMT
    # architecture, or a non-SMT architecture will be emulated
    if not numa.get("cores"):
        return

    cores = numa["cores"]
    extra_specs["hw:cpu_thread_policy"] = "isolate"
    extra_specs["hw:cpu_policy"] = "dedicated"

    return cores
def process_numa_threads(numa: dict, extra_specs: dict) -> Optional[int]:
    """Fill up extra_specs if numa has threads.

    Args:
        numa (dict): A dictionary which includes numa information
        extra_specs (dict): To be filled.

    Returns:
        threads (int) Number of virtual cpus
    """
    # cpu_thread_policy "prefer" implies that the host may or may not have an SMT architecture
    if not numa.get("threads"):
        return

    threads = numa["threads"]
    extra_specs["hw:cpu_thread_policy"] = "prefer"
    extra_specs["hw:cpu_policy"] = "dedicated"

    return threads
1359 def _process_numa_parameters_of_flavor(
1360 self
, numas
: List
, extra_specs
: Dict
1362 """Process numa parameters and fill up extra_specs.
1365 numas (list): List of dictionary which includes numa information
1366 extra_specs (dict): To be filled.
1369 numa_nodes
= len(numas
)
1370 extra_specs
["hw:numa_nodes"] = str(numa_nodes
)
1371 cpu_cores
, cpu_threads
= 0, 0
1373 if self
.vim_type
== "VIO":
1374 self
.process_vio_numa_nodes(numa_nodes
, extra_specs
)
1378 node_id
= numa
["id"]
1379 # overwrite ram and vcpus
1380 # check if key "memory" is present in numa else use ram value at flavor
1381 self
.process_numa_memory(numa
, node_id
, extra_specs
)
1382 self
.process_numa_vcpu(numa
, node_id
, extra_specs
)
1384 # See for reference: https://specs.openstack.org/openstack/nova-specs/specs/mitaka/implemented/virt-driver-cpu-thread-pinning.html
1385 extra_specs
["hw:cpu_sockets"] = str(numa_nodes
)
1387 if "paired-threads" in numa
:
1388 threads
= self
.process_numa_paired_threads(numa
, extra_specs
)
1389 cpu_threads
+= threads
1391 elif "cores" in numa
:
1392 cores
= self
.process_numa_cores(numa
, extra_specs
)
1395 elif "threads" in numa
:
1396 threads
= self
.process_numa_threads(numa
, extra_specs
)
1397 cpu_threads
+= threads
1400 extra_specs
["hw:cpu_cores"] = str(cpu_cores
)
1402 extra_specs
["hw:cpu_threads"] = str(cpu_threads
)
def process_vio_numa_nodes(numa_nodes: int, extra_specs: Dict) -> None:
    """According to number of numa nodes, updates the extra_specs for VIO.

    Args:
        numa_nodes (int): List keeps the numa node numbers
        extra_specs (dict): Extra specs dict to be updated
    """
    # If there are several numas, we do not define specific affinity.
    extra_specs["vmware:latency_sensitivity_level"] = "high"
1417 def _change_flavor_name(
1418 self
, name
: str, name_suffix
: int, flavor_data
: dict
1420 """Change the flavor name if the name already exists.
1423 name (str): Flavor name to be checked
1424 name_suffix (int): Suffix to be appended to name
1425 flavor_data (dict): Flavor dict
1428 name (str): New flavor name to be used
1432 fl
= self
.nova
.flavors
.list()
1433 fl_names
= [f
.name
for f
in fl
]
1435 while name
in fl_names
:
1437 name
= flavor_data
["name"] + "-" + str(name_suffix
)
1441 def _process_extended_config_of_flavor(
1442 self
, extended
: dict, extra_specs
: dict
1444 """Process the extended dict to fill up extra_specs.
1447 extended (dict): Keeping the extra specification of flavor
1448 extra_specs (dict) Dict to be filled to be used during flavor creation
1453 "mem-quota": "memory",
1455 "disk-io-quota": "disk_io",
1463 "PREFER_LARGE": "any",
1467 "cpu-pinning-policy": "hw:cpu_policy",
1468 "cpu-thread-pinning-policy": "hw:cpu_thread_policy",
1469 "mem-policy": "hw:numa_mempolicy",
1472 numas
= extended
.get("numas")
1474 self
._process
_numa
_parameters
_of
_flavor
(numas
, extra_specs
)
1476 for quota
, item
in quotas
.items():
1477 if quota
in extended
.keys():
1478 self
.process_resource_quota(extended
.get(quota
), item
, extra_specs
)
1480 # Set the mempage size as specified in the descriptor
1481 if extended
.get("mempage-size"):
1482 if extended
["mempage-size"] in page_sizes
.keys():
1483 extra_specs
["hw:mem_page_size"] = page_sizes
[extended
["mempage-size"]]
1485 # Normally, validations in NBI should not allow to this condition.
1487 "Invalid mempage-size %s. Will be ignored",
1488 extended
.get("mempage-size"),
1491 for policy
, hw_policy
in policies
.items():
1492 if extended
.get(policy
):
1493 extra_specs
[hw_policy
] = extended
[policy
].lower()
1496 def _get_flavor_details(flavor_data
: dict) -> Tuple
:
1497 """Returns the details of flavor
1499 flavor_data (dict): Dictionary that includes required flavor details
1502 ram, vcpus, extra_specs, extended (tuple): Main items of required flavor
1506 flavor_data
.get("ram", 64),
1507 flavor_data
.get("vcpus", 1),
1509 flavor_data
.get("extended"),
def new_flavor(self, flavor_data: dict, change_name_if_used: bool = True) -> str:
    """Adds a tenant flavor to openstack VIM.
    if change_name_if_used is True, it will change name in case of conflict,
    because it is not supported name repetition.

    Args:
        flavor_data (dict): Flavor details to be processed
        change_name_if_used (bool): Change name in case of conflict

    Returns:
        flavor_id (str): flavor identifier
    """
    self.logger.debug("Adding flavor '%s'", str(flavor_data))
    retry = 0
    max_retries = 3
    name_suffix = 0
    name = flavor_data["name"]

    while retry < max_retries:
        retry += 1
        try:
            self._reload_connection()

            if change_name_if_used:
                name = self._change_flavor_name(name, name_suffix, flavor_data)

            ram, vcpus, extra_specs, extended = self._get_flavor_details(
                flavor_data
            )
            if extended:
                self._process_extended_config_of_flavor(extended, extra_specs)

            # Create flavor
            new_flavor = self.nova.flavors.create(
                name=name,
                ram=ram,
                vcpus=vcpus,
                disk=flavor_data.get("disk", 0),
                ephemeral=flavor_data.get("ephemeral", 0),
                swap=flavor_data.get("swap", 0),
                is_public=flavor_data.get("is_public", True),
            )

            # Add metadata
            if extra_specs:
                new_flavor.set_keys(extra_specs)

            return new_flavor.id

        except nvExceptions.Conflict as e:
            # name collision: retry with a new suffix when allowed
            if change_name_if_used and retry < max_retries:
                continue

            self._format_exception(e)
        except (
            ksExceptions.ClientException,
            nvExceptions.ClientException,
            ConnectionError,
            KeyError,
        ) as e:
            self._format_exception(e)
def delete_flavor(self, flavor_id):
    """Deletes a tenant flavor from openstack VIM. Returns the old flavor_id"""
    try:
        self._reload_connection()
        self.nova.flavors.delete(flavor_id)

        return flavor_id
    # except nvExceptions.BadRequest as e:
    except (
        nvExceptions.NotFound,
        ksExceptions.ClientException,
        nvExceptions.ClientException,
        ConnectionError,
    ) as e:  # TODO remove
        self._format_exception(e)
def new_image(self, image_dict):
    """
    Adds a tenant image to VIM. imge_dict is a dictionary with:
        name: name
        disk_format: qcow2, vhd, vmdk, raw (by default), ...
        location: path or URI
        public: "yes" or "no"
        metadata: metadata of the image
    Returns the image_id
    """
    retry = 0
    max_retries = 3

    while retry < max_retries:
        retry += 1
        try:
            self._reload_connection()

            # determine format http://docs.openstack.org/developer/glance/formats.html
            if "disk_format" in image_dict:
                disk_format = image_dict["disk_format"]
            else:  # autodiscover based on extension
                if image_dict["location"].endswith(".qcow2"):
                    disk_format = "qcow2"
                elif image_dict["location"].endswith(".vhd"):
                    disk_format = "vhd"
                elif image_dict["location"].endswith(".vmdk"):
                    disk_format = "vmdk"
                elif image_dict["location"].endswith(".vdi"):
                    disk_format = "vdi"
                elif image_dict["location"].endswith(".iso"):
                    disk_format = "iso"
                elif image_dict["location"].endswith(".aki"):
                    disk_format = "aki"
                elif image_dict["location"].endswith(".ari"):
                    disk_format = "ari"
                elif image_dict["location"].endswith(".ami"):
                    disk_format = "ami"
                else:
                    disk_format = "raw"

            self.logger.debug(
                "new_image: '%s' loading from '%s'",
                image_dict["name"],
                image_dict["location"],
            )
            if self.vim_type == "VIO":
                container_format = "bare"
                if "container_format" in image_dict:
                    container_format = image_dict["container_format"]

                new_image = self.glance.images.create(
                    name=image_dict["name"],
                    container_format=container_format,
                    disk_format=disk_format,
                )
            else:
                new_image = self.glance.images.create(name=image_dict["name"])

            if image_dict["location"].startswith("http"):
                # TODO there is not a method to direct download. It must be downloaded locally with requests
                raise vimconn.VimConnNotImplemented("Cannot create image from URL")
            else:  # local path
                with open(image_dict["location"]) as fimage:
                    self.glance.images.upload(new_image.id, fimage)
                    # new_image = self.glancev1.images.create(name=image_dict["name"], is_public=
                    #  image_dict.get("public","yes")=="yes",
                    #    container_format="bare", data=fimage, disk_format=disk_format)

            # FIX: default to {} — "metadata" may be absent and item assignment on
            # None would raise TypeError below
            metadata_to_load = image_dict.get("metadata") or {}

            # TODO location is a reserved word for current openstack versions. fixed for VIO please check
            # for openstack
            if self.vim_type == "VIO":
                metadata_to_load["upload_location"] = image_dict["location"]
            else:
                metadata_to_load["location"] = image_dict["location"]

            self.glance.images.update(new_image.id, **metadata_to_load)

            return new_image.id
        except (
            nvExceptions.Conflict,
            ksExceptions.ClientException,
            nvExceptions.ClientException,
        ) as e:
            self._format_exception(e)
        except (
            HTTPException,
            gl1Exceptions.HTTPException,
            gl1Exceptions.CommunicationError,
            ConnectionError,
        ) as e:
            # transient communication errors are retried up to max_retries
            if retry == max_retries:
                continue

            self._format_exception(e)
        except IOError as e:  # can not open the file
            raise vimconn.VimConnConnectionException(
                "{}: {} for {}".format(type(e).__name__, e, image_dict["location"]),
                http_code=vimconn.HTTP_Bad_Request,
            )
def delete_image(self, image_id):
    """Deletes a tenant image from openstack VIM. Returns the old id"""
    try:
        self._reload_connection()
        self.glance.images.delete(image_id)

        return image_id
    except (
        nvExceptions.NotFound,
        ksExceptions.ClientException,
        nvExceptions.ClientException,
        gl1Exceptions.CommunicationError,
        gl1Exceptions.HTTPNotFound,
        ConnectionError,
    ) as e:  # TODO remove
        self._format_exception(e)
def get_image_id_from_path(self, path):
    """Get the image id from image path in the VIM database. Returns the image_id"""
    try:
        self._reload_connection()
        images = self.glance.images.list()

        for image in images:
            if image.metadata.get("location") == path:
                return image.id

        raise vimconn.VimConnNotFoundException(
            "image with location '{}' not found".format(path)
        )
    except (
        ksExceptions.ClientException,
        nvExceptions.ClientException,
        gl1Exceptions.CommunicationError,
        ConnectionError,
    ) as e:
        self._format_exception(e)
def get_image_list(self, filter_dict={}):
    """Obtain tenant images from VIM
    Filter_dict can be:
        id: image id
        name: image name
        checksum: image checksum
    Returns the image list of dictionaries:
        [{<the fields at Filter_dict plus some VIM specific>}, ...]
    """
    # NOTE: filter_dict default is mutable but only read, never mutated here
    self.logger.debug("Getting image list from VIM filter: '%s'", str(filter_dict))

    try:
        self._reload_connection()
        # filter_dict_os = filter_dict.copy()
        # First we filter by the available filter fields: name, id. The others are removed.
        image_list = self.glance.images.list()
        filtered_list = []

        for image in image_list:
            try:
                if filter_dict.get("name") and image["name"] != filter_dict["name"]:
                    continue

                if filter_dict.get("id") and image["id"] != filter_dict["id"]:
                    continue

                if (
                    filter_dict.get("checksum")
                    and image["checksum"] != filter_dict["checksum"]
                ):
                    continue

                filtered_list.append(image.copy())
            except gl1Exceptions.HTTPNotFound:
                pass

        return filtered_list
    except (
        ksExceptions.ClientException,
        nvExceptions.ClientException,
        gl1Exceptions.CommunicationError,
        ConnectionError,
    ) as e:
        self._format_exception(e)
def __wait_for_vm(self, vm_id, status):
    """wait until vm is in the desired status and return True.
    If the VM gets in ERROR status, return false.
    If the timeout is reached generate an exception"""
    elapsed_time = 0

    while elapsed_time < server_timeout:
        vm_status = self.nova.servers.get(vm_id).status

        if vm_status == status:
            return True

        if vm_status == "ERROR":
            return False

        time.sleep(5)
        elapsed_time += 5

    # if we exceeded the timeout rollback
    if elapsed_time >= server_timeout:
        raise vimconn.VimConnException(
            "Timeout waiting for instance " + vm_id + " to get " + status,
            http_code=vimconn.HTTP_Request_Timeout,
        )
1805 def _get_openstack_availablity_zones(self
):
1807 Get from openstack availability zones available
1811 openstack_availability_zone
= self
.nova
.availability_zones
.list()
1812 openstack_availability_zone
= [
1814 for zone
in openstack_availability_zone
1815 if zone
.zoneName
!= "internal"
1818 return openstack_availability_zone
1822 def _set_availablity_zones(self
):
1824 Set vim availablity zone
1827 if "availability_zone" in self
.config
:
1828 vim_availability_zones
= self
.config
.get("availability_zone")
1830 if isinstance(vim_availability_zones
, str):
1831 self
.availability_zone
= [vim_availability_zones
]
1832 elif isinstance(vim_availability_zones
, list):
1833 self
.availability_zone
= vim_availability_zones
1835 self
.availability_zone
= self
._get
_openstack
_availablity
_zones
()
1837 def _get_vm_availability_zone(
1838 self
, availability_zone_index
, availability_zone_list
1841 Return thge availability zone to be used by the created VM.
1842 :return: The VIM availability zone to be used or None
1844 if availability_zone_index
is None:
1845 if not self
.config
.get("availability_zone"):
1847 elif isinstance(self
.config
.get("availability_zone"), str):
1848 return self
.config
["availability_zone"]
1850 # TODO consider using a different parameter at config for default AV and AV list match
1851 return self
.config
["availability_zone"][0]
1853 vim_availability_zones
= self
.availability_zone
1854 # check if VIM offer enough availability zones describe in the VNFD
1855 if vim_availability_zones
and len(availability_zone_list
) <= len(
1856 vim_availability_zones
1858 # check if all the names of NFV AV match VIM AV names
1859 match_by_index
= False
1860 for av
in availability_zone_list
:
1861 if av
not in vim_availability_zones
:
1862 match_by_index
= True
1866 return vim_availability_zones
[availability_zone_index
]
1868 return availability_zone_list
[availability_zone_index
]
1870 raise vimconn
.VimConnConflictException(
1871 "No enough availability zones at VIM for this deployment"
1874 def _prepare_port_dict_security_groups(self
, net
: dict, port_dict
: dict) -> None:
1875 """Fill up the security_groups in the port_dict.
1878 net (dict): Network details
1879 port_dict (dict): Port details
1883 self
.config
.get("security_groups")
1884 and net
.get("port_security") is not False
1885 and not self
.config
.get("no_port_security_extension")
1887 if not self
.security_groups_id
:
1888 self
._get
_ids
_from
_name
()
1890 port_dict
["security_groups"] = self
.security_groups_id
1892 def _prepare_port_dict_binding(self
, net
: dict, port_dict
: dict) -> None:
1893 """Fill up the network binding depending on network type in the port_dict.
1896 net (dict): Network details
1897 port_dict (dict): Port details
1900 if not net
.get("type"):
1901 raise vimconn
.VimConnException("Type is missing in the network details.")
1903 if net
["type"] == "virtual":
1907 elif net
["type"] == "VF" or net
["type"] == "SR-IOV":
1908 port_dict
["binding:vnic_type"] = "direct"
1910 # VIO specific Changes
1911 if self
.vim_type
== "VIO":
1912 # Need to create port with port_security_enabled = False and no-security-groups
1913 port_dict
["port_security_enabled"] = False
1914 port_dict
["provider_security_groups"] = []
1915 port_dict
["security_groups"] = []
1918 # For PT PCI-PASSTHROUGH
1919 port_dict
["binding:vnic_type"] = "direct-physical"
1922 def _set_fixed_ip(new_port
: dict, net
: dict) -> None:
1923 """Set the "ip" parameter in net dictionary.
1926 new_port (dict): New created port
1927 net (dict): Network details
1930 fixed_ips
= new_port
["port"].get("fixed_ips")
1933 net
["ip"] = fixed_ips
[0].get("ip_address")
1938 def _prepare_port_dict_mac_ip_addr(net
: dict, port_dict
: dict) -> None:
1939 """Fill up the mac_address and fixed_ips in port_dict.
1942 net (dict): Network details
1943 port_dict (dict): Port details
1946 if net
.get("mac_address"):
1947 port_dict
["mac_address"] = net
["mac_address"]
1950 if ip_list
:= net
.get("ip_address"):
1951 if not isinstance(ip_list
, list):
1954 ip_dict
= {"ip_address": ip
}
1955 ip_dual_list
.append(ip_dict
)
1956 port_dict
["fixed_ips"] = ip_dual_list
1957 # TODO add "subnet_id": <subnet_id>
1959 def _create_new_port(self
, port_dict
: dict, created_items
: dict, net
: dict) -> Dict
:
1960 """Create new port using neutron.
1963 port_dict (dict): Port details
1964 created_items (dict): All created items
1965 net (dict): Network details
1968 new_port (dict): New created port
1971 new_port
= self
.neutron
.create_port({"port": port_dict
})
1972 created_items
["port:" + str(new_port
["port"]["id"])] = True
1973 net
["mac_address"] = new_port
["port"]["mac_address"]
1974 net
["vim_id"] = new_port
["port"]["id"]
def _create_port(
    self, net: dict, name: str, created_items: dict
) -> Tuple[dict, dict]:
    """Create port using net details.

    Args:
        net (dict): Network details
        name (str): Name to be used as network name if net dict does not include name
        created_items (dict): All created items

    Returns:
        new_port, port    New created port, port dictionary
    """
    port_dict = {
        "network_id": net["net_id"],
        "name": net.get("name"),
        "admin_state_up": True,
    }

    if not port_dict["name"]:
        port_dict["name"] = name

    self._prepare_port_dict_security_groups(net, port_dict)

    self._prepare_port_dict_binding(net, port_dict)

    vimconnector._prepare_port_dict_mac_ip_addr(net, port_dict)

    new_port = self._create_new_port(port_dict, created_items, net)

    vimconnector._set_fixed_ip(new_port, net)

    port = {"port-id": new_port["port"]["id"]}

    # port tags require nova microversion 2.32 or newer
    if float(self.nova.api_version.get_string()) >= 2.32:
        port["tag"] = new_port["port"]["name"]

    return new_port, port
def _prepare_network_for_vminstance(
    self,
    name: str,
    net_list: list,
    created_items: dict,
    net_list_vim: list,
    external_network: list,
    no_secured_ports: list,
) -> None:
    """Create port and fill up net dictionary for new VM instance creation.

    Args:
        name (str): Name of network
        net_list (list): List of networks
        created_items (dict): All created items belongs to a VM
        net_list_vim (list): List of ports
        external_network (list): List of external-networks
        no_secured_ports (list): Port security disabled ports
    """
    self._reload_connection()

    for net in net_list:
        # Skip non-connected iface
        if not net.get("net_id"):
            continue

        new_port, port = self._create_port(net, name, created_items)

        net_list_vim.append(port)

        if net.get("floating_ip", False):
            net["exit_on_floating_ip_error"] = True
            external_network.append(net)
        elif net["use"] == "mgmt" and self.config.get("use_floating_ip"):
            net["exit_on_floating_ip_error"] = False
            external_network.append(net)
            net["floating_ip"] = self.config.get("use_floating_ip")

        # If port security is disabled when the port has not yet been attached to the VM, then all vm traffic
        # is dropped. As a workaround we wait until the VM is active and then disable the port-security
        if net.get("port_security") is False and not self.config.get(
            "no_port_security_extension"
        ):
            no_secured_ports.append(
                (
                    new_port["port"]["id"],
                    net.get("port_security_disable_strategy"),
                )
            )
2071 def _prepare_persistent_root_volumes(
2076 base_disk_index
: int,
2077 block_device_mapping
: dict,
2078 existing_vim_volumes
: list,
2079 created_items
: dict,
2081 """Prepare persistent root volumes for new VM instance.
2084 name (str): Name of VM instance
2085 vm_av_zone (list): List of availability zones
2086 disk (dict): Disk details
2087 base_disk_index (int): Disk index
2088 block_device_mapping (dict): Block device details
2089 existing_vim_volumes (list): Existing disk details
2090 created_items (dict): All created items belongs to VM
2093 boot_volume_id (str): ID of boot volume
2096 # Disk may include only vim_volume_id or only vim_id."
2097 # Use existing persistent root volume finding with volume_id or vim_id
2098 key_id
= "vim_volume_id" if "vim_volume_id" in disk
.keys() else "vim_id"
2100 if disk
.get(key_id
):
2101 block_device_mapping
["vd" + chr(base_disk_index
)] = disk
[key_id
]
2102 existing_vim_volumes
.append({"id": disk
[key_id
]})
2105 # Create persistent root volume
2106 volume
= self
.cinder
.volumes
.create(
2108 name
=name
+ "vd" + chr(base_disk_index
),
2109 imageRef
=disk
["image_id"],
2110 # Make sure volume is in the same AZ as the VM to be attached to
2111 availability_zone
=vm_av_zone
,
2113 boot_volume_id
= volume
.id
2114 self
.update_block_device_mapping(
2116 block_device_mapping
=block_device_mapping
,
2117 base_disk_index
=base_disk_index
,
2119 created_items
=created_items
,
2122 return boot_volume_id
def update_block_device_mapping(
    self,
    volume: object,
    block_device_mapping: dict,
    base_disk_index: int,
    disk: dict,
    created_items: dict,
) -> None:
    """Add volume information to block device mapping dict.

    Args:
        volume (object): Created volume object
        block_device_mapping (dict): Block device details
        base_disk_index (int): Disk index
        disk (dict): Disk details
        created_items (dict): All created items belongs to VM
    """
    if not volume:
        raise vimconn.VimConnException("Volume is empty.")

    if not hasattr(volume, "id"):
        raise vimconn.VimConnException(
            "Created volume is not valid, does not have id attribute."
        )

    volume_txt = "volume:" + str(volume.id)

    # ":keep" marks the volume to survive VM deletion
    if disk.get("keep"):
        volume_txt += ":keep"

    created_items[volume_txt] = True
    block_device_mapping["vd" + chr(base_disk_index)] = volume.id
def new_shared_volumes(self, shared_volume_data) -> (str, str):
    """Create a multiattach volume and return its (name, id) pair."""
    try:
        volume = self.cinder.volumes.create(
            size=shared_volume_data["size"],
            name=shared_volume_data["name"],
            # multiattach volume type allows attachment to several VMs
            volume_type="multiattach",
        )

        return (volume.name, volume.id)
    except (ConnectionError, KeyError) as e:
        self._format_exception(e)
2165 def _prepare_shared_volumes(
2169 base_disk_index
: int,
2170 block_device_mapping
: dict,
2171 existing_vim_volumes
: list,
2172 created_items
: dict,
2174 volumes
= {volume
.name
: volume
.id for volume
in self
.cinder
.volumes
.list()}
2175 if volumes
.get(disk
["name"]):
2176 sv_id
= volumes
[disk
["name"]]
2177 volume
= self
.cinder
.volumes
.get(sv_id
)
2178 self
.update_block_device_mapping(
2180 block_device_mapping
=block_device_mapping
,
2181 base_disk_index
=base_disk_index
,
2183 created_items
=created_items
,
2186 def _prepare_non_root_persistent_volumes(
2191 block_device_mapping
: dict,
2192 base_disk_index
: int,
2193 existing_vim_volumes
: list,
2194 created_items
: dict,
2196 """Prepare persistent volumes for new VM instance.
2199 name (str): Name of VM instance
2200 disk (dict): Disk details
2201 vm_av_zone (list): List of availability zones
2202 block_device_mapping (dict): Block device details
2203 base_disk_index (int): Disk index
2204 existing_vim_volumes (list): Existing disk details
2205 created_items (dict): All created items belongs to VM
2207 # Non-root persistent volumes
2208 # Disk may include only vim_volume_id or only vim_id."
2209 key_id
= "vim_volume_id" if "vim_volume_id" in disk
.keys() else "vim_id"
2210 if disk
.get(key_id
):
2211 # Use existing persistent volume
2212 block_device_mapping
["vd" + chr(base_disk_index
)] = disk
[key_id
]
2213 existing_vim_volumes
.append({"id": disk
[key_id
]})
2215 volume_name
= f
"{name}vd{chr(base_disk_index)}"
2216 volume
= self
.cinder
.volumes
.create(
2219 # Make sure volume is in the same AZ as the VM to be attached to
2220 availability_zone
=vm_av_zone
,
2222 self
.update_block_device_mapping(
2224 block_device_mapping
=block_device_mapping
,
2225 base_disk_index
=base_disk_index
,
2227 created_items
=created_items
,
def _wait_for_created_volumes_availability(
    self, elapsed_time: int, created_items: dict
) -> int:
    """Wait till created volumes become available.

    Args:
        elapsed_time (int): Passed time while waiting
        created_items (dict): All created items belongs to VM

    Returns:
        elapsed_time (int): Time spent while waiting
    """
    while elapsed_time < volume_timeout:
        for created_item in created_items:
            v, volume_id = (
                created_item.split(":")[0],
                created_item.split(":")[1],
            )

            if v == "volume":
                volume = self.cinder.volumes.get(volume_id)

                # a shared (multiattach) volume already in use counts as ready
                if (
                    volume.volume_type == "multiattach"
                    and volume.status == "in-use"
                ):
                    return elapsed_time
                elif volume.status != "available":
                    break
        else:
            # All ready: break from while
            break

        time.sleep(5)
        elapsed_time += 5

    return elapsed_time
def _wait_for_existing_volumes_availability(
    self, elapsed_time: int, existing_vim_volumes: list
) -> int:
    """Wait till existing volumes become available.

    Args:
        elapsed_time (int): Passed time while waiting
        existing_vim_volumes (list): Existing volume details

    Returns:
        elapsed_time (int): Time spent while waiting
    """
    while elapsed_time < volume_timeout:
        for volume in existing_vim_volumes:
            v = self.cinder.volumes.get(volume["id"])

            # a shared (multiattach) volume already in use counts as ready
            if v.volume_type == "multiattach" and v.status == "in-use":
                return elapsed_time
            elif v.status != "available":
                break
        else:  # all ready: break from while
            break

        time.sleep(5)
        elapsed_time += 5

    return elapsed_time
def _prepare_disk_for_vminstance(
    self,
    name: str,
    existing_vim_volumes: list,
    created_items: dict,
    vm_av_zone: list,
    block_device_mapping: dict,
    disk_list: list = None,
) -> None:
    """Prepare all volumes for new VM instance.

    Args:
        name (str): Name of Instance
        existing_vim_volumes (list): List of existing volumes
        created_items (dict): All created items belongs to VM
        vm_av_zone (list): VM availability zone
        block_device_mapping (dict): Block devices to be attached to VM
        disk_list (list): List of disks
    """
    # Create additional volumes in case these are present in disk_list
    base_disk_index = ord("b")
    boot_volume_id = None
    elapsed_time = 0

    for disk in disk_list:
        if "image_id" in disk:
            # Root persistent volume
            base_disk_index = ord("a")
            boot_volume_id = self._prepare_persistent_root_volumes(
                name=name,
                vm_av_zone=vm_av_zone,
                disk=disk,
                base_disk_index=base_disk_index,
                block_device_mapping=block_device_mapping,
                existing_vim_volumes=existing_vim_volumes,
                created_items=created_items,
            )
        elif disk.get("multiattach"):
            self._prepare_shared_volumes(
                name=name,
                disk=disk,
                base_disk_index=base_disk_index,
                block_device_mapping=block_device_mapping,
                existing_vim_volumes=existing_vim_volumes,
                created_items=created_items,
            )
        else:
            # Non-root persistent volume
            self._prepare_non_root_persistent_volumes(
                name=name,
                disk=disk,
                vm_av_zone=vm_av_zone,
                block_device_mapping=block_device_mapping,
                base_disk_index=base_disk_index,
                existing_vim_volumes=existing_vim_volumes,
                created_items=created_items,
            )

        base_disk_index += 1

    # Wait until created volumes are with status available
    elapsed_time = self._wait_for_created_volumes_availability(
        elapsed_time, created_items
    )
    # Wait until existing volumes in vim are with status available
    elapsed_time = self._wait_for_existing_volumes_availability(
        elapsed_time, existing_vim_volumes
    )
    # If we exceeded the timeout rollback
    if elapsed_time >= volume_timeout:
        raise vimconn.VimConnException(
            "Timeout creating volumes for instance " + name,
            http_code=vimconn.HTTP_Request_Timeout,
        )

    if boot_volume_id:
        self.cinder.volumes.set_bootable(boot_volume_id, True)
2372 def _find_the_external_network_for_floating_ip(self
):
2373 """Get the external network ip in order to create floating IP.
2376 pool_id (str): External network pool ID
2380 # Find the external network
2381 external_nets
= list()
2383 for net
in self
.neutron
.list_networks()["networks"]:
2384 if net
["router:external"]:
2385 external_nets
.append(net
)
2387 if len(external_nets
) == 0:
2388 raise vimconn
.VimConnException(
2389 "Cannot create floating_ip automatically since "
2390 "no external network is present",
2391 http_code
=vimconn
.HTTP_Conflict
,
2394 if len(external_nets
) > 1:
2395 raise vimconn
.VimConnException(
2396 "Cannot create floating_ip automatically since "
2397 "multiple external networks are present",
2398 http_code
=vimconn
.HTTP_Conflict
,
2402 return external_nets
[0].get("id")
2404 def _neutron_create_float_ip(self
, param
: dict, created_items
: dict) -> None:
2405 """Trigger neutron to create a new floating IP using external network ID.
2408 param (dict): Input parameters to create a floating IP
2409 created_items (dict): All created items belongs to new VM instance
2416 self
.logger
.debug("Creating floating IP")
2417 new_floating_ip
= self
.neutron
.create_floatingip(param
)
2418 free_floating_ip
= new_floating_ip
["floatingip"]["id"]
2419 created_items
["floating_ip:" + str(free_floating_ip
)] = True
2421 except Exception as e
:
2422 raise vimconn
.VimConnException(
2423 type(e
).__name
__ + ": Cannot create new floating_ip " + str(e
),
2424 http_code
=vimconn
.HTTP_Conflict
,
2427 def _create_floating_ip(
2428 self
, floating_network
: dict, server
: object, created_items
: dict
2430 """Get the available Pool ID and create a new floating IP.
2433 floating_network (dict): Dict including external network ID
2434 server (object): Server object
2435 created_items (dict): All created items belongs to new VM instance
2439 # Pool_id is available
2441 isinstance(floating_network
["floating_ip"], str)
2442 and floating_network
["floating_ip"].lower() != "true"
2444 pool_id
= floating_network
["floating_ip"]
2448 pool_id
= self
._find
_the
_external
_network
_for
_floating
_ip
()
2452 "floating_network_id": pool_id
,
2453 "tenant_id": server
.tenant_id
,
2457 self
._neutron
_create
_float
_ip
(param
, created_items
)
2459 def _find_floating_ip(
2463 floating_network
: dict,
2465 """Find the available free floating IPs if there are.
2468 server (object): Server object
2469 floating_ips (list): List of floating IPs
2470 floating_network (dict): Details of floating network such as ID
2473 free_floating_ip (str): Free floating ip address
2476 for fip
in floating_ips
:
2477 if fip
.get("port_id") or fip
.get("tenant_id") != server
.tenant_id
:
2480 if isinstance(floating_network
["floating_ip"], str):
2481 if fip
.get("floating_network_id") != floating_network
["floating_ip"]:
2486 def _assign_floating_ip(
2487 self
, free_floating_ip
: str, floating_network
: dict
2489 """Assign the free floating ip address to port.
2492 free_floating_ip (str): Floating IP to be assigned
2493 floating_network (dict): ID of floating network
2496 fip (dict) (dict): Floating ip details
2499 # The vim_id key contains the neutron.port_id
2500 self
.neutron
.update_floatingip(
2502 {"floatingip": {"port_id": floating_network
["vim_id"]}},
2504 # For race condition ensure not re-assigned to other VM after 5 seconds
2507 return self
.neutron
.show_floatingip(free_floating_ip
)
2509 def _get_free_floating_ip(
2510 self
, server
: object, floating_network
: dict
2512 """Get the free floating IP address.
2515 server (object): Server Object
2516 floating_network (dict): Floating network details
2519 free_floating_ip (str): Free floating ip addr
2523 floating_ips
= self
.neutron
.list_floatingips().get("floatingips", ())
2526 random
.shuffle(floating_ips
)
2528 return self
._find
_floating
_ip
(server
, floating_ips
, floating_network
)
2530 def _prepare_external_network_for_vminstance(
2532 external_network
: list,
2534 created_items
: dict,
2535 vm_start_time
: float,
2537 """Assign floating IP address for VM instance.
2540 external_network (list): ID of External network
2541 server (object): Server Object
2542 created_items (dict): All created items belongs to new VM instance
2543 vm_start_time (float): Time as a floating point number expressed in seconds since the epoch, in UTC
2549 for floating_network
in external_network
:
2552 floating_ip_retries
= 3
2553 # In case of RO in HA there can be conflicts, two RO trying to assign same floating IP, so retry
2556 free_floating_ip
= self
._get
_free
_floating
_ip
(
2557 server
, floating_network
2560 if not free_floating_ip
:
2561 self
._create
_floating
_ip
(
2562 floating_network
, server
, created_items
2566 # For race condition ensure not already assigned
2567 fip
= self
.neutron
.show_floatingip(free_floating_ip
)
2569 if fip
["floatingip"].get("port_id"):
2572 # Assign floating ip
2573 fip
= self
._assign
_floating
_ip
(
2574 free_floating_ip
, floating_network
2577 if fip
["floatingip"]["port_id"] != floating_network
["vim_id"]:
2578 self
.logger
.warning(
2579 "floating_ip {} re-assigned to other port".format(
2586 "Assigned floating_ip {} to VM {}".format(
2587 free_floating_ip
, server
.id
2593 except Exception as e
:
2594 # Openstack need some time after VM creation to assign an IP. So retry if fails
2595 vm_status
= self
.nova
.servers
.get(server
.id).status
2597 if vm_status
not in ("ACTIVE", "ERROR"):
2598 if time
.time() - vm_start_time
< server_timeout
:
2601 elif floating_ip_retries
> 0:
2602 floating_ip_retries
-= 1
2605 raise vimconn
.VimConnException(
2606 "Cannot create floating_ip: {} {}".format(
2609 http_code
=vimconn
.HTTP_Conflict
,
2612 except Exception as e
:
2613 if not floating_network
["exit_on_floating_ip_error"]:
2614 self
.logger
.error("Cannot create floating_ip. %s", str(e
))
2619 def _update_port_security_for_vminstance(
2621 no_secured_ports
: list,
2624 """Updates the port security according to no_secured_ports list.
2627 no_secured_ports (list): List of ports that security will be disabled
2628 server (object): Server Object
2634 # Wait until the VM is active and then disable the port-security
2635 if no_secured_ports
:
2636 self
.__wait
_for
_vm
(server
.id, "ACTIVE")
2638 for port
in no_secured_ports
:
2640 "port": {"port_security_enabled": False, "security_groups": None}
2643 if port
[1] == "allow-address-pairs":
2645 "port": {"allowed_address_pairs": [{"ip_address": "0.0.0.0/0"}]}
2649 self
.neutron
.update_port(port
[0], port_update
)
2652 raise vimconn
.VimConnException(
2653 "It was not possible to disable port security for port {}".format(
2665 affinity_group_list
: list,
2669 availability_zone_index
=None,
2670 availability_zone_list
=None,
2672 """Adds a VM instance to VIM.
2675 name (str): name of VM
2676 description (str): description
2677 start (bool): indicates if VM must start or boot in pause mode. Ignored
2678 image_id (str) image uuid
2679 flavor_id (str) flavor uuid
2680 affinity_group_list (list): list of affinity groups, each one is a dictionary.Ignore if empty.
2681 net_list (list): list of interfaces, each one is a dictionary with:
2682 name: name of network
2683 net_id: network uuid to connect
2684 vpci: virtual vcpi to assign, ignored because openstack lack #TODO
2685 model: interface model, ignored #TODO
2686 mac_address: used for SR-IOV ifaces #TODO for other types
2687 use: 'data', 'bridge', 'mgmt'
2688 type: 'virtual', 'PCI-PASSTHROUGH'('PF'), 'SR-IOV'('VF'), 'VFnotShared'
2689 vim_id: filled/added by this function
2690 floating_ip: True/False (or it can be None)
2691 port_security: True/False
2692 cloud_config (dict): (optional) dictionary with:
2693 key-pairs: (optional) list of strings with the public key to be inserted to the default user
2694 users: (optional) list of users to be inserted, each item is a dict with:
2695 name: (mandatory) user name,
2696 key-pairs: (optional) list of strings with the public key to be inserted to the user
2697 user-data: (optional) string is a text script to be passed directly to cloud-init
2698 config-files: (optional). List of files to be transferred. Each item is a dict with:
2699 dest: (mandatory) string with the destination absolute path
2700 encoding: (optional, by default text). Can be one of:
2701 'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
2702 content : (mandatory) string with the content of the file
2703 permissions: (optional) string with file permissions, typically octal notation '0644'
2704 owner: (optional) file owner, string with the format 'owner:group'
2705 boot-data-drive: boolean to indicate if user-data must be passed using a boot drive (hard disk)
2706 disk_list: (optional) list with additional disks to the VM. Each item is a dict with:
2707 image_id: (optional). VIM id of an existing image. If not provided an empty disk must be mounted
2708 size: (mandatory) string with the size of the disk in GB
2709 vim_id: (optional) should use this existing volume id
2710 availability_zone_index: Index of availability_zone_list to use for this this VM. None if not AV required
2711 availability_zone_list: list of availability zones given by user in the VNFD descriptor. Ignore if
2712 availability_zone_index is None
2713 #TODO ip, security groups
2716 A tuple with the instance identifier and created_items or raises an exception on error
2717 created_items can be None or a dictionary where this method can include key-values that will be passed to
2718 the method delete_vminstance and action_vminstance. Can be used to store created ports, volumes, etc.
2719 Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
2724 "new_vminstance input: image='%s' flavor='%s' nics='%s'",
2734 # list of external networks to be connected to instance, later on used to create floating_ip
2735 external_network
= []
2736 # List of ports with port-security disabled
2737 no_secured_ports
= []
2738 block_device_mapping
= {}
2739 existing_vim_volumes
= []
2740 server_group_id
= None
2741 scheduller_hints
= {}
2743 # Check the Openstack Connection
2744 self
._reload
_connection
()
2746 # Prepare network list
2747 self
._prepare
_network
_for
_vminstance
(
2750 created_items
=created_items
,
2751 net_list_vim
=net_list_vim
,
2752 external_network
=external_network
,
2753 no_secured_ports
=no_secured_ports
,
2757 config_drive
, userdata
= self
._create
_user
_data
(cloud_config
)
2759 # Get availability Zone
2760 vm_av_zone
= self
._get
_vm
_availability
_zone
(
2761 availability_zone_index
, availability_zone_list
2766 self
._prepare
_disk
_for
_vminstance
(
2768 existing_vim_volumes
=existing_vim_volumes
,
2769 created_items
=created_items
,
2770 vm_av_zone
=vm_av_zone
,
2771 block_device_mapping
=block_device_mapping
,
2772 disk_list
=disk_list
,
2775 if affinity_group_list
:
2776 # Only first id on the list will be used. Openstack restriction
2777 server_group_id
= affinity_group_list
[0]["affinity_group_id"]
2778 scheduller_hints
["group"] = server_group_id
2781 "nova.servers.create({}, {}, {}, nics={}, security_groups={}, "
2782 "availability_zone={}, key_name={}, userdata={}, config_drive={}, "
2783 "block_device_mapping={}, server_group={})".format(
2788 self
.config
.get("security_groups"),
2790 self
.config
.get("keypair"),
2793 block_device_mapping
,
2798 server
= self
.nova
.servers
.create(
2803 security_groups
=self
.config
.get("security_groups"),
2804 # TODO remove security_groups in future versions. Already at neutron port
2805 availability_zone
=vm_av_zone
,
2806 key_name
=self
.config
.get("keypair"),
2808 config_drive
=config_drive
,
2809 block_device_mapping
=block_device_mapping
,
2810 scheduler_hints
=scheduller_hints
,
2813 vm_start_time
= time
.time()
2815 self
._update
_port
_security
_for
_vminstance
(no_secured_ports
, server
)
2817 self
._prepare
_external
_network
_for
_vminstance
(
2818 external_network
=external_network
,
2820 created_items
=created_items
,
2821 vm_start_time
=vm_start_time
,
2824 return server
.id, created_items
2826 except Exception as e
:
2829 server_id
= server
.id
2832 created_items
= self
.remove_keep_tag_from_persistent_volumes(
2836 self
.delete_vminstance(server_id
, created_items
)
2838 except Exception as e2
:
2839 self
.logger
.error("new_vminstance rollback fail {}".format(e2
))
2841 self
._format
_exception
(e
)
2844 def remove_keep_tag_from_persistent_volumes(created_items
: Dict
) -> Dict
:
2845 """Removes the keep flag from persistent volumes. So, those volumes could be removed.
2848 created_items (dict): All created items belongs to VM
2851 updated_created_items (dict): Dict which does not include keep flag for volumes.
2855 key
.replace(":keep", ""): value
for (key
, value
) in created_items
.items()
2858 def get_vminstance(self
, vm_id
):
2859 """Returns the VM instance information from VIM"""
2860 # self.logger.debug("Getting VM from VIM")
2862 self
._reload
_connection
()
2863 server
= self
.nova
.servers
.find(id=vm_id
)
2864 # TODO parse input and translate to VIM format (openmano_schemas.new_vminstance_response_schema)
2866 return server
.to_dict()
2868 ksExceptions
.ClientException
,
2869 nvExceptions
.ClientException
,
2870 nvExceptions
.NotFound
,
2873 self
._format
_exception
(e
)
2875 def get_vminstance_console(self
, vm_id
, console_type
="vnc"):
2877 Get a console for the virtual machine
2879 vm_id: uuid of the VM
2880 console_type, can be:
2881 "novnc" (by default), "xvpvnc" for VNC types,
2882 "rdp-html5" for RDP types, "spice-html5" for SPICE types
2883 Returns dict with the console parameters:
2884 protocol: ssh, ftp, http, https, ...
2885 server: usually ip address
2886 port: the http, ssh, ... port
2887 suffix: extra text, e.g. the http path and query string
2889 self
.logger
.debug("Getting VM CONSOLE from VIM")
2892 self
._reload
_connection
()
2893 server
= self
.nova
.servers
.find(id=vm_id
)
2895 if console_type
is None or console_type
== "novnc":
2896 console_dict
= server
.get_vnc_console("novnc")
2897 elif console_type
== "xvpvnc":
2898 console_dict
= server
.get_vnc_console(console_type
)
2899 elif console_type
== "rdp-html5":
2900 console_dict
= server
.get_rdp_console(console_type
)
2901 elif console_type
== "spice-html5":
2902 console_dict
= server
.get_spice_console(console_type
)
2904 raise vimconn
.VimConnException(
2905 "console type '{}' not allowed".format(console_type
),
2906 http_code
=vimconn
.HTTP_Bad_Request
,
2909 console_dict1
= console_dict
.get("console")
2912 console_url
= console_dict1
.get("url")
2916 protocol_index
= console_url
.find("//")
2918 console_url
[protocol_index
+ 2 :].find("/") + protocol_index
+ 2
2921 console_url
[protocol_index
+ 2 : suffix_index
].find(":")
2926 if protocol_index
< 0 or port_index
< 0 or suffix_index
< 0:
2928 -vimconn
.HTTP_Internal_Server_Error
,
2929 "Unexpected response from VIM",
2933 "protocol": console_url
[0:protocol_index
],
2934 "server": console_url
[protocol_index
+ 2 : port_index
],
2935 "port": console_url
[port_index
:suffix_index
],
2936 "suffix": console_url
[suffix_index
+ 1 :],
2941 raise vimconn
.VimConnUnexpectedResponse("Unexpected response from VIM")
2943 nvExceptions
.NotFound
,
2944 ksExceptions
.ClientException
,
2945 nvExceptions
.ClientException
,
2946 nvExceptions
.BadRequest
,
2949 self
._format
_exception
(e
)
2951 def _delete_ports_by_id_wth_neutron(self
, k_id
: str) -> None:
2952 """Neutron delete ports by id.
2954 k_id (str): Port id in the VIM
2957 port_dict
= self
.neutron
.list_ports()
2958 existing_ports
= [port
["id"] for port
in port_dict
["ports"] if port_dict
]
2960 if k_id
in existing_ports
:
2961 self
.neutron
.delete_port(k_id
)
2963 except Exception as e
:
2964 self
.logger
.error("Error deleting port: {}: {}".format(type(e
).__name
__, e
))
2966 def delete_shared_volumes(self
, shared_volume_vim_id
: str) -> bool:
2967 """Cinder delete volume by id.
2969 shared_volume_vim_id (str): ID of shared volume in VIM
2972 if self
.cinder
.volumes
.get(shared_volume_vim_id
).status
!= "available":
2976 self
.cinder
.volumes
.delete(shared_volume_vim_id
)
2978 except Exception as e
:
2980 "Error deleting volume: {}: {}".format(type(e
).__name
__, e
)
2983 def _delete_volumes_by_id_wth_cinder(
2984 self
, k
: str, k_id
: str, volumes_to_hold
: list, created_items
: dict
2986 """Cinder delete volume by id.
2988 k (str): Full item name in created_items
2989 k_id (str): ID of floating ip in VIM
2990 volumes_to_hold (list): Volumes not to delete
2991 created_items (dict): All created items belongs to VM
2994 if k_id
in volumes_to_hold
:
2997 if self
.cinder
.volumes
.get(k_id
).status
!= "available":
3001 self
.cinder
.volumes
.delete(k_id
)
3002 created_items
[k
] = None
3004 except Exception as e
:
3006 "Error deleting volume: {}: {}".format(type(e
).__name
__, e
)
3009 def _delete_floating_ip_by_id(self
, k
: str, k_id
: str, created_items
: dict) -> None:
3010 """Neutron delete floating ip by id.
3012 k (str): Full item name in created_items
3013 k_id (str): ID of floating ip in VIM
3014 created_items (dict): All created items belongs to VM
3017 self
.neutron
.delete_floatingip(k_id
)
3018 created_items
[k
] = None
3020 except Exception as e
:
3022 "Error deleting floating ip: {}: {}".format(type(e
).__name
__, e
)
3026 def _get_item_name_id(k
: str) -> Tuple
[str, str]:
3027 k_item
, _
, k_id
= k
.partition(":")
3030 def _delete_vm_ports_attached_to_network(self
, created_items
: dict) -> None:
3031 """Delete VM ports attached to the networks before deleting virtual machine.
3033 created_items (dict): All created items belongs to VM
3036 for k
, v
in created_items
.items():
3037 if not v
: # skip already deleted
3041 k_item
, k_id
= self
._get
_item
_name
_id
(k
)
3042 if k_item
== "port":
3043 self
._delete
_ports
_by
_id
_wth
_neutron
(k_id
)
3045 except Exception as e
:
3047 "Error deleting port: {}: {}".format(type(e
).__name
__, e
)
3050 def _delete_created_items(
3051 self
, created_items
: dict, volumes_to_hold
: list, keep_waiting
: bool
3053 """Delete Volumes and floating ip if they exist in created_items."""
3054 for k
, v
in created_items
.items():
3055 if not v
: # skip already deleted
3059 k_item
, k_id
= self
._get
_item
_name
_id
(k
)
3060 if k_item
== "volume":
3061 unavailable_vol
= self
._delete
_volumes
_by
_id
_wth
_cinder
(
3062 k
, k_id
, volumes_to_hold
, created_items
3068 elif k_item
== "floating_ip":
3069 self
._delete
_floating
_ip
_by
_id
(k
, k_id
, created_items
)
3071 except Exception as e
:
3072 self
.logger
.error("Error deleting {}: {}".format(k
, e
))
3077 def _extract_items_wth_keep_flag_from_created_items(created_items
: dict) -> dict:
3078 """Remove the volumes which has key flag from created_items
3081 created_items (dict): All created items belongs to VM
3084 created_items (dict): Persistent volumes eliminated created_items
3088 for (key
, value
) in created_items
.items()
3089 if len(key
.split(":")) == 2
3092 def delete_vminstance(
3093 self
, vm_id
: str, created_items
: dict = None, volumes_to_hold
: list = None
3095 """Removes a VM instance from VIM. Returns the old identifier.
3097 vm_id (str): Identifier of VM instance
3098 created_items (dict): All created items belongs to VM
3099 volumes_to_hold (list): Volumes_to_hold
3101 if created_items
is None:
3103 if volumes_to_hold
is None:
3104 volumes_to_hold
= []
3107 created_items
= self
._extract
_items
_wth
_keep
_flag
_from
_created
_items
(
3111 self
._reload
_connection
()
3113 # Delete VM ports attached to the networks before the virtual machine
3115 self
._delete
_vm
_ports
_attached
_to
_network
(created_items
)
3118 self
.nova
.servers
.delete(vm_id
)
3120 # Although having detached, volumes should have in active status before deleting.
3121 # We ensure in this loop
3125 while keep_waiting
and elapsed_time
< volume_timeout
:
3126 keep_waiting
= False
3128 # Delete volumes and floating IP.
3129 keep_waiting
= self
._delete
_created
_items
(
3130 created_items
, volumes_to_hold
, keep_waiting
3138 nvExceptions
.NotFound
,
3139 ksExceptions
.ClientException
,
3140 nvExceptions
.ClientException
,
3143 self
._format
_exception
(e
)
3145 def refresh_vms_status(self
, vm_list
):
3146 """Get the status of the virtual machines and their interfaces/ports
3147 Params: the list of VM identifiers
3148 Returns a dictionary with:
3149 vm_id: #VIM id of this Virtual Machine
3150 status: #Mandatory. Text with one of:
3151 # DELETED (not found at vim)
3152 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
3153 # OTHER (Vim reported other status not understood)
3154 # ERROR (VIM indicates an ERROR status)
3155 # ACTIVE, PAUSED, SUSPENDED, INACTIVE (not running),
3156 # CREATING (on building process), ERROR
3157 # ACTIVE:NoMgmtIP (Active but any of its interface has an IP address
3159 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
3160 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
3162 - vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
3163 mac_address: #Text format XX:XX:XX:XX:XX:XX
3164 vim_net_id: #network id where this interface is connected
3165 vim_interface_id: #interface/port VIM id
3166 ip_address: #null, or text with IPv4, IPv6 address
3167 compute_node: #identification of compute node where PF,VF interface is allocated
3168 pci: #PCI address of the NIC that hosts the PF,VF
3169 vlan: #physical VLAN used for VF
3173 "refresh_vms status: Getting tenant VM instance information from VIM"
3176 for vm_id
in vm_list
:
3180 vm_vim
= self
.get_vminstance(vm_id
)
3182 if vm_vim
["status"] in vmStatus2manoFormat
:
3183 vm
["status"] = vmStatus2manoFormat
[vm_vim
["status"]]
3185 vm
["status"] = "OTHER"
3186 vm
["error_msg"] = "VIM status reported " + vm_vim
["status"]
3188 vm_vim
.pop("OS-EXT-SRV-ATTR:user_data", None)
3189 vm_vim
.pop("user_data", None)
3190 vm
["vim_info"] = self
.serialize(vm_vim
)
3192 vm
["interfaces"] = []
3193 if vm_vim
.get("fault"):
3194 vm
["error_msg"] = str(vm_vim
["fault"])
3198 self
._reload
_connection
()
3199 port_dict
= self
.neutron
.list_ports(device_id
=vm_id
)
3201 for port
in port_dict
["ports"]:
3203 interface
["vim_info"] = self
.serialize(port
)
3204 interface
["mac_address"] = port
.get("mac_address")
3205 interface
["vim_net_id"] = port
["network_id"]
3206 interface
["vim_interface_id"] = port
["id"]
3207 # check if OS-EXT-SRV-ATTR:host is there,
3208 # in case of non-admin credentials, it will be missing
3210 if vm_vim
.get("OS-EXT-SRV-ATTR:host"):
3211 interface
["compute_node"] = vm_vim
["OS-EXT-SRV-ATTR:host"]
3213 interface
["pci"] = None
3215 # check if binding:profile is there,
3216 # in case of non-admin credentials, it will be missing
3217 if port
.get("binding:profile"):
3218 if port
["binding:profile"].get("pci_slot"):
3219 # TODO: At the moment sr-iov pci addresses are converted to PF pci addresses by setting
3221 # TODO: This is just a workaround valid for niantinc. Find a better way to do so
3222 # CHANGE DDDD:BB:SS.F to DDDD:BB:00.(F%2) assuming there are 2 ports per nic
3223 pci
= port
["binding:profile"]["pci_slot"]
3224 # interface["pci"] = pci[:-4] + "00." + str(int(pci[-1]) % 2)
3225 interface
["pci"] = pci
3227 interface
["vlan"] = None
3229 if port
.get("binding:vif_details"):
3230 interface
["vlan"] = port
["binding:vif_details"].get("vlan")
3232 # Get vlan from network in case not present in port for those old openstacks and cases where
3233 # it is needed vlan at PT
3234 if not interface
["vlan"]:
3235 # if network is of type vlan and port is of type direct (sr-iov) then set vlan id
3236 network
= self
.neutron
.show_network(port
["network_id"])
3239 network
["network"].get("provider:network_type")
3242 # and port.get("binding:vnic_type") in ("direct", "direct-physical"):
3243 interface
["vlan"] = network
["network"].get(
3244 "provider:segmentation_id"
3248 # look for floating ip address
3250 floating_ip_dict
= self
.neutron
.list_floatingips(
3254 if floating_ip_dict
.get("floatingips"):
3256 floating_ip_dict
["floatingips"][0].get(
3257 "floating_ip_address"
3263 for subnet
in port
["fixed_ips"]:
3264 ips
.append(subnet
["ip_address"])
3266 interface
["ip_address"] = ";".join(ips
)
3267 vm
["interfaces"].append(interface
)
3268 except Exception as e
:
3270 "Error getting vm interface information {}: {}".format(
3275 except vimconn
.VimConnNotFoundException
as e
:
3276 self
.logger
.error("Exception getting vm status: %s", str(e
))
3277 vm
["status"] = "DELETED"
3278 vm
["error_msg"] = str(e
)
3279 except vimconn
.VimConnException
as e
:
3280 self
.logger
.error("Exception getting vm status: %s", str(e
))
3281 vm
["status"] = "VIM_ERROR"
3282 vm
["error_msg"] = str(e
)
3288 def action_vminstance(self
, vm_id
, action_dict
, created_items
={}):
3289 """Send and action over a VM instance from VIM
3290 Returns None or the console dict if the action was successfully sent to the VIM
3292 self
.logger
.debug("Action over VM '%s': %s", vm_id
, str(action_dict
))
3295 self
._reload
_connection
()
3296 server
= self
.nova
.servers
.find(id=vm_id
)
3298 if "start" in action_dict
:
3299 if action_dict
["start"] == "rebuild":
3302 if server
.status
== "PAUSED":
3304 elif server
.status
== "SUSPENDED":
3306 elif server
.status
== "SHUTOFF":
3310 "ERROR : Instance is not in SHUTOFF/PAUSE/SUSPEND state"
3312 raise vimconn
.VimConnException(
3313 "Cannot 'start' instance while it is in active state",
3314 http_code
=vimconn
.HTTP_Bad_Request
,
3317 elif "pause" in action_dict
:
3319 elif "resume" in action_dict
:
3321 elif "shutoff" in action_dict
or "shutdown" in action_dict
:
3322 self
.logger
.debug("server status %s", server
.status
)
3323 if server
.status
== "ACTIVE":
3326 self
.logger
.debug("ERROR: VM is not in Active state")
3327 raise vimconn
.VimConnException(
3328 "VM is not in active state, stop operation is not allowed",
3329 http_code
=vimconn
.HTTP_Bad_Request
,
3331 elif "forceOff" in action_dict
:
3332 server
.stop() # TODO
3333 elif "terminate" in action_dict
:
3335 elif "createImage" in action_dict
:
3336 server
.create_image()
3337 # "path":path_schema,
3338 # "description":description_schema,
3339 # "name":name_schema,
3340 # "metadata":metadata_schema,
3341 # "imageRef": id_schema,
3342 # "disk": {"oneOf":[{"type": "null"}, {"type":"string"}] },
3343 elif "rebuild" in action_dict
:
3344 server
.rebuild(server
.image
["id"])
3345 elif "reboot" in action_dict
:
3346 server
.reboot() # reboot_type="SOFT"
3347 elif "console" in action_dict
:
3348 console_type
= action_dict
["console"]
3350 if console_type
is None or console_type
== "novnc":
3351 console_dict
= server
.get_vnc_console("novnc")
3352 elif console_type
== "xvpvnc":
3353 console_dict
= server
.get_vnc_console(console_type
)
3354 elif console_type
== "rdp-html5":
3355 console_dict
= server
.get_rdp_console(console_type
)
3356 elif console_type
== "spice-html5":
3357 console_dict
= server
.get_spice_console(console_type
)
3359 raise vimconn
.VimConnException(
3360 "console type '{}' not allowed".format(console_type
),
3361 http_code
=vimconn
.HTTP_Bad_Request
,
3365 console_url
= console_dict
["console"]["url"]
3367 protocol_index
= console_url
.find("//")
3369 console_url
[protocol_index
+ 2 :].find("/") + protocol_index
+ 2
3372 console_url
[protocol_index
+ 2 : suffix_index
].find(":")
3377 if protocol_index
< 0 or port_index
< 0 or suffix_index
< 0:
3378 raise vimconn
.VimConnException(
3379 "Unexpected response from VIM " + str(console_dict
)
3383 "protocol": console_url
[0:protocol_index
],
3384 "server": console_url
[protocol_index
+ 2 : port_index
],
3385 "port": int(console_url
[port_index
+ 1 : suffix_index
]),
3386 "suffix": console_url
[suffix_index
+ 1 :],
3389 return console_dict2
3391 raise vimconn
.VimConnException(
3392 "Unexpected response from VIM " + str(console_dict
)
3397 ksExceptions
.ClientException
,
3398 nvExceptions
.ClientException
,
3399 nvExceptions
.NotFound
,
3402 self
._format
_exception
(e
)
3403 # TODO insert exception vimconn.HTTP_Unauthorized
3405 # ###### VIO Specific Changes #########
3406 def _generate_vlanID(self
):
3408 Method to get unused vlanID
3416 networks
= self
.get_network_list()
3418 for net
in networks
:
3419 if net
.get("provider:segmentation_id"):
3420 usedVlanIDs
.append(net
.get("provider:segmentation_id"))
3422 used_vlanIDs
= set(usedVlanIDs
)
3424 # find unused VLAN ID
3425 for vlanID_range
in self
.config
.get("dataplane_net_vlan_range"):
3427 start_vlanid
, end_vlanid
= map(
3428 int, vlanID_range
.replace(" ", "").split("-")
3431 for vlanID
in range(start_vlanid
, end_vlanid
+ 1):
3432 if vlanID
not in used_vlanIDs
:
3434 except Exception as exp
:
3435 raise vimconn
.VimConnException(
3436 "Exception {} occurred while generating VLAN ID.".format(exp
)
3439 raise vimconn
.VimConnConflictException(
3440 "Unable to create the SRIOV VLAN network. All given Vlan IDs {} are in use.".format(
3441 self
.config
.get("dataplane_net_vlan_range")
3445 def _generate_multisegment_vlanID(self
):
3447 Method to get unused vlanID
3455 networks
= self
.get_network_list()
3456 for net
in networks
:
3457 if net
.get("provider:network_type") == "vlan" and net
.get(
3458 "provider:segmentation_id"
3460 usedVlanIDs
.append(net
.get("provider:segmentation_id"))
3461 elif net
.get("segments"):
3462 for segment
in net
.get("segments"):
3463 if segment
.get("provider:network_type") == "vlan" and segment
.get(
3464 "provider:segmentation_id"
3466 usedVlanIDs
.append(segment
.get("provider:segmentation_id"))
3468 used_vlanIDs
= set(usedVlanIDs
)
3470 # find unused VLAN ID
3471 for vlanID_range
in self
.config
.get("multisegment_vlan_range"):
3473 start_vlanid
, end_vlanid
= map(
3474 int, vlanID_range
.replace(" ", "").split("-")
3477 for vlanID
in range(start_vlanid
, end_vlanid
+ 1):
3478 if vlanID
not in used_vlanIDs
:
3480 except Exception as exp
:
3481 raise vimconn
.VimConnException(
3482 "Exception {} occurred while generating VLAN ID.".format(exp
)
3485 raise vimconn
.VimConnConflictException(
3486 "Unable to create the VLAN segment. All VLAN IDs {} are in use.".format(
3487 self
.config
.get("multisegment_vlan_range")
3491 def _validate_vlan_ranges(self
, input_vlan_range
, text_vlan_range
):
3493 Method to validate user given vlanID ranges
3497 for vlanID_range
in input_vlan_range
:
3498 vlan_range
= vlanID_range
.replace(" ", "")
3500 vlanID_pattern
= r
"(\d)*-(\d)*$"
3501 match_obj
= re
.match(vlanID_pattern
, vlan_range
)
3503 raise vimconn
.VimConnConflictException(
3504 "Invalid VLAN range for {}: {}.You must provide "
3505 "'{}' in format [start_ID - end_ID].".format(
3506 text_vlan_range
, vlanID_range
, text_vlan_range
3510 start_vlanid
, end_vlanid
= map(int, vlan_range
.split("-"))
3511 if start_vlanid
<= 0:
3512 raise vimconn
.VimConnConflictException(
3513 "Invalid VLAN range for {}: {}. Start ID can not be zero. For VLAN "
3514 "networks valid IDs are 1 to 4094 ".format(
3515 text_vlan_range
, vlanID_range
3519 if end_vlanid
> 4094:
3520 raise vimconn
.VimConnConflictException(
3521 "Invalid VLAN range for {}: {}. End VLAN ID can not be "
3522 "greater than 4094. For VLAN networks valid IDs are 1 to 4094 ".format(
3523 text_vlan_range
, vlanID_range
3527 if start_vlanid
> end_vlanid
:
3528 raise vimconn
.VimConnConflictException(
3529 "Invalid VLAN range for {}: {}. You must provide '{}'"
3530 " in format start_ID - end_ID and start_ID < end_ID ".format(
3531 text_vlan_range
, vlanID_range
, text_vlan_range
def get_hosts_info(self):
    """Get the information of deployed hosts
    Returns the hosts content"""
    if self.debug:
        print("osconnector: Getting Host info from VIM")

    try:
        hosts = []
        self._reload_connection()

        # One dict per hypervisor, as reported by nova.
        for hypervisor in self.nova.hypervisors.list():
            hosts.append(hypervisor.to_dict())

        return 1, {"hosts": hosts}
    except nvExceptions.NotFound as e:
        error_value = -vimconn.HTTP_Not_Found
        error_text = str(e) if len(e.args) == 0 else str(e.args[0])
    except (ksExceptions.ClientException, nvExceptions.ClientException) as e:
        error_value = -vimconn.HTTP_Bad_Request
        error_text = type(e).__name__ + ": " + (
            str(e) if len(e.args) == 0 else str(e.args[0])
        )

    # TODO insert exception vimconn.HTTP_Unauthorized
    # if reaching here is because an exception
    self.logger.debug("get_hosts_info " + error_text)

    return error_value, error_text
def get_hosts(self, vim_tenant):
    """Get the hosts and deployed instances
    Returns the hosts content"""
    r, hype_dict = self.get_hosts_info()

    if r < 0:
        # Propagate the (error_value, error_text) tuple unchanged.
        return r, hype_dict

    hypervisors = hype_dict["hosts"]

    try:
        servers = self.nova.servers.list()

        # Attach to each hypervisor the ids of the servers running on it.
        for hype in hypervisors:
            for server in servers:
                if (
                    server.to_dict()["OS-EXT-SRV-ATTR:hypervisor_hostname"]
                    == hype["hypervisor_hostname"]
                ):
                    if "vm" in hype:
                        hype["vm"].append(server.id)
                    else:
                        hype["vm"] = [server.id]

        return 1, hype_dict
    except nvExceptions.NotFound as e:
        error_value = -vimconn.HTTP_Not_Found
        error_text = str(e) if len(e.args) == 0 else str(e.args[0])
    except (ksExceptions.ClientException, nvExceptions.ClientException) as e:
        error_value = -vimconn.HTTP_Bad_Request
        error_text = type(e).__name__ + ": " + (
            str(e) if len(e.args) == 0 else str(e.args[0])
        )

    # TODO insert exception vimconn.HTTP_Unauthorized
    # if reaching here is because an exception
    self.logger.debug("get_hosts " + error_text)

    return error_value, error_text
def new_affinity_group(self, affinity_group_data):
    """Adds a server group to VIM
    affinity_group_data contains a dictionary with information, keys:
        name: name in VIM for the server group
        type: affinity or anti-affinity
        scope: Only nfvi-node allowed
    Returns the server group identifier"""
    self.logger.debug("Adding Server Group '%s'", str(affinity_group_data))

    try:
        name = affinity_group_data["name"]
        policy = affinity_group_data["type"]

        self._reload_connection()
        # Nova maps "affinity"/"anti-affinity" directly as the group policy.
        created_group = self.nova.server_groups.create(name, policy)

        return created_group.id
    except (
        ksExceptions.ClientException,
        nvExceptions.ClientException,
        ConnectionError,
        KeyError,
    ) as e:
        self._format_exception(e)
def get_affinity_group(self, affinity_group_id):
    """Obtain server group details from the VIM. Returns the server group details as a dict"""
    # Fixed copy-paste bug: the debug message previously said
    # "Getting flavor '%s'" although this method fetches a server group.
    self.logger.debug("Getting server group '%s'", affinity_group_id)

    try:
        self._reload_connection()
        server_group = self.nova.server_groups.find(id=affinity_group_id)

        return server_group.to_dict()
    except (
        nvExceptions.NotFound,
        nvExceptions.ClientException,
        ksExceptions.ClientException,
        ConnectionError,
    ) as e:
        self._format_exception(e)
def delete_affinity_group(self, affinity_group_id):
    """Deletes a server group from the VIM. Returns the old affinity_group_id"""
    # Fixed copy-paste bug: the debug message previously said
    # "Getting server group '%s'" although this method deletes one.
    self.logger.debug("Deleting server group '%s'", affinity_group_id)

    try:
        self._reload_connection()
        self.nova.server_groups.delete(affinity_group_id)

        return affinity_group_id
    except (
        nvExceptions.NotFound,
        ksExceptions.ClientException,
        nvExceptions.ClientException,
        ConnectionError,
    ) as e:
        self._format_exception(e)
def get_vdu_state(self, vm_id):
    """
    Getting the state of a vdu
    param:
        vm_id: ID of an instance
    Returns a list: [status, flavor_id, host, availability_zone]
    """
    self.logger.debug("Getting the status of VM")
    self.logger.debug("VIM VM ID %s", vm_id)

    self._reload_connection()
    details = self.nova.servers.find(id=vm_id).to_dict()

    vdu_data = [
        details["status"],
        details["flavor"]["id"],
        details["OS-EXT-SRV-ATTR:host"],
        details["OS-EXT-AZ:availability_zone"],
    ]
    self.logger.debug("vdu_data %s", vdu_data)

    return vdu_data
def check_compute_availability(self, host, server_flavor_details):
    """Check whether a compute host has enough free resources for a flavor.

    Args:
        host: hypervisor hostname (pattern passed to the nova hypervisor search).
        server_flavor_details: [ram, disk, vcpus] required by the flavor.

    Returns:
        The host name when a matching hypervisor has strictly more free RAM,
        disk and vCPUs than required; otherwise None (implicit).
    """
    self._reload_connection()
    hypervisor_search = self.nova.hypervisors.search(
        hypervisor_match=host, servers=True
    )

    for hypervisor in hypervisor_search:
        hypervisor_id = hypervisor.to_dict()["id"]
        hypervisor_details = self.nova.hypervisors.get(hypervisor=hypervisor_id)
        # Fixed: the previous implementation round-tripped this dict through
        # json.dumps/json.loads for no effect; use to_dict() output directly.
        hypervisor_dict = hypervisor_details.to_dict()
        resources_available = [
            hypervisor_dict["free_ram_mb"],
            hypervisor_dict["disk_available_least"],
            hypervisor_dict["vcpus"] - hypervisor_dict["vcpus_used"],
        ]
        compute_available = all(
            x > y for x, y in zip(resources_available, server_flavor_details)
        )

        if compute_available:
            return host
def check_availability_zone(
    self, old_az, server_flavor_details, old_host, host=None
):
    """Check zone membership and compute availability for a migration.

    Args:
        old_az: availability zone of the instance.
        server_flavor_details: [ram, disk, vcpus] required by the flavor.
        old_host: current compute host of the instance.
        host: optional target host; when omitted, any other host in the
            zone with enough resources is considered.

    Returns:
        dict with keys "zone_check" (bool) and "compute_availability"
        (host name or None).
    """
    self._reload_connection()
    az_check = {"zone_check": False, "compute_availability": None}

    for aggregate in self.nova.aggregates.list():
        # NOTE(review): json round-trip kept from the original code; it
        # normalizes the aggregate data to plain types.
        aggregate_json = json.loads(json.dumps(aggregate.to_dict()))

        if aggregate_json["availability_zone"] == old_az:
            hosts_list = aggregate_json["hosts"]

            if host is not None:
                # Explicit target: it must belong to this zone and have
                # enough free resources.
                if host in hosts_list:
                    az_check["zone_check"] = True
                    available_compute_id = self.check_compute_availability(
                        host, server_flavor_details
                    )

                    if available_compute_id is not None:
                        az_check["compute_availability"] = available_compute_id
            else:
                # No target given: pick the first other host in the zone
                # with enough free resources.
                for check_host in hosts_list:
                    if check_host != old_host:
                        available_compute_id = self.check_compute_availability(
                            check_host, server_flavor_details
                        )

                        if available_compute_id is not None:
                            az_check["zone_check"] = True
                            az_check["compute_availability"] = available_compute_id
                            break
        else:
            # Aggregate belongs to a different zone: zone check passes
            # as-is (behavior preserved from the original implementation).
            az_check["zone_check"] = True

    return az_check
def migrate_instance(self, vm_id, compute_host=None):
    """
    Migrate a vdu
    param:
        vm_id: ID of an instance
        compute_host: Host to migrate the vdu to
    """
    self._reload_connection()
    vm_state = False
    instance_state = self.get_vdu_state(vm_id)
    # get_vdu_state returns [status, flavor_id, host, availability_zone]
    server_flavor_id = instance_state[1]
    server_hypervisor_name = instance_state[2]
    server_availability_zone = instance_state[3]

    try:
        server_flavor = self.nova.flavors.find(id=server_flavor_id).to_dict()
        server_flavor_details = [
            server_flavor["ram"],
            server_flavor["disk"],
            server_flavor["vcpus"],
        ]

        if compute_host == server_hypervisor_name:
            raise vimconn.VimConnException(
                "Unable to migrate instance '{}' to the same host '{}'".format(
                    vm_id, compute_host
                ),
                http_code=vimconn.HTTP_Bad_Request,
            )

        az_status = self.check_availability_zone(
            server_availability_zone,
            server_flavor_details,
            server_hypervisor_name,
            compute_host,
        )
        availability_zone_check = az_status["zone_check"]
        available_compute_id = az_status.get("compute_availability")

        if availability_zone_check is False:
            raise vimconn.VimConnException(
                "Unable to migrate instance '{}' to a different availability zone".format(
                    vm_id
                ),
                http_code=vimconn.HTTP_Bad_Request,
            )

        if available_compute_id is not None:
            # Trigger the live migration and wait until the VM is ACTIVE
            # on the selected compute host.
            self.nova.servers.live_migrate(
                server=vm_id,
                host=available_compute_id,
                block_migration=True,
                disk_over_commit=False,
            )
            state = "MIGRATING"
            changed_compute_host = ""

            if state == "MIGRATING":
                vm_state = self.__wait_for_vm(vm_id, "ACTIVE")
                changed_compute_host = self.get_vdu_state(vm_id)[2]

            if vm_state and changed_compute_host == available_compute_id:
                self.logger.debug(
                    "Instance '{}' migrated to the new compute host '{}'".format(
                        vm_id, changed_compute_host
                    )
                )

                return state, available_compute_id
            else:
                raise vimconn.VimConnException(
                    "Migration Failed. Instance '{}' not moved to the new host {}".format(
                        vm_id, available_compute_id
                    ),
                    http_code=vimconn.HTTP_Bad_Request,
                )
        else:
            raise vimconn.VimConnException(
                "Compute '{}' not available or does not have enough resources to migrate the instance".format(
                    available_compute_id
                ),
                http_code=vimconn.HTTP_Bad_Request,
            )
    except (
        nvExceptions.BadRequest,
        nvExceptions.ClientException,
        nvExceptions.NotFound,
    ) as e:
        self._format_exception(e)
def resize_instance(self, vm_id, new_flavor_id):
    """
    For resizing the vm based on the given
    flavor details
    param:
        vm_id : ID of an instance
        new_flavor_id : Flavor id to be resized
    Return the status of a resized instance
    """
    self._reload_connection()
    self.logger.debug("resize the flavor of an instance")
    instance_status, old_flavor_id, compute_host, az = self.get_vdu_state(vm_id)
    old_flavor_disk = self.nova.flavors.find(id=old_flavor_id).to_dict()["disk"]
    new_flavor_disk = self.nova.flavors.find(id=new_flavor_id).to_dict()["disk"]

    try:
        if instance_status == "ACTIVE" or instance_status == "SHUTOFF":
            if old_flavor_disk > new_flavor_disk:
                # Shrinking the root disk is not supported by nova.
                raise nvExceptions.BadRequest(
                    400,
                    message="Server disk resize failed. Resize to lower disk flavor is not allowed",
                )
            else:
                self.nova.servers.resize(server=vm_id, flavor=new_flavor_id)
                vm_state = self.__wait_for_vm(vm_id, "VERIFY_RESIZE")

                if vm_state:
                    instance_resized_status = self.confirm_resize(vm_id)

                    return instance_resized_status
                else:
                    raise nvExceptions.BadRequest(
                        409,
                        message="Cannot 'resize' vm_state is in ERROR",
                    )
        else:
            self.logger.debug("ERROR : Instance is not in ACTIVE or SHUTOFF state")
            raise nvExceptions.BadRequest(
                409,
                message="Cannot 'resize' instance while it is in vm_state resized",
            )
    except (
        nvExceptions.BadRequest,
        nvExceptions.ClientException,
        nvExceptions.NotFound,
    ) as e:
        self._format_exception(e)
def confirm_resize(self, vm_id):
    """
    Confirm the resize of an instance
    param:
        vm_id: ID of an instance
    """
    self._reload_connection()
    self.nova.servers.confirm_resize(server=vm_id)

    # If nova still reports VERIFY_RESIZE, wait for the VM to settle
    # into ACTIVE before reading the final status.
    if self.get_vdu_state(vm_id)[0] == "VERIFY_RESIZE":
        self.__wait_for_vm(vm_id, "ACTIVE")

    instance_status = self.get_vdu_state(vm_id)[0]

    return instance_status
3883 def get_monitoring_data(self
):
3885 self
.logger
.debug("Getting servers and ports data from Openstack VIMs.")
3886 self
._reload
_connection
()
3887 all_servers
= self
.nova
.servers
.list(detailed
=True)
3888 all_ports
= self
.neutron
.list_ports()
3889 return all_servers
, all_ports
3891 vimconn
.VimConnException
,
3892 vimconn
.VimConnNotFoundException
,
3893 vimconn
.VimConnConnectionException
,
3895 raise vimconn
.VimConnException(
3896 f
"Exception in monitoring while getting VMs and ports status: {str(e)}"