3d5eb7740051bc1368e846868bb40f795626215f
[osm/RO.git] / RO-VIM-openstack / osm_rovim_openstack / vimconn_openstack.py
1 # -*- coding: utf-8 -*-
2
3 ##
4 # Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
5 # This file is part of openmano
6 # All Rights Reserved.
7 #
8 # Licensed under the Apache License, Version 2.0 (the "License"); you may
9 # not use this file except in compliance with the License. You may obtain
10 # a copy of the License at
11 #
12 # http://www.apache.org/licenses/LICENSE-2.0
13 #
14 # Unless required by applicable law or agreed to in writing, software
15 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
16 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
17 # License for the specific language governing permissions and limitations
18 # under the License.
19 ##
20
21 """
22 osconnector implements all the methods to interact with openstack using the python-neutronclient.
23
24 For the VNF forwarding graph, The OpenStack VIM connector calls the
25 networking-sfc Neutron extension methods, whose resources are mapped
26 to the VIM connector's SFC resources as follows:
27 - Classification (OSM) -> Flow Classifier (Neutron)
28 - Service Function Instance (OSM) -> Port Pair (Neutron)
29 - Service Function (OSM) -> Port Pair Group (Neutron)
30 - Service Function Path (OSM) -> Port Chain (Neutron)
31 """
32
33 import copy
34 from http.client import HTTPException
35 import json
36 import logging
37 from pprint import pformat
38 import random
39 import re
40 import time
41 from typing import Dict, Optional, Tuple
42
43 from cinderclient import client as cClient
44 from glanceclient import client as glClient
45 import glanceclient.exc as gl1Exceptions
46 from keystoneauth1 import session
47 from keystoneauth1.identity import v2, v3
48 import keystoneclient.exceptions as ksExceptions
49 import keystoneclient.v2_0.client as ksClient_v2
50 import keystoneclient.v3.client as ksClient_v3
51 import netaddr
52 from neutronclient.common import exceptions as neExceptions
53 from neutronclient.neutron import client as neClient
54 from novaclient import client as nClient, exceptions as nvExceptions
55 from osm_ro_plugin import vimconn
56 from requests.exceptions import ConnectionError
57 import yaml
58
__author__ = "Alfonso Tierno, Gerardo Garcia, Pablo Montes, xFlow Research, Igor D.C., Eduardo Sousa"
__date__ = "$22-sep-2017 23:59:59$"

"""contain the openstack virtual machine status to openmano status"""
# Mapping: OpenStack server (VM) status -> OSM/openmano status vocabulary.
# Any status not listed here is unknown to the mano layer.
vmStatus2manoFormat = {
    "ACTIVE": "ACTIVE",
    "PAUSED": "PAUSED",
    "SUSPENDED": "SUSPENDED",
    "SHUTOFF": "INACTIVE",
    "BUILD": "BUILD",
    "ERROR": "ERROR",
    "DELETED": "DELETED",
}
# Mapping: OpenStack network status -> OSM/openmano status vocabulary.
netStatus2manoFormat = {
    "ACTIVE": "ACTIVE",
    "PAUSED": "PAUSED",
    "INACTIVE": "INACTIVE",
    "BUILD": "BUILD",
    "ERROR": "ERROR",
    "DELETED": "DELETED",
}

# SFC classification types this connector can map to Neutron flow classifiers
# (see module docstring: Classification (OSM) -> Flow Classifier (Neutron)).
supportedClassificationTypes = ["legacy_flow_classifier"]

# global var to have a timeout creating and deleting volumes
volume_timeout = 1800
# timeout (seconds) waiting for server (VM) operations
server_timeout = 1800
86
87
class SafeDumper(yaml.SafeDumper):
    """YAML safe dumper that copes with the dict subclasses returned by OpenStack APIs."""

    def represent_data(self, data):
        # PyYAML's safe dumper deliberately refuses custom subclasses of dict
        # (reference issue 142 of pyyaml), yet the OpenStack client libraries
        # return plenty of them; downgrade any such object to a plain dict.
        if isinstance(data, dict) and type(data) is not dict:
            data = dict(data.items())

        return super().represent_data(data)
97
98
99 class vimconnector(vimconn.VimConnector):
100 def __init__(
101 self,
102 uuid,
103 name,
104 tenant_id,
105 tenant_name,
106 url,
107 url_admin=None,
108 user=None,
109 passwd=None,
110 log_level=None,
111 config={},
112 persistent_info={},
113 ):
114 """using common constructor parameters. In this case
115 'url' is the keystone authorization url,
116 'url_admin' is not use
117 """
118 api_version = config.get("APIversion")
119
120 if api_version and api_version not in ("v3.3", "v2.0", "2", "3"):
121 raise vimconn.VimConnException(
122 "Invalid value '{}' for config:APIversion. "
123 "Allowed values are 'v3.3', 'v2.0', '2' or '3'".format(api_version)
124 )
125
126 vim_type = config.get("vim_type")
127
128 if vim_type and vim_type not in ("vio", "VIO"):
129 raise vimconn.VimConnException(
130 "Invalid value '{}' for config:vim_type."
131 "Allowed values are 'vio' or 'VIO'".format(vim_type)
132 )
133
134 if config.get("dataplane_net_vlan_range") is not None:
135 # validate vlan ranges provided by user
136 self._validate_vlan_ranges(
137 config.get("dataplane_net_vlan_range"), "dataplane_net_vlan_range"
138 )
139
140 if config.get("multisegment_vlan_range") is not None:
141 # validate vlan ranges provided by user
142 self._validate_vlan_ranges(
143 config.get("multisegment_vlan_range"), "multisegment_vlan_range"
144 )
145
146 vimconn.VimConnector.__init__(
147 self,
148 uuid,
149 name,
150 tenant_id,
151 tenant_name,
152 url,
153 url_admin,
154 user,
155 passwd,
156 log_level,
157 config,
158 )
159
160 if self.config.get("insecure") and self.config.get("ca_cert"):
161 raise vimconn.VimConnException(
162 "options insecure and ca_cert are mutually exclusive"
163 )
164
165 self.verify = True
166
167 if self.config.get("insecure"):
168 self.verify = False
169
170 if self.config.get("ca_cert"):
171 self.verify = self.config.get("ca_cert")
172
173 if not url:
174 raise TypeError("url param can not be NoneType")
175
176 self.persistent_info = persistent_info
177 self.availability_zone = persistent_info.get("availability_zone", None)
178 self.session = persistent_info.get("session", {"reload_client": True})
179 self.my_tenant_id = self.session.get("my_tenant_id")
180 self.nova = self.session.get("nova")
181 self.neutron = self.session.get("neutron")
182 self.cinder = self.session.get("cinder")
183 self.glance = self.session.get("glance")
184 # self.glancev1 = self.session.get("glancev1")
185 self.keystone = self.session.get("keystone")
186 self.api_version3 = self.session.get("api_version3")
187 self.vim_type = self.config.get("vim_type")
188
189 if self.vim_type:
190 self.vim_type = self.vim_type.upper()
191
192 if self.config.get("use_internal_endpoint"):
193 self.endpoint_type = "internalURL"
194 else:
195 self.endpoint_type = None
196
197 logging.getLogger("urllib3").setLevel(logging.WARNING)
198 logging.getLogger("keystoneauth").setLevel(logging.WARNING)
199 logging.getLogger("novaclient").setLevel(logging.WARNING)
200 self.logger = logging.getLogger("ro.vim.openstack")
201
202 # allow security_groups to be a list or a single string
203 if isinstance(self.config.get("security_groups"), str):
204 self.config["security_groups"] = [self.config["security_groups"]]
205
206 self.security_groups_id = None
207
208 # ###### VIO Specific Changes #########
209 if self.vim_type == "VIO":
210 self.logger = logging.getLogger("ro.vim.vio")
211
212 if log_level:
213 self.logger.setLevel(getattr(logging, log_level))
214
215 def __getitem__(self, index):
216 """Get individuals parameters.
217 Throw KeyError"""
218 if index == "project_domain_id":
219 return self.config.get("project_domain_id")
220 elif index == "user_domain_id":
221 return self.config.get("user_domain_id")
222 else:
223 return vimconn.VimConnector.__getitem__(self, index)
224
225 def __setitem__(self, index, value):
226 """Set individuals parameters and it is marked as dirty so to force connection reload.
227 Throw KeyError"""
228 if index == "project_domain_id":
229 self.config["project_domain_id"] = value
230 elif index == "user_domain_id":
231 self.config["user_domain_id"] = value
232 else:
233 vimconn.VimConnector.__setitem__(self, index, value)
234
235 self.session["reload_client"] = True
236
237 def serialize(self, value):
238 """Serialization of python basic types.
239
240 In the case value is not serializable a message will be logged and a
241 simple representation of the data that cannot be converted back to
242 python is returned.
243 """
244 if isinstance(value, str):
245 return value
246
247 try:
248 return yaml.dump(
249 value, Dumper=SafeDumper, default_flow_style=True, width=256
250 )
251 except yaml.representer.RepresenterError:
252 self.logger.debug(
253 "The following entity cannot be serialized in YAML:\n\n%s\n\n",
254 pformat(value),
255 exc_info=True,
256 )
257
258 return str(value)
259
    def _reload_connection(self):
        """Called before any operation, it check if credentials has changed
        Throw keystoneclient.apiclient.exceptions.AuthorizationFailure

        Rebuilds the keystone/nova/neutron/cinder/glance clients whenever
        session["reload_client"] is set (fresh instance or credential change),
        and caches them in self.session / self.persistent_info.
        """
        # TODO control the timing and possible token timeout, but it seams that python client does this task for us :-)
        if self.session["reload_client"]:
            # Decide keystone API version: explicit config wins, otherwise it is
            # guessed from the auth_url suffix.
            if self.config.get("APIversion"):
                self.api_version3 = (
                    self.config["APIversion"] == "v3.3"
                    or self.config["APIversion"] == "3"
                )
            else:  # get from ending auth_url that end with v3 or with v2.0
                self.api_version3 = self.url.endswith("/v3") or self.url.endswith(
                    "/v3/"
                )

            self.session["api_version3"] = self.api_version3

            if self.api_version3:
                # Only fall back to the "default" domain when the user supplied
                # no domain id/name at all; an explicit name must not be
                # overridden by a default id.
                if self.config.get("project_domain_id") or self.config.get(
                    "project_domain_name"
                ):
                    project_domain_id_default = None
                else:
                    project_domain_id_default = "default"

                if self.config.get("user_domain_id") or self.config.get(
                    "user_domain_name"
                ):
                    user_domain_id_default = None
                else:
                    user_domain_id_default = "default"
                auth = v3.Password(
                    auth_url=self.url,
                    username=self.user,
                    password=self.passwd,
                    project_name=self.tenant_name,
                    project_id=self.tenant_id,
                    project_domain_id=self.config.get(
                        "project_domain_id", project_domain_id_default
                    ),
                    user_domain_id=self.config.get(
                        "user_domain_id", user_domain_id_default
                    ),
                    project_domain_name=self.config.get("project_domain_name"),
                    user_domain_name=self.config.get("user_domain_name"),
                )
            else:
                auth = v2.Password(
                    auth_url=self.url,
                    username=self.user,
                    password=self.passwd,
                    tenant_name=self.tenant_name,
                    tenant_id=self.tenant_id,
                )

            # self.verify is True, False or a CA bundle path (set in __init__)
            sess = session.Session(auth=auth, verify=self.verify)
            # addedd region_name to keystone, nova, neutron and cinder to support distributed cloud for Wind River
            # Titanium cloud and StarlingX
            region_name = self.config.get("region_name")

            if self.api_version3:
                self.keystone = ksClient_v3.Client(
                    session=sess,
                    endpoint_type=self.endpoint_type,
                    region_name=region_name,
                )
            else:
                self.keystone = ksClient_v2.Client(
                    session=sess, endpoint_type=self.endpoint_type
                )

            self.session["keystone"] = self.keystone
            # In order to enable microversion functionality an explicit microversion must be specified in "config".
            # This implementation approach is due to the warning message in
            # https://developer.openstack.org/api-guide/compute/microversions.html
            # where it is stated that microversion backwards compatibility is not guaranteed and clients should
            # always require an specific microversion.
            # To be able to use "device role tagging" functionality define "microversion: 2.32" in datacenter config
            version = self.config.get("microversion")

            if not version:
                version = "2.1"

            # addedd region_name to keystone, nova, neutron and cinder to support distributed cloud for Wind River
            # Titanium cloud and StarlingX
            self.nova = self.session["nova"] = nClient.Client(
                str(version),
                session=sess,
                endpoint_type=self.endpoint_type,
                region_name=region_name,
            )
            self.neutron = self.session["neutron"] = neClient.Client(
                "2.0",
                session=sess,
                endpoint_type=self.endpoint_type,
                region_name=region_name,
            )
            self.cinder = self.session["cinder"] = cClient.Client(
                2,
                session=sess,
                endpoint_type=self.endpoint_type,
                region_name=region_name,
            )

            try:
                self.my_tenant_id = self.session["my_tenant_id"] = sess.get_project_id()
            except Exception:
                # best-effort: continue without the project id; operations that
                # need it will fail later with a clearer error
                self.logger.error("Cannot get project_id from session", exc_info=True)

            if self.endpoint_type == "internalURL":
                # glance does not take endpoint_type like the others; resolve the
                # internal endpoint manually through the keystone catalog
                glance_service_id = self.keystone.services.list(name="glance")[0].id
                glance_endpoint = self.keystone.endpoints.list(
                    glance_service_id, interface="internal"
                )[0].url
            else:
                glance_endpoint = None

            self.glance = self.session["glance"] = glClient.Client(
                2, session=sess, endpoint=glance_endpoint
            )
            # using version 1 of glance client in new_image()
            # self.glancev1 = self.session["glancev1"] = glClient.Client("1", session=sess,
            #                                                            endpoint=glance_endpoint)
            self.session["reload_client"] = False
            self.persistent_info["session"] = self.session
            # add availablity zone info inside self.persistent_info
            self._set_availablity_zones()
            self.persistent_info["availability_zone"] = self.availability_zone
            # force to get again security_groups_ids next time they are needed
            self.security_groups_id = None
391
392 def __net_os2mano(self, net_list_dict):
393 """Transform the net openstack format to mano format
394 net_list_dict can be a list of dict or a single dict"""
395 if type(net_list_dict) is dict:
396 net_list_ = (net_list_dict,)
397 elif type(net_list_dict) is list:
398 net_list_ = net_list_dict
399 else:
400 raise TypeError("param net_list_dict must be a list or a dictionary")
401 for net in net_list_:
402 if net.get("provider:network_type") == "vlan":
403 net["type"] = "data"
404 else:
405 net["type"] = "bridge"
406
407 def __classification_os2mano(self, class_list_dict):
408 """Transform the openstack format (Flow Classifier) to mano format
409 (Classification) class_list_dict can be a list of dict or a single dict
410 """
411 if isinstance(class_list_dict, dict):
412 class_list_ = [class_list_dict]
413 elif isinstance(class_list_dict, list):
414 class_list_ = class_list_dict
415 else:
416 raise TypeError("param class_list_dict must be a list or a dictionary")
417 for classification in class_list_:
418 id = classification.pop("id")
419 name = classification.pop("name")
420 description = classification.pop("description")
421 project_id = classification.pop("project_id")
422 tenant_id = classification.pop("tenant_id")
423 original_classification = copy.deepcopy(classification)
424 classification.clear()
425 classification["ctype"] = "legacy_flow_classifier"
426 classification["definition"] = original_classification
427 classification["id"] = id
428 classification["name"] = name
429 classification["description"] = description
430 classification["project_id"] = project_id
431 classification["tenant_id"] = tenant_id
432
433 def __sfi_os2mano(self, sfi_list_dict):
434 """Transform the openstack format (Port Pair) to mano format (SFI)
435 sfi_list_dict can be a list of dict or a single dict
436 """
437 if isinstance(sfi_list_dict, dict):
438 sfi_list_ = [sfi_list_dict]
439 elif isinstance(sfi_list_dict, list):
440 sfi_list_ = sfi_list_dict
441 else:
442 raise TypeError("param sfi_list_dict must be a list or a dictionary")
443
444 for sfi in sfi_list_:
445 sfi["ingress_ports"] = []
446 sfi["egress_ports"] = []
447
448 if sfi.get("ingress"):
449 sfi["ingress_ports"].append(sfi["ingress"])
450
451 if sfi.get("egress"):
452 sfi["egress_ports"].append(sfi["egress"])
453
454 del sfi["ingress"]
455 del sfi["egress"]
456 params = sfi.get("service_function_parameters")
457 sfc_encap = False
458
459 if params:
460 correlation = params.get("correlation")
461
462 if correlation:
463 sfc_encap = True
464
465 sfi["sfc_encap"] = sfc_encap
466 del sfi["service_function_parameters"]
467
468 def __sf_os2mano(self, sf_list_dict):
469 """Transform the openstack format (Port Pair Group) to mano format (SF)
470 sf_list_dict can be a list of dict or a single dict
471 """
472 if isinstance(sf_list_dict, dict):
473 sf_list_ = [sf_list_dict]
474 elif isinstance(sf_list_dict, list):
475 sf_list_ = sf_list_dict
476 else:
477 raise TypeError("param sf_list_dict must be a list or a dictionary")
478
479 for sf in sf_list_:
480 del sf["port_pair_group_parameters"]
481 sf["sfis"] = sf["port_pairs"]
482 del sf["port_pairs"]
483
484 def __sfp_os2mano(self, sfp_list_dict):
485 """Transform the openstack format (Port Chain) to mano format (SFP)
486 sfp_list_dict can be a list of dict or a single dict
487 """
488 if isinstance(sfp_list_dict, dict):
489 sfp_list_ = [sfp_list_dict]
490 elif isinstance(sfp_list_dict, list):
491 sfp_list_ = sfp_list_dict
492 else:
493 raise TypeError("param sfp_list_dict must be a list or a dictionary")
494
495 for sfp in sfp_list_:
496 params = sfp.pop("chain_parameters")
497 sfc_encap = False
498
499 if params:
500 correlation = params.get("correlation")
501
502 if correlation:
503 sfc_encap = True
504
505 sfp["sfc_encap"] = sfc_encap
506 sfp["spi"] = sfp.pop("chain_id")
507 sfp["classifications"] = sfp.pop("flow_classifiers")
508 sfp["service_functions"] = sfp.pop("port_pair_groups")
509
510 # placeholder for now; read TODO note below
511 def _validate_classification(self, type, definition):
512 # only legacy_flow_classifier Type is supported at this point
513 return True
514 # TODO(igordcard): this method should be an abstract method of an
515 # abstract Classification class to be implemented by the specific
516 # Types. Also, abstract vimconnector should call the validation
517 # method before the implemented VIM connectors are called.
518
519 def _format_exception(self, exception):
520 """Transform a keystone, nova, neutron exception into a vimconn exception discovering the cause"""
521 message_error = str(exception)
522 tip = ""
523
524 if isinstance(
525 exception,
526 (
527 neExceptions.NetworkNotFoundClient,
528 nvExceptions.NotFound,
529 ksExceptions.NotFound,
530 gl1Exceptions.HTTPNotFound,
531 ),
532 ):
533 raise vimconn.VimConnNotFoundException(
534 type(exception).__name__ + ": " + message_error
535 )
536 elif isinstance(
537 exception,
538 (
539 HTTPException,
540 gl1Exceptions.HTTPException,
541 gl1Exceptions.CommunicationError,
542 ConnectionError,
543 ksExceptions.ConnectionError,
544 neExceptions.ConnectionFailed,
545 ),
546 ):
547 if type(exception).__name__ == "SSLError":
548 tip = " (maybe option 'insecure' must be added to the VIM)"
549
550 raise vimconn.VimConnConnectionException(
551 "Invalid URL or credentials{}: {}".format(tip, message_error)
552 )
553 elif isinstance(
554 exception,
555 (
556 KeyError,
557 nvExceptions.BadRequest,
558 ksExceptions.BadRequest,
559 ),
560 ):
561 raise vimconn.VimConnException(
562 type(exception).__name__ + ": " + message_error
563 )
564 elif isinstance(
565 exception,
566 (
567 nvExceptions.ClientException,
568 ksExceptions.ClientException,
569 neExceptions.NeutronException,
570 ),
571 ):
572 raise vimconn.VimConnUnexpectedResponse(
573 type(exception).__name__ + ": " + message_error
574 )
575 elif isinstance(exception, nvExceptions.Conflict):
576 raise vimconn.VimConnConflictException(
577 type(exception).__name__ + ": " + message_error
578 )
579 elif isinstance(exception, vimconn.VimConnException):
580 raise exception
581 else: # ()
582 self.logger.error("General Exception " + message_error, exc_info=True)
583
584 raise vimconn.VimConnConnectionException(
585 type(exception).__name__ + ": " + message_error
586 )
587
588 def _get_ids_from_name(self):
589 """
590 Obtain ids from name of tenant and security_groups. Store at self .security_groups_id"
591 :return: None
592 """
593 # get tenant_id if only tenant_name is supplied
594 self._reload_connection()
595
596 if not self.my_tenant_id:
597 raise vimconn.VimConnConnectionException(
598 "Error getting tenant information from name={} id={}".format(
599 self.tenant_name, self.tenant_id
600 )
601 )
602
603 if self.config.get("security_groups") and not self.security_groups_id:
604 # convert from name to id
605 neutron_sg_list = self.neutron.list_security_groups(
606 tenant_id=self.my_tenant_id
607 )["security_groups"]
608
609 self.security_groups_id = []
610 for sg in self.config.get("security_groups"):
611 for neutron_sg in neutron_sg_list:
612 if sg in (neutron_sg["id"], neutron_sg["name"]):
613 self.security_groups_id.append(neutron_sg["id"])
614 break
615 else:
616 self.security_groups_id = None
617
618 raise vimconn.VimConnConnectionException(
619 "Not found security group {} for this tenant".format(sg)
620 )
621
622 def check_vim_connectivity(self):
623 # just get network list to check connectivity and credentials
624 self.get_network_list(filter_dict={})
625
626 def get_tenant_list(self, filter_dict={}):
627 """Obtain tenants of VIM
628 filter_dict can contain the following keys:
629 name: filter by tenant name
630 id: filter by tenant uuid/id
631 <other VIM specific>
632 Returns the tenant list of dictionaries: [{'name':'<name>, 'id':'<id>, ...}, ...]
633 """
634 self.logger.debug("Getting tenants from VIM filter: '%s'", str(filter_dict))
635
636 try:
637 self._reload_connection()
638
639 if self.api_version3:
640 project_class_list = self.keystone.projects.list(
641 name=filter_dict.get("name")
642 )
643 else:
644 project_class_list = self.keystone.tenants.findall(**filter_dict)
645
646 project_list = []
647
648 for project in project_class_list:
649 if filter_dict.get("id") and filter_dict["id"] != project.id:
650 continue
651
652 project_list.append(project.to_dict())
653
654 return project_list
655 except (
656 ksExceptions.ConnectionError,
657 ksExceptions.ClientException,
658 ConnectionError,
659 ) as e:
660 self._format_exception(e)
661
662 def new_tenant(self, tenant_name, tenant_description):
663 """Adds a new tenant to openstack VIM. Returns the tenant identifier"""
664 self.logger.debug("Adding a new tenant name: %s", tenant_name)
665
666 try:
667 self._reload_connection()
668
669 if self.api_version3:
670 project = self.keystone.projects.create(
671 tenant_name,
672 self.config.get("project_domain_id", "default"),
673 description=tenant_description,
674 is_domain=False,
675 )
676 else:
677 project = self.keystone.tenants.create(tenant_name, tenant_description)
678
679 return project.id
680 except (
681 ksExceptions.ConnectionError,
682 ksExceptions.ClientException,
683 ksExceptions.BadRequest,
684 ConnectionError,
685 ) as e:
686 self._format_exception(e)
687
688 def delete_tenant(self, tenant_id):
689 """Delete a tenant from openstack VIM. Returns the old tenant identifier"""
690 self.logger.debug("Deleting tenant %s from VIM", tenant_id)
691
692 try:
693 self._reload_connection()
694
695 if self.api_version3:
696 self.keystone.projects.delete(tenant_id)
697 else:
698 self.keystone.tenants.delete(tenant_id)
699
700 return tenant_id
701 except (
702 ksExceptions.ConnectionError,
703 ksExceptions.ClientException,
704 ksExceptions.NotFound,
705 ConnectionError,
706 ) as e:
707 self._format_exception(e)
708
709 def new_network(
710 self,
711 net_name,
712 net_type,
713 ip_profile=None,
714 shared=False,
715 provider_network_profile=None,
716 ):
717 """Adds a tenant network to VIM
718 Params:
719 'net_name': name of the network
720 'net_type': one of:
721 'bridge': overlay isolated network
722 'data': underlay E-LAN network for Passthrough and SRIOV interfaces
723 'ptp': underlay E-LINE network for Passthrough and SRIOV interfaces.
724 'ip_profile': is a dict containing the IP parameters of the network
725 'ip_version': can be "IPv4" or "IPv6" (Currently only IPv4 is implemented)
726 'subnet_address': ip_prefix_schema, that is X.X.X.X/Y
727 'gateway_address': (Optional) ip_schema, that is X.X.X.X
728 'dns_address': (Optional) comma separated list of ip_schema, e.g. X.X.X.X[,X,X,X,X]
729 'dhcp_enabled': True or False
730 'dhcp_start_address': ip_schema, first IP to grant
731 'dhcp_count': number of IPs to grant.
732 'shared': if this network can be seen/use by other tenants/organization
733 'provider_network_profile': (optional) contains {segmentation-id: vlan, network-type: vlan|vxlan,
734 physical-network: physnet-label}
735 Returns a tuple with the network identifier and created_items, or raises an exception on error
736 created_items can be None or a dictionary where this method can include key-values that will be passed to
737 the method delete_network. Can be used to store created segments, created l2gw connections, etc.
738 Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
739 as not present.
740 """
741 self.logger.debug(
742 "Adding a new network to VIM name '%s', type '%s'", net_name, net_type
743 )
744 # self.logger.debug(">>>>>>>>>>>>>>>>>> IP profile %s", str(ip_profile))
745
746 try:
747 vlan = None
748
749 if provider_network_profile:
750 vlan = provider_network_profile.get("segmentation-id")
751
752 new_net = None
753 created_items = {}
754 self._reload_connection()
755 network_dict = {"name": net_name, "admin_state_up": True}
756
757 if net_type in ("data", "ptp"):
758 provider_physical_network = None
759
760 if provider_network_profile and provider_network_profile.get(
761 "physical-network"
762 ):
763 provider_physical_network = provider_network_profile.get(
764 "physical-network"
765 )
766
767 # provider-network must be one of the dataplane_physcial_netowrk if this is a list. If it is string
768 # or not declared, just ignore the checking
769 if (
770 isinstance(
771 self.config.get("dataplane_physical_net"), (tuple, list)
772 )
773 and provider_physical_network
774 not in self.config["dataplane_physical_net"]
775 ):
776 raise vimconn.VimConnConflictException(
777 "Invalid parameter 'provider-network:physical-network' "
778 "for network creation. '{}' is not one of the declared "
779 "list at VIM_config:dataplane_physical_net".format(
780 provider_physical_network
781 )
782 )
783
784 # use the default dataplane_physical_net
785 if not provider_physical_network:
786 provider_physical_network = self.config.get(
787 "dataplane_physical_net"
788 )
789
790 # if it is non empty list, use the first value. If it is a string use the value directly
791 if (
792 isinstance(provider_physical_network, (tuple, list))
793 and provider_physical_network
794 ):
795 provider_physical_network = provider_physical_network[0]
796
797 if not provider_physical_network:
798 raise vimconn.VimConnConflictException(
799 "missing information needed for underlay networks. Provide "
800 "'dataplane_physical_net' configuration at VIM or use the NS "
801 "instantiation parameter 'provider-network.physical-network'"
802 " for the VLD"
803 )
804
805 if not self.config.get("multisegment_support"):
806 network_dict[
807 "provider:physical_network"
808 ] = provider_physical_network
809
810 if (
811 provider_network_profile
812 and "network-type" in provider_network_profile
813 ):
814 network_dict[
815 "provider:network_type"
816 ] = provider_network_profile["network-type"]
817 else:
818 network_dict["provider:network_type"] = self.config.get(
819 "dataplane_network_type", "vlan"
820 )
821
822 if vlan:
823 network_dict["provider:segmentation_id"] = vlan
824 else:
825 # Multi-segment case
826 segment_list = []
827 segment1_dict = {
828 "provider:physical_network": "",
829 "provider:network_type": "vxlan",
830 }
831 segment_list.append(segment1_dict)
832 segment2_dict = {
833 "provider:physical_network": provider_physical_network,
834 "provider:network_type": "vlan",
835 }
836
837 if vlan:
838 segment2_dict["provider:segmentation_id"] = vlan
839 elif self.config.get("multisegment_vlan_range"):
840 vlanID = self._generate_multisegment_vlanID()
841 segment2_dict["provider:segmentation_id"] = vlanID
842
843 # else
844 # raise vimconn.VimConnConflictException(
845 # "You must provide "multisegment_vlan_range" at config dict before creating a multisegment
846 # network")
847 segment_list.append(segment2_dict)
848 network_dict["segments"] = segment_list
849
850 # VIO Specific Changes. It needs a concrete VLAN
851 if self.vim_type == "VIO" and vlan is None:
852 if self.config.get("dataplane_net_vlan_range") is None:
853 raise vimconn.VimConnConflictException(
854 "You must provide 'dataplane_net_vlan_range' in format "
855 "[start_ID - end_ID] at VIM_config for creating underlay "
856 "networks"
857 )
858
859 network_dict["provider:segmentation_id"] = self._generate_vlanID()
860
861 network_dict["shared"] = shared
862
863 if self.config.get("disable_network_port_security"):
864 network_dict["port_security_enabled"] = False
865
866 if self.config.get("neutron_availability_zone_hints"):
867 hints = self.config.get("neutron_availability_zone_hints")
868
869 if isinstance(hints, str):
870 hints = [hints]
871
872 network_dict["availability_zone_hints"] = hints
873
874 new_net = self.neutron.create_network({"network": network_dict})
875 # print new_net
876 # create subnetwork, even if there is no profile
877
878 if not ip_profile:
879 ip_profile = {}
880
881 if not ip_profile.get("subnet_address"):
882 # Fake subnet is required
883 subnet_rand = random.randint(0, 255)
884 ip_profile["subnet_address"] = "192.168.{}.0/24".format(subnet_rand)
885
886 if "ip_version" not in ip_profile:
887 ip_profile["ip_version"] = "IPv4"
888
889 subnet = {
890 "name": net_name + "-subnet",
891 "network_id": new_net["network"]["id"],
892 "ip_version": 4 if ip_profile["ip_version"] == "IPv4" else 6,
893 "cidr": ip_profile["subnet_address"],
894 }
895
896 # Gateway should be set to None if not needed. Otherwise openstack assigns one by default
897 if ip_profile.get("gateway_address"):
898 subnet["gateway_ip"] = ip_profile["gateway_address"]
899 else:
900 subnet["gateway_ip"] = None
901
902 if ip_profile.get("dns_address"):
903 subnet["dns_nameservers"] = ip_profile["dns_address"].split(";")
904
905 if "dhcp_enabled" in ip_profile:
906 subnet["enable_dhcp"] = (
907 False
908 if ip_profile["dhcp_enabled"] == "false"
909 or ip_profile["dhcp_enabled"] is False
910 else True
911 )
912
913 if ip_profile.get("dhcp_start_address"):
914 subnet["allocation_pools"] = []
915 subnet["allocation_pools"].append(dict())
916 subnet["allocation_pools"][0]["start"] = ip_profile[
917 "dhcp_start_address"
918 ]
919
920 if ip_profile.get("dhcp_count"):
921 # parts = ip_profile["dhcp_start_address"].split(".")
922 # ip_int = (int(parts[0]) << 24) + (int(parts[1]) << 16) + (int(parts[2]) << 8) + int(parts[3])
923 ip_int = int(netaddr.IPAddress(ip_profile["dhcp_start_address"]))
924 ip_int += ip_profile["dhcp_count"] - 1
925 ip_str = str(netaddr.IPAddress(ip_int))
926 subnet["allocation_pools"][0]["end"] = ip_str
927
928 # self.logger.debug(">>>>>>>>>>>>>>>>>> Subnet: %s", str(subnet))
929 self.neutron.create_subnet({"subnet": subnet})
930
931 if net_type == "data" and self.config.get("multisegment_support"):
932 if self.config.get("l2gw_support"):
933 l2gw_list = self.neutron.list_l2_gateways().get("l2_gateways", ())
934 for l2gw in l2gw_list:
935 l2gw_conn = {
936 "l2_gateway_id": l2gw["id"],
937 "network_id": new_net["network"]["id"],
938 "segmentation_id": str(vlanID),
939 }
940 new_l2gw_conn = self.neutron.create_l2_gateway_connection(
941 {"l2_gateway_connection": l2gw_conn}
942 )
943 created_items[
944 "l2gwconn:"
945 + str(new_l2gw_conn["l2_gateway_connection"]["id"])
946 ] = True
947
948 return new_net["network"]["id"], created_items
949 except Exception as e:
950 # delete l2gw connections (if any) before deleting the network
951 for k, v in created_items.items():
952 if not v: # skip already deleted
953 continue
954
955 try:
956 k_item, _, k_id = k.partition(":")
957
958 if k_item == "l2gwconn":
959 self.neutron.delete_l2_gateway_connection(k_id)
960 except Exception as e2:
961 self.logger.error(
962 "Error deleting l2 gateway connection: {}: {}".format(
963 type(e2).__name__, e2
964 )
965 )
966
967 if new_net:
968 self.neutron.delete_network(new_net["network"]["id"])
969
970 self._format_exception(e)
971
972 def get_network_list(self, filter_dict={}):
973 """Obtain tenant networks of VIM
974 Filter_dict can be:
975 name: network name
976 id: network uuid
977 shared: boolean
978 tenant_id: tenant
979 admin_state_up: boolean
980 status: 'ACTIVE'
981 Returns the network list of dictionaries
982 """
983 self.logger.debug("Getting network from VIM filter: '%s'", str(filter_dict))
984
985 try:
986 self._reload_connection()
987 filter_dict_os = filter_dict.copy()
988
989 if self.api_version3 and "tenant_id" in filter_dict_os:
990 # TODO check
991 filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")
992
993 net_dict = self.neutron.list_networks(**filter_dict_os)
994 net_list = net_dict["networks"]
995 self.__net_os2mano(net_list)
996
997 return net_list
998 except (
999 neExceptions.ConnectionFailed,
1000 ksExceptions.ClientException,
1001 neExceptions.NeutronException,
1002 ConnectionError,
1003 ) as e:
1004 self._format_exception(e)
1005
1006 def get_network(self, net_id):
1007 """Obtain details of network from VIM
1008 Returns the network information from a network id"""
1009 self.logger.debug(" Getting tenant network %s from VIM", net_id)
1010 filter_dict = {"id": net_id}
1011 net_list = self.get_network_list(filter_dict)
1012
1013 if len(net_list) == 0:
1014 raise vimconn.VimConnNotFoundException(
1015 "Network '{}' not found".format(net_id)
1016 )
1017 elif len(net_list) > 1:
1018 raise vimconn.VimConnConflictException(
1019 "Found more than one network with this criteria"
1020 )
1021
1022 net = net_list[0]
1023 subnets = []
1024 for subnet_id in net.get("subnets", ()):
1025 try:
1026 subnet = self.neutron.show_subnet(subnet_id)
1027 except Exception as e:
1028 self.logger.error(
1029 "osconnector.get_network(): Error getting subnet %s %s"
1030 % (net_id, str(e))
1031 )
1032 subnet = {"id": subnet_id, "fault": str(e)}
1033
1034 subnets.append(subnet)
1035
1036 net["subnets"] = subnets
1037 net["encapsulation"] = net.get("provider:network_type")
1038 net["encapsulation_type"] = net.get("provider:network_type")
1039 net["segmentation_id"] = net.get("provider:segmentation_id")
1040 net["encapsulation_id"] = net.get("provider:segmentation_id")
1041
1042 return net
1043
1044 def delete_network(self, net_id, created_items=None):
1045 """
1046 Removes a tenant network from VIM and its associated elements
1047 :param net_id: VIM identifier of the network, provided by method new_network
1048 :param created_items: dictionary with extra items to be deleted. provided by method new_network
1049 Returns the network identifier or raises an exception upon error or when network is not found
1050 """
1051 self.logger.debug("Deleting network '%s' from VIM", net_id)
1052
1053 if created_items is None:
1054 created_items = {}
1055
1056 try:
1057 self._reload_connection()
1058 # delete l2gw connections (if any) before deleting the network
1059 for k, v in created_items.items():
1060 if not v: # skip already deleted
1061 continue
1062
1063 try:
1064 k_item, _, k_id = k.partition(":")
1065 if k_item == "l2gwconn":
1066 self.neutron.delete_l2_gateway_connection(k_id)
1067 except Exception as e:
1068 self.logger.error(
1069 "Error deleting l2 gateway connection: {}: {}".format(
1070 type(e).__name__, e
1071 )
1072 )
1073
1074 # delete VM ports attached to this networks before the network
1075 ports = self.neutron.list_ports(network_id=net_id)
1076 for p in ports["ports"]:
1077 try:
1078 self.neutron.delete_port(p["id"])
1079 except Exception as e:
1080 self.logger.error("Error deleting port %s: %s", p["id"], str(e))
1081
1082 self.neutron.delete_network(net_id)
1083
1084 return net_id
1085 except (
1086 neExceptions.ConnectionFailed,
1087 neExceptions.NetworkNotFoundClient,
1088 neExceptions.NeutronException,
1089 ksExceptions.ClientException,
1090 neExceptions.NeutronException,
1091 ConnectionError,
1092 ) as e:
1093 self._format_exception(e)
1094
1095 def refresh_nets_status(self, net_list):
1096 """Get the status of the networks
1097 Params: the list of network identifiers
1098 Returns a dictionary with:
1099 net_id: #VIM id of this network
1100 status: #Mandatory. Text with one of:
1101 # DELETED (not found at vim)
1102 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
1103 # OTHER (Vim reported other status not understood)
1104 # ERROR (VIM indicates an ERROR status)
1105 # ACTIVE, INACTIVE, DOWN (admin down),
1106 # BUILD (on building process)
1107 #
1108 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
1109 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
1110 """
1111 net_dict = {}
1112
1113 for net_id in net_list:
1114 net = {}
1115
1116 try:
1117 net_vim = self.get_network(net_id)
1118
1119 if net_vim["status"] in netStatus2manoFormat:
1120 net["status"] = netStatus2manoFormat[net_vim["status"]]
1121 else:
1122 net["status"] = "OTHER"
1123 net["error_msg"] = "VIM status reported " + net_vim["status"]
1124
1125 if net["status"] == "ACTIVE" and not net_vim["admin_state_up"]:
1126 net["status"] = "DOWN"
1127
1128 net["vim_info"] = self.serialize(net_vim)
1129
1130 if net_vim.get("fault"): # TODO
1131 net["error_msg"] = str(net_vim["fault"])
1132 except vimconn.VimConnNotFoundException as e:
1133 self.logger.error("Exception getting net status: %s", str(e))
1134 net["status"] = "DELETED"
1135 net["error_msg"] = str(e)
1136 except vimconn.VimConnException as e:
1137 self.logger.error("Exception getting net status: %s", str(e))
1138 net["status"] = "VIM_ERROR"
1139 net["error_msg"] = str(e)
1140 net_dict[net_id] = net
1141 return net_dict
1142
1143 def get_flavor(self, flavor_id):
1144 """Obtain flavor details from the VIM. Returns the flavor dict details"""
1145 self.logger.debug("Getting flavor '%s'", flavor_id)
1146
1147 try:
1148 self._reload_connection()
1149 flavor = self.nova.flavors.find(id=flavor_id)
1150 # TODO parse input and translate to VIM format (openmano_schemas.new_vminstance_response_schema)
1151
1152 return flavor.to_dict()
1153 except (
1154 nvExceptions.NotFound,
1155 nvExceptions.ClientException,
1156 ksExceptions.ClientException,
1157 ConnectionError,
1158 ) as e:
1159 self._format_exception(e)
1160
    def get_flavor_id_from_data(self, flavor_dict):
        """Obtain flavor id that match the flavor description
        Returns the flavor_id or raises a vimconnNotFoundException
        flavor_dict: contains the required ram, vcpus, disk
        If 'use_existing_flavors' is set to True at config, the closer flavor that provides same or more ram, vcpus
        and disk is returned. Otherwise a flavor with exactly same ram, vcpus and disk is returned or a
        vimconnNotFoundException is raised
        """
        # exact_match=False allows returning a "close" flavor instead of an identical one
        exact_match = False if self.config.get("use_existing_flavors") else True

        try:
            self._reload_connection()
            flavor_candidate_id = None
            # Sentinel tuple: any real flavor should compare smaller than this
            flavor_candidate_data = (10000, 10000, 10000)
            flavor_target = (
                flavor_dict["ram"],
                flavor_dict["vcpus"],
                flavor_dict["disk"],
                flavor_dict.get("ephemeral", 0),
                flavor_dict.get("swap", 0),
            )
            # numa=None
            extended = flavor_dict.get("extended", {})
            if extended:
                # TODO
                raise vimconn.VimConnNotFoundException(
                    "Flavor with EPA still not implemented"
                )
            # if len(numas) > 1:
            #     raise vimconn.VimConnNotFoundException("Cannot find any flavor with more than one numa")
            # numa=numas[0]
            # numas = extended.get("numas")
            for flavor in self.nova.flavors.list():
                epa = flavor.get_keys()

                # flavors carrying EPA extra_specs are skipped (not comparable here)
                if epa:
                    continue
                    # TODO

                flavor_data = (
                    flavor.ram,
                    flavor.vcpus,
                    flavor.disk,
                    flavor.ephemeral,
                    flavor.swap if isinstance(flavor.swap, int) else 0,
                )
                if flavor_data == flavor_target:
                    return flavor.id
                elif (
                    not exact_match
                    and flavor_target < flavor_data < flavor_candidate_data
                ):
                    # NOTE(review): tuples compare lexicographically, so "closer"
                    # is dominated by ram, then vcpus, etc. — confirm this
                    # heuristic is the intended notion of closeness
                    flavor_candidate_id = flavor.id
                    flavor_candidate_data = flavor_data

            if not exact_match and flavor_candidate_id:
                return flavor_candidate_id

            raise vimconn.VimConnNotFoundException(
                "Cannot find any flavor matching '{}'".format(flavor_dict)
            )
        except (
            nvExceptions.NotFound,
            nvExceptions.ClientException,
            ksExceptions.ClientException,
            ConnectionError,
        ) as e:
            self._format_exception(e)
1229
1230 def process_resource_quota(self, quota, prefix, extra_specs):
1231 """
1232 :param prefix:
1233 :param extra_specs:
1234 :return:
1235 """
1236 if "limit" in quota:
1237 extra_specs["quota:" + prefix + "_limit"] = quota["limit"]
1238
1239 if "reserve" in quota:
1240 extra_specs["quota:" + prefix + "_reservation"] = quota["reserve"]
1241
1242 if "shares" in quota:
1243 extra_specs["quota:" + prefix + "_shares_level"] = "custom"
1244 extra_specs["quota:" + prefix + "_shares_share"] = quota["shares"]
1245
    def new_flavor(self, flavor_data, change_name_if_used=True):
        """Adds a tenant flavor to openstack VIM
        if change_name_if_used is True, it will change name in case of conflict, because it is not supported name
        repetition
        Returns the flavor identifier
        """
        self.logger.debug("Adding flavor '%s'", str(flavor_data))
        retry = 0
        max_retries = 3
        name_suffix = 0

        try:
            name = flavor_data["name"]
            # Retry loop exists only to resolve name conflicts (nvExceptions.Conflict below)
            while retry < max_retries:
                retry += 1
                try:
                    self._reload_connection()

                    if change_name_if_used:
                        # get used names
                        fl_names = []
                        fl = self.nova.flavors.list()

                        for f in fl:
                            fl_names.append(f.name)

                        # append "-<n>" to the requested name until it is unused
                        while name in fl_names:
                            name_suffix += 1
                            name = flavor_data["name"] + "-" + str(name_suffix)

                    ram = flavor_data.get("ram", 64)
                    vcpus = flavor_data.get("vcpus", 1)
                    extra_specs = {}

                    extended = flavor_data.get("extended")
                    if extended:
                        numas = extended.get("numas")

                        if numas:
                            numa_nodes = len(numas)

                            extra_specs["hw:numa_nodes"] = str(numa_nodes)

                            if self.vim_type == "VIO":
                                extra_specs[
                                    "vmware:extra_config"
                                ] = '{"numa.nodeAffinity":"0"}'
                                extra_specs["vmware:latency_sensitivity_level"] = "high"

                            for numa in numas:
                                if "id" in numa:
                                    node_id = numa["id"]

                                    if "memory" in numa:
                                        memory_mb = numa["memory"] * 1024
                                        memory = "hw:numa_mem.{}".format(node_id)
                                        # NOTE(review): stored as int while most extra_specs
                                        # here are strings — confirm nova accepts this
                                        extra_specs[memory] = int(memory_mb)

                                    if "vcpu" in numa:
                                        vcpu = numa["vcpu"]
                                        cpu = "hw:numa_cpus.{}".format(node_id)
                                        vcpu = ",".join(map(str, vcpu))
                                        extra_specs[cpu] = vcpu

                                # overwrite ram and vcpus
                                # check if key "memory" is present in numa else use ram value at flavor
                                # See for reference: https://specs.openstack.org/openstack/nova-specs/specs/mitaka/
                                # implemented/virt-driver-cpu-thread-pinning.html
                                extra_specs["hw:cpu_sockets"] = str(numa_nodes)

                                # NOTE(review): vcpus is overwritten on every numa iteration,
                                # so only the last numa's value survives — confirm intended
                                if "paired-threads" in numa:
                                    vcpus = numa["paired-threads"] * 2
                                    # cpu_thread_policy "require" implies that the compute node must have an
                                    # STM architecture
                                    extra_specs["hw:cpu_thread_policy"] = "require"
                                    extra_specs["hw:cpu_policy"] = "dedicated"
                                elif "cores" in numa:
                                    vcpus = numa["cores"]
                                    # cpu_thread_policy "prefer" implies that the host must not have an SMT
                                    # architecture, or a non-SMT architecture will be emulated
                                    extra_specs["hw:cpu_thread_policy"] = "isolate"
                                    extra_specs["hw:cpu_policy"] = "dedicated"
                                elif "threads" in numa:
                                    vcpus = numa["threads"]
                                    # cpu_thread_policy "prefer" implies that the host may or may not have an SMT
                                    # architecture
                                    extra_specs["hw:cpu_thread_policy"] = "prefer"
                                    extra_specs["hw:cpu_policy"] = "dedicated"
                                # for interface in numa.get("interfaces",() ):
                                #     if interface["dedicated"]=="yes":
                                #         raise vimconn.VimConnException("Passthrough interfaces are not supported
                                #     for the openstack connector", http_code=vimconn.HTTP_Service_Unavailable)
                                #     #TODO, add the key 'pci_passthrough:alias"="<label at config>:<number ifaces>"'
                                #      when a way to connect it is available
                        elif extended.get("cpu-quota"):
                            self.process_resource_quota(
                                extended.get("cpu-quota"), "cpu", extra_specs
                            )

                        if extended.get("mem-quota"):
                            self.process_resource_quota(
                                extended.get("mem-quota"), "memory", extra_specs
                            )

                        if extended.get("vif-quota"):
                            self.process_resource_quota(
                                extended.get("vif-quota"), "vif", extra_specs
                            )

                        if extended.get("disk-io-quota"):
                            self.process_resource_quota(
                                extended.get("disk-io-quota"), "disk_io", extra_specs
                            )

                        # Set the mempage size as specified in the descriptor
                        if extended.get("mempage-size"):
                            if extended.get("mempage-size") == "LARGE":
                                extra_specs["hw:mem_page_size"] = "large"
                            elif extended.get("mempage-size") == "SMALL":
                                extra_specs["hw:mem_page_size"] = "small"
                            elif extended.get("mempage-size") == "SIZE_2MB":
                                extra_specs["hw:mem_page_size"] = "2MB"
                            elif extended.get("mempage-size") == "SIZE_1GB":
                                extra_specs["hw:mem_page_size"] = "1GB"
                            elif extended.get("mempage-size") == "PREFER_LARGE":
                                extra_specs["hw:mem_page_size"] = "any"
                            else:
                                # The validations in NBI should make reaching here not possible.
                                # If this message is shown, check validations
                                self.logger.debug(
                                    "Invalid mempage-size %s. Will be ignored",
                                    extended.get("mempage-size"),
                                )
                        if extended.get("cpu-pinning-policy"):
                            extra_specs["hw:cpu_policy"] = extended.get(
                                "cpu-pinning-policy"
                            ).lower()

                        # Set the cpu thread pinning policy as specified in the descriptor
                        if extended.get("cpu-thread-pinning-policy"):
                            extra_specs["hw:cpu_thread_policy"] = extended.get(
                                "cpu-thread-pinning-policy"
                            ).lower()

                        # Set the mem policy as specified in the descriptor
                        if extended.get("mem-policy"):
                            extra_specs["hw:numa_mempolicy"] = extended.get(
                                "mem-policy"
                            ).lower()

                    # create flavor
                    new_flavor = self.nova.flavors.create(
                        name=name,
                        ram=ram,
                        vcpus=vcpus,
                        disk=flavor_data.get("disk", 0),
                        ephemeral=flavor_data.get("ephemeral", 0),
                        swap=flavor_data.get("swap", 0),
                        is_public=flavor_data.get("is_public", True),
                    )
                    # add metadata
                    if extra_specs:
                        new_flavor.set_keys(extra_specs)

                    return new_flavor.id
                except nvExceptions.Conflict as e:
                    # name collision: loop again (a new suffixed name will be picked)
                    if change_name_if_used and retry < max_retries:
                        continue

                    self._format_exception(e)
        # except nvExceptions.BadRequest as e:
        except (
            ksExceptions.ClientException,
            nvExceptions.ClientException,
            ConnectionError,
            KeyError,
        ) as e:
            self._format_exception(e)
1424
1425 def delete_flavor(self, flavor_id):
1426 """Deletes a tenant flavor from openstack VIM. Returns the old flavor_id"""
1427 try:
1428 self._reload_connection()
1429 self.nova.flavors.delete(flavor_id)
1430
1431 return flavor_id
1432 # except nvExceptions.BadRequest as e:
1433 except (
1434 nvExceptions.NotFound,
1435 ksExceptions.ClientException,
1436 nvExceptions.ClientException,
1437 ConnectionError,
1438 ) as e:
1439 self._format_exception(e)
1440
1441 def new_image(self, image_dict):
1442 """
1443 Adds a tenant image to VIM. imge_dict is a dictionary with:
1444 name: name
1445 disk_format: qcow2, vhd, vmdk, raw (by default), ...
1446 location: path or URI
1447 public: "yes" or "no"
1448 metadata: metadata of the image
1449 Returns the image_id
1450 """
1451 retry = 0
1452 max_retries = 3
1453
1454 while retry < max_retries:
1455 retry += 1
1456 try:
1457 self._reload_connection()
1458
1459 # determine format http://docs.openstack.org/developer/glance/formats.html
1460 if "disk_format" in image_dict:
1461 disk_format = image_dict["disk_format"]
1462 else: # autodiscover based on extension
1463 if image_dict["location"].endswith(".qcow2"):
1464 disk_format = "qcow2"
1465 elif image_dict["location"].endswith(".vhd"):
1466 disk_format = "vhd"
1467 elif image_dict["location"].endswith(".vmdk"):
1468 disk_format = "vmdk"
1469 elif image_dict["location"].endswith(".vdi"):
1470 disk_format = "vdi"
1471 elif image_dict["location"].endswith(".iso"):
1472 disk_format = "iso"
1473 elif image_dict["location"].endswith(".aki"):
1474 disk_format = "aki"
1475 elif image_dict["location"].endswith(".ari"):
1476 disk_format = "ari"
1477 elif image_dict["location"].endswith(".ami"):
1478 disk_format = "ami"
1479 else:
1480 disk_format = "raw"
1481
1482 self.logger.debug(
1483 "new_image: '%s' loading from '%s'",
1484 image_dict["name"],
1485 image_dict["location"],
1486 )
1487 if self.vim_type == "VIO":
1488 container_format = "bare"
1489 if "container_format" in image_dict:
1490 container_format = image_dict["container_format"]
1491
1492 new_image = self.glance.images.create(
1493 name=image_dict["name"],
1494 container_format=container_format,
1495 disk_format=disk_format,
1496 )
1497 else:
1498 new_image = self.glance.images.create(name=image_dict["name"])
1499
1500 if image_dict["location"].startswith("http"):
1501 # TODO there is not a method to direct download. It must be downloaded locally with requests
1502 raise vimconn.VimConnNotImplemented("Cannot create image from URL")
1503 else: # local path
1504 with open(image_dict["location"]) as fimage:
1505 self.glance.images.upload(new_image.id, fimage)
1506 # new_image = self.glancev1.images.create(name=image_dict["name"], is_public=
1507 # image_dict.get("public","yes")=="yes",
1508 # container_format="bare", data=fimage, disk_format=disk_format)
1509
1510 metadata_to_load = image_dict.get("metadata")
1511
1512 # TODO location is a reserved word for current openstack versions. fixed for VIO please check
1513 # for openstack
1514 if self.vim_type == "VIO":
1515 metadata_to_load["upload_location"] = image_dict["location"]
1516 else:
1517 metadata_to_load["location"] = image_dict["location"]
1518
1519 self.glance.images.update(new_image.id, **metadata_to_load)
1520
1521 return new_image.id
1522 except (
1523 nvExceptions.Conflict,
1524 ksExceptions.ClientException,
1525 nvExceptions.ClientException,
1526 ) as e:
1527 self._format_exception(e)
1528 except (
1529 HTTPException,
1530 gl1Exceptions.HTTPException,
1531 gl1Exceptions.CommunicationError,
1532 ConnectionError,
1533 ) as e:
1534 if retry == max_retries:
1535 continue
1536
1537 self._format_exception(e)
1538 except IOError as e: # can not open the file
1539 raise vimconn.VimConnConnectionException(
1540 "{}: {} for {}".format(type(e).__name__, e, image_dict["location"]),
1541 http_code=vimconn.HTTP_Bad_Request,
1542 )
1543
1544 def delete_image(self, image_id):
1545 """Deletes a tenant image from openstack VIM. Returns the old id"""
1546 try:
1547 self._reload_connection()
1548 self.glance.images.delete(image_id)
1549
1550 return image_id
1551 except (
1552 nvExceptions.NotFound,
1553 ksExceptions.ClientException,
1554 nvExceptions.ClientException,
1555 gl1Exceptions.CommunicationError,
1556 gl1Exceptions.HTTPNotFound,
1557 ConnectionError,
1558 ) as e: # TODO remove
1559 self._format_exception(e)
1560
1561 def get_image_id_from_path(self, path):
1562 """Get the image id from image path in the VIM database. Returns the image_id"""
1563 try:
1564 self._reload_connection()
1565 images = self.glance.images.list()
1566
1567 for image in images:
1568 if image.metadata.get("location") == path:
1569 return image.id
1570
1571 raise vimconn.VimConnNotFoundException(
1572 "image with location '{}' not found".format(path)
1573 )
1574 except (
1575 ksExceptions.ClientException,
1576 nvExceptions.ClientException,
1577 gl1Exceptions.CommunicationError,
1578 ConnectionError,
1579 ) as e:
1580 self._format_exception(e)
1581
1582 def get_image_list(self, filter_dict={}):
1583 """Obtain tenant images from VIM
1584 Filter_dict can be:
1585 id: image id
1586 name: image name
1587 checksum: image checksum
1588 Returns the image list of dictionaries:
1589 [{<the fields at Filter_dict plus some VIM specific>}, ...]
1590 List can be empty
1591 """
1592 self.logger.debug("Getting image list from VIM filter: '%s'", str(filter_dict))
1593
1594 try:
1595 self._reload_connection()
1596 # filter_dict_os = filter_dict.copy()
1597 # First we filter by the available filter fields: name, id. The others are removed.
1598 image_list = self.glance.images.list()
1599 filtered_list = []
1600
1601 for image in image_list:
1602 try:
1603 if filter_dict.get("name") and image["name"] != filter_dict["name"]:
1604 continue
1605
1606 if filter_dict.get("id") and image["id"] != filter_dict["id"]:
1607 continue
1608
1609 if (
1610 filter_dict.get("checksum")
1611 and image["checksum"] != filter_dict["checksum"]
1612 ):
1613 continue
1614
1615 filtered_list.append(image.copy())
1616 except gl1Exceptions.HTTPNotFound:
1617 pass
1618
1619 return filtered_list
1620 except (
1621 ksExceptions.ClientException,
1622 nvExceptions.ClientException,
1623 gl1Exceptions.CommunicationError,
1624 ConnectionError,
1625 ) as e:
1626 self._format_exception(e)
1627
1628 def __wait_for_vm(self, vm_id, status):
1629 """wait until vm is in the desired status and return True.
1630 If the VM gets in ERROR status, return false.
1631 If the timeout is reached generate an exception"""
1632 elapsed_time = 0
1633 while elapsed_time < server_timeout:
1634 vm_status = self.nova.servers.get(vm_id).status
1635
1636 if vm_status == status:
1637 return True
1638
1639 if vm_status == "ERROR":
1640 return False
1641
1642 time.sleep(5)
1643 elapsed_time += 5
1644
1645 # if we exceeded the timeout rollback
1646 if elapsed_time >= server_timeout:
1647 raise vimconn.VimConnException(
1648 "Timeout waiting for instance " + vm_id + " to get " + status,
1649 http_code=vimconn.HTTP_Request_Timeout,
1650 )
1651
1652 def _get_openstack_availablity_zones(self):
1653 """
1654 Get from openstack availability zones available
1655 :return:
1656 """
1657 try:
1658 openstack_availability_zone = self.nova.availability_zones.list()
1659 openstack_availability_zone = [
1660 str(zone.zoneName)
1661 for zone in openstack_availability_zone
1662 if zone.zoneName != "internal"
1663 ]
1664
1665 return openstack_availability_zone
1666 except Exception:
1667 return None
1668
1669 def _set_availablity_zones(self):
1670 """
1671 Set vim availablity zone
1672 :return:
1673 """
1674 if "availability_zone" in self.config:
1675 vim_availability_zones = self.config.get("availability_zone")
1676
1677 if isinstance(vim_availability_zones, str):
1678 self.availability_zone = [vim_availability_zones]
1679 elif isinstance(vim_availability_zones, list):
1680 self.availability_zone = vim_availability_zones
1681 else:
1682 self.availability_zone = self._get_openstack_availablity_zones()
1683
1684 def _get_vm_availability_zone(
1685 self, availability_zone_index, availability_zone_list
1686 ):
1687 """
1688 Return thge availability zone to be used by the created VM.
1689 :return: The VIM availability zone to be used or None
1690 """
1691 if availability_zone_index is None:
1692 if not self.config.get("availability_zone"):
1693 return None
1694 elif isinstance(self.config.get("availability_zone"), str):
1695 return self.config["availability_zone"]
1696 else:
1697 # TODO consider using a different parameter at config for default AV and AV list match
1698 return self.config["availability_zone"][0]
1699
1700 vim_availability_zones = self.availability_zone
1701 # check if VIM offer enough availability zones describe in the VNFD
1702 if vim_availability_zones and len(availability_zone_list) <= len(
1703 vim_availability_zones
1704 ):
1705 # check if all the names of NFV AV match VIM AV names
1706 match_by_index = False
1707 for av in availability_zone_list:
1708 if av not in vim_availability_zones:
1709 match_by_index = True
1710 break
1711
1712 if match_by_index:
1713 return vim_availability_zones[availability_zone_index]
1714 else:
1715 return availability_zone_list[availability_zone_index]
1716 else:
1717 raise vimconn.VimConnConflictException(
1718 "No enough availability zones at VIM for this deployment"
1719 )
1720
1721 def _prepare_port_dict_security_groups(self, net: dict, port_dict: dict) -> None:
1722 """Fill up the security_groups in the port_dict.
1723
1724 Args:
1725 net (dict): Network details
1726 port_dict (dict): Port details
1727
1728 """
1729 if (
1730 self.config.get("security_groups")
1731 and net.get("port_security") is not False
1732 and not self.config.get("no_port_security_extension")
1733 ):
1734 if not self.security_groups_id:
1735 self._get_ids_from_name()
1736
1737 port_dict["security_groups"] = self.security_groups_id
1738
1739 def _prepare_port_dict_binding(self, net: dict, port_dict: dict) -> None:
1740 """Fill up the network binding depending on network type in the port_dict.
1741
1742 Args:
1743 net (dict): Network details
1744 port_dict (dict): Port details
1745
1746 """
1747 if not net.get("type"):
1748 raise vimconn.VimConnException("Type is missing in the network details.")
1749
1750 if net["type"] == "virtual":
1751 pass
1752
1753 # For VF
1754 elif net["type"] == "VF" or net["type"] == "SR-IOV":
1755
1756 port_dict["binding:vnic_type"] = "direct"
1757
1758 # VIO specific Changes
1759 if self.vim_type == "VIO":
1760 # Need to create port with port_security_enabled = False and no-security-groups
1761 port_dict["port_security_enabled"] = False
1762 port_dict["provider_security_groups"] = []
1763 port_dict["security_groups"] = []
1764
1765 else:
1766 # For PT PCI-PASSTHROUGH
1767 port_dict["binding:vnic_type"] = "direct-physical"
1768
1769 @staticmethod
1770 def _set_fixed_ip(new_port: dict, net: dict) -> None:
1771 """Set the "ip" parameter in net dictionary.
1772
1773 Args:
1774 new_port (dict): New created port
1775 net (dict): Network details
1776
1777 """
1778 fixed_ips = new_port["port"].get("fixed_ips")
1779
1780 if fixed_ips:
1781 net["ip"] = fixed_ips[0].get("ip_address")
1782 else:
1783 net["ip"] = None
1784
1785 @staticmethod
1786 def _prepare_port_dict_mac_ip_addr(net: dict, port_dict: dict) -> None:
1787 """Fill up the mac_address and fixed_ips in port_dict.
1788
1789 Args:
1790 net (dict): Network details
1791 port_dict (dict): Port details
1792
1793 """
1794 if net.get("mac_address"):
1795 port_dict["mac_address"] = net["mac_address"]
1796
1797 if net.get("ip_address"):
1798 port_dict["fixed_ips"] = [{"ip_address": net["ip_address"]}]
1799 # TODO add "subnet_id": <subnet_id>
1800
1801 def _create_new_port(self, port_dict: dict, created_items: dict, net: dict) -> Dict:
1802 """Create new port using neutron.
1803
1804 Args:
1805 port_dict (dict): Port details
1806 created_items (dict): All created items
1807 net (dict): Network details
1808
1809 Returns:
1810 new_port (dict): New created port
1811
1812 """
1813 new_port = self.neutron.create_port({"port": port_dict})
1814 created_items["port:" + str(new_port["port"]["id"])] = True
1815 net["mac_adress"] = new_port["port"]["mac_address"]
1816 net["vim_id"] = new_port["port"]["id"]
1817
1818 return new_port
1819
1820 def _create_port(
1821 self, net: dict, name: str, created_items: dict
1822 ) -> Tuple[dict, dict]:
1823 """Create port using net details.
1824
1825 Args:
1826 net (dict): Network details
1827 name (str): Name to be used as network name if net dict does not include name
1828 created_items (dict): All created items
1829
1830 Returns:
1831 new_port, port New created port, port dictionary
1832
1833 """
1834
1835 port_dict = {
1836 "network_id": net["net_id"],
1837 "name": net.get("name"),
1838 "admin_state_up": True,
1839 }
1840
1841 if not port_dict["name"]:
1842 port_dict["name"] = name
1843
1844 self._prepare_port_dict_security_groups(net, port_dict)
1845
1846 self._prepare_port_dict_binding(net, port_dict)
1847
1848 vimconnector._prepare_port_dict_mac_ip_addr(net, port_dict)
1849
1850 new_port = self._create_new_port(port_dict, created_items, net)
1851
1852 vimconnector._set_fixed_ip(new_port, net)
1853
1854 port = {"port-id": new_port["port"]["id"]}
1855
1856 if float(self.nova.api_version.get_string()) >= 2.32:
1857 port["tag"] = new_port["port"]["name"]
1858
1859 return new_port, port
1860
1861 def _prepare_network_for_vminstance(
1862 self,
1863 name: str,
1864 net_list: list,
1865 created_items: dict,
1866 net_list_vim: list,
1867 external_network: list,
1868 no_secured_ports: list,
1869 ) -> None:
1870 """Create port and fill up net dictionary for new VM instance creation.
1871
1872 Args:
1873 name (str): Name of network
1874 net_list (list): List of networks
1875 created_items (dict): All created items belongs to a VM
1876 net_list_vim (list): List of ports
1877 external_network (list): List of external-networks
1878 no_secured_ports (list): Port security disabled ports
1879 """
1880
1881 self._reload_connection()
1882
1883 for net in net_list:
1884 # Skip non-connected iface
1885 if not net.get("net_id"):
1886 continue
1887
1888 new_port, port = self._create_port(net, name, created_items)
1889
1890 net_list_vim.append(port)
1891
1892 if net.get("floating_ip", False):
1893 net["exit_on_floating_ip_error"] = True
1894 external_network.append(net)
1895
1896 elif net["use"] == "mgmt" and self.config.get("use_floating_ip"):
1897 net["exit_on_floating_ip_error"] = False
1898 external_network.append(net)
1899 net["floating_ip"] = self.config.get("use_floating_ip")
1900
1901 # If port security is disabled when the port has not yet been attached to the VM, then all vm traffic
1902 # is dropped. As a workaround we wait until the VM is active and then disable the port-security
1903 if net.get("port_security") is False and not self.config.get(
1904 "no_port_security_extension"
1905 ):
1906 no_secured_ports.append(
1907 (
1908 new_port["port"]["id"],
1909 net.get("port_security_disable_strategy"),
1910 )
1911 )
1912
1913 def _prepare_persistent_root_volumes(
1914 self,
1915 name: str,
1916 vm_av_zone: list,
1917 disk: dict,
1918 base_disk_index: int,
1919 block_device_mapping: dict,
1920 existing_vim_volumes: list,
1921 created_items: dict,
1922 ) -> Optional[str]:
1923 """Prepare persistent root volumes for new VM instance.
1924
1925 Args:
1926 name (str): Name of VM instance
1927 vm_av_zone (list): List of availability zones
1928 disk (dict): Disk details
1929 base_disk_index (int): Disk index
1930 block_device_mapping (dict): Block device details
1931 existing_vim_volumes (list): Existing disk details
1932 created_items (dict): All created items belongs to VM
1933
1934 Returns:
1935 boot_volume_id (str): ID of boot volume
1936
1937 """
1938 # Disk may include only vim_volume_id or only vim_id."
1939 # Use existing persistent root volume finding with volume_id or vim_id
1940 key_id = "vim_volume_id" if "vim_volume_id" in disk.keys() else "vim_id"
1941
1942 if disk.get(key_id):
1943
1944 block_device_mapping["vd" + chr(base_disk_index)] = disk[key_id]
1945 existing_vim_volumes.append({"id": disk[key_id]})
1946
1947 else:
1948 # Create persistent root volume
1949 volume = self.cinder.volumes.create(
1950 size=disk["size"],
1951 name=name + "vd" + chr(base_disk_index),
1952 imageRef=disk["image_id"],
1953 # Make sure volume is in the same AZ as the VM to be attached to
1954 availability_zone=vm_av_zone,
1955 )
1956 boot_volume_id = volume.id
1957 self.update_block_device_mapping(
1958 volume=volume,
1959 block_device_mapping=block_device_mapping,
1960 base_disk_index=base_disk_index,
1961 disk=disk,
1962 created_items=created_items,
1963 )
1964
1965 return boot_volume_id
1966
1967 @staticmethod
1968 def update_block_device_mapping(
1969 volume: object,
1970 block_device_mapping: dict,
1971 base_disk_index: int,
1972 disk: dict,
1973 created_items: dict,
1974 ) -> None:
1975 """Add volume information to block device mapping dict.
1976 Args:
1977 volume (object): Created volume object
1978 block_device_mapping (dict): Block device details
1979 base_disk_index (int): Disk index
1980 disk (dict): Disk details
1981 created_items (dict): All created items belongs to VM
1982 """
1983 if not volume:
1984 raise vimconn.VimConnException("Volume is empty.")
1985
1986 if not hasattr(volume, "id"):
1987 raise vimconn.VimConnException(
1988 "Created volume is not valid, does not have id attribute."
1989 )
1990
1991 volume_txt = "volume:" + str(volume.id)
1992 if disk.get("keep"):
1993 volume_txt += ":keep"
1994 created_items[volume_txt] = True
1995 block_device_mapping["vd" + chr(base_disk_index)] = volume.id
1996
1997 def _prepare_non_root_persistent_volumes(
1998 self,
1999 name: str,
2000 disk: dict,
2001 vm_av_zone: list,
2002 block_device_mapping: dict,
2003 base_disk_index: int,
2004 existing_vim_volumes: list,
2005 created_items: dict,
2006 ) -> None:
2007 """Prepare persistent volumes for new VM instance.
2008
2009 Args:
2010 name (str): Name of VM instance
2011 disk (dict): Disk details
2012 vm_av_zone (list): List of availability zones
2013 block_device_mapping (dict): Block device details
2014 base_disk_index (int): Disk index
2015 existing_vim_volumes (list): Existing disk details
2016 created_items (dict): All created items belongs to VM
2017 """
2018 # Non-root persistent volumes
2019 # Disk may include only vim_volume_id or only vim_id."
2020 key_id = "vim_volume_id" if "vim_volume_id" in disk.keys() else "vim_id"
2021
2022 if disk.get(key_id):
2023
2024 # Use existing persistent volume
2025 block_device_mapping["vd" + chr(base_disk_index)] = disk[key_id]
2026 existing_vim_volumes.append({"id": disk[key_id]})
2027
2028 else:
2029 # Create persistent volume
2030 volume = self.cinder.volumes.create(
2031 size=disk["size"],
2032 name=name + "vd" + chr(base_disk_index),
2033 # Make sure volume is in the same AZ as the VM to be attached to
2034 availability_zone=vm_av_zone,
2035 )
2036 self.update_block_device_mapping(
2037 volume=volume,
2038 block_device_mapping=block_device_mapping,
2039 base_disk_index=base_disk_index,
2040 disk=disk,
2041 created_items=created_items,
2042 )
2043
2044 def _wait_for_created_volumes_availability(
2045 self, elapsed_time: int, created_items: dict
2046 ) -> Optional[int]:
2047 """Wait till created volumes become available.
2048
2049 Args:
2050 elapsed_time (int): Passed time while waiting
2051 created_items (dict): All created items belongs to VM
2052
2053 Returns:
2054 elapsed_time (int): Time spent while waiting
2055
2056 """
2057
2058 while elapsed_time < volume_timeout:
2059 for created_item in created_items:
2060 v, volume_id = (
2061 created_item.split(":")[0],
2062 created_item.split(":")[1],
2063 )
2064 if v == "volume":
2065 if self.cinder.volumes.get(volume_id).status != "available":
2066 break
2067 else:
2068 # All ready: break from while
2069 break
2070
2071 time.sleep(5)
2072 elapsed_time += 5
2073
2074 return elapsed_time
2075
2076 def _wait_for_existing_volumes_availability(
2077 self, elapsed_time: int, existing_vim_volumes: list
2078 ) -> Optional[int]:
2079 """Wait till existing volumes become available.
2080
2081 Args:
2082 elapsed_time (int): Passed time while waiting
2083 existing_vim_volumes (list): Existing volume details
2084
2085 Returns:
2086 elapsed_time (int): Time spent while waiting
2087
2088 """
2089
2090 while elapsed_time < volume_timeout:
2091 for volume in existing_vim_volumes:
2092 if self.cinder.volumes.get(volume["id"]).status != "available":
2093 break
2094 else: # all ready: break from while
2095 break
2096
2097 time.sleep(5)
2098 elapsed_time += 5
2099
2100 return elapsed_time
2101
2102 def _prepare_disk_for_vminstance(
2103 self,
2104 name: str,
2105 existing_vim_volumes: list,
2106 created_items: dict,
2107 vm_av_zone: list,
2108 block_device_mapping: dict,
2109 disk_list: list = None,
2110 ) -> None:
2111 """Prepare all volumes for new VM instance.
2112
2113 Args:
2114 name (str): Name of Instance
2115 existing_vim_volumes (list): List of existing volumes
2116 created_items (dict): All created items belongs to VM
2117 vm_av_zone (list): VM availability zone
2118 block_device_mapping (dict): Block devices to be attached to VM
2119 disk_list (list): List of disks
2120
2121 """
2122 # Create additional volumes in case these are present in disk_list
2123 base_disk_index = ord("b")
2124 boot_volume_id = None
2125 elapsed_time = 0
2126
2127 for disk in disk_list:
2128 if "image_id" in disk:
2129 # Root persistent volume
2130 base_disk_index = ord("a")
2131 boot_volume_id = self._prepare_persistent_root_volumes(
2132 name=name,
2133 vm_av_zone=vm_av_zone,
2134 disk=disk,
2135 base_disk_index=base_disk_index,
2136 block_device_mapping=block_device_mapping,
2137 existing_vim_volumes=existing_vim_volumes,
2138 created_items=created_items,
2139 )
2140 else:
2141 # Non-root persistent volume
2142 self._prepare_non_root_persistent_volumes(
2143 name=name,
2144 disk=disk,
2145 vm_av_zone=vm_av_zone,
2146 block_device_mapping=block_device_mapping,
2147 base_disk_index=base_disk_index,
2148 existing_vim_volumes=existing_vim_volumes,
2149 created_items=created_items,
2150 )
2151 base_disk_index += 1
2152
2153 # Wait until created volumes are with status available
2154 elapsed_time = self._wait_for_created_volumes_availability(
2155 elapsed_time, created_items
2156 )
2157 # Wait until existing volumes in vim are with status available
2158 elapsed_time = self._wait_for_existing_volumes_availability(
2159 elapsed_time, existing_vim_volumes
2160 )
2161 # If we exceeded the timeout rollback
2162 if elapsed_time >= volume_timeout:
2163 raise vimconn.VimConnException(
2164 "Timeout creating volumes for instance " + name,
2165 http_code=vimconn.HTTP_Request_Timeout,
2166 )
2167 if boot_volume_id:
2168 self.cinder.volumes.set_bootable(boot_volume_id, True)
2169
2170 def _find_the_external_network_for_floating_ip(self):
2171 """Get the external network ip in order to create floating IP.
2172
2173 Returns:
2174 pool_id (str): External network pool ID
2175
2176 """
2177
2178 # Find the external network
2179 external_nets = list()
2180
2181 for net in self.neutron.list_networks()["networks"]:
2182 if net["router:external"]:
2183 external_nets.append(net)
2184
2185 if len(external_nets) == 0:
2186 raise vimconn.VimConnException(
2187 "Cannot create floating_ip automatically since "
2188 "no external network is present",
2189 http_code=vimconn.HTTP_Conflict,
2190 )
2191
2192 if len(external_nets) > 1:
2193 raise vimconn.VimConnException(
2194 "Cannot create floating_ip automatically since "
2195 "multiple external networks are present",
2196 http_code=vimconn.HTTP_Conflict,
2197 )
2198
2199 # Pool ID
2200 return external_nets[0].get("id")
2201
2202 def _neutron_create_float_ip(self, param: dict, created_items: dict) -> None:
2203 """Trigger neutron to create a new floating IP using external network ID.
2204
2205 Args:
2206 param (dict): Input parameters to create a floating IP
2207 created_items (dict): All created items belongs to new VM instance
2208
2209 Raises:
2210
2211 VimConnException
2212 """
2213 try:
2214 self.logger.debug("Creating floating IP")
2215 new_floating_ip = self.neutron.create_floatingip(param)
2216 free_floating_ip = new_floating_ip["floatingip"]["id"]
2217 created_items["floating_ip:" + str(free_floating_ip)] = True
2218
2219 except Exception as e:
2220 raise vimconn.VimConnException(
2221 type(e).__name__ + ": Cannot create new floating_ip " + str(e),
2222 http_code=vimconn.HTTP_Conflict,
2223 )
2224
2225 def _create_floating_ip(
2226 self, floating_network: dict, server: object, created_items: dict
2227 ) -> None:
2228 """Get the available Pool ID and create a new floating IP.
2229
2230 Args:
2231 floating_network (dict): Dict including external network ID
2232 server (object): Server object
2233 created_items (dict): All created items belongs to new VM instance
2234
2235 """
2236
2237 # Pool_id is available
2238 if (
2239 isinstance(floating_network["floating_ip"], str)
2240 and floating_network["floating_ip"].lower() != "true"
2241 ):
2242 pool_id = floating_network["floating_ip"]
2243
2244 # Find the Pool_id
2245 else:
2246 pool_id = self._find_the_external_network_for_floating_ip()
2247
2248 param = {
2249 "floatingip": {
2250 "floating_network_id": pool_id,
2251 "tenant_id": server.tenant_id,
2252 }
2253 }
2254
2255 self._neutron_create_float_ip(param, created_items)
2256
2257 def _find_floating_ip(
2258 self,
2259 server: object,
2260 floating_ips: list,
2261 floating_network: dict,
2262 ) -> Optional[str]:
2263 """Find the available free floating IPs if there are.
2264
2265 Args:
2266 server (object): Server object
2267 floating_ips (list): List of floating IPs
2268 floating_network (dict): Details of floating network such as ID
2269
2270 Returns:
2271 free_floating_ip (str): Free floating ip address
2272
2273 """
2274 for fip in floating_ips:
2275 if fip.get("port_id") or fip.get("tenant_id") != server.tenant_id:
2276 continue
2277
2278 if isinstance(floating_network["floating_ip"], str):
2279 if fip.get("floating_network_id") != floating_network["floating_ip"]:
2280 continue
2281
2282 return fip["id"]
2283
2284 def _assign_floating_ip(
2285 self, free_floating_ip: str, floating_network: dict
2286 ) -> Dict:
2287 """Assign the free floating ip address to port.
2288
2289 Args:
2290 free_floating_ip (str): Floating IP to be assigned
2291 floating_network (dict): ID of floating network
2292
2293 Returns:
2294 fip (dict) (dict): Floating ip details
2295
2296 """
2297 # The vim_id key contains the neutron.port_id
2298 self.neutron.update_floatingip(
2299 free_floating_ip,
2300 {"floatingip": {"port_id": floating_network["vim_id"]}},
2301 )
2302 # For race condition ensure not re-assigned to other VM after 5 seconds
2303 time.sleep(5)
2304
2305 return self.neutron.show_floatingip(free_floating_ip)
2306
2307 def _get_free_floating_ip(
2308 self, server: object, floating_network: dict, created_items: dict
2309 ) -> Optional[str]:
2310 """Get the free floating IP address.
2311
2312 Args:
2313 server (object): Server Object
2314 floating_network (dict): Floating network details
2315 created_items (dict): All created items belongs to new VM instance
2316
2317 Returns:
2318 free_floating_ip (str): Free floating ip addr
2319
2320 """
2321
2322 floating_ips = self.neutron.list_floatingips().get("floatingips", ())
2323
2324 # Randomize
2325 random.shuffle(floating_ips)
2326
2327 return self._find_floating_ip(
2328 server, floating_ips, floating_network, created_items
2329 )
2330
    def _prepare_external_network_for_vminstance(
        self,
        external_network: list,
        server: object,
        created_items: dict,
        vm_start_time: float,
    ) -> None:
        """Assign a floating IP address to the VM instance for each requested
        external network.

        Args:
            external_network (list): Floating-network descriptors, each with at
                least "floating_ip", "vim_id" and "exit_on_floating_ip_error"
            server (object): Server Object
            created_items (dict): All created items belongs to new VM instance
            vm_start_time (float): Time as a floating point number expressed in seconds since the epoch, in UTC

        Raises:
            VimConnException

        """
        for floating_network in external_network:
            try:
                assigned = False
                floating_ip_retries = 3
                # In case of RO in HA there can be conflicts, two RO trying to assign same floating IP, so retry
                # several times
                while not assigned:

                    free_floating_ip = self._get_free_floating_ip(
                        server, floating_network, created_items
                    )

                    if not free_floating_ip:
                        self._create_floating_ip(
                            floating_network, server, created_items
                        )

                    # NOTE(review): when a floating IP was just created above,
                    # free_floating_ip is still None here, so the
                    # show_floatingip(None) call below raises; the except
                    # branch then decrements the retry counter and re-enters
                    # the while loop, which picks up the new IP on the next
                    # pass — confirm this indirect flow is intentional.
                    try:
                        # For race condition ensure not already assigned
                        fip = self.neutron.show_floatingip(free_floating_ip)

                        if fip["floatingip"].get("port_id"):
                            continue

                        # Assign floating ip
                        fip = self._assign_floating_ip(
                            free_floating_ip, floating_network
                        )

                        # Re-check after the settle delay: another RO may have
                        # grabbed the same IP in the meantime.
                        if fip["floatingip"]["port_id"] != floating_network["vim_id"]:
                            self.logger.warning(
                                "floating_ip {} re-assigned to other port".format(
                                    free_floating_ip
                                )
                            )
                            continue

                        self.logger.debug(
                            "Assigned floating_ip {} to VM {}".format(
                                free_floating_ip, server.id
                            )
                        )

                        assigned = True

                    except Exception as e:
                        # Openstack need some time after VM creation to assign an IP. So retry if fails
                        vm_status = self.nova.servers.get(server.id).status

                        if vm_status not in ("ACTIVE", "ERROR"):
                            # VM still building: wait (bounded by
                            # server_timeout) and retry without consuming a
                            # floating-ip retry.
                            if time.time() - vm_start_time < server_timeout:
                                time.sleep(5)
                                continue
                        elif floating_ip_retries > 0:
                            floating_ip_retries -= 1
                            continue

                        raise vimconn.VimConnException(
                            "Cannot create floating_ip: {} {}".format(
                                type(e).__name__, e
                            ),
                            http_code=vimconn.HTTP_Conflict,
                        )

            except Exception as e:
                # Per-network policy: either tolerate the failure and move on
                # to the next network, or abort the whole instance creation.
                if not floating_network["exit_on_floating_ip_error"]:
                    self.logger.error("Cannot create floating_ip. %s", str(e))
                    continue

                raise
2420
2421 def _update_port_security_for_vminstance(
2422 self,
2423 no_secured_ports: list,
2424 server: object,
2425 ) -> None:
2426 """Updates the port security according to no_secured_ports list.
2427
2428 Args:
2429 no_secured_ports (list): List of ports that security will be disabled
2430 server (object): Server Object
2431
2432 Raises:
2433 VimConnException
2434
2435 """
2436 # Wait until the VM is active and then disable the port-security
2437 if no_secured_ports:
2438 self.__wait_for_vm(server.id, "ACTIVE")
2439
2440 for port in no_secured_ports:
2441 port_update = {
2442 "port": {"port_security_enabled": False, "security_groups": None}
2443 }
2444
2445 if port[1] == "allow-address-pairs":
2446 port_update = {
2447 "port": {"allowed_address_pairs": [{"ip_address": "0.0.0.0/0"}]}
2448 }
2449
2450 try:
2451 self.neutron.update_port(port[0], port_update)
2452
2453 except Exception:
2454
2455 raise vimconn.VimConnException(
2456 "It was not possible to disable port security for port {}".format(
2457 port[0]
2458 )
2459 )
2460
    def new_vminstance(
        self,
        name: str,
        description: str,
        start: bool,
        image_id: str,
        flavor_id: str,
        affinity_group_list: list,
        net_list: list,
        cloud_config=None,
        disk_list=None,
        availability_zone_index=None,
        availability_zone_list=None,
    ) -> tuple:
        """Adds a VM instance to VIM.

        Args:
            name (str): name of VM
            description (str): description
            start (bool): indicates if VM must start or boot in pause mode. Ignored
            image_id (str) image uuid
            flavor_id (str) flavor uuid
            affinity_group_list (list): list of affinity groups, each one is a dictionary. Ignore if empty.
            net_list (list): list of interfaces, each one is a dictionary with:
                name: name of network
                net_id: network uuid to connect
                vpci: virtual vcpi to assign, ignored because openstack lack #TODO
                model: interface model, ignored #TODO
                mac_address: used for SR-IOV ifaces #TODO for other types
                use: 'data', 'bridge', 'mgmt'
                type: 'virtual', 'PCI-PASSTHROUGH'('PF'), 'SR-IOV'('VF'), 'VFnotShared'
                vim_id: filled/added by this function
                floating_ip: True/False (or it can be None)
                port_security: True/False
            cloud_config (dict): (optional) dictionary with:
                key-pairs: (optional) list of strings with the public key to be inserted to the default user
                users: (optional) list of users to be inserted, each item is a dict with:
                    name: (mandatory) user name,
                    key-pairs: (optional) list of strings with the public key to be inserted to the user
                user-data: (optional) string is a text script to be passed directly to cloud-init
                config-files: (optional). List of files to be transferred. Each item is a dict with:
                    dest: (mandatory) string with the destination absolute path
                    encoding: (optional, by default text). Can be one of:
                        'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
                    content : (mandatory) string with the content of the file
                    permissions: (optional) string with file permissions, typically octal notation '0644'
                    owner: (optional) file owner, string with the format 'owner:group'
                boot-data-drive: boolean to indicate if user-data must be passed using a boot drive (hard disk)
            disk_list: (optional) list with additional disks to the VM. Each item is a dict with:
                image_id: (optional). VIM id of an existing image. If not provided an empty disk must be mounted
                size: (mandatory) string with the size of the disk in GB
                vim_id: (optional) should use this existing volume id
            availability_zone_index: Index of availability_zone_list to use for this this VM. None if not AV required
            availability_zone_list: list of availability zones given by user in the VNFD descriptor. Ignore if
                availability_zone_index is None
                #TODO ip, security groups

        Returns:
            A tuple with the instance identifier and created_items or raises an exception on error
            created_items can be None or a dictionary where this method can include key-values that will be passed to
            the method delete_vminstance and action_vminstance. Can be used to store created ports, volumes, etc.
            Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
            as not present.

        """
        self.logger.debug(
            "new_vminstance input: image='%s' flavor='%s' nics='%s'",
            image_id,
            flavor_id,
            str(net_list),
        )

        try:
            server = None
            created_items = {}
            net_list_vim = []
            # list of external networks to be connected to instance, later on used to create floating_ip
            external_network = []
            # List of ports with port-security disabled
            no_secured_ports = []
            block_device_mapping = {}
            existing_vim_volumes = []
            server_group_id = None
            # NOTE(review): "scheduller" is a misspelling of "scheduler", but
            # it is a local-only name; the nova kwarg below is spelled right.
            scheduller_hints = {}

            # Check the Openstack Connection
            self._reload_connection()

            # Prepare network list
            self._prepare_network_for_vminstance(
                name=name,
                net_list=net_list,
                created_items=created_items,
                net_list_vim=net_list_vim,
                external_network=external_network,
                no_secured_ports=no_secured_ports,
            )

            # Cloud config
            config_drive, userdata = self._create_user_data(cloud_config)

            # Get availability Zone
            vm_av_zone = self._get_vm_availability_zone(
                availability_zone_index, availability_zone_list
            )

            if disk_list:
                # Prepare disks
                self._prepare_disk_for_vminstance(
                    name=name,
                    existing_vim_volumes=existing_vim_volumes,
                    created_items=created_items,
                    vm_av_zone=vm_av_zone,
                    block_device_mapping=block_device_mapping,
                    disk_list=disk_list,
                )

            if affinity_group_list:
                # Only first id on the list will be used. Openstack restriction
                server_group_id = affinity_group_list[0]["affinity_group_id"]
                scheduller_hints["group"] = server_group_id

            self.logger.debug(
                "nova.servers.create({}, {}, {}, nics={}, security_groups={}, "
                "availability_zone={}, key_name={}, userdata={}, config_drive={}, "
                "block_device_mapping={}, server_group={})".format(
                    name,
                    image_id,
                    flavor_id,
                    net_list_vim,
                    self.config.get("security_groups"),
                    vm_av_zone,
                    self.config.get("keypair"),
                    userdata,
                    config_drive,
                    block_device_mapping,
                    server_group_id,
                )
            )

            # Create VM
            server = self.nova.servers.create(
                name=name,
                image=image_id,
                flavor=flavor_id,
                nics=net_list_vim,
                security_groups=self.config.get("security_groups"),
                # TODO remove security_groups in future versions. Already at neutron port
                availability_zone=vm_av_zone,
                key_name=self.config.get("keypair"),
                userdata=userdata,
                config_drive=config_drive,
                block_device_mapping=block_device_mapping,
                scheduler_hints=scheduller_hints,
            )

            vm_start_time = time.time()

            # Disable port security where requested (waits for VM ACTIVE).
            self._update_port_security_for_vminstance(no_secured_ports, server)

            # Assign floating IPs for the external networks.
            self._prepare_external_network_for_vminstance(
                external_network=external_network,
                server=server,
                created_items=created_items,
                vm_start_time=vm_start_time,
            )

            return server.id, created_items

        except Exception as e:
            server_id = None
            if server:
                server_id = server.id

            # Rollback: delete the half-created instance and everything
            # recorded in created_items; the ":keep" flags are stripped so
            # even "keep" volumes are removed on a failed creation.
            try:
                created_items = self.remove_keep_tag_from_persistent_volumes(
                    created_items
                )

                self.delete_vminstance(server_id, created_items)

            except Exception as e2:
                self.logger.error("new_vminstance rollback fail {}".format(e2))

            self._format_exception(e)
2646
2647 @staticmethod
2648 def remove_keep_tag_from_persistent_volumes(created_items: Dict) -> Dict:
2649 """Removes the keep flag from persistent volumes. So, those volumes could be removed.
2650
2651 Args:
2652 created_items (dict): All created items belongs to VM
2653
2654 Returns:
2655 updated_created_items (dict): Dict which does not include keep flag for volumes.
2656
2657 """
2658 return {
2659 key.replace(":keep", ""): value for (key, value) in created_items.items()
2660 }
2661
2662 def get_vminstance(self, vm_id):
2663 """Returns the VM instance information from VIM"""
2664 # self.logger.debug("Getting VM from VIM")
2665 try:
2666 self._reload_connection()
2667 server = self.nova.servers.find(id=vm_id)
2668 # TODO parse input and translate to VIM format (openmano_schemas.new_vminstance_response_schema)
2669
2670 return server.to_dict()
2671 except (
2672 ksExceptions.ClientException,
2673 nvExceptions.ClientException,
2674 nvExceptions.NotFound,
2675 ConnectionError,
2676 ) as e:
2677 self._format_exception(e)
2678
2679 def get_vminstance_console(self, vm_id, console_type="vnc"):
2680 """
2681 Get a console for the virtual machine
2682 Params:
2683 vm_id: uuid of the VM
2684 console_type, can be:
2685 "novnc" (by default), "xvpvnc" for VNC types,
2686 "rdp-html5" for RDP types, "spice-html5" for SPICE types
2687 Returns dict with the console parameters:
2688 protocol: ssh, ftp, http, https, ...
2689 server: usually ip address
2690 port: the http, ssh, ... port
2691 suffix: extra text, e.g. the http path and query string
2692 """
2693 self.logger.debug("Getting VM CONSOLE from VIM")
2694
2695 try:
2696 self._reload_connection()
2697 server = self.nova.servers.find(id=vm_id)
2698
2699 if console_type is None or console_type == "novnc":
2700 console_dict = server.get_vnc_console("novnc")
2701 elif console_type == "xvpvnc":
2702 console_dict = server.get_vnc_console(console_type)
2703 elif console_type == "rdp-html5":
2704 console_dict = server.get_rdp_console(console_type)
2705 elif console_type == "spice-html5":
2706 console_dict = server.get_spice_console(console_type)
2707 else:
2708 raise vimconn.VimConnException(
2709 "console type '{}' not allowed".format(console_type),
2710 http_code=vimconn.HTTP_Bad_Request,
2711 )
2712
2713 console_dict1 = console_dict.get("console")
2714
2715 if console_dict1:
2716 console_url = console_dict1.get("url")
2717
2718 if console_url:
2719 # parse console_url
2720 protocol_index = console_url.find("//")
2721 suffix_index = (
2722 console_url[protocol_index + 2 :].find("/") + protocol_index + 2
2723 )
2724 port_index = (
2725 console_url[protocol_index + 2 : suffix_index].find(":")
2726 + protocol_index
2727 + 2
2728 )
2729
2730 if protocol_index < 0 or port_index < 0 or suffix_index < 0:
2731 return (
2732 -vimconn.HTTP_Internal_Server_Error,
2733 "Unexpected response from VIM",
2734 )
2735
2736 console_dict = {
2737 "protocol": console_url[0:protocol_index],
2738 "server": console_url[protocol_index + 2 : port_index],
2739 "port": console_url[port_index:suffix_index],
2740 "suffix": console_url[suffix_index + 1 :],
2741 }
2742 protocol_index += 2
2743
2744 return console_dict
2745 raise vimconn.VimConnUnexpectedResponse("Unexpected response from VIM")
2746 except (
2747 nvExceptions.NotFound,
2748 ksExceptions.ClientException,
2749 nvExceptions.ClientException,
2750 nvExceptions.BadRequest,
2751 ConnectionError,
2752 ) as e:
2753 self._format_exception(e)
2754
2755 def _delete_ports_by_id_wth_neutron(self, k_id: str) -> None:
2756 """Neutron delete ports by id.
2757 Args:
2758 k_id (str): Port id in the VIM
2759 """
2760 try:
2761
2762 port_dict = self.neutron.list_ports()
2763 existing_ports = [port["id"] for port in port_dict["ports"] if port_dict]
2764
2765 if k_id in existing_ports:
2766 self.neutron.delete_port(k_id)
2767
2768 except Exception as e:
2769
2770 self.logger.error("Error deleting port: {}: {}".format(type(e).__name__, e))
2771
2772 def _delete_volumes_by_id_wth_cinder(
2773 self, k: str, k_id: str, volumes_to_hold: list, created_items: dict
2774 ) -> bool:
2775 """Cinder delete volume by id.
2776 Args:
2777 k (str): Full item name in created_items
2778 k_id (str): ID of floating ip in VIM
2779 volumes_to_hold (list): Volumes not to delete
2780 created_items (dict): All created items belongs to VM
2781 """
2782 try:
2783 if k_id in volumes_to_hold:
2784 return
2785
2786 if self.cinder.volumes.get(k_id).status != "available":
2787 return True
2788
2789 else:
2790 self.cinder.volumes.delete(k_id)
2791 created_items[k] = None
2792
2793 except Exception as e:
2794 self.logger.error(
2795 "Error deleting volume: {}: {}".format(type(e).__name__, e)
2796 )
2797
2798 def _delete_floating_ip_by_id(self, k: str, k_id: str, created_items: dict) -> None:
2799 """Neutron delete floating ip by id.
2800 Args:
2801 k (str): Full item name in created_items
2802 k_id (str): ID of floating ip in VIM
2803 created_items (dict): All created items belongs to VM
2804 """
2805 try:
2806 self.neutron.delete_floatingip(k_id)
2807 created_items[k] = None
2808
2809 except Exception as e:
2810 self.logger.error(
2811 "Error deleting floating ip: {}: {}".format(type(e).__name__, e)
2812 )
2813
2814 @staticmethod
2815 def _get_item_name_id(k: str) -> Tuple[str, str]:
2816 k_item, _, k_id = k.partition(":")
2817 return k_item, k_id
2818
2819 def _delete_vm_ports_attached_to_network(self, created_items: dict) -> None:
2820 """Delete VM ports attached to the networks before deleting virtual machine.
2821 Args:
2822 created_items (dict): All created items belongs to VM
2823 """
2824
2825 for k, v in created_items.items():
2826 if not v: # skip already deleted
2827 continue
2828
2829 try:
2830 k_item, k_id = self._get_item_name_id(k)
2831 if k_item == "port":
2832 self._delete_ports_by_id_wth_neutron(k_id)
2833
2834 except Exception as e:
2835 self.logger.error(
2836 "Error deleting port: {}: {}".format(type(e).__name__, e)
2837 )
2838
2839 def _delete_created_items(
2840 self, created_items: dict, volumes_to_hold: list, keep_waiting: bool
2841 ) -> bool:
2842 """Delete Volumes and floating ip if they exist in created_items."""
2843 for k, v in created_items.items():
2844 if not v: # skip already deleted
2845 continue
2846
2847 try:
2848 k_item, k_id = self._get_item_name_id(k)
2849
2850 if k_item == "volume":
2851
2852 unavailable_vol = self._delete_volumes_by_id_wth_cinder(
2853 k, k_id, volumes_to_hold, created_items
2854 )
2855
2856 if unavailable_vol:
2857 keep_waiting = True
2858
2859 elif k_item == "floating_ip":
2860
2861 self._delete_floating_ip_by_id(k, k_id, created_items)
2862
2863 except Exception as e:
2864 self.logger.error("Error deleting {}: {}".format(k, e))
2865
2866 return keep_waiting
2867
2868 @staticmethod
2869 def _extract_items_wth_keep_flag_from_created_items(created_items: dict) -> dict:
2870 """Remove the volumes which has key flag from created_items
2871
2872 Args:
2873 created_items (dict): All created items belongs to VM
2874
2875 Returns:
2876 created_items (dict): Persistent volumes eliminated created_items
2877 """
2878 return {
2879 key: value
2880 for (key, value) in created_items.items()
2881 if len(key.split(":")) == 2
2882 }
2883
2884 def delete_vminstance(
2885 self, vm_id: str, created_items: dict = None, volumes_to_hold: list = None
2886 ) -> None:
2887 """Removes a VM instance from VIM. Returns the old identifier.
2888 Args:
2889 vm_id (str): Identifier of VM instance
2890 created_items (dict): All created items belongs to VM
2891 volumes_to_hold (list): Volumes_to_hold
2892 """
2893 if created_items is None:
2894 created_items = {}
2895 if volumes_to_hold is None:
2896 volumes_to_hold = []
2897
2898 try:
2899 created_items = self._extract_items_wth_keep_flag_from_created_items(
2900 created_items
2901 )
2902
2903 self._reload_connection()
2904
2905 # Delete VM ports attached to the networks before the virtual machine
2906 if created_items:
2907 self._delete_vm_ports_attached_to_network(created_items)
2908
2909 if vm_id:
2910 self.nova.servers.delete(vm_id)
2911
2912 # Although having detached, volumes should have in active status before deleting.
2913 # We ensure in this loop
2914 keep_waiting = True
2915 elapsed_time = 0
2916
2917 while keep_waiting and elapsed_time < volume_timeout:
2918 keep_waiting = False
2919
2920 # Delete volumes and floating IP.
2921 keep_waiting = self._delete_created_items(
2922 created_items, volumes_to_hold, keep_waiting
2923 )
2924
2925 if keep_waiting:
2926 time.sleep(1)
2927 elapsed_time += 1
2928
2929 except (
2930 nvExceptions.NotFound,
2931 ksExceptions.ClientException,
2932 nvExceptions.ClientException,
2933 ConnectionError,
2934 ) as e:
2935 self._format_exception(e)
2936
2937 def refresh_vms_status(self, vm_list):
2938 """Get the status of the virtual machines and their interfaces/ports
2939 Params: the list of VM identifiers
2940 Returns a dictionary with:
2941 vm_id: #VIM id of this Virtual Machine
2942 status: #Mandatory. Text with one of:
2943 # DELETED (not found at vim)
2944 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
2945 # OTHER (Vim reported other status not understood)
2946 # ERROR (VIM indicates an ERROR status)
2947 # ACTIVE, PAUSED, SUSPENDED, INACTIVE (not running),
2948 # CREATING (on building process), ERROR
2949 # ACTIVE:NoMgmtIP (Active but any of its interface has an IP address
2950 #
2951 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
2952 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
2953 interfaces:
2954 - vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
2955 mac_address: #Text format XX:XX:XX:XX:XX:XX
2956 vim_net_id: #network id where this interface is connected
2957 vim_interface_id: #interface/port VIM id
2958 ip_address: #null, or text with IPv4, IPv6 address
2959 compute_node: #identification of compute node where PF,VF interface is allocated
2960 pci: #PCI address of the NIC that hosts the PF,VF
2961 vlan: #physical VLAN used for VF
2962 """
2963 vm_dict = {}
2964 self.logger.debug(
2965 "refresh_vms status: Getting tenant VM instance information from VIM"
2966 )
2967
2968 for vm_id in vm_list:
2969 vm = {}
2970
2971 try:
2972 vm_vim = self.get_vminstance(vm_id)
2973
2974 if vm_vim["status"] in vmStatus2manoFormat:
2975 vm["status"] = vmStatus2manoFormat[vm_vim["status"]]
2976 else:
2977 vm["status"] = "OTHER"
2978 vm["error_msg"] = "VIM status reported " + vm_vim["status"]
2979
2980 vm_vim.pop("OS-EXT-SRV-ATTR:user_data", None)
2981 vm_vim.pop("user_data", None)
2982 vm["vim_info"] = self.serialize(vm_vim)
2983
2984 vm["interfaces"] = []
2985 if vm_vim.get("fault"):
2986 vm["error_msg"] = str(vm_vim["fault"])
2987
2988 # get interfaces
2989 try:
2990 self._reload_connection()
2991 port_dict = self.neutron.list_ports(device_id=vm_id)
2992
2993 for port in port_dict["ports"]:
2994 interface = {}
2995 interface["vim_info"] = self.serialize(port)
2996 interface["mac_address"] = port.get("mac_address")
2997 interface["vim_net_id"] = port["network_id"]
2998 interface["vim_interface_id"] = port["id"]
2999 # check if OS-EXT-SRV-ATTR:host is there,
3000 # in case of non-admin credentials, it will be missing
3001
3002 if vm_vim.get("OS-EXT-SRV-ATTR:host"):
3003 interface["compute_node"] = vm_vim["OS-EXT-SRV-ATTR:host"]
3004
3005 interface["pci"] = None
3006
3007 # check if binding:profile is there,
3008 # in case of non-admin credentials, it will be missing
3009 if port.get("binding:profile"):
3010 if port["binding:profile"].get("pci_slot"):
3011 # TODO: At the moment sr-iov pci addresses are converted to PF pci addresses by setting
3012 # the slot to 0x00
3013 # TODO: This is just a workaround valid for niantinc. Find a better way to do so
3014 # CHANGE DDDD:BB:SS.F to DDDD:BB:00.(F%2) assuming there are 2 ports per nic
3015 pci = port["binding:profile"]["pci_slot"]
3016 # interface["pci"] = pci[:-4] + "00." + str(int(pci[-1]) % 2)
3017 interface["pci"] = pci
3018
3019 interface["vlan"] = None
3020
3021 if port.get("binding:vif_details"):
3022 interface["vlan"] = port["binding:vif_details"].get("vlan")
3023
3024 # Get vlan from network in case not present in port for those old openstacks and cases where
3025 # it is needed vlan at PT
3026 if not interface["vlan"]:
3027 # if network is of type vlan and port is of type direct (sr-iov) then set vlan id
3028 network = self.neutron.show_network(port["network_id"])
3029
3030 if (
3031 network["network"].get("provider:network_type")
3032 == "vlan"
3033 ):
3034 # and port.get("binding:vnic_type") in ("direct", "direct-physical"):
3035 interface["vlan"] = network["network"].get(
3036 "provider:segmentation_id"
3037 )
3038
3039 ips = []
3040 # look for floating ip address
3041 try:
3042 floating_ip_dict = self.neutron.list_floatingips(
3043 port_id=port["id"]
3044 )
3045
3046 if floating_ip_dict.get("floatingips"):
3047 ips.append(
3048 floating_ip_dict["floatingips"][0].get(
3049 "floating_ip_address"
3050 )
3051 )
3052 except Exception:
3053 pass
3054
3055 for subnet in port["fixed_ips"]:
3056 ips.append(subnet["ip_address"])
3057
3058 interface["ip_address"] = ";".join(ips)
3059 vm["interfaces"].append(interface)
3060 except Exception as e:
3061 self.logger.error(
3062 "Error getting vm interface information {}: {}".format(
3063 type(e).__name__, e
3064 ),
3065 exc_info=True,
3066 )
3067 except vimconn.VimConnNotFoundException as e:
3068 self.logger.error("Exception getting vm status: %s", str(e))
3069 vm["status"] = "DELETED"
3070 vm["error_msg"] = str(e)
3071 except vimconn.VimConnException as e:
3072 self.logger.error("Exception getting vm status: %s", str(e))
3073 vm["status"] = "VIM_ERROR"
3074 vm["error_msg"] = str(e)
3075
3076 vm_dict[vm_id] = vm
3077
3078 return vm_dict
3079
    def action_vminstance(self, vm_id, action_dict, created_items={}):
        """Send an action over a VM instance to the VIM.

        :param vm_id: VIM id of the server instance
        :param action_dict: dict whose keys select the action (start, pause,
            resume, shutoff/shutdown, forceOff, terminate, createImage,
            rebuild, reboot, console); only the first matching key is applied
        :param created_items: not used by this method; kept for interface
            compatibility. NOTE(review): mutable default argument — benign
            here because it is never mutated, but confirm before reusing.
        :return: None, or for the "console" action a dict with keys
            protocol/server/port/suffix parsed from the console URL
        :raises vimconn.VimConnException: invalid state or console type, or
            mapped VIM errors via _format_exception
        """
        self.logger.debug("Action over VM '%s': %s", vm_id, str(action_dict))

        try:
            self._reload_connection()
            # find() raises nvExceptions.NotFound when the server is missing
            server = self.nova.servers.find(id=vm_id)

            if "start" in action_dict:
                if action_dict["start"] == "rebuild":
                    server.rebuild()
                else:
                    # choose the nova call that matches the current VM state
                    if server.status == "PAUSED":
                        server.unpause()
                    elif server.status == "SUSPENDED":
                        server.resume()
                    elif server.status == "SHUTOFF":
                        server.start()
                    else:
                        self.logger.debug(
                            "ERROR : Instance is not in SHUTOFF/PAUSE/SUSPEND state"
                        )
                        raise vimconn.VimConnException(
                            "Cannot 'start' instance while it is in active state",
                            http_code=vimconn.HTTP_Bad_Request,
                        )

            elif "pause" in action_dict:
                server.pause()
            elif "resume" in action_dict:
                server.resume()
            elif "shutoff" in action_dict or "shutdown" in action_dict:
                self.logger.debug("server status %s", server.status)
                if server.status == "ACTIVE":
                    server.stop()
                else:
                    self.logger.debug("ERROR: VM is not in Active state")
                    raise vimconn.VimConnException(
                        "VM is not in active state, stop operation is not allowed",
                        http_code=vimconn.HTTP_Bad_Request,
                    )
            elif "forceOff" in action_dict:
                server.stop()  # TODO
            elif "terminate" in action_dict:
                server.delete()
            elif "createImage" in action_dict:
                server.create_image()
                # "path":path_schema,
                # "description":description_schema,
                # "name":name_schema,
                # "metadata":metadata_schema,
                # "imageRef": id_schema,
                # "disk": {"oneOf":[{"type": "null"}, {"type":"string"}] },
            elif "rebuild" in action_dict:
                server.rebuild(server.image["id"])
            elif "reboot" in action_dict:
                server.reboot()  # reboot_type="SOFT"
            elif "console" in action_dict:
                console_type = action_dict["console"]

                # map the requested console type to the matching nova call
                if console_type is None or console_type == "novnc":
                    console_dict = server.get_vnc_console("novnc")
                elif console_type == "xvpvnc":
                    console_dict = server.get_vnc_console(console_type)
                elif console_type == "rdp-html5":
                    console_dict = server.get_rdp_console(console_type)
                elif console_type == "spice-html5":
                    console_dict = server.get_spice_console(console_type)
                else:
                    raise vimconn.VimConnException(
                        "console type '{}' not allowed".format(console_type),
                        http_code=vimconn.HTTP_Bad_Request,
                    )

                try:
                    console_url = console_dict["console"]["url"]
                    # parse console_url into protocol://server:port/suffix
                    protocol_index = console_url.find("//")
                    suffix_index = (
                        console_url[protocol_index + 2 :].find("/") + protocol_index + 2
                    )
                    port_index = (
                        console_url[protocol_index + 2 : suffix_index].find(":")
                        + protocol_index
                        + 2
                    )

                    if protocol_index < 0 or port_index < 0 or suffix_index < 0:
                        raise vimconn.VimConnException(
                            "Unexpected response from VIM " + str(console_dict)
                        )

                    console_dict2 = {
                        "protocol": console_url[0:protocol_index],
                        "server": console_url[protocol_index + 2 : port_index],
                        "port": int(console_url[port_index + 1 : suffix_index]),
                        "suffix": console_url[suffix_index + 1 :],
                    }

                    return console_dict2
                except Exception:
                    # any parse failure (missing keys, bad int) maps to VIM error
                    raise vimconn.VimConnException(
                        "Unexpected response from VIM " + str(console_dict)
                    )

            return None
        except (
            ksExceptions.ClientException,
            nvExceptions.ClientException,
            nvExceptions.NotFound,
            ConnectionError,
        ) as e:
            self._format_exception(e)
        # TODO insert exception vimconn.HTTP_Unauthorized
3195
3196 # ###### VIO Specific Changes #########
3197 def _generate_vlanID(self):
3198 """
3199 Method to get unused vlanID
3200 Args:
3201 None
3202 Returns:
3203 vlanID
3204 """
3205 # Get used VLAN IDs
3206 usedVlanIDs = []
3207 networks = self.get_network_list()
3208
3209 for net in networks:
3210 if net.get("provider:segmentation_id"):
3211 usedVlanIDs.append(net.get("provider:segmentation_id"))
3212
3213 used_vlanIDs = set(usedVlanIDs)
3214
3215 # find unused VLAN ID
3216 for vlanID_range in self.config.get("dataplane_net_vlan_range"):
3217 try:
3218 start_vlanid, end_vlanid = map(
3219 int, vlanID_range.replace(" ", "").split("-")
3220 )
3221
3222 for vlanID in range(start_vlanid, end_vlanid + 1):
3223 if vlanID not in used_vlanIDs:
3224 return vlanID
3225 except Exception as exp:
3226 raise vimconn.VimConnException(
3227 "Exception {} occurred while generating VLAN ID.".format(exp)
3228 )
3229 else:
3230 raise vimconn.VimConnConflictException(
3231 "Unable to create the SRIOV VLAN network. All given Vlan IDs {} are in use.".format(
3232 self.config.get("dataplane_net_vlan_range")
3233 )
3234 )
3235
3236 def _generate_multisegment_vlanID(self):
3237 """
3238 Method to get unused vlanID
3239 Args:
3240 None
3241 Returns:
3242 vlanID
3243 """
3244 # Get used VLAN IDs
3245 usedVlanIDs = []
3246 networks = self.get_network_list()
3247 for net in networks:
3248 if net.get("provider:network_type") == "vlan" and net.get(
3249 "provider:segmentation_id"
3250 ):
3251 usedVlanIDs.append(net.get("provider:segmentation_id"))
3252 elif net.get("segments"):
3253 for segment in net.get("segments"):
3254 if segment.get("provider:network_type") == "vlan" and segment.get(
3255 "provider:segmentation_id"
3256 ):
3257 usedVlanIDs.append(segment.get("provider:segmentation_id"))
3258
3259 used_vlanIDs = set(usedVlanIDs)
3260
3261 # find unused VLAN ID
3262 for vlanID_range in self.config.get("multisegment_vlan_range"):
3263 try:
3264 start_vlanid, end_vlanid = map(
3265 int, vlanID_range.replace(" ", "").split("-")
3266 )
3267
3268 for vlanID in range(start_vlanid, end_vlanid + 1):
3269 if vlanID not in used_vlanIDs:
3270 return vlanID
3271 except Exception as exp:
3272 raise vimconn.VimConnException(
3273 "Exception {} occurred while generating VLAN ID.".format(exp)
3274 )
3275 else:
3276 raise vimconn.VimConnConflictException(
3277 "Unable to create the VLAN segment. All VLAN IDs {} are in use.".format(
3278 self.config.get("multisegment_vlan_range")
3279 )
3280 )
3281
3282 def _validate_vlan_ranges(self, input_vlan_range, text_vlan_range):
3283 """
3284 Method to validate user given vlanID ranges
3285 Args: None
3286 Returns: None
3287 """
3288 for vlanID_range in input_vlan_range:
3289 vlan_range = vlanID_range.replace(" ", "")
3290 # validate format
3291 vlanID_pattern = r"(\d)*-(\d)*$"
3292 match_obj = re.match(vlanID_pattern, vlan_range)
3293 if not match_obj:
3294 raise vimconn.VimConnConflictException(
3295 "Invalid VLAN range for {}: {}.You must provide "
3296 "'{}' in format [start_ID - end_ID].".format(
3297 text_vlan_range, vlanID_range, text_vlan_range
3298 )
3299 )
3300
3301 start_vlanid, end_vlanid = map(int, vlan_range.split("-"))
3302 if start_vlanid <= 0:
3303 raise vimconn.VimConnConflictException(
3304 "Invalid VLAN range for {}: {}. Start ID can not be zero. For VLAN "
3305 "networks valid IDs are 1 to 4094 ".format(
3306 text_vlan_range, vlanID_range
3307 )
3308 )
3309
3310 if end_vlanid > 4094:
3311 raise vimconn.VimConnConflictException(
3312 "Invalid VLAN range for {}: {}. End VLAN ID can not be "
3313 "greater than 4094. For VLAN networks valid IDs are 1 to 4094 ".format(
3314 text_vlan_range, vlanID_range
3315 )
3316 )
3317
3318 if start_vlanid > end_vlanid:
3319 raise vimconn.VimConnConflictException(
3320 "Invalid VLAN range for {}: {}. You must provide '{}'"
3321 " in format start_ID - end_ID and start_ID < end_ID ".format(
3322 text_vlan_range, vlanID_range, text_vlan_range
3323 )
3324 )
3325
3326 # NOT USED FUNCTIONS
3327
3328 def new_external_port(self, port_data):
3329 """Adds a external port to VIM
3330 Returns the port identifier"""
3331 # TODO openstack if needed
3332 return (
3333 -vimconn.HTTP_Internal_Server_Error,
3334 "osconnector.new_external_port() not implemented",
3335 )
3336
3337 def connect_port_network(self, port_id, network_id, admin=False):
3338 """Connects a external port to a network
3339 Returns status code of the VIM response"""
3340 # TODO openstack if needed
3341 return (
3342 -vimconn.HTTP_Internal_Server_Error,
3343 "osconnector.connect_port_network() not implemented",
3344 )
3345
3346 def new_user(self, user_name, user_passwd, tenant_id=None):
3347 """Adds a new user to openstack VIM
3348 Returns the user identifier"""
3349 self.logger.debug("osconnector: Adding a new user to VIM")
3350
3351 try:
3352 self._reload_connection()
3353 user = self.keystone.users.create(
3354 user_name, password=user_passwd, default_project=tenant_id
3355 )
3356 # self.keystone.tenants.add_user(self.k_creds["username"], #role)
3357
3358 return user.id
3359 except ksExceptions.ConnectionError as e:
3360 error_value = -vimconn.HTTP_Bad_Request
3361 error_text = (
3362 type(e).__name__
3363 + ": "
3364 + (str(e) if len(e.args) == 0 else str(e.args[0]))
3365 )
3366 except ksExceptions.ClientException as e: # TODO remove
3367 error_value = -vimconn.HTTP_Bad_Request
3368 error_text = (
3369 type(e).__name__
3370 + ": "
3371 + (str(e) if len(e.args) == 0 else str(e.args[0]))
3372 )
3373
3374 # TODO insert exception vimconn.HTTP_Unauthorized
3375 # if reaching here is because an exception
3376 self.logger.debug("new_user " + error_text)
3377
3378 return error_value, error_text
3379
3380 def delete_user(self, user_id):
3381 """Delete a user from openstack VIM
3382 Returns the user identifier"""
3383 if self.debug:
3384 print("osconnector: Deleting a user from VIM")
3385
3386 try:
3387 self._reload_connection()
3388 self.keystone.users.delete(user_id)
3389
3390 return 1, user_id
3391 except ksExceptions.ConnectionError as e:
3392 error_value = -vimconn.HTTP_Bad_Request
3393 error_text = (
3394 type(e).__name__
3395 + ": "
3396 + (str(e) if len(e.args) == 0 else str(e.args[0]))
3397 )
3398 except ksExceptions.NotFound as e:
3399 error_value = -vimconn.HTTP_Not_Found
3400 error_text = (
3401 type(e).__name__
3402 + ": "
3403 + (str(e) if len(e.args) == 0 else str(e.args[0]))
3404 )
3405 except ksExceptions.ClientException as e: # TODO remove
3406 error_value = -vimconn.HTTP_Bad_Request
3407 error_text = (
3408 type(e).__name__
3409 + ": "
3410 + (str(e) if len(e.args) == 0 else str(e.args[0]))
3411 )
3412
3413 # TODO insert exception vimconn.HTTP_Unauthorized
3414 # if reaching here is because an exception
3415 self.logger.debug("delete_tenant " + error_text)
3416
3417 return error_value, error_text
3418
3419 def get_hosts_info(self):
3420 """Get the information of deployed hosts
3421 Returns the hosts content"""
3422 if self.debug:
3423 print("osconnector: Getting Host info from VIM")
3424
3425 try:
3426 h_list = []
3427 self._reload_connection()
3428 hypervisors = self.nova.hypervisors.list()
3429
3430 for hype in hypervisors:
3431 h_list.append(hype.to_dict())
3432
3433 return 1, {"hosts": h_list}
3434 except nvExceptions.NotFound as e:
3435 error_value = -vimconn.HTTP_Not_Found
3436 error_text = str(e) if len(e.args) == 0 else str(e.args[0])
3437 except (ksExceptions.ClientException, nvExceptions.ClientException) as e:
3438 error_value = -vimconn.HTTP_Bad_Request
3439 error_text = (
3440 type(e).__name__
3441 + ": "
3442 + (str(e) if len(e.args) == 0 else str(e.args[0]))
3443 )
3444
3445 # TODO insert exception vimconn.HTTP_Unauthorized
3446 # if reaching here is because an exception
3447 self.logger.debug("get_hosts_info " + error_text)
3448
3449 return error_value, error_text
3450
3451 def get_hosts(self, vim_tenant):
3452 """Get the hosts and deployed instances
3453 Returns the hosts content"""
3454 r, hype_dict = self.get_hosts_info()
3455
3456 if r < 0:
3457 return r, hype_dict
3458
3459 hypervisors = hype_dict["hosts"]
3460
3461 try:
3462 servers = self.nova.servers.list()
3463 for hype in hypervisors:
3464 for server in servers:
3465 if (
3466 server.to_dict()["OS-EXT-SRV-ATTR:hypervisor_hostname"]
3467 == hype["hypervisor_hostname"]
3468 ):
3469 if "vm" in hype:
3470 hype["vm"].append(server.id)
3471 else:
3472 hype["vm"] = [server.id]
3473
3474 return 1, hype_dict
3475 except nvExceptions.NotFound as e:
3476 error_value = -vimconn.HTTP_Not_Found
3477 error_text = str(e) if len(e.args) == 0 else str(e.args[0])
3478 except (ksExceptions.ClientException, nvExceptions.ClientException) as e:
3479 error_value = -vimconn.HTTP_Bad_Request
3480 error_text = (
3481 type(e).__name__
3482 + ": "
3483 + (str(e) if len(e.args) == 0 else str(e.args[0]))
3484 )
3485
3486 # TODO insert exception vimconn.HTTP_Unauthorized
3487 # if reaching here is because an exception
3488 self.logger.debug("get_hosts " + error_text)
3489
3490 return error_value, error_text
3491
3492 def new_classification(self, name, ctype, definition):
3493 self.logger.debug(
3494 "Adding a new (Traffic) Classification to VIM, named %s", name
3495 )
3496
3497 try:
3498 new_class = None
3499 self._reload_connection()
3500
3501 if ctype not in supportedClassificationTypes:
3502 raise vimconn.VimConnNotSupportedException(
3503 "OpenStack VIM connector does not support provided "
3504 "Classification Type {}, supported ones are: {}".format(
3505 ctype, supportedClassificationTypes
3506 )
3507 )
3508
3509 if not self._validate_classification(ctype, definition):
3510 raise vimconn.VimConnException(
3511 "Incorrect Classification definition for the type specified."
3512 )
3513
3514 classification_dict = definition
3515 classification_dict["name"] = name
3516 new_class = self.neutron.create_sfc_flow_classifier(
3517 {"flow_classifier": classification_dict}
3518 )
3519
3520 return new_class["flow_classifier"]["id"]
3521 except (
3522 neExceptions.ConnectionFailed,
3523 ksExceptions.ClientException,
3524 neExceptions.NeutronException,
3525 ConnectionError,
3526 ) as e:
3527 self.logger.error("Creation of Classification failed.")
3528 self._format_exception(e)
3529
3530 def get_classification(self, class_id):
3531 self.logger.debug(" Getting Classification %s from VIM", class_id)
3532 filter_dict = {"id": class_id}
3533 class_list = self.get_classification_list(filter_dict)
3534
3535 if len(class_list) == 0:
3536 raise vimconn.VimConnNotFoundException(
3537 "Classification '{}' not found".format(class_id)
3538 )
3539 elif len(class_list) > 1:
3540 raise vimconn.VimConnConflictException(
3541 "Found more than one Classification with this criteria"
3542 )
3543
3544 classification = class_list[0]
3545
3546 return classification
3547
3548 def get_classification_list(self, filter_dict={}):
3549 self.logger.debug(
3550 "Getting Classifications from VIM filter: '%s'", str(filter_dict)
3551 )
3552
3553 try:
3554 filter_dict_os = filter_dict.copy()
3555 self._reload_connection()
3556
3557 if self.api_version3 and "tenant_id" in filter_dict_os:
3558 filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")
3559
3560 classification_dict = self.neutron.list_sfc_flow_classifiers(
3561 **filter_dict_os
3562 )
3563 classification_list = classification_dict["flow_classifiers"]
3564 self.__classification_os2mano(classification_list)
3565
3566 return classification_list
3567 except (
3568 neExceptions.ConnectionFailed,
3569 ksExceptions.ClientException,
3570 neExceptions.NeutronException,
3571 ConnectionError,
3572 ) as e:
3573 self._format_exception(e)
3574
3575 def delete_classification(self, class_id):
3576 self.logger.debug("Deleting Classification '%s' from VIM", class_id)
3577
3578 try:
3579 self._reload_connection()
3580 self.neutron.delete_sfc_flow_classifier(class_id)
3581
3582 return class_id
3583 except (
3584 neExceptions.ConnectionFailed,
3585 neExceptions.NeutronException,
3586 ksExceptions.ClientException,
3587 neExceptions.NeutronException,
3588 ConnectionError,
3589 ) as e:
3590 self._format_exception(e)
3591
3592 def new_sfi(self, name, ingress_ports, egress_ports, sfc_encap=True):
3593 self.logger.debug(
3594 "Adding a new Service Function Instance to VIM, named '%s'", name
3595 )
3596
3597 try:
3598 new_sfi = None
3599 self._reload_connection()
3600 correlation = None
3601
3602 if sfc_encap:
3603 correlation = "nsh"
3604
3605 if len(ingress_ports) != 1:
3606 raise vimconn.VimConnNotSupportedException(
3607 "OpenStack VIM connector can only have 1 ingress port per SFI"
3608 )
3609
3610 if len(egress_ports) != 1:
3611 raise vimconn.VimConnNotSupportedException(
3612 "OpenStack VIM connector can only have 1 egress port per SFI"
3613 )
3614
3615 sfi_dict = {
3616 "name": name,
3617 "ingress": ingress_ports[0],
3618 "egress": egress_ports[0],
3619 "service_function_parameters": {"correlation": correlation},
3620 }
3621 new_sfi = self.neutron.create_sfc_port_pair({"port_pair": sfi_dict})
3622
3623 return new_sfi["port_pair"]["id"]
3624 except (
3625 neExceptions.ConnectionFailed,
3626 ksExceptions.ClientException,
3627 neExceptions.NeutronException,
3628 ConnectionError,
3629 ) as e:
3630 if new_sfi:
3631 try:
3632 self.neutron.delete_sfc_port_pair(new_sfi["port_pair"]["id"])
3633 except Exception:
3634 self.logger.error(
3635 "Creation of Service Function Instance failed, with "
3636 "subsequent deletion failure as well."
3637 )
3638
3639 self._format_exception(e)
3640
3641 def get_sfi(self, sfi_id):
3642 self.logger.debug("Getting Service Function Instance %s from VIM", sfi_id)
3643 filter_dict = {"id": sfi_id}
3644 sfi_list = self.get_sfi_list(filter_dict)
3645
3646 if len(sfi_list) == 0:
3647 raise vimconn.VimConnNotFoundException(
3648 "Service Function Instance '{}' not found".format(sfi_id)
3649 )
3650 elif len(sfi_list) > 1:
3651 raise vimconn.VimConnConflictException(
3652 "Found more than one Service Function Instance with this criteria"
3653 )
3654
3655 sfi = sfi_list[0]
3656
3657 return sfi
3658
3659 def get_sfi_list(self, filter_dict={}):
3660 self.logger.debug(
3661 "Getting Service Function Instances from VIM filter: '%s'", str(filter_dict)
3662 )
3663
3664 try:
3665 self._reload_connection()
3666 filter_dict_os = filter_dict.copy()
3667
3668 if self.api_version3 and "tenant_id" in filter_dict_os:
3669 filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")
3670
3671 sfi_dict = self.neutron.list_sfc_port_pairs(**filter_dict_os)
3672 sfi_list = sfi_dict["port_pairs"]
3673 self.__sfi_os2mano(sfi_list)
3674
3675 return sfi_list
3676 except (
3677 neExceptions.ConnectionFailed,
3678 ksExceptions.ClientException,
3679 neExceptions.NeutronException,
3680 ConnectionError,
3681 ) as e:
3682 self._format_exception(e)
3683
3684 def delete_sfi(self, sfi_id):
3685 self.logger.debug("Deleting Service Function Instance '%s' from VIM", sfi_id)
3686
3687 try:
3688 self._reload_connection()
3689 self.neutron.delete_sfc_port_pair(sfi_id)
3690
3691 return sfi_id
3692 except (
3693 neExceptions.ConnectionFailed,
3694 neExceptions.NeutronException,
3695 ksExceptions.ClientException,
3696 neExceptions.NeutronException,
3697 ConnectionError,
3698 ) as e:
3699 self._format_exception(e)
3700
3701 def new_sf(self, name, sfis, sfc_encap=True):
3702 self.logger.debug("Adding a new Service Function to VIM, named '%s'", name)
3703
3704 try:
3705 new_sf = None
3706 self._reload_connection()
3707 # correlation = None
3708 # if sfc_encap:
3709 # correlation = "nsh"
3710
3711 for instance in sfis:
3712 sfi = self.get_sfi(instance)
3713
3714 if sfi.get("sfc_encap") != sfc_encap:
3715 raise vimconn.VimConnNotSupportedException(
3716 "OpenStack VIM connector requires all SFIs of the "
3717 "same SF to share the same SFC Encapsulation"
3718 )
3719
3720 sf_dict = {"name": name, "port_pairs": sfis}
3721 new_sf = self.neutron.create_sfc_port_pair_group(
3722 {"port_pair_group": sf_dict}
3723 )
3724
3725 return new_sf["port_pair_group"]["id"]
3726 except (
3727 neExceptions.ConnectionFailed,
3728 ksExceptions.ClientException,
3729 neExceptions.NeutronException,
3730 ConnectionError,
3731 ) as e:
3732 if new_sf:
3733 try:
3734 self.neutron.delete_sfc_port_pair_group(
3735 new_sf["port_pair_group"]["id"]
3736 )
3737 except Exception:
3738 self.logger.error(
3739 "Creation of Service Function failed, with "
3740 "subsequent deletion failure as well."
3741 )
3742
3743 self._format_exception(e)
3744
3745 def get_sf(self, sf_id):
3746 self.logger.debug("Getting Service Function %s from VIM", sf_id)
3747 filter_dict = {"id": sf_id}
3748 sf_list = self.get_sf_list(filter_dict)
3749
3750 if len(sf_list) == 0:
3751 raise vimconn.VimConnNotFoundException(
3752 "Service Function '{}' not found".format(sf_id)
3753 )
3754 elif len(sf_list) > 1:
3755 raise vimconn.VimConnConflictException(
3756 "Found more than one Service Function with this criteria"
3757 )
3758
3759 sf = sf_list[0]
3760
3761 return sf
3762
3763 def get_sf_list(self, filter_dict={}):
3764 self.logger.debug(
3765 "Getting Service Function from VIM filter: '%s'", str(filter_dict)
3766 )
3767
3768 try:
3769 self._reload_connection()
3770 filter_dict_os = filter_dict.copy()
3771
3772 if self.api_version3 and "tenant_id" in filter_dict_os:
3773 filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")
3774
3775 sf_dict = self.neutron.list_sfc_port_pair_groups(**filter_dict_os)
3776 sf_list = sf_dict["port_pair_groups"]
3777 self.__sf_os2mano(sf_list)
3778
3779 return sf_list
3780 except (
3781 neExceptions.ConnectionFailed,
3782 ksExceptions.ClientException,
3783 neExceptions.NeutronException,
3784 ConnectionError,
3785 ) as e:
3786 self._format_exception(e)
3787
3788 def delete_sf(self, sf_id):
3789 self.logger.debug("Deleting Service Function '%s' from VIM", sf_id)
3790
3791 try:
3792 self._reload_connection()
3793 self.neutron.delete_sfc_port_pair_group(sf_id)
3794
3795 return sf_id
3796 except (
3797 neExceptions.ConnectionFailed,
3798 neExceptions.NeutronException,
3799 ksExceptions.ClientException,
3800 neExceptions.NeutronException,
3801 ConnectionError,
3802 ) as e:
3803 self._format_exception(e)
3804
3805 def new_sfp(self, name, classifications, sfs, sfc_encap=True, spi=None):
3806 self.logger.debug("Adding a new Service Function Path to VIM, named '%s'", name)
3807
3808 try:
3809 new_sfp = None
3810 self._reload_connection()
3811 # In networking-sfc the MPLS encapsulation is legacy
3812 # should be used when no full SFC Encapsulation is intended
3813 correlation = "mpls"
3814
3815 if sfc_encap:
3816 correlation = "nsh"
3817
3818 sfp_dict = {
3819 "name": name,
3820 "flow_classifiers": classifications,
3821 "port_pair_groups": sfs,
3822 "chain_parameters": {"correlation": correlation},
3823 }
3824
3825 if spi:
3826 sfp_dict["chain_id"] = spi
3827
3828 new_sfp = self.neutron.create_sfc_port_chain({"port_chain": sfp_dict})
3829
3830 return new_sfp["port_chain"]["id"]
3831 except (
3832 neExceptions.ConnectionFailed,
3833 ksExceptions.ClientException,
3834 neExceptions.NeutronException,
3835 ConnectionError,
3836 ) as e:
3837 if new_sfp:
3838 try:
3839 self.neutron.delete_sfc_port_chain(new_sfp["port_chain"]["id"])
3840 except Exception:
3841 self.logger.error(
3842 "Creation of Service Function Path failed, with "
3843 "subsequent deletion failure as well."
3844 )
3845
3846 self._format_exception(e)
3847
3848 def get_sfp(self, sfp_id):
3849 self.logger.debug(" Getting Service Function Path %s from VIM", sfp_id)
3850
3851 filter_dict = {"id": sfp_id}
3852 sfp_list = self.get_sfp_list(filter_dict)
3853
3854 if len(sfp_list) == 0:
3855 raise vimconn.VimConnNotFoundException(
3856 "Service Function Path '{}' not found".format(sfp_id)
3857 )
3858 elif len(sfp_list) > 1:
3859 raise vimconn.VimConnConflictException(
3860 "Found more than one Service Function Path with this criteria"
3861 )
3862
3863 sfp = sfp_list[0]
3864
3865 return sfp
3866
3867 def get_sfp_list(self, filter_dict={}):
3868 self.logger.debug(
3869 "Getting Service Function Paths from VIM filter: '%s'", str(filter_dict)
3870 )
3871
3872 try:
3873 self._reload_connection()
3874 filter_dict_os = filter_dict.copy()
3875
3876 if self.api_version3 and "tenant_id" in filter_dict_os:
3877 filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")
3878
3879 sfp_dict = self.neutron.list_sfc_port_chains(**filter_dict_os)
3880 sfp_list = sfp_dict["port_chains"]
3881 self.__sfp_os2mano(sfp_list)
3882
3883 return sfp_list
3884 except (
3885 neExceptions.ConnectionFailed,
3886 ksExceptions.ClientException,
3887 neExceptions.NeutronException,
3888 ConnectionError,
3889 ) as e:
3890 self._format_exception(e)
3891
3892 def delete_sfp(self, sfp_id):
3893 self.logger.debug("Deleting Service Function Path '%s' from VIM", sfp_id)
3894
3895 try:
3896 self._reload_connection()
3897 self.neutron.delete_sfc_port_chain(sfp_id)
3898
3899 return sfp_id
3900 except (
3901 neExceptions.ConnectionFailed,
3902 neExceptions.NeutronException,
3903 ksExceptions.ClientException,
3904 neExceptions.NeutronException,
3905 ConnectionError,
3906 ) as e:
3907 self._format_exception(e)
3908
3909 def refresh_sfps_status(self, sfp_list):
3910 """Get the status of the service function path
3911 Params: the list of sfp identifiers
3912 Returns a dictionary with:
3913 vm_id: #VIM id of this service function path
3914 status: #Mandatory. Text with one of:
3915 # DELETED (not found at vim)
3916 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
3917 # OTHER (Vim reported other status not understood)
3918 # ERROR (VIM indicates an ERROR status)
3919 # ACTIVE,
3920 # CREATING (on building process)
3921 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
3922 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)F
3923 """
3924 sfp_dict = {}
3925 self.logger.debug(
3926 "refresh_sfps status: Getting tenant SFP information from VIM"
3927 )
3928
3929 for sfp_id in sfp_list:
3930 sfp = {}
3931
3932 try:
3933 sfp_vim = self.get_sfp(sfp_id)
3934
3935 if sfp_vim["spi"]:
3936 sfp["status"] = vmStatus2manoFormat["ACTIVE"]
3937 else:
3938 sfp["status"] = "OTHER"
3939 sfp["error_msg"] = "VIM status reported " + sfp["status"]
3940
3941 sfp["vim_info"] = self.serialize(sfp_vim)
3942
3943 if sfp_vim.get("fault"):
3944 sfp["error_msg"] = str(sfp_vim["fault"])
3945 except vimconn.VimConnNotFoundException as e:
3946 self.logger.error("Exception getting sfp status: %s", str(e))
3947 sfp["status"] = "DELETED"
3948 sfp["error_msg"] = str(e)
3949 except vimconn.VimConnException as e:
3950 self.logger.error("Exception getting sfp status: %s", str(e))
3951 sfp["status"] = "VIM_ERROR"
3952 sfp["error_msg"] = str(e)
3953
3954 sfp_dict[sfp_id] = sfp
3955
3956 return sfp_dict
3957
3958 def refresh_sfis_status(self, sfi_list):
3959 """Get the status of the service function instances
3960 Params: the list of sfi identifiers
3961 Returns a dictionary with:
3962 vm_id: #VIM id of this service function instance
3963 status: #Mandatory. Text with one of:
3964 # DELETED (not found at vim)
3965 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
3966 # OTHER (Vim reported other status not understood)
3967 # ERROR (VIM indicates an ERROR status)
3968 # ACTIVE,
3969 # CREATING (on building process)
3970 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
3971 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
3972 """
3973 sfi_dict = {}
3974 self.logger.debug(
3975 "refresh_sfis status: Getting tenant sfi information from VIM"
3976 )
3977
3978 for sfi_id in sfi_list:
3979 sfi = {}
3980
3981 try:
3982 sfi_vim = self.get_sfi(sfi_id)
3983
3984 if sfi_vim:
3985 sfi["status"] = vmStatus2manoFormat["ACTIVE"]
3986 else:
3987 sfi["status"] = "OTHER"
3988 sfi["error_msg"] = "VIM status reported " + sfi["status"]
3989
3990 sfi["vim_info"] = self.serialize(sfi_vim)
3991
3992 if sfi_vim.get("fault"):
3993 sfi["error_msg"] = str(sfi_vim["fault"])
3994 except vimconn.VimConnNotFoundException as e:
3995 self.logger.error("Exception getting sfi status: %s", str(e))
3996 sfi["status"] = "DELETED"
3997 sfi["error_msg"] = str(e)
3998 except vimconn.VimConnException as e:
3999 self.logger.error("Exception getting sfi status: %s", str(e))
4000 sfi["status"] = "VIM_ERROR"
4001 sfi["error_msg"] = str(e)
4002
4003 sfi_dict[sfi_id] = sfi
4004
4005 return sfi_dict
4006
4007 def refresh_sfs_status(self, sf_list):
4008 """Get the status of the service functions
4009 Params: the list of sf identifiers
4010 Returns a dictionary with:
4011 vm_id: #VIM id of this service function
4012 status: #Mandatory. Text with one of:
4013 # DELETED (not found at vim)
4014 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
4015 # OTHER (Vim reported other status not understood)
4016 # ERROR (VIM indicates an ERROR status)
4017 # ACTIVE,
4018 # CREATING (on building process)
4019 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
4020 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
4021 """
4022 sf_dict = {}
4023 self.logger.debug("refresh_sfs status: Getting tenant sf information from VIM")
4024
4025 for sf_id in sf_list:
4026 sf = {}
4027
4028 try:
4029 sf_vim = self.get_sf(sf_id)
4030
4031 if sf_vim:
4032 sf["status"] = vmStatus2manoFormat["ACTIVE"]
4033 else:
4034 sf["status"] = "OTHER"
4035 sf["error_msg"] = "VIM status reported " + sf_vim["status"]
4036
4037 sf["vim_info"] = self.serialize(sf_vim)
4038
4039 if sf_vim.get("fault"):
4040 sf["error_msg"] = str(sf_vim["fault"])
4041 except vimconn.VimConnNotFoundException as e:
4042 self.logger.error("Exception getting sf status: %s", str(e))
4043 sf["status"] = "DELETED"
4044 sf["error_msg"] = str(e)
4045 except vimconn.VimConnException as e:
4046 self.logger.error("Exception getting sf status: %s", str(e))
4047 sf["status"] = "VIM_ERROR"
4048 sf["error_msg"] = str(e)
4049
4050 sf_dict[sf_id] = sf
4051
4052 return sf_dict
4053
4054 def refresh_classifications_status(self, classification_list):
4055 """Get the status of the classifications
4056 Params: the list of classification identifiers
4057 Returns a dictionary with:
4058 vm_id: #VIM id of this classifier
4059 status: #Mandatory. Text with one of:
4060 # DELETED (not found at vim)
4061 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
4062 # OTHER (Vim reported other status not understood)
4063 # ERROR (VIM indicates an ERROR status)
4064 # ACTIVE,
4065 # CREATING (on building process)
4066 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
4067 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
4068 """
4069 classification_dict = {}
4070 self.logger.debug(
4071 "refresh_classifications status: Getting tenant classification information from VIM"
4072 )
4073
4074 for classification_id in classification_list:
4075 classification = {}
4076
4077 try:
4078 classification_vim = self.get_classification(classification_id)
4079
4080 if classification_vim:
4081 classification["status"] = vmStatus2manoFormat["ACTIVE"]
4082 else:
4083 classification["status"] = "OTHER"
4084 classification["error_msg"] = (
4085 "VIM status reported " + classification["status"]
4086 )
4087
4088 classification["vim_info"] = self.serialize(classification_vim)
4089
4090 if classification_vim.get("fault"):
4091 classification["error_msg"] = str(classification_vim["fault"])
4092 except vimconn.VimConnNotFoundException as e:
4093 self.logger.error("Exception getting classification status: %s", str(e))
4094 classification["status"] = "DELETED"
4095 classification["error_msg"] = str(e)
4096 except vimconn.VimConnException as e:
4097 self.logger.error("Exception getting classification status: %s", str(e))
4098 classification["status"] = "VIM_ERROR"
4099 classification["error_msg"] = str(e)
4100
4101 classification_dict[classification_id] = classification
4102
4103 return classification_dict
4104
4105 def new_affinity_group(self, affinity_group_data):
4106 """Adds a server group to VIM
4107 affinity_group_data contains a dictionary with information, keys:
4108 name: name in VIM for the server group
4109 type: affinity or anti-affinity
4110 scope: Only nfvi-node allowed
4111 Returns the server group identifier"""
4112 self.logger.debug("Adding Server Group '%s'", str(affinity_group_data))
4113
4114 try:
4115 name = affinity_group_data["name"]
4116 policy = affinity_group_data["type"]
4117
4118 self._reload_connection()
4119 new_server_group = self.nova.server_groups.create(name, policy)
4120
4121 return new_server_group.id
4122 except (
4123 ksExceptions.ClientException,
4124 nvExceptions.ClientException,
4125 ConnectionError,
4126 KeyError,
4127 ) as e:
4128 self._format_exception(e)
4129
4130 def get_affinity_group(self, affinity_group_id):
4131 """Obtain server group details from the VIM. Returns the server group detais as a dict"""
4132 self.logger.debug("Getting flavor '%s'", affinity_group_id)
4133 try:
4134 self._reload_connection()
4135 server_group = self.nova.server_groups.find(id=affinity_group_id)
4136
4137 return server_group.to_dict()
4138 except (
4139 nvExceptions.NotFound,
4140 nvExceptions.ClientException,
4141 ksExceptions.ClientException,
4142 ConnectionError,
4143 ) as e:
4144 self._format_exception(e)
4145
4146 def delete_affinity_group(self, affinity_group_id):
4147 """Deletes a server group from the VIM. Returns the old affinity_group_id"""
4148 self.logger.debug("Getting server group '%s'", affinity_group_id)
4149 try:
4150 self._reload_connection()
4151 self.nova.server_groups.delete(affinity_group_id)
4152
4153 return affinity_group_id
4154 except (
4155 nvExceptions.NotFound,
4156 ksExceptions.ClientException,
4157 nvExceptions.ClientException,
4158 ConnectionError,
4159 ) as e:
4160 self._format_exception(e)
4161
4162 def get_vdu_state(self, vm_id):
4163 """
4164 Getting the state of a vdu
4165 param:
4166 vm_id: ID of an instance
4167 """
4168 self.logger.debug("Getting the status of VM")
4169 self.logger.debug("VIM VM ID %s", vm_id)
4170 self._reload_connection()
4171 server = self.nova.servers.find(id=vm_id)
4172 server_dict = server.to_dict()
4173 vdu_data = [
4174 server_dict["status"],
4175 server_dict["flavor"]["id"],
4176 server_dict["OS-EXT-SRV-ATTR:host"],
4177 server_dict["OS-EXT-AZ:availability_zone"],
4178 ]
4179 self.logger.debug("vdu_data %s", vdu_data)
4180 return vdu_data
4181
4182 def check_compute_availability(self, host, server_flavor_details):
4183 self._reload_connection()
4184 hypervisor_search = self.nova.hypervisors.search(
4185 hypervisor_match=host, servers=True
4186 )
4187 for hypervisor in hypervisor_search:
4188 hypervisor_id = hypervisor.to_dict()["id"]
4189 hypervisor_details = self.nova.hypervisors.get(hypervisor=hypervisor_id)
4190 hypervisor_dict = hypervisor_details.to_dict()
4191 hypervisor_temp = json.dumps(hypervisor_dict)
4192 hypervisor_json = json.loads(hypervisor_temp)
4193 resources_available = [
4194 hypervisor_json["free_ram_mb"],
4195 hypervisor_json["disk_available_least"],
4196 hypervisor_json["vcpus"] - hypervisor_json["vcpus_used"],
4197 ]
4198 compute_available = all(
4199 x > y for x, y in zip(resources_available, server_flavor_details)
4200 )
4201 if compute_available:
4202 return host
4203
4204 def check_availability_zone(
4205 self, old_az, server_flavor_details, old_host, host=None
4206 ):
4207 self._reload_connection()
4208 az_check = {"zone_check": False, "compute_availability": None}
4209 aggregates_list = self.nova.aggregates.list()
4210 for aggregate in aggregates_list:
4211 aggregate_details = aggregate.to_dict()
4212 aggregate_temp = json.dumps(aggregate_details)
4213 aggregate_json = json.loads(aggregate_temp)
4214 if aggregate_json["availability_zone"] == old_az:
4215 hosts_list = aggregate_json["hosts"]
4216 if host is not None:
4217 if host in hosts_list:
4218 az_check["zone_check"] = True
4219 available_compute_id = self.check_compute_availability(
4220 host, server_flavor_details
4221 )
4222 if available_compute_id is not None:
4223 az_check["compute_availability"] = available_compute_id
4224 else:
4225 for check_host in hosts_list:
4226 if check_host != old_host:
4227 available_compute_id = self.check_compute_availability(
4228 check_host, server_flavor_details
4229 )
4230 if available_compute_id is not None:
4231 az_check["zone_check"] = True
4232 az_check["compute_availability"] = available_compute_id
4233 break
4234 else:
4235 az_check["zone_check"] = True
4236 return az_check
4237
    def migrate_instance(self, vm_id, compute_host=None):
        """
        Migrate a vdu
        param:
            vm_id: ID of an instance
            compute_host: Host to migrate the vdu to (optional; when omitted,
                a suitable host inside the same availability zone is chosen)
        """
        self._reload_connection()
        vm_state = False
        # get_vdu_state returns [status, flavor id, hypervisor host, availability zone]
        instance_state = self.get_vdu_state(vm_id)
        server_flavor_id = instance_state[1]
        server_hypervisor_name = instance_state[2]
        server_availability_zone = instance_state[3]
        try:
            # Resource requirements of the instance's current flavor, used to
            # verify the target host has enough free capacity.
            server_flavor = self.nova.flavors.find(id=server_flavor_id).to_dict()
            server_flavor_details = [
                server_flavor["ram"],
                server_flavor["disk"],
                server_flavor["vcpus"],
            ]
            if compute_host == server_hypervisor_name:
                raise vimconn.VimConnException(
                    "Unable to migrate instance '{}' to the same host '{}'".format(
                        vm_id, compute_host
                    ),
                    http_code=vimconn.HTTP_Bad_Request,
                )
            # Validate that the (explicit or auto-selected) target stays in the
            # instance's availability zone and has enough free resources.
            az_status = self.check_availability_zone(
                server_availability_zone,
                server_flavor_details,
                server_hypervisor_name,
                compute_host,
            )
            availability_zone_check = az_status["zone_check"]
            available_compute_id = az_status.get("compute_availability")

            if availability_zone_check is False:
                raise vimconn.VimConnException(
                    "Unable to migrate instance '{}' to a different availability zone".format(
                        vm_id
                    ),
                    http_code=vimconn.HTTP_Bad_Request,
                )
            if available_compute_id is not None:
                # Trigger the live migration; completion is then awaited below.
                self.nova.servers.live_migrate(
                    server=vm_id,
                    host=available_compute_id,
                    block_migration=True,
                    disk_over_commit=False,
                )
                state = "MIGRATING"
                changed_compute_host = ""
                # NOTE(review): `state` was just set above, so this condition
                # is always true; kept as-is to preserve behavior.
                if state == "MIGRATING":
                    vm_state = self.__wait_for_vm(vm_id, "ACTIVE")
                    changed_compute_host = self.get_vdu_state(vm_id)[2]
                if vm_state and changed_compute_host == available_compute_id:
                    self.logger.debug(
                        "Instance '{}' migrated to the new compute host '{}'".format(
                            vm_id, changed_compute_host
                        )
                    )
                    return state, available_compute_id
                else:
                    raise vimconn.VimConnException(
                        "Migration Failed. Instance '{}' not moved to the new host {}".format(
                            vm_id, available_compute_id
                        ),
                        http_code=vimconn.HTTP_Bad_Request,
                    )
            else:
                # NOTE(review): available_compute_id is None in this branch,
                # so the message prints "Compute 'None' ..." — confirm whether
                # the requested host was meant to be reported instead.
                raise vimconn.VimConnException(
                    "Compute '{}' not available or does not have enough resources to migrate the instance".format(
                        available_compute_id
                    ),
                    http_code=vimconn.HTTP_Bad_Request,
                )
        except (
            nvExceptions.BadRequest,
            nvExceptions.ClientException,
            nvExceptions.NotFound,
        ) as e:
            self._format_exception(e)
4320
4321 def resize_instance(self, vm_id, new_flavor_id):
4322 """
4323 For resizing the vm based on the given
4324 flavor details
4325 param:
4326 vm_id : ID of an instance
4327 new_flavor_id : Flavor id to be resized
4328 Return the status of a resized instance
4329 """
4330 self._reload_connection()
4331 self.logger.debug("resize the flavor of an instance")
4332 instance_status, old_flavor_id, compute_host, az = self.get_vdu_state(vm_id)
4333 old_flavor_disk = self.nova.flavors.find(id=old_flavor_id).to_dict()["disk"]
4334 new_flavor_disk = self.nova.flavors.find(id=new_flavor_id).to_dict()["disk"]
4335 try:
4336 if instance_status == "ACTIVE" or instance_status == "SHUTOFF":
4337 if old_flavor_disk > new_flavor_disk:
4338 raise nvExceptions.BadRequest(
4339 400,
4340 message="Server disk resize failed. Resize to lower disk flavor is not allowed",
4341 )
4342 else:
4343 self.nova.servers.resize(server=vm_id, flavor=new_flavor_id)
4344 vm_state = self.__wait_for_vm(vm_id, "VERIFY_RESIZE")
4345 if vm_state:
4346 instance_resized_status = self.confirm_resize(vm_id)
4347 return instance_resized_status
4348 else:
4349 raise nvExceptions.BadRequest(
4350 409,
4351 message="Cannot 'resize' vm_state is in ERROR",
4352 )
4353
4354 else:
4355 self.logger.debug("ERROR : Instance is not in ACTIVE or SHUTOFF state")
4356 raise nvExceptions.BadRequest(
4357 409,
4358 message="Cannot 'resize' instance while it is in vm_state resized",
4359 )
4360 except (
4361 nvExceptions.BadRequest,
4362 nvExceptions.ClientException,
4363 nvExceptions.NotFound,
4364 ) as e:
4365 self._format_exception(e)
4366
4367 def confirm_resize(self, vm_id):
4368 """
4369 Confirm the resize of an instance
4370 param:
4371 vm_id: ID of an instance
4372 """
4373 self._reload_connection()
4374 self.nova.servers.confirm_resize(server=vm_id)
4375 if self.get_vdu_state(vm_id)[0] == "VERIFY_RESIZE":
4376 self.__wait_for_vm(vm_id, "ACTIVE")
4377 instance_status = self.get_vdu_state(vm_id)[0]
4378 return instance_status