0a237d9c507b9d69bab20b809294d962a14d443f
[osm/RO.git] / RO-VIM-openstack / osm_rovim_openstack / vimconn_openstack.py
1 # -*- coding: utf-8 -*-
2
3 ##
4 # Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
5 # This file is part of openmano
6 # All Rights Reserved.
7 #
8 # Licensed under the Apache License, Version 2.0 (the "License"); you may
9 # not use this file except in compliance with the License. You may obtain
10 # a copy of the License at
11 #
12 # http://www.apache.org/licenses/LICENSE-2.0
13 #
14 # Unless required by applicable law or agreed to in writing, software
15 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
16 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
17 # License for the specific language governing permissions and limitations
18 # under the License.
19 ##
20
21 """
22 osconnector implements all the methods to interact with openstack using the python-neutronclient.
23
24 For the VNF forwarding graph, The OpenStack VIM connector calls the
25 networking-sfc Neutron extension methods, whose resources are mapped
26 to the VIM connector's SFC resources as follows:
27 - Classification (OSM) -> Flow Classifier (Neutron)
28 - Service Function Instance (OSM) -> Port Pair (Neutron)
29 - Service Function (OSM) -> Port Pair Group (Neutron)
30 - Service Function Path (OSM) -> Port Chain (Neutron)
31 """
32
33 import copy
34 from http.client import HTTPException
35 import json
36 import logging
37 from pprint import pformat
38 import random
39 import re
40 import time
41 from typing import Dict, List, Optional, Tuple
42
43 from cinderclient import client as cClient
44 from glanceclient import client as glClient
45 import glanceclient.exc as gl1Exceptions
46 from keystoneauth1 import session
47 from keystoneauth1.identity import v2, v3
48 import keystoneclient.exceptions as ksExceptions
49 import keystoneclient.v2_0.client as ksClient_v2
50 import keystoneclient.v3.client as ksClient_v3
51 import netaddr
52 from neutronclient.common import exceptions as neExceptions
53 from neutronclient.neutron import client as neClient
54 from novaclient import client as nClient, exceptions as nvExceptions
55 from osm_ro_plugin import vimconn
56 from requests.exceptions import ConnectionError
57 import yaml
58
__author__ = "Alfonso Tierno, Gerardo Garcia, Pablo Montes, xFlow Research, Igor D.C., Eduardo Sousa"
__date__ = "$22-sep-2017 23:59:59$"

"""contain the openstack virtual machine status to openmano status"""
# Maps nova server status values to the OSM/openmano VM status vocabulary
vmStatus2manoFormat = {
    "ACTIVE": "ACTIVE",
    "PAUSED": "PAUSED",
    "SUSPENDED": "SUSPENDED",
    "SHUTOFF": "INACTIVE",
    "BUILD": "BUILD",
    "ERROR": "ERROR",
    "DELETED": "DELETED",
}
# Maps neutron network status values to the OSM/openmano network status vocabulary
netStatus2manoFormat = {
    "ACTIVE": "ACTIVE",
    "PAUSED": "PAUSED",
    "INACTIVE": "INACTIVE",
    "BUILD": "BUILD",
    "ERROR": "ERROR",
    "DELETED": "DELETED",
}

# SFC classification types accepted by _validate_classification()
supportedClassificationTypes = ["legacy_flow_classifier"]

# global var to have a timeout creating and deleting volumes
volume_timeout = 1800
server_timeout = 1800
86
87
class SafeDumper(yaml.SafeDumper):
    """YAML safe dumper that tolerates the dict subclasses returned by OpenStack APIs."""

    def represent_data(self, data):
        # PyYAML's safe dumper is designed to reject custom dict subclasses
        # (see pyyaml issue 142), so downcast them to plain dicts first.
        if isinstance(data, dict) and type(data) is not dict:
            data = dict(data.items())

        return super().represent_data(data)
98
99 class vimconnector(vimconn.VimConnector):
100 def __init__(
101 self,
102 uuid,
103 name,
104 tenant_id,
105 tenant_name,
106 url,
107 url_admin=None,
108 user=None,
109 passwd=None,
110 log_level=None,
111 config={},
112 persistent_info={},
113 ):
114 """using common constructor parameters. In this case
115 'url' is the keystone authorization url,
116 'url_admin' is not use
117 """
118 api_version = config.get("APIversion")
119
120 if api_version and api_version not in ("v3.3", "v2.0", "2", "3"):
121 raise vimconn.VimConnException(
122 "Invalid value '{}' for config:APIversion. "
123 "Allowed values are 'v3.3', 'v2.0', '2' or '3'".format(api_version)
124 )
125
126 vim_type = config.get("vim_type")
127
128 if vim_type and vim_type not in ("vio", "VIO"):
129 raise vimconn.VimConnException(
130 "Invalid value '{}' for config:vim_type."
131 "Allowed values are 'vio' or 'VIO'".format(vim_type)
132 )
133
134 if config.get("dataplane_net_vlan_range") is not None:
135 # validate vlan ranges provided by user
136 self._validate_vlan_ranges(
137 config.get("dataplane_net_vlan_range"), "dataplane_net_vlan_range"
138 )
139
140 if config.get("multisegment_vlan_range") is not None:
141 # validate vlan ranges provided by user
142 self._validate_vlan_ranges(
143 config.get("multisegment_vlan_range"), "multisegment_vlan_range"
144 )
145
146 vimconn.VimConnector.__init__(
147 self,
148 uuid,
149 name,
150 tenant_id,
151 tenant_name,
152 url,
153 url_admin,
154 user,
155 passwd,
156 log_level,
157 config,
158 )
159
160 if self.config.get("insecure") and self.config.get("ca_cert"):
161 raise vimconn.VimConnException(
162 "options insecure and ca_cert are mutually exclusive"
163 )
164
165 self.verify = True
166
167 if self.config.get("insecure"):
168 self.verify = False
169
170 if self.config.get("ca_cert"):
171 self.verify = self.config.get("ca_cert")
172
173 if not url:
174 raise TypeError("url param can not be NoneType")
175
176 self.persistent_info = persistent_info
177 self.availability_zone = persistent_info.get("availability_zone", None)
178 self.session = persistent_info.get("session", {"reload_client": True})
179 self.my_tenant_id = self.session.get("my_tenant_id")
180 self.nova = self.session.get("nova")
181 self.neutron = self.session.get("neutron")
182 self.cinder = self.session.get("cinder")
183 self.glance = self.session.get("glance")
184 # self.glancev1 = self.session.get("glancev1")
185 self.keystone = self.session.get("keystone")
186 self.api_version3 = self.session.get("api_version3")
187 self.vim_type = self.config.get("vim_type")
188
189 if self.vim_type:
190 self.vim_type = self.vim_type.upper()
191
192 if self.config.get("use_internal_endpoint"):
193 self.endpoint_type = "internalURL"
194 else:
195 self.endpoint_type = None
196
197 logging.getLogger("urllib3").setLevel(logging.WARNING)
198 logging.getLogger("keystoneauth").setLevel(logging.WARNING)
199 logging.getLogger("novaclient").setLevel(logging.WARNING)
200 self.logger = logging.getLogger("ro.vim.openstack")
201
202 # allow security_groups to be a list or a single string
203 if isinstance(self.config.get("security_groups"), str):
204 self.config["security_groups"] = [self.config["security_groups"]]
205
206 self.security_groups_id = None
207
208 # ###### VIO Specific Changes #########
209 if self.vim_type == "VIO":
210 self.logger = logging.getLogger("ro.vim.vio")
211
212 if log_level:
213 self.logger.setLevel(getattr(logging, log_level))
214
215 def __getitem__(self, index):
216 """Get individuals parameters.
217 Throw KeyError"""
218 if index == "project_domain_id":
219 return self.config.get("project_domain_id")
220 elif index == "user_domain_id":
221 return self.config.get("user_domain_id")
222 else:
223 return vimconn.VimConnector.__getitem__(self, index)
224
225 def __setitem__(self, index, value):
226 """Set individuals parameters and it is marked as dirty so to force connection reload.
227 Throw KeyError"""
228 if index == "project_domain_id":
229 self.config["project_domain_id"] = value
230 elif index == "user_domain_id":
231 self.config["user_domain_id"] = value
232 else:
233 vimconn.VimConnector.__setitem__(self, index, value)
234
235 self.session["reload_client"] = True
236
237 def serialize(self, value):
238 """Serialization of python basic types.
239
240 In the case value is not serializable a message will be logged and a
241 simple representation of the data that cannot be converted back to
242 python is returned.
243 """
244 if isinstance(value, str):
245 return value
246
247 try:
248 return yaml.dump(
249 value, Dumper=SafeDumper, default_flow_style=True, width=256
250 )
251 except yaml.representer.RepresenterError:
252 self.logger.debug(
253 "The following entity cannot be serialized in YAML:\n\n%s\n\n",
254 pformat(value),
255 exc_info=True,
256 )
257
258 return str(value)
259
    def _reload_connection(self):
        """Called before any operation, it check if credentials has changed
        Throw keystoneclient.apiclient.exceptions.AuthorizationFailure

        When the session is flagged with "reload_client", rebuilds the
        keystone/nova/neutron/cinder/glance clients and caches them (plus
        derived info such as tenant id and availability zones) in
        self.session / self.persistent_info.
        """
        # TODO control the timing and possible token timeout, but it seams that python client does this task for us :-)
        if self.session["reload_client"]:
            # decide identity API version: explicit config wins, otherwise
            # infer from the auth_url suffix
            if self.config.get("APIversion"):
                self.api_version3 = (
                    self.config["APIversion"] == "v3.3"
                    or self.config["APIversion"] == "3"
                )
            else:  # get from ending auth_url that end with v3 or with v2.0
                self.api_version3 = self.url.endswith("/v3") or self.url.endswith(
                    "/v3/"
                )

            self.session["api_version3"] = self.api_version3

            if self.api_version3:
                # only fall back to the "default" domain when no explicit
                # domain id/name was configured
                if self.config.get("project_domain_id") or self.config.get(
                    "project_domain_name"
                ):
                    project_domain_id_default = None
                else:
                    project_domain_id_default = "default"

                if self.config.get("user_domain_id") or self.config.get(
                    "user_domain_name"
                ):
                    user_domain_id_default = None
                else:
                    user_domain_id_default = "default"
                auth = v3.Password(
                    auth_url=self.url,
                    username=self.user,
                    password=self.passwd,
                    project_name=self.tenant_name,
                    project_id=self.tenant_id,
                    project_domain_id=self.config.get(
                        "project_domain_id", project_domain_id_default
                    ),
                    user_domain_id=self.config.get(
                        "user_domain_id", user_domain_id_default
                    ),
                    project_domain_name=self.config.get("project_domain_name"),
                    user_domain_name=self.config.get("user_domain_name"),
                )
            else:
                auth = v2.Password(
                    auth_url=self.url,
                    username=self.user,
                    password=self.passwd,
                    tenant_name=self.tenant_name,
                    tenant_id=self.tenant_id,
                )

            sess = session.Session(auth=auth, verify=self.verify)
            # addedd region_name to keystone, nova, neutron and cinder to support distributed cloud for Wind River
            # Titanium cloud and StarlingX
            region_name = self.config.get("region_name")

            if self.api_version3:
                self.keystone = ksClient_v3.Client(
                    session=sess,
                    endpoint_type=self.endpoint_type,
                    region_name=region_name,
                )
            else:
                self.keystone = ksClient_v2.Client(
                    session=sess, endpoint_type=self.endpoint_type
                )

            self.session["keystone"] = self.keystone
            # In order to enable microversion functionality an explicit microversion must be specified in "config".
            # This implementation approach is due to the warning message in
            # https://developer.openstack.org/api-guide/compute/microversions.html
            # where it is stated that microversion backwards compatibility is not guaranteed and clients should
            # always require an specific microversion.
            # To be able to use "device role tagging" functionality define "microversion: 2.32" in datacenter config
            version = self.config.get("microversion")

            if not version:
                version = "2.1"

            # addedd region_name to keystone, nova, neutron and cinder to support distributed cloud for Wind River
            # Titanium cloud and StarlingX
            self.nova = self.session["nova"] = nClient.Client(
                str(version),
                session=sess,
                endpoint_type=self.endpoint_type,
                region_name=region_name,
            )
            self.neutron = self.session["neutron"] = neClient.Client(
                "2.0",
                session=sess,
                endpoint_type=self.endpoint_type,
                region_name=region_name,
            )
            self.cinder = self.session["cinder"] = cClient.Client(
                2,
                session=sess,
                endpoint_type=self.endpoint_type,
                region_name=region_name,
            )

            try:
                self.my_tenant_id = self.session["my_tenant_id"] = sess.get_project_id()
            except Exception:
                # best effort: continue without the tenant id; operations that
                # need it will fail later (see _get_ids_from_name)
                self.logger.error("Cannot get project_id from session", exc_info=True)

            if self.endpoint_type == "internalURL":
                # the glance client takes a concrete endpoint rather than an
                # endpoint_type, so resolve the internal URL via keystone
                glance_service_id = self.keystone.services.list(name="glance")[0].id
                glance_endpoint = self.keystone.endpoints.list(
                    glance_service_id, interface="internal"
                )[0].url
            else:
                glance_endpoint = None

            self.glance = self.session["glance"] = glClient.Client(
                2, session=sess, endpoint=glance_endpoint
            )
            # using version 1 of glance client in new_image()
            # self.glancev1 = self.session["glancev1"] = glClient.Client("1", session=sess,
            #                                                            endpoint=glance_endpoint)
            self.session["reload_client"] = False
            self.persistent_info["session"] = self.session
            # add availablity zone info inside self.persistent_info
            self._set_availablity_zones()
            self.persistent_info["availability_zone"] = self.availability_zone
            # force to get again security_groups_ids next time they are needed
            self.security_groups_id = None
391
392 def __net_os2mano(self, net_list_dict):
393 """Transform the net openstack format to mano format
394 net_list_dict can be a list of dict or a single dict"""
395 if type(net_list_dict) is dict:
396 net_list_ = (net_list_dict,)
397 elif type(net_list_dict) is list:
398 net_list_ = net_list_dict
399 else:
400 raise TypeError("param net_list_dict must be a list or a dictionary")
401 for net in net_list_:
402 if net.get("provider:network_type") == "vlan":
403 net["type"] = "data"
404 else:
405 net["type"] = "bridge"
406
407 def __classification_os2mano(self, class_list_dict):
408 """Transform the openstack format (Flow Classifier) to mano format
409 (Classification) class_list_dict can be a list of dict or a single dict
410 """
411 if isinstance(class_list_dict, dict):
412 class_list_ = [class_list_dict]
413 elif isinstance(class_list_dict, list):
414 class_list_ = class_list_dict
415 else:
416 raise TypeError("param class_list_dict must be a list or a dictionary")
417 for classification in class_list_:
418 id = classification.pop("id")
419 name = classification.pop("name")
420 description = classification.pop("description")
421 project_id = classification.pop("project_id")
422 tenant_id = classification.pop("tenant_id")
423 original_classification = copy.deepcopy(classification)
424 classification.clear()
425 classification["ctype"] = "legacy_flow_classifier"
426 classification["definition"] = original_classification
427 classification["id"] = id
428 classification["name"] = name
429 classification["description"] = description
430 classification["project_id"] = project_id
431 classification["tenant_id"] = tenant_id
432
433 def __sfi_os2mano(self, sfi_list_dict):
434 """Transform the openstack format (Port Pair) to mano format (SFI)
435 sfi_list_dict can be a list of dict or a single dict
436 """
437 if isinstance(sfi_list_dict, dict):
438 sfi_list_ = [sfi_list_dict]
439 elif isinstance(sfi_list_dict, list):
440 sfi_list_ = sfi_list_dict
441 else:
442 raise TypeError("param sfi_list_dict must be a list or a dictionary")
443
444 for sfi in sfi_list_:
445 sfi["ingress_ports"] = []
446 sfi["egress_ports"] = []
447
448 if sfi.get("ingress"):
449 sfi["ingress_ports"].append(sfi["ingress"])
450
451 if sfi.get("egress"):
452 sfi["egress_ports"].append(sfi["egress"])
453
454 del sfi["ingress"]
455 del sfi["egress"]
456 params = sfi.get("service_function_parameters")
457 sfc_encap = False
458
459 if params:
460 correlation = params.get("correlation")
461
462 if correlation:
463 sfc_encap = True
464
465 sfi["sfc_encap"] = sfc_encap
466 del sfi["service_function_parameters"]
467
468 def __sf_os2mano(self, sf_list_dict):
469 """Transform the openstack format (Port Pair Group) to mano format (SF)
470 sf_list_dict can be a list of dict or a single dict
471 """
472 if isinstance(sf_list_dict, dict):
473 sf_list_ = [sf_list_dict]
474 elif isinstance(sf_list_dict, list):
475 sf_list_ = sf_list_dict
476 else:
477 raise TypeError("param sf_list_dict must be a list or a dictionary")
478
479 for sf in sf_list_:
480 del sf["port_pair_group_parameters"]
481 sf["sfis"] = sf["port_pairs"]
482 del sf["port_pairs"]
483
484 def __sfp_os2mano(self, sfp_list_dict):
485 """Transform the openstack format (Port Chain) to mano format (SFP)
486 sfp_list_dict can be a list of dict or a single dict
487 """
488 if isinstance(sfp_list_dict, dict):
489 sfp_list_ = [sfp_list_dict]
490 elif isinstance(sfp_list_dict, list):
491 sfp_list_ = sfp_list_dict
492 else:
493 raise TypeError("param sfp_list_dict must be a list or a dictionary")
494
495 for sfp in sfp_list_:
496 params = sfp.pop("chain_parameters")
497 sfc_encap = False
498
499 if params:
500 correlation = params.get("correlation")
501
502 if correlation:
503 sfc_encap = True
504
505 sfp["sfc_encap"] = sfc_encap
506 sfp["spi"] = sfp.pop("chain_id")
507 sfp["classifications"] = sfp.pop("flow_classifiers")
508 sfp["service_functions"] = sfp.pop("port_pair_groups")
509
    # placeholder for now; read TODO note below
    def _validate_classification(self, type, definition):
        """Check that a classification of the given type/definition is supported.

        Currently a no-op that accepts everything, since
        'legacy_flow_classifier' is the only supported type.
        """
        # only legacy_flow_classifier Type is supported at this point
        return True
        # TODO(igordcard): this method should be an abstract method of an
        # abstract Classification class to be implemented by the specific
        # Types. Also, abstract vimconnector should call the validation
        # method before the implemented VIM connectors are called.
518
    def _format_exception(self, exception):
        """Transform a keystone, nova, neutron exception into a vimconn exception discovering the cause

        Always raises. The vimconn exception type encodes the category:
        VimConnNotFoundException (missing resource),
        VimConnConnectionException (transport/credentials or unknown error),
        VimConnException (bad request data), VimConnUnexpectedResponse
        (generic client errors) or VimConnConflictException (conflict).
        NOTE: the isinstance checks are order-sensitive because the client
        exception hierarchies overlap (e.g. NotFound subclasses
        ClientException).
        """
        message_error = str(exception)
        tip = ""

        # resource not found at the VIM
        if isinstance(
            exception,
            (
                neExceptions.NetworkNotFoundClient,
                nvExceptions.NotFound,
                ksExceptions.NotFound,
                gl1Exceptions.HTTPNotFound,
            ),
        ):
            raise vimconn.VimConnNotFoundException(
                type(exception).__name__ + ": " + message_error
            )
        # transport-level failures: bad URL, credentials, TLS, connectivity
        elif isinstance(
            exception,
            (
                HTTPException,
                gl1Exceptions.HTTPException,
                gl1Exceptions.CommunicationError,
                ConnectionError,
                ksExceptions.ConnectionError,
                neExceptions.ConnectionFailed,
            ),
        ):
            if type(exception).__name__ == "SSLError":
                tip = " (maybe option 'insecure' must be added to the VIM)"

            raise vimconn.VimConnConnectionException(
                "Invalid URL or credentials{}: {}".format(tip, message_error)
            )
        # malformed request or missing/invalid data
        elif isinstance(
            exception,
            (
                KeyError,
                nvExceptions.BadRequest,
                ksExceptions.BadRequest,
            ),
        ):
            raise vimconn.VimConnException(
                type(exception).__name__ + ": " + message_error
            )
        # generic client-side errors reported by the VIM services
        elif isinstance(
            exception,
            (
                nvExceptions.ClientException,
                ksExceptions.ClientException,
                neExceptions.NeutronException,
            ),
        ):
            raise vimconn.VimConnUnexpectedResponse(
                type(exception).__name__ + ": " + message_error
            )
        elif isinstance(exception, nvExceptions.Conflict):
            raise vimconn.VimConnConflictException(
                type(exception).__name__ + ": " + message_error
            )
        # already a vimconn exception: re-raise unchanged
        elif isinstance(exception, vimconn.VimConnException):
            raise exception
        else:  # ()
            # unknown exception: log the traceback, report as connection error
            self.logger.error("General Exception " + message_error, exc_info=True)

            raise vimconn.VimConnConnectionException(
                type(exception).__name__ + ": " + message_error
            )
587
    def _get_ids_from_name(self):
        """
        Obtain ids from name of tenant and security_groups. Store at self .security_groups_id"
        :return: None
        :raises vimconn.VimConnConnectionException: if the tenant id is unknown
            or a configured security group does not exist at the VIM
        """
        # get tenant_id if only tenant_name is supplied
        self._reload_connection()

        if not self.my_tenant_id:
            raise vimconn.VimConnConnectionException(
                "Error getting tenant information from name={} id={}".format(
                    self.tenant_name, self.tenant_id
                )
            )

        if self.config.get("security_groups") and not self.security_groups_id:
            # convert from name to id
            neutron_sg_list = self.neutron.list_security_groups(
                tenant_id=self.my_tenant_id
            )["security_groups"]

            self.security_groups_id = []
            for sg in self.config.get("security_groups"):
                for neutron_sg in neutron_sg_list:
                    # each configured entry may be given by id or by name
                    if sg in (neutron_sg["id"], neutron_sg["name"]):
                        self.security_groups_id.append(neutron_sg["id"])
                        break
                else:
                    # for/else: nothing matched this entry. Reset the cache so
                    # a later call retries the whole lookup, then fail.
                    self.security_groups_id = None

                    raise vimconn.VimConnConnectionException(
                        "Not found security group {} for this tenant".format(sg)
                    )
621
    def check_vim_connectivity(self):
        """Verify reachability and credentials of the VIM; raises a vimconn exception on failure."""
        # just get network list to check connectivity and credentials
        self.get_network_list(filter_dict={})
625
626 def get_tenant_list(self, filter_dict={}):
627 """Obtain tenants of VIM
628 filter_dict can contain the following keys:
629 name: filter by tenant name
630 id: filter by tenant uuid/id
631 <other VIM specific>
632 Returns the tenant list of dictionaries: [{'name':'<name>, 'id':'<id>, ...}, ...]
633 """
634 self.logger.debug("Getting tenants from VIM filter: '%s'", str(filter_dict))
635
636 try:
637 self._reload_connection()
638
639 if self.api_version3:
640 project_class_list = self.keystone.projects.list(
641 name=filter_dict.get("name")
642 )
643 else:
644 project_class_list = self.keystone.tenants.findall(**filter_dict)
645
646 project_list = []
647
648 for project in project_class_list:
649 if filter_dict.get("id") and filter_dict["id"] != project.id:
650 continue
651
652 project_list.append(project.to_dict())
653
654 return project_list
655 except (
656 ksExceptions.ConnectionError,
657 ksExceptions.ClientException,
658 ConnectionError,
659 ) as e:
660 self._format_exception(e)
661
662 def new_tenant(self, tenant_name, tenant_description):
663 """Adds a new tenant to openstack VIM. Returns the tenant identifier"""
664 self.logger.debug("Adding a new tenant name: %s", tenant_name)
665
666 try:
667 self._reload_connection()
668
669 if self.api_version3:
670 project = self.keystone.projects.create(
671 tenant_name,
672 self.config.get("project_domain_id", "default"),
673 description=tenant_description,
674 is_domain=False,
675 )
676 else:
677 project = self.keystone.tenants.create(tenant_name, tenant_description)
678
679 return project.id
680 except (
681 ksExceptions.ConnectionError,
682 ksExceptions.ClientException,
683 ksExceptions.BadRequest,
684 ConnectionError,
685 ) as e:
686 self._format_exception(e)
687
688 def delete_tenant(self, tenant_id):
689 """Delete a tenant from openstack VIM. Returns the old tenant identifier"""
690 self.logger.debug("Deleting tenant %s from VIM", tenant_id)
691
692 try:
693 self._reload_connection()
694
695 if self.api_version3:
696 self.keystone.projects.delete(tenant_id)
697 else:
698 self.keystone.tenants.delete(tenant_id)
699
700 return tenant_id
701 except (
702 ksExceptions.ConnectionError,
703 ksExceptions.ClientException,
704 ksExceptions.NotFound,
705 ConnectionError,
706 ) as e:
707 self._format_exception(e)
708
709 def new_network(
710 self,
711 net_name,
712 net_type,
713 ip_profile=None,
714 shared=False,
715 provider_network_profile=None,
716 ):
717 """Adds a tenant network to VIM
718 Params:
719 'net_name': name of the network
720 'net_type': one of:
721 'bridge': overlay isolated network
722 'data': underlay E-LAN network for Passthrough and SRIOV interfaces
723 'ptp': underlay E-LINE network for Passthrough and SRIOV interfaces.
724 'ip_profile': is a dict containing the IP parameters of the network
725 'ip_version': can be "IPv4" or "IPv6" (Currently only IPv4 is implemented)
726 'subnet_address': ip_prefix_schema, that is X.X.X.X/Y
727 'gateway_address': (Optional) ip_schema, that is X.X.X.X
728 'dns_address': (Optional) comma separated list of ip_schema, e.g. X.X.X.X[,X,X,X,X]
729 'dhcp_enabled': True or False
730 'dhcp_start_address': ip_schema, first IP to grant
731 'dhcp_count': number of IPs to grant.
732 'shared': if this network can be seen/use by other tenants/organization
733 'provider_network_profile': (optional) contains {segmentation-id: vlan, network-type: vlan|vxlan,
734 physical-network: physnet-label}
735 Returns a tuple with the network identifier and created_items, or raises an exception on error
736 created_items can be None or a dictionary where this method can include key-values that will be passed to
737 the method delete_network. Can be used to store created segments, created l2gw connections, etc.
738 Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
739 as not present.
740 """
741 self.logger.debug(
742 "Adding a new network to VIM name '%s', type '%s'", net_name, net_type
743 )
744 # self.logger.debug(">>>>>>>>>>>>>>>>>> IP profile %s", str(ip_profile))
745
746 try:
747 vlan = None
748
749 if provider_network_profile:
750 vlan = provider_network_profile.get("segmentation-id")
751
752 new_net = None
753 created_items = {}
754 self._reload_connection()
755 network_dict = {"name": net_name, "admin_state_up": True}
756
757 if net_type in ("data", "ptp") or provider_network_profile:
758 provider_physical_network = None
759
760 if provider_network_profile and provider_network_profile.get(
761 "physical-network"
762 ):
763 provider_physical_network = provider_network_profile.get(
764 "physical-network"
765 )
766
767 # provider-network must be one of the dataplane_physcial_netowrk if this is a list. If it is string
768 # or not declared, just ignore the checking
769 if (
770 isinstance(
771 self.config.get("dataplane_physical_net"), (tuple, list)
772 )
773 and provider_physical_network
774 not in self.config["dataplane_physical_net"]
775 ):
776 raise vimconn.VimConnConflictException(
777 "Invalid parameter 'provider-network:physical-network' "
778 "for network creation. '{}' is not one of the declared "
779 "list at VIM_config:dataplane_physical_net".format(
780 provider_physical_network
781 )
782 )
783
784 # use the default dataplane_physical_net
785 if not provider_physical_network:
786 provider_physical_network = self.config.get(
787 "dataplane_physical_net"
788 )
789
790 # if it is non empty list, use the first value. If it is a string use the value directly
791 if (
792 isinstance(provider_physical_network, (tuple, list))
793 and provider_physical_network
794 ):
795 provider_physical_network = provider_physical_network[0]
796
797 if not provider_physical_network:
798 raise vimconn.VimConnConflictException(
799 "missing information needed for underlay networks. Provide "
800 "'dataplane_physical_net' configuration at VIM or use the NS "
801 "instantiation parameter 'provider-network.physical-network'"
802 " for the VLD"
803 )
804
805 if not self.config.get("multisegment_support"):
806 network_dict[
807 "provider:physical_network"
808 ] = provider_physical_network
809
810 if (
811 provider_network_profile
812 and "network-type" in provider_network_profile
813 ):
814 network_dict[
815 "provider:network_type"
816 ] = provider_network_profile["network-type"]
817 else:
818 network_dict["provider:network_type"] = self.config.get(
819 "dataplane_network_type", "vlan"
820 )
821
822 if vlan:
823 network_dict["provider:segmentation_id"] = vlan
824 else:
825 # Multi-segment case
826 segment_list = []
827 segment1_dict = {
828 "provider:physical_network": "",
829 "provider:network_type": "vxlan",
830 }
831 segment_list.append(segment1_dict)
832 segment2_dict = {
833 "provider:physical_network": provider_physical_network,
834 "provider:network_type": "vlan",
835 }
836
837 if vlan:
838 segment2_dict["provider:segmentation_id"] = vlan
839 elif self.config.get("multisegment_vlan_range"):
840 vlanID = self._generate_multisegment_vlanID()
841 segment2_dict["provider:segmentation_id"] = vlanID
842
843 # else
844 # raise vimconn.VimConnConflictException(
845 # "You must provide "multisegment_vlan_range" at config dict before creating a multisegment
846 # network")
847 segment_list.append(segment2_dict)
848 network_dict["segments"] = segment_list
849
850 # VIO Specific Changes. It needs a concrete VLAN
851 if self.vim_type == "VIO" and vlan is None:
852 if self.config.get("dataplane_net_vlan_range") is None:
853 raise vimconn.VimConnConflictException(
854 "You must provide 'dataplane_net_vlan_range' in format "
855 "[start_ID - end_ID] at VIM_config for creating underlay "
856 "networks"
857 )
858
859 network_dict["provider:segmentation_id"] = self._generate_vlanID()
860
861 network_dict["shared"] = shared
862
863 if self.config.get("disable_network_port_security"):
864 network_dict["port_security_enabled"] = False
865
866 if self.config.get("neutron_availability_zone_hints"):
867 hints = self.config.get("neutron_availability_zone_hints")
868
869 if isinstance(hints, str):
870 hints = [hints]
871
872 network_dict["availability_zone_hints"] = hints
873
874 new_net = self.neutron.create_network({"network": network_dict})
875 # print new_net
876 # create subnetwork, even if there is no profile
877
878 if not ip_profile:
879 ip_profile = {}
880
881 if not ip_profile.get("subnet_address"):
882 # Fake subnet is required
883 subnet_rand = random.randint(0, 255)
884 ip_profile["subnet_address"] = "192.168.{}.0/24".format(subnet_rand)
885
886 if "ip_version" not in ip_profile:
887 ip_profile["ip_version"] = "IPv4"
888
889 subnet = {
890 "name": net_name + "-subnet",
891 "network_id": new_net["network"]["id"],
892 "ip_version": 4 if ip_profile["ip_version"] == "IPv4" else 6,
893 "cidr": ip_profile["subnet_address"],
894 }
895
896 # Gateway should be set to None if not needed. Otherwise openstack assigns one by default
897 if ip_profile.get("gateway_address"):
898 subnet["gateway_ip"] = ip_profile["gateway_address"]
899 else:
900 subnet["gateway_ip"] = None
901
902 if ip_profile.get("dns_address"):
903 subnet["dns_nameservers"] = ip_profile["dns_address"].split(";")
904
905 if "dhcp_enabled" in ip_profile:
906 subnet["enable_dhcp"] = (
907 False
908 if ip_profile["dhcp_enabled"] == "false"
909 or ip_profile["dhcp_enabled"] is False
910 else True
911 )
912
913 if ip_profile.get("dhcp_start_address"):
914 subnet["allocation_pools"] = []
915 subnet["allocation_pools"].append(dict())
916 subnet["allocation_pools"][0]["start"] = ip_profile[
917 "dhcp_start_address"
918 ]
919
920 if ip_profile.get("dhcp_count"):
921 # parts = ip_profile["dhcp_start_address"].split(".")
922 # ip_int = (int(parts[0]) << 24) + (int(parts[1]) << 16) + (int(parts[2]) << 8) + int(parts[3])
923 ip_int = int(netaddr.IPAddress(ip_profile["dhcp_start_address"]))
924 ip_int += ip_profile["dhcp_count"] - 1
925 ip_str = str(netaddr.IPAddress(ip_int))
926 subnet["allocation_pools"][0]["end"] = ip_str
927
928 # self.logger.debug(">>>>>>>>>>>>>>>>>> Subnet: %s", str(subnet))
929 self.neutron.create_subnet({"subnet": subnet})
930
931 if net_type == "data" and self.config.get("multisegment_support"):
932 if self.config.get("l2gw_support"):
933 l2gw_list = self.neutron.list_l2_gateways().get("l2_gateways", ())
934 for l2gw in l2gw_list:
935 l2gw_conn = {
936 "l2_gateway_id": l2gw["id"],
937 "network_id": new_net["network"]["id"],
938 "segmentation_id": str(vlanID),
939 }
940 new_l2gw_conn = self.neutron.create_l2_gateway_connection(
941 {"l2_gateway_connection": l2gw_conn}
942 )
943 created_items[
944 "l2gwconn:"
945 + str(new_l2gw_conn["l2_gateway_connection"]["id"])
946 ] = True
947
948 return new_net["network"]["id"], created_items
949 except Exception as e:
950 # delete l2gw connections (if any) before deleting the network
951 for k, v in created_items.items():
952 if not v: # skip already deleted
953 continue
954
955 try:
956 k_item, _, k_id = k.partition(":")
957
958 if k_item == "l2gwconn":
959 self.neutron.delete_l2_gateway_connection(k_id)
960 except Exception as e2:
961 self.logger.error(
962 "Error deleting l2 gateway connection: {}: {}".format(
963 type(e2).__name__, e2
964 )
965 )
966
967 if new_net:
968 self.neutron.delete_network(new_net["network"]["id"])
969
970 self._format_exception(e)
971
972 def get_network_list(self, filter_dict={}):
973 """Obtain tenant networks of VIM
974 Filter_dict can be:
975 name: network name
976 id: network uuid
977 shared: boolean
978 tenant_id: tenant
979 admin_state_up: boolean
980 status: 'ACTIVE'
981 Returns the network list of dictionaries
982 """
983 self.logger.debug("Getting network from VIM filter: '%s'", str(filter_dict))
984
985 try:
986 self._reload_connection()
987 filter_dict_os = filter_dict.copy()
988
989 if self.api_version3 and "tenant_id" in filter_dict_os:
990 # TODO check
991 filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")
992
993 net_dict = self.neutron.list_networks(**filter_dict_os)
994 net_list = net_dict["networks"]
995 self.__net_os2mano(net_list)
996
997 return net_list
998 except (
999 neExceptions.ConnectionFailed,
1000 ksExceptions.ClientException,
1001 neExceptions.NeutronException,
1002 ConnectionError,
1003 ) as e:
1004 self._format_exception(e)
1005
1006 def get_network(self, net_id):
1007 """Obtain details of network from VIM
1008 Returns the network information from a network id"""
1009 self.logger.debug(" Getting tenant network %s from VIM", net_id)
1010 filter_dict = {"id": net_id}
1011 net_list = self.get_network_list(filter_dict)
1012
1013 if len(net_list) == 0:
1014 raise vimconn.VimConnNotFoundException(
1015 "Network '{}' not found".format(net_id)
1016 )
1017 elif len(net_list) > 1:
1018 raise vimconn.VimConnConflictException(
1019 "Found more than one network with this criteria"
1020 )
1021
1022 net = net_list[0]
1023 subnets = []
1024 for subnet_id in net.get("subnets", ()):
1025 try:
1026 subnet = self.neutron.show_subnet(subnet_id)
1027 except Exception as e:
1028 self.logger.error(
1029 "osconnector.get_network(): Error getting subnet %s %s"
1030 % (net_id, str(e))
1031 )
1032 subnet = {"id": subnet_id, "fault": str(e)}
1033
1034 subnets.append(subnet)
1035
1036 net["subnets"] = subnets
1037 net["encapsulation"] = net.get("provider:network_type")
1038 net["encapsulation_type"] = net.get("provider:network_type")
1039 net["segmentation_id"] = net.get("provider:segmentation_id")
1040 net["encapsulation_id"] = net.get("provider:segmentation_id")
1041
1042 return net
1043
1044 def delete_network(self, net_id, created_items=None):
1045 """
1046 Removes a tenant network from VIM and its associated elements
1047 :param net_id: VIM identifier of the network, provided by method new_network
1048 :param created_items: dictionary with extra items to be deleted. provided by method new_network
1049 Returns the network identifier or raises an exception upon error or when network is not found
1050 """
1051 self.logger.debug("Deleting network '%s' from VIM", net_id)
1052
1053 if created_items is None:
1054 created_items = {}
1055
1056 try:
1057 self._reload_connection()
1058 # delete l2gw connections (if any) before deleting the network
1059 for k, v in created_items.items():
1060 if not v: # skip already deleted
1061 continue
1062
1063 try:
1064 k_item, _, k_id = k.partition(":")
1065 if k_item == "l2gwconn":
1066 self.neutron.delete_l2_gateway_connection(k_id)
1067 except Exception as e:
1068 self.logger.error(
1069 "Error deleting l2 gateway connection: {}: {}".format(
1070 type(e).__name__, e
1071 )
1072 )
1073
1074 # delete VM ports attached to this networks before the network
1075 ports = self.neutron.list_ports(network_id=net_id)
1076 for p in ports["ports"]:
1077 try:
1078 self.neutron.delete_port(p["id"])
1079 except Exception as e:
1080 self.logger.error("Error deleting port %s: %s", p["id"], str(e))
1081
1082 self.neutron.delete_network(net_id)
1083
1084 return net_id
1085 except (
1086 neExceptions.ConnectionFailed,
1087 neExceptions.NetworkNotFoundClient,
1088 neExceptions.NeutronException,
1089 ksExceptions.ClientException,
1090 neExceptions.NeutronException,
1091 ConnectionError,
1092 ) as e:
1093 self._format_exception(e)
1094
1095 def refresh_nets_status(self, net_list):
1096 """Get the status of the networks
1097 Params: the list of network identifiers
1098 Returns a dictionary with:
1099 net_id: #VIM id of this network
1100 status: #Mandatory. Text with one of:
1101 # DELETED (not found at vim)
1102 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
1103 # OTHER (Vim reported other status not understood)
1104 # ERROR (VIM indicates an ERROR status)
1105 # ACTIVE, INACTIVE, DOWN (admin down),
1106 # BUILD (on building process)
1107 #
1108 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
1109 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
1110 """
1111 net_dict = {}
1112
1113 for net_id in net_list:
1114 net = {}
1115
1116 try:
1117 net_vim = self.get_network(net_id)
1118
1119 if net_vim["status"] in netStatus2manoFormat:
1120 net["status"] = netStatus2manoFormat[net_vim["status"]]
1121 else:
1122 net["status"] = "OTHER"
1123 net["error_msg"] = "VIM status reported " + net_vim["status"]
1124
1125 if net["status"] == "ACTIVE" and not net_vim["admin_state_up"]:
1126 net["status"] = "DOWN"
1127
1128 net["vim_info"] = self.serialize(net_vim)
1129
1130 if net_vim.get("fault"): # TODO
1131 net["error_msg"] = str(net_vim["fault"])
1132 except vimconn.VimConnNotFoundException as e:
1133 self.logger.error("Exception getting net status: %s", str(e))
1134 net["status"] = "DELETED"
1135 net["error_msg"] = str(e)
1136 except vimconn.VimConnException as e:
1137 self.logger.error("Exception getting net status: %s", str(e))
1138 net["status"] = "VIM_ERROR"
1139 net["error_msg"] = str(e)
1140 net_dict[net_id] = net
1141 return net_dict
1142
1143 def get_flavor(self, flavor_id):
1144 """Obtain flavor details from the VIM. Returns the flavor dict details"""
1145 self.logger.debug("Getting flavor '%s'", flavor_id)
1146
1147 try:
1148 self._reload_connection()
1149 flavor = self.nova.flavors.find(id=flavor_id)
1150 # TODO parse input and translate to VIM format (openmano_schemas.new_vminstance_response_schema)
1151
1152 return flavor.to_dict()
1153 except (
1154 nvExceptions.NotFound,
1155 nvExceptions.ClientException,
1156 ksExceptions.ClientException,
1157 ConnectionError,
1158 ) as e:
1159 self._format_exception(e)
1160
    def get_flavor_id_from_data(self, flavor_dict):
        """Obtain flavor id that match the flavor description
        Returns the flavor_id or raises a vimconnNotFoundException
        flavor_dict: contains the required ram, vcpus, disk
        If 'use_existing_flavors' is set to True at config, the closer flavor that provides same or more ram, vcpus
        and disk is returned. Otherwise a flavor with exactly same ram, vcpus and disk is returned or a
        vimconnNotFoundException is raised
        """
        # exact_match is disabled only when the VIM config opts into reusing flavors
        exact_match = False if self.config.get("use_existing_flavors") else True

        try:
            self._reload_connection()
            flavor_candidate_id = None
            # NOTE(review): sentinel is a 3-tuple while flavor_data below is a
            # 5-tuple; Python compares tuples lexicographically, so this acts as
            # an "infinity" marker as long as ram stays below 10000 — confirm.
            flavor_candidate_data = (10000, 10000, 10000)
            # target resources, in priority order for the lexicographic comparison
            flavor_target = (
                flavor_dict["ram"],
                flavor_dict["vcpus"],
                flavor_dict["disk"],
                flavor_dict.get("ephemeral", 0),
                flavor_dict.get("swap", 0),
            )
            # numa=None
            extended = flavor_dict.get("extended", {})
            if extended:
                # TODO
                raise vimconn.VimConnNotFoundException(
                    "Flavor with EPA still not implemented"
                )
            # if len(numas) > 1:
            #     raise vimconn.VimConnNotFoundException("Cannot find any flavor with more than one numa")
            # numa=numas[0]
            # numas = extended.get("numas")
            for flavor in self.nova.flavors.list():
                epa = flavor.get_keys()

                # flavors carrying extra specs (EPA) are skipped: matching them
                # is not implemented
                if epa:
                    continue
                # TODO

                flavor_data = (
                    flavor.ram,
                    flavor.vcpus,
                    flavor.disk,
                    flavor.ephemeral,
                    # some deployments report swap as "" instead of 0
                    flavor.swap if isinstance(flavor.swap, int) else 0,
                )
                if flavor_data == flavor_target:
                    return flavor.id
                elif (
                    not exact_match
                    and flavor_target < flavor_data < flavor_candidate_data
                ):
                    # keep the smallest flavor (lexicographically) that still
                    # provides more than the target
                    flavor_candidate_id = flavor.id
                    flavor_candidate_data = flavor_data

            if not exact_match and flavor_candidate_id:
                return flavor_candidate_id

            raise vimconn.VimConnNotFoundException(
                "Cannot find any flavor matching '{}'".format(flavor_dict)
            )
        except (
            nvExceptions.NotFound,
            nvExceptions.ClientException,
            ksExceptions.ClientException,
            ConnectionError,
        ) as e:
            self._format_exception(e)
1229
1230 @staticmethod
1231 def process_resource_quota(quota: dict, prefix: str, extra_specs: dict) -> None:
1232 """Process resource quota and fill up extra_specs.
1233 Args:
1234 quota (dict): Keeping the quota of resurces
1235 prefix (str) Prefix
1236 extra_specs (dict) Dict to be filled to be used during flavor creation
1237
1238 """
1239 if "limit" in quota:
1240 extra_specs["quota:" + prefix + "_limit"] = quota["limit"]
1241
1242 if "reserve" in quota:
1243 extra_specs["quota:" + prefix + "_reservation"] = quota["reserve"]
1244
1245 if "shares" in quota:
1246 extra_specs["quota:" + prefix + "_shares_level"] = "custom"
1247 extra_specs["quota:" + prefix + "_shares_share"] = quota["shares"]
1248
1249 @staticmethod
1250 def process_numa_memory(
1251 numa: dict, node_id: Optional[int], extra_specs: dict
1252 ) -> None:
1253 """Set the memory in extra_specs.
1254 Args:
1255 numa (dict): A dictionary which includes numa information
1256 node_id (int): ID of numa node
1257 extra_specs (dict): To be filled.
1258
1259 """
1260 if not numa.get("memory"):
1261 return
1262 memory_mb = numa["memory"] * 1024
1263 memory = "hw:numa_mem.{}".format(node_id)
1264 extra_specs[memory] = int(memory_mb)
1265
1266 @staticmethod
1267 def process_numa_vcpu(numa: dict, node_id: int, extra_specs: dict) -> None:
1268 """Set the cpu in extra_specs.
1269 Args:
1270 numa (dict): A dictionary which includes numa information
1271 node_id (int): ID of numa node
1272 extra_specs (dict): To be filled.
1273
1274 """
1275 if not numa.get("vcpu"):
1276 return
1277 vcpu = numa["vcpu"]
1278 cpu = "hw:numa_cpus.{}".format(node_id)
1279 vcpu = ",".join(map(str, vcpu))
1280 extra_specs[cpu] = vcpu
1281
1282 @staticmethod
1283 def process_numa_paired_threads(numa: dict, extra_specs: dict) -> Optional[int]:
1284 """Fill up extra_specs if numa has paired-threads.
1285 Args:
1286 numa (dict): A dictionary which includes numa information
1287 extra_specs (dict): To be filled.
1288
1289 Returns:
1290 threads (int) Number of virtual cpus
1291
1292 """
1293 if not numa.get("paired-threads"):
1294 return
1295
1296 # cpu_thread_policy "require" implies that compute node must have an STM architecture
1297 threads = numa["paired-threads"] * 2
1298 extra_specs["hw:cpu_thread_policy"] = "require"
1299 extra_specs["hw:cpu_policy"] = "dedicated"
1300 return threads
1301
1302 @staticmethod
1303 def process_numa_cores(numa: dict, extra_specs: dict) -> Optional[int]:
1304 """Fill up extra_specs if numa has cores.
1305 Args:
1306 numa (dict): A dictionary which includes numa information
1307 extra_specs (dict): To be filled.
1308
1309 Returns:
1310 cores (int) Number of virtual cpus
1311
1312 """
1313 # cpu_thread_policy "isolate" implies that the host must not have an SMT
1314 # architecture, or a non-SMT architecture will be emulated
1315 if not numa.get("cores"):
1316 return
1317 cores = numa["cores"]
1318 extra_specs["hw:cpu_thread_policy"] = "isolate"
1319 extra_specs["hw:cpu_policy"] = "dedicated"
1320 return cores
1321
1322 @staticmethod
1323 def process_numa_threads(numa: dict, extra_specs: dict) -> Optional[int]:
1324 """Fill up extra_specs if numa has threads.
1325 Args:
1326 numa (dict): A dictionary which includes numa information
1327 extra_specs (dict): To be filled.
1328
1329 Returns:
1330 threads (int) Number of virtual cpus
1331
1332 """
1333 # cpu_thread_policy "prefer" implies that the host may or may not have an SMT architecture
1334 if not numa.get("threads"):
1335 return
1336 threads = numa["threads"]
1337 extra_specs["hw:cpu_thread_policy"] = "prefer"
1338 extra_specs["hw:cpu_policy"] = "dedicated"
1339 return threads
1340
1341 def _process_numa_parameters_of_flavor(
1342 self, numas: List, extra_specs: Dict
1343 ) -> None:
1344 """Process numa parameters and fill up extra_specs.
1345
1346 Args:
1347 numas (list): List of dictionary which includes numa information
1348 extra_specs (dict): To be filled.
1349
1350 """
1351 numa_nodes = len(numas)
1352 extra_specs["hw:numa_nodes"] = str(numa_nodes)
1353 cpu_cores, cpu_threads = 0, 0
1354
1355 if self.vim_type == "VIO":
1356 self.process_vio_numa_nodes(numa_nodes, extra_specs)
1357
1358 for numa in numas:
1359 if "id" in numa:
1360 node_id = numa["id"]
1361 # overwrite ram and vcpus
1362 # check if key "memory" is present in numa else use ram value at flavor
1363 self.process_numa_memory(numa, node_id, extra_specs)
1364 self.process_numa_vcpu(numa, node_id, extra_specs)
1365
1366 # See for reference: https://specs.openstack.org/openstack/nova-specs/specs/mitaka/implemented/virt-driver-cpu-thread-pinning.html
1367 extra_specs["hw:cpu_sockets"] = str(numa_nodes)
1368
1369 if "paired-threads" in numa:
1370 threads = self.process_numa_paired_threads(numa, extra_specs)
1371 cpu_threads += threads
1372
1373 elif "cores" in numa:
1374 cores = self.process_numa_cores(numa, extra_specs)
1375 cpu_cores += cores
1376
1377 elif "threads" in numa:
1378 threads = self.process_numa_threads(numa, extra_specs)
1379 cpu_threads += threads
1380
1381 if cpu_cores:
1382 extra_specs["hw:cpu_cores"] = str(cpu_cores)
1383 if cpu_threads:
1384 extra_specs["hw:cpu_threads"] = str(cpu_threads)
1385
1386 @staticmethod
1387 def process_vio_numa_nodes(numa_nodes: int, extra_specs: Dict) -> None:
1388 """According to number of numa nodes, updates the extra_specs for VIO.
1389
1390 Args:
1391
1392 numa_nodes (int): List keeps the numa node numbers
1393 extra_specs (dict): Extra specs dict to be updated
1394
1395 """
1396 # If there are several numas, we do not define specific affinity.
1397 extra_specs["vmware:latency_sensitivity_level"] = "high"
1398
1399 def _change_flavor_name(
1400 self, name: str, name_suffix: int, flavor_data: dict
1401 ) -> str:
1402 """Change the flavor name if the name already exists.
1403
1404 Args:
1405 name (str): Flavor name to be checked
1406 name_suffix (int): Suffix to be appended to name
1407 flavor_data (dict): Flavor dict
1408
1409 Returns:
1410 name (str): New flavor name to be used
1411
1412 """
1413 # Get used names
1414 fl = self.nova.flavors.list()
1415 fl_names = [f.name for f in fl]
1416
1417 while name in fl_names:
1418 name_suffix += 1
1419 name = flavor_data["name"] + "-" + str(name_suffix)
1420
1421 return name
1422
1423 def _process_extended_config_of_flavor(
1424 self, extended: dict, extra_specs: dict
1425 ) -> None:
1426 """Process the extended dict to fill up extra_specs.
1427 Args:
1428
1429 extended (dict): Keeping the extra specification of flavor
1430 extra_specs (dict) Dict to be filled to be used during flavor creation
1431
1432 """
1433 quotas = {
1434 "cpu-quota": "cpu",
1435 "mem-quota": "memory",
1436 "vif-quota": "vif",
1437 "disk-io-quota": "disk_io",
1438 }
1439
1440 page_sizes = {
1441 "LARGE": "large",
1442 "SMALL": "small",
1443 "SIZE_2MB": "2MB",
1444 "SIZE_1GB": "1GB",
1445 "PREFER_LARGE": "any",
1446 }
1447
1448 policies = {
1449 "cpu-pinning-policy": "hw:cpu_policy",
1450 "cpu-thread-pinning-policy": "hw:cpu_thread_policy",
1451 "mem-policy": "hw:numa_mempolicy",
1452 }
1453
1454 numas = extended.get("numas")
1455 if numas:
1456 self._process_numa_parameters_of_flavor(numas, extra_specs)
1457
1458 for quota, item in quotas.items():
1459 if quota in extended.keys():
1460 self.process_resource_quota(extended.get(quota), item, extra_specs)
1461
1462 # Set the mempage size as specified in the descriptor
1463 if extended.get("mempage-size"):
1464 if extended["mempage-size"] in page_sizes.keys():
1465 extra_specs["hw:mem_page_size"] = page_sizes[extended["mempage-size"]]
1466 else:
1467 # Normally, validations in NBI should not allow to this condition.
1468 self.logger.debug(
1469 "Invalid mempage-size %s. Will be ignored",
1470 extended.get("mempage-size"),
1471 )
1472
1473 for policy, hw_policy in policies.items():
1474 if extended.get(policy):
1475 extra_specs[hw_policy] = extended[policy].lower()
1476
1477 @staticmethod
1478 def _get_flavor_details(flavor_data: dict) -> Tuple:
1479 """Returns the details of flavor
1480 Args:
1481 flavor_data (dict): Dictionary that includes required flavor details
1482
1483 Returns:
1484 ram, vcpus, extra_specs, extended (tuple): Main items of required flavor
1485
1486 """
1487 return (
1488 flavor_data.get("ram", 64),
1489 flavor_data.get("vcpus", 1),
1490 {},
1491 flavor_data.get("extended"),
1492 )
1493
    def new_flavor(self, flavor_data: dict, change_name_if_used: bool = True) -> str:
        """Adds a tenant flavor to openstack VIM.
        if change_name_if_used is True, it will change name in case of conflict,
        because it is not supported name repetition.

        Args:
            flavor_data (dict): Flavor details to be processed
            change_name_if_used (bool): Change name in case of conflict

        Returns:
            flavor_id (str): flavor identifier

        """
        self.logger.debug("Adding flavor '%s'", str(flavor_data))
        retry = 0
        max_retries = 3
        name_suffix = 0

        try:
            name = flavor_data["name"]
            while retry < max_retries:
                retry += 1
                try:
                    self._reload_connection()

                    # pick a name not already used at the VIM; a "-<n>" suffix
                    # is appended while the candidate name is taken
                    if change_name_if_used:
                        name = self._change_flavor_name(name, name_suffix, flavor_data)

                    ram, vcpus, extra_specs, extended = self._get_flavor_details(
                        flavor_data
                    )
                    if extended:
                        self._process_extended_config_of_flavor(extended, extra_specs)

                    # Create flavor

                    new_flavor = self.nova.flavors.create(
                        name=name,
                        ram=ram,
                        vcpus=vcpus,
                        disk=flavor_data.get("disk", 0),
                        ephemeral=flavor_data.get("ephemeral", 0),
                        swap=flavor_data.get("swap", 0),
                        is_public=flavor_data.get("is_public", True),
                    )

                    # Add metadata
                    if extra_specs:
                        new_flavor.set_keys(extra_specs)

                    return new_flavor.id

                except nvExceptions.Conflict as e:
                    # name clashed with a flavor created concurrently: retry so
                    # _change_flavor_name can pick a fresh name
                    if change_name_if_used and retry < max_retries:
                        continue

                    self._format_exception(e)

        except (
            ksExceptions.ClientException,
            nvExceptions.ClientException,
            ConnectionError,
            KeyError,
        ) as e:
            self._format_exception(e)
1559
1560 def delete_flavor(self, flavor_id):
1561 """Deletes a tenant flavor from openstack VIM. Returns the old flavor_id"""
1562 try:
1563 self._reload_connection()
1564 self.nova.flavors.delete(flavor_id)
1565
1566 return flavor_id
1567 # except nvExceptions.BadRequest as e:
1568 except (
1569 nvExceptions.NotFound,
1570 ksExceptions.ClientException,
1571 nvExceptions.ClientException,
1572 ConnectionError,
1573 ) as e:
1574 self._format_exception(e)
1575
1576 def new_image(self, image_dict):
1577 """
1578 Adds a tenant image to VIM. imge_dict is a dictionary with:
1579 name: name
1580 disk_format: qcow2, vhd, vmdk, raw (by default), ...
1581 location: path or URI
1582 public: "yes" or "no"
1583 metadata: metadata of the image
1584 Returns the image_id
1585 """
1586 retry = 0
1587 max_retries = 3
1588
1589 while retry < max_retries:
1590 retry += 1
1591 try:
1592 self._reload_connection()
1593
1594 # determine format http://docs.openstack.org/developer/glance/formats.html
1595 if "disk_format" in image_dict:
1596 disk_format = image_dict["disk_format"]
1597 else: # autodiscover based on extension
1598 if image_dict["location"].endswith(".qcow2"):
1599 disk_format = "qcow2"
1600 elif image_dict["location"].endswith(".vhd"):
1601 disk_format = "vhd"
1602 elif image_dict["location"].endswith(".vmdk"):
1603 disk_format = "vmdk"
1604 elif image_dict["location"].endswith(".vdi"):
1605 disk_format = "vdi"
1606 elif image_dict["location"].endswith(".iso"):
1607 disk_format = "iso"
1608 elif image_dict["location"].endswith(".aki"):
1609 disk_format = "aki"
1610 elif image_dict["location"].endswith(".ari"):
1611 disk_format = "ari"
1612 elif image_dict["location"].endswith(".ami"):
1613 disk_format = "ami"
1614 else:
1615 disk_format = "raw"
1616
1617 self.logger.debug(
1618 "new_image: '%s' loading from '%s'",
1619 image_dict["name"],
1620 image_dict["location"],
1621 )
1622 if self.vim_type == "VIO":
1623 container_format = "bare"
1624 if "container_format" in image_dict:
1625 container_format = image_dict["container_format"]
1626
1627 new_image = self.glance.images.create(
1628 name=image_dict["name"],
1629 container_format=container_format,
1630 disk_format=disk_format,
1631 )
1632 else:
1633 new_image = self.glance.images.create(name=image_dict["name"])
1634
1635 if image_dict["location"].startswith("http"):
1636 # TODO there is not a method to direct download. It must be downloaded locally with requests
1637 raise vimconn.VimConnNotImplemented("Cannot create image from URL")
1638 else: # local path
1639 with open(image_dict["location"]) as fimage:
1640 self.glance.images.upload(new_image.id, fimage)
1641 # new_image = self.glancev1.images.create(name=image_dict["name"], is_public=
1642 # image_dict.get("public","yes")=="yes",
1643 # container_format="bare", data=fimage, disk_format=disk_format)
1644
1645 metadata_to_load = image_dict.get("metadata")
1646
1647 # TODO location is a reserved word for current openstack versions. fixed for VIO please check
1648 # for openstack
1649 if self.vim_type == "VIO":
1650 metadata_to_load["upload_location"] = image_dict["location"]
1651 else:
1652 metadata_to_load["location"] = image_dict["location"]
1653
1654 self.glance.images.update(new_image.id, **metadata_to_load)
1655
1656 return new_image.id
1657 except (
1658 nvExceptions.Conflict,
1659 ksExceptions.ClientException,
1660 nvExceptions.ClientException,
1661 ) as e:
1662 self._format_exception(e)
1663 except (
1664 HTTPException,
1665 gl1Exceptions.HTTPException,
1666 gl1Exceptions.CommunicationError,
1667 ConnectionError,
1668 ) as e:
1669 if retry == max_retries:
1670 continue
1671
1672 self._format_exception(e)
1673 except IOError as e: # can not open the file
1674 raise vimconn.VimConnConnectionException(
1675 "{}: {} for {}".format(type(e).__name__, e, image_dict["location"]),
1676 http_code=vimconn.HTTP_Bad_Request,
1677 )
1678
1679 def delete_image(self, image_id):
1680 """Deletes a tenant image from openstack VIM. Returns the old id"""
1681 try:
1682 self._reload_connection()
1683 self.glance.images.delete(image_id)
1684
1685 return image_id
1686 except (
1687 nvExceptions.NotFound,
1688 ksExceptions.ClientException,
1689 nvExceptions.ClientException,
1690 gl1Exceptions.CommunicationError,
1691 gl1Exceptions.HTTPNotFound,
1692 ConnectionError,
1693 ) as e: # TODO remove
1694 self._format_exception(e)
1695
1696 def get_image_id_from_path(self, path):
1697 """Get the image id from image path in the VIM database. Returns the image_id"""
1698 try:
1699 self._reload_connection()
1700 images = self.glance.images.list()
1701
1702 for image in images:
1703 if image.metadata.get("location") == path:
1704 return image.id
1705
1706 raise vimconn.VimConnNotFoundException(
1707 "image with location '{}' not found".format(path)
1708 )
1709 except (
1710 ksExceptions.ClientException,
1711 nvExceptions.ClientException,
1712 gl1Exceptions.CommunicationError,
1713 ConnectionError,
1714 ) as e:
1715 self._format_exception(e)
1716
1717 def get_image_list(self, filter_dict={}):
1718 """Obtain tenant images from VIM
1719 Filter_dict can be:
1720 id: image id
1721 name: image name
1722 checksum: image checksum
1723 Returns the image list of dictionaries:
1724 [{<the fields at Filter_dict plus some VIM specific>}, ...]
1725 List can be empty
1726 """
1727 self.logger.debug("Getting image list from VIM filter: '%s'", str(filter_dict))
1728
1729 try:
1730 self._reload_connection()
1731 # filter_dict_os = filter_dict.copy()
1732 # First we filter by the available filter fields: name, id. The others are removed.
1733 image_list = self.glance.images.list()
1734 filtered_list = []
1735
1736 for image in image_list:
1737 try:
1738 if filter_dict.get("name") and image["name"] != filter_dict["name"]:
1739 continue
1740
1741 if filter_dict.get("id") and image["id"] != filter_dict["id"]:
1742 continue
1743
1744 if (
1745 filter_dict.get("checksum")
1746 and image["checksum"] != filter_dict["checksum"]
1747 ):
1748 continue
1749
1750 filtered_list.append(image.copy())
1751 except gl1Exceptions.HTTPNotFound:
1752 pass
1753
1754 return filtered_list
1755 except (
1756 ksExceptions.ClientException,
1757 nvExceptions.ClientException,
1758 gl1Exceptions.CommunicationError,
1759 ConnectionError,
1760 ) as e:
1761 self._format_exception(e)
1762
1763 def __wait_for_vm(self, vm_id, status):
1764 """wait until vm is in the desired status and return True.
1765 If the VM gets in ERROR status, return false.
1766 If the timeout is reached generate an exception"""
1767 elapsed_time = 0
1768 while elapsed_time < server_timeout:
1769 vm_status = self.nova.servers.get(vm_id).status
1770
1771 if vm_status == status:
1772 return True
1773
1774 if vm_status == "ERROR":
1775 return False
1776
1777 time.sleep(5)
1778 elapsed_time += 5
1779
1780 # if we exceeded the timeout rollback
1781 if elapsed_time >= server_timeout:
1782 raise vimconn.VimConnException(
1783 "Timeout waiting for instance " + vm_id + " to get " + status,
1784 http_code=vimconn.HTTP_Request_Timeout,
1785 )
1786
1787 def _get_openstack_availablity_zones(self):
1788 """
1789 Get from openstack availability zones available
1790 :return:
1791 """
1792 try:
1793 openstack_availability_zone = self.nova.availability_zones.list()
1794 openstack_availability_zone = [
1795 str(zone.zoneName)
1796 for zone in openstack_availability_zone
1797 if zone.zoneName != "internal"
1798 ]
1799
1800 return openstack_availability_zone
1801 except Exception:
1802 return None
1803
1804 def _set_availablity_zones(self):
1805 """
1806 Set vim availablity zone
1807 :return:
1808 """
1809 if "availability_zone" in self.config:
1810 vim_availability_zones = self.config.get("availability_zone")
1811
1812 if isinstance(vim_availability_zones, str):
1813 self.availability_zone = [vim_availability_zones]
1814 elif isinstance(vim_availability_zones, list):
1815 self.availability_zone = vim_availability_zones
1816 else:
1817 self.availability_zone = self._get_openstack_availablity_zones()
1818
1819 def _get_vm_availability_zone(
1820 self, availability_zone_index, availability_zone_list
1821 ):
1822 """
1823 Return thge availability zone to be used by the created VM.
1824 :return: The VIM availability zone to be used or None
1825 """
1826 if availability_zone_index is None:
1827 if not self.config.get("availability_zone"):
1828 return None
1829 elif isinstance(self.config.get("availability_zone"), str):
1830 return self.config["availability_zone"]
1831 else:
1832 # TODO consider using a different parameter at config for default AV and AV list match
1833 return self.config["availability_zone"][0]
1834
1835 vim_availability_zones = self.availability_zone
1836 # check if VIM offer enough availability zones describe in the VNFD
1837 if vim_availability_zones and len(availability_zone_list) <= len(
1838 vim_availability_zones
1839 ):
1840 # check if all the names of NFV AV match VIM AV names
1841 match_by_index = False
1842 for av in availability_zone_list:
1843 if av not in vim_availability_zones:
1844 match_by_index = True
1845 break
1846
1847 if match_by_index:
1848 return vim_availability_zones[availability_zone_index]
1849 else:
1850 return availability_zone_list[availability_zone_index]
1851 else:
1852 raise vimconn.VimConnConflictException(
1853 "No enough availability zones at VIM for this deployment"
1854 )
1855
1856 def _prepare_port_dict_security_groups(self, net: dict, port_dict: dict) -> None:
1857 """Fill up the security_groups in the port_dict.
1858
1859 Args:
1860 net (dict): Network details
1861 port_dict (dict): Port details
1862
1863 """
1864 if (
1865 self.config.get("security_groups")
1866 and net.get("port_security") is not False
1867 and not self.config.get("no_port_security_extension")
1868 ):
1869 if not self.security_groups_id:
1870 self._get_ids_from_name()
1871
1872 port_dict["security_groups"] = self.security_groups_id
1873
1874 def _prepare_port_dict_binding(self, net: dict, port_dict: dict) -> None:
1875 """Fill up the network binding depending on network type in the port_dict.
1876
1877 Args:
1878 net (dict): Network details
1879 port_dict (dict): Port details
1880
1881 """
1882 if not net.get("type"):
1883 raise vimconn.VimConnException("Type is missing in the network details.")
1884
1885 if net["type"] == "virtual":
1886 pass
1887
1888 # For VF
1889 elif net["type"] == "VF" or net["type"] == "SR-IOV":
1890 port_dict["binding:vnic_type"] = "direct"
1891
1892 # VIO specific Changes
1893 if self.vim_type == "VIO":
1894 # Need to create port with port_security_enabled = False and no-security-groups
1895 port_dict["port_security_enabled"] = False
1896 port_dict["provider_security_groups"] = []
1897 port_dict["security_groups"] = []
1898
1899 else:
1900 # For PT PCI-PASSTHROUGH
1901 port_dict["binding:vnic_type"] = "direct-physical"
1902
1903 @staticmethod
1904 def _set_fixed_ip(new_port: dict, net: dict) -> None:
1905 """Set the "ip" parameter in net dictionary.
1906
1907 Args:
1908 new_port (dict): New created port
1909 net (dict): Network details
1910
1911 """
1912 fixed_ips = new_port["port"].get("fixed_ips")
1913
1914 if fixed_ips:
1915 net["ip"] = fixed_ips[0].get("ip_address")
1916 else:
1917 net["ip"] = None
1918
1919 @staticmethod
1920 def _prepare_port_dict_mac_ip_addr(net: dict, port_dict: dict) -> None:
1921 """Fill up the mac_address and fixed_ips in port_dict.
1922
1923 Args:
1924 net (dict): Network details
1925 port_dict (dict): Port details
1926
1927 """
1928 if net.get("mac_address"):
1929 port_dict["mac_address"] = net["mac_address"]
1930
1931 if net.get("ip_address"):
1932 port_dict["fixed_ips"] = [{"ip_address": net["ip_address"]}]
1933 # TODO add "subnet_id": <subnet_id>
1934
1935 def _create_new_port(self, port_dict: dict, created_items: dict, net: dict) -> Dict:
1936 """Create new port using neutron.
1937
1938 Args:
1939 port_dict (dict): Port details
1940 created_items (dict): All created items
1941 net (dict): Network details
1942
1943 Returns:
1944 new_port (dict): New created port
1945
1946 """
1947 new_port = self.neutron.create_port({"port": port_dict})
1948 created_items["port:" + str(new_port["port"]["id"])] = True
1949 net["mac_adress"] = new_port["port"]["mac_address"]
1950 net["vim_id"] = new_port["port"]["id"]
1951
1952 return new_port
1953
1954 def _create_port(
1955 self, net: dict, name: str, created_items: dict
1956 ) -> Tuple[dict, dict]:
1957 """Create port using net details.
1958
1959 Args:
1960 net (dict): Network details
1961 name (str): Name to be used as network name if net dict does not include name
1962 created_items (dict): All created items
1963
1964 Returns:
1965 new_port, port New created port, port dictionary
1966
1967 """
1968
1969 port_dict = {
1970 "network_id": net["net_id"],
1971 "name": net.get("name"),
1972 "admin_state_up": True,
1973 }
1974
1975 if not port_dict["name"]:
1976 port_dict["name"] = name
1977
1978 self._prepare_port_dict_security_groups(net, port_dict)
1979
1980 self._prepare_port_dict_binding(net, port_dict)
1981
1982 vimconnector._prepare_port_dict_mac_ip_addr(net, port_dict)
1983
1984 new_port = self._create_new_port(port_dict, created_items, net)
1985
1986 vimconnector._set_fixed_ip(new_port, net)
1987
1988 port = {"port-id": new_port["port"]["id"]}
1989
1990 if float(self.nova.api_version.get_string()) >= 2.32:
1991 port["tag"] = new_port["port"]["name"]
1992
1993 return new_port, port
1994
    def _prepare_network_for_vminstance(
        self,
        name: str,
        net_list: list,
        created_items: dict,
        net_list_vim: list,
        external_network: list,
        no_secured_ports: list,
    ) -> None:
        """Create port and fill up net dictionary for new VM instance creation.

        Args:
            name (str): Name of network
            net_list (list): List of networks
            created_items (dict): All created items belongs to a VM
            net_list_vim (list): List of ports
            external_network (list): List of external-networks
            no_secured_ports (list): Port security disabled ports
        """

        self._reload_connection()

        for net in net_list:
            # Skip non-connected iface
            if not net.get("net_id"):
                continue

            new_port, port = self._create_port(net, name, created_items)

            net_list_vim.append(port)

            # nets that ask for a floating ip must abort the deploy on failure
            if net.get("floating_ip", False):
                net["exit_on_floating_ip_error"] = True
                external_network.append(net)

            # NOTE(review): assumes "use" is always present when "floating_ip"
            # is falsy — a KeyError would surface here otherwise; confirm callers
            elif net["use"] == "mgmt" and self.config.get("use_floating_ip"):
                # mgmt nets get a floating ip best-effort (no abort on failure)
                net["exit_on_floating_ip_error"] = False
                external_network.append(net)
                net["floating_ip"] = self.config.get("use_floating_ip")

            # If port security is disabled when the port has not yet been attached to the VM, then all vm traffic
            # is dropped. As a workaround we wait until the VM is active and then disable the port-security
            if net.get("port_security") is False and not self.config.get(
                "no_port_security_extension"
            ):
                no_secured_ports.append(
                    (
                        new_port["port"]["id"],
                        net.get("port_security_disable_strategy"),
                    )
                )
2046
2047 def _prepare_persistent_root_volumes(
2048 self,
2049 name: str,
2050 vm_av_zone: list,
2051 disk: dict,
2052 base_disk_index: int,
2053 block_device_mapping: dict,
2054 existing_vim_volumes: list,
2055 created_items: dict,
2056 ) -> Optional[str]:
2057 """Prepare persistent root volumes for new VM instance.
2058
2059 Args:
2060 name (str): Name of VM instance
2061 vm_av_zone (list): List of availability zones
2062 disk (dict): Disk details
2063 base_disk_index (int): Disk index
2064 block_device_mapping (dict): Block device details
2065 existing_vim_volumes (list): Existing disk details
2066 created_items (dict): All created items belongs to VM
2067
2068 Returns:
2069 boot_volume_id (str): ID of boot volume
2070
2071 """
2072 # Disk may include only vim_volume_id or only vim_id."
2073 # Use existing persistent root volume finding with volume_id or vim_id
2074 key_id = "vim_volume_id" if "vim_volume_id" in disk.keys() else "vim_id"
2075
2076 if disk.get(key_id):
2077 block_device_mapping["vd" + chr(base_disk_index)] = disk[key_id]
2078 existing_vim_volumes.append({"id": disk[key_id]})
2079
2080 else:
2081 # Create persistent root volume
2082 volume = self.cinder.volumes.create(
2083 size=disk["size"],
2084 name=name + "vd" + chr(base_disk_index),
2085 imageRef=disk["image_id"],
2086 # Make sure volume is in the same AZ as the VM to be attached to
2087 availability_zone=vm_av_zone,
2088 )
2089 boot_volume_id = volume.id
2090 created_items["volume:" + str(volume.id)] = True
2091 block_device_mapping["vd" + chr(base_disk_index)] = volume.id
2092
2093 return boot_volume_id
2094
2095 def _prepare_non_root_persistent_volumes(
2096 self,
2097 name: str,
2098 disk: dict,
2099 vm_av_zone: list,
2100 block_device_mapping: dict,
2101 base_disk_index: int,
2102 existing_vim_volumes: list,
2103 created_items: dict,
2104 ) -> None:
2105 """Prepare persistent volumes for new VM instance.
2106
2107 Args:
2108 name (str): Name of VM instance
2109 disk (dict): Disk details
2110 vm_av_zone (list): List of availability zones
2111 block_device_mapping (dict): Block device details
2112 base_disk_index (int): Disk index
2113 existing_vim_volumes (list): Existing disk details
2114 created_items (dict): All created items belongs to VM
2115 """
2116 # Non-root persistent volumes
2117 # Disk may include only vim_volume_id or only vim_id."
2118 key_id = "vim_volume_id" if "vim_volume_id" in disk.keys() else "vim_id"
2119
2120 if disk.get(key_id):
2121 # Use existing persistent volume
2122 block_device_mapping["vd" + chr(base_disk_index)] = disk[key_id]
2123 existing_vim_volumes.append({"id": disk[key_id]})
2124
2125 else:
2126 # Create persistent volume
2127 volume = self.cinder.volumes.create(
2128 size=disk["size"],
2129 name=name + "vd" + chr(base_disk_index),
2130 # Make sure volume is in the same AZ as the VM to be attached to
2131 availability_zone=vm_av_zone,
2132 )
2133 created_items["volume:" + str(volume.id)] = True
2134 block_device_mapping["vd" + chr(base_disk_index)] = volume.id
2135
    def _wait_for_created_volumes_availability(
        self, elapsed_time: int, created_items: dict
    ) -> int:
        """Wait till created volumes become available.

        Polls cinder every 5 seconds until every "volume:<id>" entry in
        created_items reports status "available" or volume_timeout is reached.

        Args:
            elapsed_time (int): Passed time while waiting
            created_items (dict): All created items belongs to VM

        Returns:
            elapsed_time (int): Time spent while waiting

        """

        while elapsed_time < volume_timeout:
            # for/else: the else clause runs only when no volume broke out of
            # the for loop, i.e. every tracked volume is already "available"
            for created_item in created_items:
                v, _, volume_id = created_item.partition(":")
                if v == "volume":
                    if self.cinder.volumes.get(volume_id).status != "available":
                        # At least one volume not ready: stop checking, wait and retry
                        break
            else:
                # All ready: break from while
                break

            time.sleep(5)
            elapsed_time += 5

        return elapsed_time
2164
    def _wait_for_existing_volumes_availability(
        self, elapsed_time: int, existing_vim_volumes: list
    ) -> int:
        """Wait till existing volumes become available.

        Polls cinder every 5 seconds until every volume in existing_vim_volumes
        reports status "available" or volume_timeout is reached.

        Args:
            elapsed_time (int): Passed time while waiting
            existing_vim_volumes (list): Existing volume details

        Returns:
            elapsed_time (int): Time spent while waiting

        """

        while elapsed_time < volume_timeout:
            # for/else: else runs only when no volume broke out of the loop
            for volume in existing_vim_volumes:
                if self.cinder.volumes.get(volume["id"]).status != "available":
                    # Not ready yet: stop checking, wait and retry
                    break
            else:  # all ready: break from while
                break

            time.sleep(5)
            elapsed_time += 5

        return elapsed_time
2190
2191 def _prepare_disk_for_vminstance(
2192 self,
2193 name: str,
2194 existing_vim_volumes: list,
2195 created_items: dict,
2196 vm_av_zone: list,
2197 disk_list: list = None,
2198 ) -> None:
2199 """Prepare all volumes for new VM instance.
2200
2201 Args:
2202 name (str): Name of Instance
2203 existing_vim_volumes (list): List of existing volumes
2204 created_items (dict): All created items belongs to VM
2205 vm_av_zone (list): VM availability zone
2206 disk_list (list): List of disks
2207
2208 """
2209 # Create additional volumes in case these are present in disk_list
2210 base_disk_index = ord("b")
2211 boot_volume_id = None
2212 elapsed_time = 0
2213
2214 block_device_mapping = {}
2215 for disk in disk_list:
2216 if "image_id" in disk:
2217 # Root persistent volume
2218 base_disk_index = ord("a")
2219 boot_volume_id = self._prepare_persistent_root_volumes(
2220 name=name,
2221 vm_av_zone=vm_av_zone,
2222 disk=disk,
2223 base_disk_index=base_disk_index,
2224 block_device_mapping=block_device_mapping,
2225 existing_vim_volumes=existing_vim_volumes,
2226 created_items=created_items,
2227 )
2228 else:
2229 # Non-root persistent volume
2230 self._prepare_non_root_persistent_volumes(
2231 name=name,
2232 disk=disk,
2233 vm_av_zone=vm_av_zone,
2234 block_device_mapping=block_device_mapping,
2235 base_disk_index=base_disk_index,
2236 existing_vim_volumes=existing_vim_volumes,
2237 created_items=created_items,
2238 )
2239 base_disk_index += 1
2240
2241 # Wait until created volumes are with status available
2242 elapsed_time = self._wait_for_created_volumes_availability(
2243 elapsed_time, created_items
2244 )
2245 # Wait until existing volumes in vim are with status available
2246 elapsed_time = self._wait_for_existing_volumes_availability(
2247 elapsed_time, existing_vim_volumes
2248 )
2249 # If we exceeded the timeout rollback
2250 if elapsed_time >= volume_timeout:
2251 raise vimconn.VimConnException(
2252 "Timeout creating volumes for instance " + name,
2253 http_code=vimconn.HTTP_Request_Timeout,
2254 )
2255 if boot_volume_id:
2256 self.cinder.volumes.set_bootable(boot_volume_id, True)
2257
2258 def _find_the_external_network_for_floating_ip(self):
2259 """Get the external network ip in order to create floating IP.
2260
2261 Returns:
2262 pool_id (str): External network pool ID
2263
2264 """
2265
2266 # Find the external network
2267 external_nets = list()
2268
2269 for net in self.neutron.list_networks()["networks"]:
2270 if net["router:external"]:
2271 external_nets.append(net)
2272
2273 if len(external_nets) == 0:
2274 raise vimconn.VimConnException(
2275 "Cannot create floating_ip automatically since "
2276 "no external network is present",
2277 http_code=vimconn.HTTP_Conflict,
2278 )
2279
2280 if len(external_nets) > 1:
2281 raise vimconn.VimConnException(
2282 "Cannot create floating_ip automatically since "
2283 "multiple external networks are present",
2284 http_code=vimconn.HTTP_Conflict,
2285 )
2286
2287 # Pool ID
2288 return external_nets[0].get("id")
2289
2290 def _neutron_create_float_ip(self, param: dict, created_items: dict) -> None:
2291 """Trigger neutron to create a new floating IP using external network ID.
2292
2293 Args:
2294 param (dict): Input parameters to create a floating IP
2295 created_items (dict): All created items belongs to new VM instance
2296
2297 Raises:
2298
2299 VimConnException
2300 """
2301 try:
2302 self.logger.debug("Creating floating IP")
2303 new_floating_ip = self.neutron.create_floatingip(param)
2304 free_floating_ip = new_floating_ip["floatingip"]["id"]
2305 created_items["floating_ip:" + str(free_floating_ip)] = True
2306
2307 except Exception as e:
2308 raise vimconn.VimConnException(
2309 type(e).__name__ + ": Cannot create new floating_ip " + str(e),
2310 http_code=vimconn.HTTP_Conflict,
2311 )
2312
2313 def _create_floating_ip(
2314 self, floating_network: dict, server: object, created_items: dict
2315 ) -> None:
2316 """Get the available Pool ID and create a new floating IP.
2317
2318 Args:
2319 floating_network (dict): Dict including external network ID
2320 server (object): Server object
2321 created_items (dict): All created items belongs to new VM instance
2322
2323 """
2324
2325 # Pool_id is available
2326 if (
2327 isinstance(floating_network["floating_ip"], str)
2328 and floating_network["floating_ip"].lower() != "true"
2329 ):
2330 pool_id = floating_network["floating_ip"]
2331
2332 # Find the Pool_id
2333 else:
2334 pool_id = self._find_the_external_network_for_floating_ip()
2335
2336 param = {
2337 "floatingip": {
2338 "floating_network_id": pool_id,
2339 "tenant_id": server.tenant_id,
2340 }
2341 }
2342
2343 self._neutron_create_float_ip(param, created_items)
2344
2345 def _find_floating_ip(
2346 self,
2347 server: object,
2348 floating_ips: list,
2349 floating_network: dict,
2350 ) -> Optional[str]:
2351 """Find the available free floating IPs if there are.
2352
2353 Args:
2354 server (object): Server object
2355 floating_ips (list): List of floating IPs
2356 floating_network (dict): Details of floating network such as ID
2357
2358 Returns:
2359 free_floating_ip (str): Free floating ip address
2360
2361 """
2362 for fip in floating_ips:
2363 if fip.get("port_id") or fip.get("tenant_id") != server.tenant_id:
2364 continue
2365
2366 if isinstance(floating_network["floating_ip"], str):
2367 if fip.get("floating_network_id") != floating_network["floating_ip"]:
2368 continue
2369
2370 return fip["id"]
2371
2372 def _assign_floating_ip(
2373 self, free_floating_ip: str, floating_network: dict
2374 ) -> Dict:
2375 """Assign the free floating ip address to port.
2376
2377 Args:
2378 free_floating_ip (str): Floating IP to be assigned
2379 floating_network (dict): ID of floating network
2380
2381 Returns:
2382 fip (dict) (dict): Floating ip details
2383
2384 """
2385 # The vim_id key contains the neutron.port_id
2386 self.neutron.update_floatingip(
2387 free_floating_ip,
2388 {"floatingip": {"port_id": floating_network["vim_id"]}},
2389 )
2390 # For race condition ensure not re-assigned to other VM after 5 seconds
2391 time.sleep(5)
2392
2393 return self.neutron.show_floatingip(free_floating_ip)
2394
2395 def _get_free_floating_ip(
2396 self, server: object, floating_network: dict
2397 ) -> Optional[str]:
2398 """Get the free floating IP address.
2399
2400 Args:
2401 server (object): Server Object
2402 floating_network (dict): Floating network details
2403
2404 Returns:
2405 free_floating_ip (str): Free floating ip addr
2406
2407 """
2408
2409 floating_ips = self.neutron.list_floatingips().get("floatingips", ())
2410
2411 # Randomize
2412 random.shuffle(floating_ips)
2413
2414 return self._find_floating_ip(server, floating_ips, floating_network)
2415
    def _prepare_external_network_for_vminstance(
        self,
        external_network: list,
        server: object,
        created_items: dict,
        vm_start_time: float,
    ) -> None:
        """Assign floating IP address for VM instance.

        For each entry in external_network: find a free floating IP (creating one
        if needed), attach it to the VM's port, and verify after a grace period
        that no concurrent RO stole it (HA race). Retries while the VM is still
        booting (up to server_timeout) and up to 3 extra times on conflicts.

        Args:
            external_network (list): ID of External network
            server (object): Server Object
            created_items (dict): All created items belongs to new VM instance
            vm_start_time (float): Time as a floating point number expressed in seconds since the epoch, in UTC

        Raises:
            VimConnException

        """
        for floating_network in external_network:
            try:
                assigned = False
                floating_ip_retries = 3
                # In case of RO in HA there can be conflicts, two RO trying to assign same floating IP, so retry
                # several times
                while not assigned:
                    free_floating_ip = self._get_free_floating_ip(
                        server, floating_network
                    )

                    if not free_floating_ip:
                        # NOTE(review): after creating, free_floating_ip is still
                        # None, so show_floatingip(None) below raises and the
                        # inner except retries; the new IP is picked up on the
                        # next loop iteration — confirm this is intentional
                        self._create_floating_ip(
                            floating_network, server, created_items
                        )

                    try:
                        # For race condition ensure not already assigned
                        fip = self.neutron.show_floatingip(free_floating_ip)

                        if fip["floatingip"].get("port_id"):
                            # Taken by someone else meanwhile: pick another
                            continue

                        # Assign floating ip
                        fip = self._assign_floating_ip(
                            free_floating_ip, floating_network
                        )

                        if fip["floatingip"]["port_id"] != floating_network["vim_id"]:
                            self.logger.warning(
                                "floating_ip {} re-assigned to other port".format(
                                    free_floating_ip
                                )
                            )
                            continue

                        self.logger.debug(
                            "Assigned floating_ip {} to VM {}".format(
                                free_floating_ip, server.id
                            )
                        )

                        assigned = True

                    except Exception as e:
                        # Openstack need some time after VM creation to assign an IP. So retry if fails
                        vm_status = self.nova.servers.get(server.id).status

                        if vm_status not in ("ACTIVE", "ERROR"):
                            # VM still booting: wait and retry within server_timeout
                            if time.time() - vm_start_time < server_timeout:
                                time.sleep(5)
                                continue
                        elif floating_ip_retries > 0:
                            floating_ip_retries -= 1
                            continue

                        raise vimconn.VimConnException(
                            "Cannot create floating_ip: {} {}".format(
                                type(e).__name__, e
                            ),
                            http_code=vimconn.HTTP_Conflict,
                        )

            except Exception as e:
                # Best-effort networks log and move on; mandatory ones re-raise
                if not floating_network["exit_on_floating_ip_error"]:
                    self.logger.error("Cannot create floating_ip. %s", str(e))
                    continue

                raise
2504
2505 def _update_port_security_for_vminstance(
2506 self,
2507 no_secured_ports: list,
2508 server: object,
2509 ) -> None:
2510 """Updates the port security according to no_secured_ports list.
2511
2512 Args:
2513 no_secured_ports (list): List of ports that security will be disabled
2514 server (object): Server Object
2515
2516 Raises:
2517 VimConnException
2518
2519 """
2520 # Wait until the VM is active and then disable the port-security
2521 if no_secured_ports:
2522 self.__wait_for_vm(server.id, "ACTIVE")
2523
2524 for port in no_secured_ports:
2525 port_update = {
2526 "port": {"port_security_enabled": False, "security_groups": None}
2527 }
2528
2529 if port[1] == "allow-address-pairs":
2530 port_update = {
2531 "port": {"allowed_address_pairs": [{"ip_address": "0.0.0.0/0"}]}
2532 }
2533
2534 try:
2535 self.neutron.update_port(port[0], port_update)
2536
2537 except Exception:
2538 raise vimconn.VimConnException(
2539 "It was not possible to disable port security for port {}".format(
2540 port[0]
2541 )
2542 )
2543
2544 def new_vminstance(
2545 self,
2546 name: str,
2547 description: str,
2548 start: bool,
2549 image_id: str,
2550 flavor_id: str,
2551 affinity_group_list: list,
2552 net_list: list,
2553 cloud_config=None,
2554 disk_list=None,
2555 availability_zone_index=None,
2556 availability_zone_list=None,
2557 ) -> tuple:
2558 """Adds a VM instance to VIM.
2559
2560 Args:
2561 name (str): name of VM
2562 description (str): description
2563 start (bool): indicates if VM must start or boot in pause mode. Ignored
2564 image_id (str) image uuid
2565 flavor_id (str) flavor uuid
2566 affinity_group_list (list): list of affinity groups, each one is a dictionary.Ignore if empty.
2567 net_list (list): list of interfaces, each one is a dictionary with:
2568 name: name of network
2569 net_id: network uuid to connect
2570 vpci: virtual vcpi to assign, ignored because openstack lack #TODO
2571 model: interface model, ignored #TODO
2572 mac_address: used for SR-IOV ifaces #TODO for other types
2573 use: 'data', 'bridge', 'mgmt'
2574 type: 'virtual', 'PCI-PASSTHROUGH'('PF'), 'SR-IOV'('VF'), 'VFnotShared'
2575 vim_id: filled/added by this function
2576 floating_ip: True/False (or it can be None)
2577 port_security: True/False
2578 cloud_config (dict): (optional) dictionary with:
2579 key-pairs: (optional) list of strings with the public key to be inserted to the default user
2580 users: (optional) list of users to be inserted, each item is a dict with:
2581 name: (mandatory) user name,
2582 key-pairs: (optional) list of strings with the public key to be inserted to the user
2583 user-data: (optional) string is a text script to be passed directly to cloud-init
2584 config-files: (optional). List of files to be transferred. Each item is a dict with:
2585 dest: (mandatory) string with the destination absolute path
2586 encoding: (optional, by default text). Can be one of:
2587 'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
2588 content : (mandatory) string with the content of the file
2589 permissions: (optional) string with file permissions, typically octal notation '0644'
2590 owner: (optional) file owner, string with the format 'owner:group'
2591 boot-data-drive: boolean to indicate if user-data must be passed using a boot drive (hard disk)
2592 disk_list: (optional) list with additional disks to the VM. Each item is a dict with:
2593 image_id: (optional). VIM id of an existing image. If not provided an empty disk must be mounted
2594 size: (mandatory) string with the size of the disk in GB
2595 vim_id: (optional) should use this existing volume id
2596 availability_zone_index: Index of availability_zone_list to use for this this VM. None if not AV required
2597 availability_zone_list: list of availability zones given by user in the VNFD descriptor. Ignore if
2598 availability_zone_index is None
2599 #TODO ip, security groups
2600
2601 Returns:
2602 A tuple with the instance identifier and created_items or raises an exception on error
2603 created_items can be None or a dictionary where this method can include key-values that will be passed to
2604 the method delete_vminstance and action_vminstance. Can be used to store created ports, volumes, etc.
2605 Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
2606 as not present.
2607 """
2608 self.logger.debug(
2609 "new_vminstance input: image='%s' flavor='%s' nics='%s'",
2610 image_id,
2611 flavor_id,
2612 str(net_list),
2613 )
2614
2615 try:
2616 server = None
2617 created_items = {}
2618 net_list_vim = []
2619 # list of external networks to be connected to instance, later on used to create floating_ip
2620 external_network = []
2621 # List of ports with port-security disabled
2622 no_secured_ports = []
2623 block_device_mapping = None
2624 existing_vim_volumes = []
2625 server_group_id = None
2626 scheduller_hints = {}
2627
2628 # Check the Openstack Connection
2629 self._reload_connection()
2630
2631 # Prepare network list
2632 self._prepare_network_for_vminstance(
2633 name=name,
2634 net_list=net_list,
2635 created_items=created_items,
2636 net_list_vim=net_list_vim,
2637 external_network=external_network,
2638 no_secured_ports=no_secured_ports,
2639 )
2640
2641 # Cloud config
2642 config_drive, userdata = self._create_user_data(cloud_config)
2643
2644 # Get availability Zone
2645 vm_av_zone = self._get_vm_availability_zone(
2646 availability_zone_index, availability_zone_list
2647 )
2648
2649 if disk_list:
2650 # Prepare disks
2651 self._prepare_disk_for_vminstance(
2652 name=name,
2653 existing_vim_volumes=existing_vim_volumes,
2654 created_items=created_items,
2655 vm_av_zone=vm_av_zone,
2656 disk_list=disk_list,
2657 )
2658
2659 if affinity_group_list:
2660 # Only first id on the list will be used. Openstack restriction
2661 server_group_id = affinity_group_list[0]["affinity_group_id"]
2662 scheduller_hints["group"] = server_group_id
2663
2664 self.logger.debug(
2665 "nova.servers.create({}, {}, {}, nics={}, security_groups={}, "
2666 "availability_zone={}, key_name={}, userdata={}, config_drive={}, "
2667 "block_device_mapping={}, server_group={})".format(
2668 name,
2669 image_id,
2670 flavor_id,
2671 net_list_vim,
2672 self.config.get("security_groups"),
2673 vm_av_zone,
2674 self.config.get("keypair"),
2675 userdata,
2676 config_drive,
2677 block_device_mapping,
2678 server_group_id,
2679 )
2680 )
2681
2682 # Create VM
2683 server = self.nova.servers.create(
2684 name=name,
2685 image=image_id,
2686 flavor=flavor_id,
2687 nics=net_list_vim,
2688 security_groups=self.config.get("security_groups"),
2689 # TODO remove security_groups in future versions. Already at neutron port
2690 availability_zone=vm_av_zone,
2691 key_name=self.config.get("keypair"),
2692 userdata=userdata,
2693 config_drive=config_drive,
2694 block_device_mapping=block_device_mapping,
2695 scheduler_hints=scheduller_hints,
2696 )
2697
2698 vm_start_time = time.time()
2699
2700 self._update_port_security_for_vminstance(no_secured_ports, server)
2701
2702 self._prepare_external_network_for_vminstance(
2703 external_network=external_network,
2704 server=server,
2705 created_items=created_items,
2706 vm_start_time=vm_start_time,
2707 )
2708
2709 return server.id, created_items
2710
2711 except Exception as e:
2712 server_id = None
2713 if server:
2714 server_id = server.id
2715
2716 try:
2717 self.delete_vminstance(server_id, created_items)
2718
2719 except Exception as e2:
2720 self.logger.error("new_vminstance rollback fail {}".format(e2))
2721
2722 self._format_exception(e)
2723
2724 def get_vminstance(self, vm_id):
2725 """Returns the VM instance information from VIM"""
2726 # self.logger.debug("Getting VM from VIM")
2727 try:
2728 self._reload_connection()
2729 server = self.nova.servers.find(id=vm_id)
2730 # TODO parse input and translate to VIM format (openmano_schemas.new_vminstance_response_schema)
2731
2732 return server.to_dict()
2733 except (
2734 ksExceptions.ClientException,
2735 nvExceptions.ClientException,
2736 nvExceptions.NotFound,
2737 ConnectionError,
2738 ) as e:
2739 self._format_exception(e)
2740
2741 def get_vminstance_console(self, vm_id, console_type="vnc"):
2742 """
2743 Get a console for the virtual machine
2744 Params:
2745 vm_id: uuid of the VM
2746 console_type, can be:
2747 "novnc" (by default), "xvpvnc" for VNC types,
2748 "rdp-html5" for RDP types, "spice-html5" for SPICE types
2749 Returns dict with the console parameters:
2750 protocol: ssh, ftp, http, https, ...
2751 server: usually ip address
2752 port: the http, ssh, ... port
2753 suffix: extra text, e.g. the http path and query string
2754 """
2755 self.logger.debug("Getting VM CONSOLE from VIM")
2756
2757 try:
2758 self._reload_connection()
2759 server = self.nova.servers.find(id=vm_id)
2760
2761 if console_type is None or console_type == "novnc":
2762 console_dict = server.get_vnc_console("novnc")
2763 elif console_type == "xvpvnc":
2764 console_dict = server.get_vnc_console(console_type)
2765 elif console_type == "rdp-html5":
2766 console_dict = server.get_rdp_console(console_type)
2767 elif console_type == "spice-html5":
2768 console_dict = server.get_spice_console(console_type)
2769 else:
2770 raise vimconn.VimConnException(
2771 "console type '{}' not allowed".format(console_type),
2772 http_code=vimconn.HTTP_Bad_Request,
2773 )
2774
2775 console_dict1 = console_dict.get("console")
2776
2777 if console_dict1:
2778 console_url = console_dict1.get("url")
2779
2780 if console_url:
2781 # parse console_url
2782 protocol_index = console_url.find("//")
2783 suffix_index = (
2784 console_url[protocol_index + 2 :].find("/") + protocol_index + 2
2785 )
2786 port_index = (
2787 console_url[protocol_index + 2 : suffix_index].find(":")
2788 + protocol_index
2789 + 2
2790 )
2791
2792 if protocol_index < 0 or port_index < 0 or suffix_index < 0:
2793 return (
2794 -vimconn.HTTP_Internal_Server_Error,
2795 "Unexpected response from VIM",
2796 )
2797
2798 console_dict = {
2799 "protocol": console_url[0:protocol_index],
2800 "server": console_url[protocol_index + 2 : port_index],
2801 "port": console_url[port_index:suffix_index],
2802 "suffix": console_url[suffix_index + 1 :],
2803 }
2804 protocol_index += 2
2805
2806 return console_dict
2807 raise vimconn.VimConnUnexpectedResponse("Unexpected response from VIM")
2808 except (
2809 nvExceptions.NotFound,
2810 ksExceptions.ClientException,
2811 nvExceptions.ClientException,
2812 nvExceptions.BadRequest,
2813 ConnectionError,
2814 ) as e:
2815 self._format_exception(e)
2816
2817 def _delete_ports_by_id_wth_neutron(self, k_id: str) -> None:
2818 """Neutron delete ports by id.
2819 Args:
2820 k_id (str): Port id in the VIM
2821 """
2822 try:
2823 port_dict = self.neutron.list_ports()
2824 existing_ports = [port["id"] for port in port_dict["ports"] if port_dict]
2825
2826 if k_id in existing_ports:
2827 self.neutron.delete_port(k_id)
2828
2829 except Exception as e:
2830 self.logger.error("Error deleting port: {}: {}".format(type(e).__name__, e))
2831
2832 def _delete_volumes_by_id_wth_cinder(
2833 self, k: str, k_id: str, volumes_to_hold: list, created_items: dict
2834 ) -> bool:
2835 """Cinder delete volume by id.
2836 Args:
2837 k (str): Full item name in created_items
2838 k_id (str): ID of floating ip in VIM
2839 volumes_to_hold (list): Volumes not to delete
2840 created_items (dict): All created items belongs to VM
2841 """
2842 try:
2843 if k_id in volumes_to_hold:
2844 return
2845
2846 if self.cinder.volumes.get(k_id).status != "available":
2847 return True
2848
2849 else:
2850 self.cinder.volumes.delete(k_id)
2851 created_items[k] = None
2852
2853 except Exception as e:
2854 self.logger.error(
2855 "Error deleting volume: {}: {}".format(type(e).__name__, e)
2856 )
2857
2858 def _delete_floating_ip_by_id(self, k: str, k_id: str, created_items: dict) -> None:
2859 """Neutron delete floating ip by id.
2860 Args:
2861 k (str): Full item name in created_items
2862 k_id (str): ID of floating ip in VIM
2863 created_items (dict): All created items belongs to VM
2864 """
2865 try:
2866 self.neutron.delete_floatingip(k_id)
2867 created_items[k] = None
2868
2869 except Exception as e:
2870 self.logger.error(
2871 "Error deleting floating ip: {}: {}".format(type(e).__name__, e)
2872 )
2873
2874 @staticmethod
2875 def _get_item_name_id(k: str) -> Tuple[str, str]:
2876 k_item, _, k_id = k.partition(":")
2877 return k_item, k_id
2878
2879 def _delete_vm_ports_attached_to_network(self, created_items: dict) -> None:
2880 """Delete VM ports attached to the networks before deleting virtual machine.
2881 Args:
2882 created_items (dict): All created items belongs to VM
2883 """
2884
2885 for k, v in created_items.items():
2886 if not v: # skip already deleted
2887 continue
2888
2889 try:
2890 k_item, k_id = self._get_item_name_id(k)
2891 if k_item == "port":
2892 self._delete_ports_by_id_wth_neutron(k_id)
2893
2894 except Exception as e:
2895 self.logger.error(
2896 "Error deleting port: {}: {}".format(type(e).__name__, e)
2897 )
2898
2899 def _delete_created_items(
2900 self, created_items: dict, volumes_to_hold: list, keep_waiting: bool
2901 ) -> bool:
2902 """Delete Volumes and floating ip if they exist in created_items."""
2903 for k, v in created_items.items():
2904 if not v: # skip already deleted
2905 continue
2906
2907 try:
2908 k_item, k_id = self._get_item_name_id(k)
2909
2910 if k_item == "volume":
2911 unavailable_vol = self._delete_volumes_by_id_wth_cinder(
2912 k, k_id, volumes_to_hold, created_items
2913 )
2914
2915 if unavailable_vol:
2916 keep_waiting = True
2917
2918 elif k_item == "floating_ip":
2919 self._delete_floating_ip_by_id(k, k_id, created_items)
2920
2921 except Exception as e:
2922 self.logger.error("Error deleting {}: {}".format(k, e))
2923
2924 return keep_waiting
2925
    def delete_vminstance(
        self, vm_id: str, created_items: dict = None, volumes_to_hold: list = None
    ) -> None:
        """Removes a VM instance from VIM. Returns the old identifier.

        Deletes the VM's ports first, then the server itself, and finally loops
        (up to volume_timeout seconds) deleting volumes and floating IPs, since
        volumes only become deletable once detachment completes.

        Args:
            vm_id (str): Identifier of VM instance
            created_items (dict): All created items belongs to VM
            volumes_to_hold (list): Volumes_to_hold
        """
        if created_items is None:
            created_items = {}
        if volumes_to_hold is None:
            volumes_to_hold = []

        try:
            self._reload_connection()

            # Delete VM ports attached to the networks before the virtual machine
            if created_items:
                self._delete_vm_ports_attached_to_network(created_items)

            if vm_id:
                self.nova.servers.delete(vm_id)

            # Although having detached, volumes should have in active status before deleting.
            # We ensure in this loop
            keep_waiting = True
            elapsed_time = 0

            while keep_waiting and elapsed_time < volume_timeout:
                keep_waiting = False

                # Delete volumes and floating IP.
                keep_waiting = self._delete_created_items(
                    created_items, volumes_to_hold, keep_waiting
                )

                # Some volume was still not "available": poll again after 1s
                if keep_waiting:
                    time.sleep(1)
                    elapsed_time += 1

        except (
            nvExceptions.NotFound,
            ksExceptions.ClientException,
            nvExceptions.ClientException,
            ConnectionError,
        ) as e:
            self._format_exception(e)
2974
    def refresh_vms_status(self, vm_list):
        """Get the status of the virtual machines and their interfaces/ports
        Params: the list of VM identifiers
        Returns a dictionary with:
            vm_id:          #VIM id of this Virtual Machine
                status:     #Mandatory. Text with one of:
                            #  DELETED (not found at vim)
                            #  VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
                            #  OTHER (Vim reported other status not understood)
                            #  ERROR (VIM indicates an ERROR status)
                            #  ACTIVE, PAUSED, SUSPENDED, INACTIVE (not running),
                            #  CREATING (on building process), ERROR
                            #  ACTIVE:NoMgmtIP (Active but any of its interface has an IP address
                            #
                error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR
                vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)
                interfaces:
                 -  vim_info:         #Text with plain information obtained from vim (yaml.safe_dump)
                    mac_address:      #Text format XX:XX:XX:XX:XX:XX
                    vim_net_id:       #network id where this interface is connected
                    vim_interface_id: #interface/port VIM id
                    ip_address:       #null, or text with IPv4, IPv6 address
                    compute_node:     #identification of compute node where PF,VF interface is allocated
                    pci:              #PCI address of the NIC that hosts the PF,VF
                    vlan:             #physical VLAN used for VF
        """
        vm_dict = {}
        self.logger.debug(
            "refresh_vms status: Getting tenant VM instance information from VIM"
        )

        # One entry per requested VM; per-VM failures are recorded in the entry
        # instead of aborting the whole refresh.
        for vm_id in vm_list:
            vm = {}

            try:
                vm_vim = self.get_vminstance(vm_id)

                # Map the VIM native status to the mano status vocabulary
                if vm_vim["status"] in vmStatus2manoFormat:
                    vm["status"] = vmStatus2manoFormat[vm_vim["status"]]
                else:
                    vm["status"] = "OTHER"
                    vm["error_msg"] = "VIM status reported " + vm_vim["status"]

                # Exclude user_data from the serialized vim_info
                vm_vim.pop("OS-EXT-SRV-ATTR:user_data", None)
                vm_vim.pop("user_data", None)
                vm["vim_info"] = self.serialize(vm_vim)

                vm["interfaces"] = []
                if vm_vim.get("fault"):
                    vm["error_msg"] = str(vm_vim["fault"])

                # get interfaces
                try:
                    self._reload_connection()
                    port_dict = self.neutron.list_ports(device_id=vm_id)

                    for port in port_dict["ports"]:
                        interface = {}
                        interface["vim_info"] = self.serialize(port)
                        interface["mac_address"] = port.get("mac_address")
                        interface["vim_net_id"] = port["network_id"]
                        interface["vim_interface_id"] = port["id"]
                        # check if OS-EXT-SRV-ATTR:host is there,
                        # in case of non-admin credentials, it will be missing

                        if vm_vim.get("OS-EXT-SRV-ATTR:host"):
                            interface["compute_node"] = vm_vim["OS-EXT-SRV-ATTR:host"]

                        interface["pci"] = None

                        # check if binding:profile is there,
                        # in case of non-admin credentials, it will be missing
                        if port.get("binding:profile"):
                            if port["binding:profile"].get("pci_slot"):
                                # TODO: At the moment sr-iov pci addresses are converted to PF pci addresses by setting
                                # the slot to 0x00
                                # TODO: This is just a workaround valid for niantinc. Find a better way to do so
                                #   CHANGE DDDD:BB:SS.F to DDDD:BB:00.(F%2)   assuming there are 2 ports per nic
                                pci = port["binding:profile"]["pci_slot"]
                                # interface["pci"] = pci[:-4] + "00." + str(int(pci[-1]) % 2)
                                interface["pci"] = pci

                        interface["vlan"] = None

                        if port.get("binding:vif_details"):
                            interface["vlan"] = port["binding:vif_details"].get("vlan")

                        # Get vlan from network in case not present in port for those old openstacks and cases where
                        # it is needed vlan at PT
                        if not interface["vlan"]:
                            # if network is of type vlan and port is of type direct (sr-iov) then set vlan id
                            network = self.neutron.show_network(port["network_id"])

                            if (
                                network["network"].get("provider:network_type")
                                == "vlan"
                            ):
                                # and port.get("binding:vnic_type") in ("direct", "direct-physical"):
                                interface["vlan"] = network["network"].get(
                                    "provider:segmentation_id"
                                )

                        ips = []
                        # look for floating ip address
                        # best-effort: a failure here must not discard the fixed IPs
                        try:
                            floating_ip_dict = self.neutron.list_floatingips(
                                port_id=port["id"]
                            )

                            if floating_ip_dict.get("floatingips"):
                                ips.append(
                                    floating_ip_dict["floatingips"][0].get(
                                        "floating_ip_address"
                                    )
                                )
                        except Exception:
                            pass

                        for subnet in port["fixed_ips"]:
                            ips.append(subnet["ip_address"])

                        # all addresses of the port joined in a single string
                        interface["ip_address"] = ";".join(ips)
                        vm["interfaces"].append(interface)
                except Exception as e:
                    # interface retrieval failure is logged but does not change vm["status"]
                    self.logger.error(
                        "Error getting vm interface information {}: {}".format(
                            type(e).__name__, e
                        ),
                        exc_info=True,
                    )
            except vimconn.VimConnNotFoundException as e:
                self.logger.error("Exception getting vm status: %s", str(e))
                vm["status"] = "DELETED"
                vm["error_msg"] = str(e)
            except vimconn.VimConnException as e:
                self.logger.error("Exception getting vm status: %s", str(e))
                vm["status"] = "VIM_ERROR"
                vm["error_msg"] = str(e)

            vm_dict[vm_id] = vm

        return vm_dict
3117
    def action_vminstance(self, vm_id, action_dict, created_items={}):
        """Send and action over a VM instance from VIM
        Returns None or the console dict if the action was successfully sent to the VIM

        :param vm_id: VIM id of the server
        :param action_dict: dict whose keys select the action (start, pause,
            resume, shutoff/shutdown, forceOff, terminate, createImage,
            rebuild, reboot, console)
        :param created_items: kept for interface compatibility; not read or
            mutated by this method
        :raises vimconn.VimConnException: when the action is not allowed in the
            current server state or the console response cannot be parsed
        """
        self.logger.debug("Action over VM '%s': %s", vm_id, str(action_dict))

        try:
            self._reload_connection()
            server = self.nova.servers.find(id=vm_id)

            if "start" in action_dict:
                if action_dict["start"] == "rebuild":
                    server.rebuild()
                else:
                    # "start" semantics depend on the current server state
                    if server.status == "PAUSED":
                        server.unpause()
                    elif server.status == "SUSPENDED":
                        server.resume()
                    elif server.status == "SHUTOFF":
                        server.start()
                    else:
                        self.logger.debug(
                            "ERROR : Instance is not in SHUTOFF/PAUSE/SUSPEND state"
                        )
                        raise vimconn.VimConnException(
                            "Cannot 'start' instance while it is in active state",
                            http_code=vimconn.HTTP_Bad_Request,
                        )

            elif "pause" in action_dict:
                server.pause()
            elif "resume" in action_dict:
                server.resume()
            elif "shutoff" in action_dict or "shutdown" in action_dict:
                self.logger.debug("server status %s", server.status)
                if server.status == "ACTIVE":
                    server.stop()
                else:
                    self.logger.debug("ERROR: VM is not in Active state")
                    raise vimconn.VimConnException(
                        "VM is not in active state, stop operation is not allowed",
                        http_code=vimconn.HTTP_Bad_Request,
                    )
            elif "forceOff" in action_dict:
                server.stop()  # TODO
            elif "terminate" in action_dict:
                server.delete()
            elif "createImage" in action_dict:
                server.create_image()
                # "path":path_schema,
                # "description":description_schema,
                # "name":name_schema,
                # "metadata":metadata_schema,
                # "imageRef": id_schema,
                # "disk": {"oneOf":[{"type": "null"}, {"type":"string"}] },
            elif "rebuild" in action_dict:
                server.rebuild(server.image["id"])
            elif "reboot" in action_dict:
                server.reboot()  # reboot_type="SOFT"
            elif "console" in action_dict:
                console_type = action_dict["console"]

                # select the console endpoint matching the requested type
                if console_type is None or console_type == "novnc":
                    console_dict = server.get_vnc_console("novnc")
                elif console_type == "xvpvnc":
                    console_dict = server.get_vnc_console(console_type)
                elif console_type == "rdp-html5":
                    console_dict = server.get_rdp_console(console_type)
                elif console_type == "spice-html5":
                    console_dict = server.get_spice_console(console_type)
                else:
                    raise vimconn.VimConnException(
                        "console type '{}' not allowed".format(console_type),
                        http_code=vimconn.HTTP_Bad_Request,
                    )

                try:
                    # split the returned URL into protocol, server, port, suffix
                    console_url = console_dict["console"]["url"]
                    # parse console_url
                    protocol_index = console_url.find("//")
                    suffix_index = (
                        console_url[protocol_index + 2 :].find("/") + protocol_index + 2
                    )
                    port_index = (
                        console_url[protocol_index + 2 : suffix_index].find(":")
                        + protocol_index
                        + 2
                    )

                    if protocol_index < 0 or port_index < 0 or suffix_index < 0:
                        raise vimconn.VimConnException(
                            "Unexpected response from VIM " + str(console_dict)
                        )

                    console_dict2 = {
                        "protocol": console_url[0:protocol_index],
                        "server": console_url[protocol_index + 2 : port_index],
                        "port": int(console_url[port_index + 1 : suffix_index]),
                        "suffix": console_url[suffix_index + 1 :],
                    }

                    return console_dict2
                except Exception:
                    # any parsing error (missing keys, bad int, ...) is reported
                    # uniformly as an unexpected VIM response
                    raise vimconn.VimConnException(
                        "Unexpected response from VIM " + str(console_dict)
                    )

            return None
        except (
            ksExceptions.ClientException,
            nvExceptions.ClientException,
            nvExceptions.NotFound,
            ConnectionError,
        ) as e:
            self._format_exception(e)
        # TODO insert exception vimconn.HTTP_Unauthorized
3234
3235 # ###### VIO Specific Changes #########
3236 def _generate_vlanID(self):
3237 """
3238 Method to get unused vlanID
3239 Args:
3240 None
3241 Returns:
3242 vlanID
3243 """
3244 # Get used VLAN IDs
3245 usedVlanIDs = []
3246 networks = self.get_network_list()
3247
3248 for net in networks:
3249 if net.get("provider:segmentation_id"):
3250 usedVlanIDs.append(net.get("provider:segmentation_id"))
3251
3252 used_vlanIDs = set(usedVlanIDs)
3253
3254 # find unused VLAN ID
3255 for vlanID_range in self.config.get("dataplane_net_vlan_range"):
3256 try:
3257 start_vlanid, end_vlanid = map(
3258 int, vlanID_range.replace(" ", "").split("-")
3259 )
3260
3261 for vlanID in range(start_vlanid, end_vlanid + 1):
3262 if vlanID not in used_vlanIDs:
3263 return vlanID
3264 except Exception as exp:
3265 raise vimconn.VimConnException(
3266 "Exception {} occurred while generating VLAN ID.".format(exp)
3267 )
3268 else:
3269 raise vimconn.VimConnConflictException(
3270 "Unable to create the SRIOV VLAN network. All given Vlan IDs {} are in use.".format(
3271 self.config.get("dataplane_net_vlan_range")
3272 )
3273 )
3274
3275 def _generate_multisegment_vlanID(self):
3276 """
3277 Method to get unused vlanID
3278 Args:
3279 None
3280 Returns:
3281 vlanID
3282 """
3283 # Get used VLAN IDs
3284 usedVlanIDs = []
3285 networks = self.get_network_list()
3286 for net in networks:
3287 if net.get("provider:network_type") == "vlan" and net.get(
3288 "provider:segmentation_id"
3289 ):
3290 usedVlanIDs.append(net.get("provider:segmentation_id"))
3291 elif net.get("segments"):
3292 for segment in net.get("segments"):
3293 if segment.get("provider:network_type") == "vlan" and segment.get(
3294 "provider:segmentation_id"
3295 ):
3296 usedVlanIDs.append(segment.get("provider:segmentation_id"))
3297
3298 used_vlanIDs = set(usedVlanIDs)
3299
3300 # find unused VLAN ID
3301 for vlanID_range in self.config.get("multisegment_vlan_range"):
3302 try:
3303 start_vlanid, end_vlanid = map(
3304 int, vlanID_range.replace(" ", "").split("-")
3305 )
3306
3307 for vlanID in range(start_vlanid, end_vlanid + 1):
3308 if vlanID not in used_vlanIDs:
3309 return vlanID
3310 except Exception as exp:
3311 raise vimconn.VimConnException(
3312 "Exception {} occurred while generating VLAN ID.".format(exp)
3313 )
3314 else:
3315 raise vimconn.VimConnConflictException(
3316 "Unable to create the VLAN segment. All VLAN IDs {} are in use.".format(
3317 self.config.get("multisegment_vlan_range")
3318 )
3319 )
3320
3321 def _validate_vlan_ranges(self, input_vlan_range, text_vlan_range):
3322 """
3323 Method to validate user given vlanID ranges
3324 Args: None
3325 Returns: None
3326 """
3327 for vlanID_range in input_vlan_range:
3328 vlan_range = vlanID_range.replace(" ", "")
3329 # validate format
3330 vlanID_pattern = r"(\d)*-(\d)*$"
3331 match_obj = re.match(vlanID_pattern, vlan_range)
3332 if not match_obj:
3333 raise vimconn.VimConnConflictException(
3334 "Invalid VLAN range for {}: {}.You must provide "
3335 "'{}' in format [start_ID - end_ID].".format(
3336 text_vlan_range, vlanID_range, text_vlan_range
3337 )
3338 )
3339
3340 start_vlanid, end_vlanid = map(int, vlan_range.split("-"))
3341 if start_vlanid <= 0:
3342 raise vimconn.VimConnConflictException(
3343 "Invalid VLAN range for {}: {}. Start ID can not be zero. For VLAN "
3344 "networks valid IDs are 1 to 4094 ".format(
3345 text_vlan_range, vlanID_range
3346 )
3347 )
3348
3349 if end_vlanid > 4094:
3350 raise vimconn.VimConnConflictException(
3351 "Invalid VLAN range for {}: {}. End VLAN ID can not be "
3352 "greater than 4094. For VLAN networks valid IDs are 1 to 4094 ".format(
3353 text_vlan_range, vlanID_range
3354 )
3355 )
3356
3357 if start_vlanid > end_vlanid:
3358 raise vimconn.VimConnConflictException(
3359 "Invalid VLAN range for {}: {}. You must provide '{}'"
3360 " in format start_ID - end_ID and start_ID < end_ID ".format(
3361 text_vlan_range, vlanID_range, text_vlan_range
3362 )
3363 )
3364
3365 # NOT USED FUNCTIONS
3366
3367 def new_external_port(self, port_data):
3368 """Adds a external port to VIM
3369 Returns the port identifier"""
3370 # TODO openstack if needed
3371 return (
3372 -vimconn.HTTP_Internal_Server_Error,
3373 "osconnector.new_external_port() not implemented",
3374 )
3375
3376 def connect_port_network(self, port_id, network_id, admin=False):
3377 """Connects a external port to a network
3378 Returns status code of the VIM response"""
3379 # TODO openstack if needed
3380 return (
3381 -vimconn.HTTP_Internal_Server_Error,
3382 "osconnector.connect_port_network() not implemented",
3383 )
3384
3385 def new_user(self, user_name, user_passwd, tenant_id=None):
3386 """Adds a new user to openstack VIM
3387 Returns the user identifier"""
3388 self.logger.debug("osconnector: Adding a new user to VIM")
3389
3390 try:
3391 self._reload_connection()
3392 user = self.keystone.users.create(
3393 user_name, password=user_passwd, default_project=tenant_id
3394 )
3395 # self.keystone.tenants.add_user(self.k_creds["username"], #role)
3396
3397 return user.id
3398 except ksExceptions.ConnectionError as e:
3399 error_value = -vimconn.HTTP_Bad_Request
3400 error_text = (
3401 type(e).__name__
3402 + ": "
3403 + (str(e) if len(e.args) == 0 else str(e.args[0]))
3404 )
3405 except ksExceptions.ClientException as e: # TODO remove
3406 error_value = -vimconn.HTTP_Bad_Request
3407 error_text = (
3408 type(e).__name__
3409 + ": "
3410 + (str(e) if len(e.args) == 0 else str(e.args[0]))
3411 )
3412
3413 # TODO insert exception vimconn.HTTP_Unauthorized
3414 # if reaching here is because an exception
3415 self.logger.debug("new_user " + error_text)
3416
3417 return error_value, error_text
3418
3419 def delete_user(self, user_id):
3420 """Delete a user from openstack VIM
3421 Returns the user identifier"""
3422 if self.debug:
3423 print("osconnector: Deleting a user from VIM")
3424
3425 try:
3426 self._reload_connection()
3427 self.keystone.users.delete(user_id)
3428
3429 return 1, user_id
3430 except ksExceptions.ConnectionError as e:
3431 error_value = -vimconn.HTTP_Bad_Request
3432 error_text = (
3433 type(e).__name__
3434 + ": "
3435 + (str(e) if len(e.args) == 0 else str(e.args[0]))
3436 )
3437 except ksExceptions.NotFound as e:
3438 error_value = -vimconn.HTTP_Not_Found
3439 error_text = (
3440 type(e).__name__
3441 + ": "
3442 + (str(e) if len(e.args) == 0 else str(e.args[0]))
3443 )
3444 except ksExceptions.ClientException as e: # TODO remove
3445 error_value = -vimconn.HTTP_Bad_Request
3446 error_text = (
3447 type(e).__name__
3448 + ": "
3449 + (str(e) if len(e.args) == 0 else str(e.args[0]))
3450 )
3451
3452 # TODO insert exception vimconn.HTTP_Unauthorized
3453 # if reaching here is because an exception
3454 self.logger.debug("delete_tenant " + error_text)
3455
3456 return error_value, error_text
3457
3458 def get_hosts_info(self):
3459 """Get the information of deployed hosts
3460 Returns the hosts content"""
3461 if self.debug:
3462 print("osconnector: Getting Host info from VIM")
3463
3464 try:
3465 h_list = []
3466 self._reload_connection()
3467 hypervisors = self.nova.hypervisors.list()
3468
3469 for hype in hypervisors:
3470 h_list.append(hype.to_dict())
3471
3472 return 1, {"hosts": h_list}
3473 except nvExceptions.NotFound as e:
3474 error_value = -vimconn.HTTP_Not_Found
3475 error_text = str(e) if len(e.args) == 0 else str(e.args[0])
3476 except (ksExceptions.ClientException, nvExceptions.ClientException) as e:
3477 error_value = -vimconn.HTTP_Bad_Request
3478 error_text = (
3479 type(e).__name__
3480 + ": "
3481 + (str(e) if len(e.args) == 0 else str(e.args[0]))
3482 )
3483
3484 # TODO insert exception vimconn.HTTP_Unauthorized
3485 # if reaching here is because an exception
3486 self.logger.debug("get_hosts_info " + error_text)
3487
3488 return error_value, error_text
3489
3490 def get_hosts(self, vim_tenant):
3491 """Get the hosts and deployed instances
3492 Returns the hosts content"""
3493 r, hype_dict = self.get_hosts_info()
3494
3495 if r < 0:
3496 return r, hype_dict
3497
3498 hypervisors = hype_dict["hosts"]
3499
3500 try:
3501 servers = self.nova.servers.list()
3502 for hype in hypervisors:
3503 for server in servers:
3504 if (
3505 server.to_dict()["OS-EXT-SRV-ATTR:hypervisor_hostname"]
3506 == hype["hypervisor_hostname"]
3507 ):
3508 if "vm" in hype:
3509 hype["vm"].append(server.id)
3510 else:
3511 hype["vm"] = [server.id]
3512
3513 return 1, hype_dict
3514 except nvExceptions.NotFound as e:
3515 error_value = -vimconn.HTTP_Not_Found
3516 error_text = str(e) if len(e.args) == 0 else str(e.args[0])
3517 except (ksExceptions.ClientException, nvExceptions.ClientException) as e:
3518 error_value = -vimconn.HTTP_Bad_Request
3519 error_text = (
3520 type(e).__name__
3521 + ": "
3522 + (str(e) if len(e.args) == 0 else str(e.args[0]))
3523 )
3524
3525 # TODO insert exception vimconn.HTTP_Unauthorized
3526 # if reaching here is because an exception
3527 self.logger.debug("get_hosts " + error_text)
3528
3529 return error_value, error_text
3530
3531 def new_classification(self, name, ctype, definition):
3532 self.logger.debug(
3533 "Adding a new (Traffic) Classification to VIM, named %s", name
3534 )
3535
3536 try:
3537 new_class = None
3538 self._reload_connection()
3539
3540 if ctype not in supportedClassificationTypes:
3541 raise vimconn.VimConnNotSupportedException(
3542 "OpenStack VIM connector does not support provided "
3543 "Classification Type {}, supported ones are: {}".format(
3544 ctype, supportedClassificationTypes
3545 )
3546 )
3547
3548 if not self._validate_classification(ctype, definition):
3549 raise vimconn.VimConnException(
3550 "Incorrect Classification definition for the type specified."
3551 )
3552
3553 classification_dict = definition
3554 classification_dict["name"] = name
3555 new_class = self.neutron.create_sfc_flow_classifier(
3556 {"flow_classifier": classification_dict}
3557 )
3558
3559 return new_class["flow_classifier"]["id"]
3560 except (
3561 neExceptions.ConnectionFailed,
3562 ksExceptions.ClientException,
3563 neExceptions.NeutronException,
3564 ConnectionError,
3565 ) as e:
3566 self.logger.error("Creation of Classification failed.")
3567 self._format_exception(e)
3568
3569 def get_classification(self, class_id):
3570 self.logger.debug(" Getting Classification %s from VIM", class_id)
3571 filter_dict = {"id": class_id}
3572 class_list = self.get_classification_list(filter_dict)
3573
3574 if len(class_list) == 0:
3575 raise vimconn.VimConnNotFoundException(
3576 "Classification '{}' not found".format(class_id)
3577 )
3578 elif len(class_list) > 1:
3579 raise vimconn.VimConnConflictException(
3580 "Found more than one Classification with this criteria"
3581 )
3582
3583 classification = class_list[0]
3584
3585 return classification
3586
3587 def get_classification_list(self, filter_dict={}):
3588 self.logger.debug(
3589 "Getting Classifications from VIM filter: '%s'", str(filter_dict)
3590 )
3591
3592 try:
3593 filter_dict_os = filter_dict.copy()
3594 self._reload_connection()
3595
3596 if self.api_version3 and "tenant_id" in filter_dict_os:
3597 filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")
3598
3599 classification_dict = self.neutron.list_sfc_flow_classifiers(
3600 **filter_dict_os
3601 )
3602 classification_list = classification_dict["flow_classifiers"]
3603 self.__classification_os2mano(classification_list)
3604
3605 return classification_list
3606 except (
3607 neExceptions.ConnectionFailed,
3608 ksExceptions.ClientException,
3609 neExceptions.NeutronException,
3610 ConnectionError,
3611 ) as e:
3612 self._format_exception(e)
3613
3614 def delete_classification(self, class_id):
3615 self.logger.debug("Deleting Classification '%s' from VIM", class_id)
3616
3617 try:
3618 self._reload_connection()
3619 self.neutron.delete_sfc_flow_classifier(class_id)
3620
3621 return class_id
3622 except (
3623 neExceptions.ConnectionFailed,
3624 neExceptions.NeutronException,
3625 ksExceptions.ClientException,
3626 neExceptions.NeutronException,
3627 ConnectionError,
3628 ) as e:
3629 self._format_exception(e)
3630
3631 def new_sfi(self, name, ingress_ports, egress_ports, sfc_encap=True):
3632 self.logger.debug(
3633 "Adding a new Service Function Instance to VIM, named '%s'", name
3634 )
3635
3636 try:
3637 new_sfi = None
3638 self._reload_connection()
3639 correlation = None
3640
3641 if sfc_encap:
3642 correlation = "nsh"
3643
3644 if len(ingress_ports) != 1:
3645 raise vimconn.VimConnNotSupportedException(
3646 "OpenStack VIM connector can only have 1 ingress port per SFI"
3647 )
3648
3649 if len(egress_ports) != 1:
3650 raise vimconn.VimConnNotSupportedException(
3651 "OpenStack VIM connector can only have 1 egress port per SFI"
3652 )
3653
3654 sfi_dict = {
3655 "name": name,
3656 "ingress": ingress_ports[0],
3657 "egress": egress_ports[0],
3658 "service_function_parameters": {"correlation": correlation},
3659 }
3660 new_sfi = self.neutron.create_sfc_port_pair({"port_pair": sfi_dict})
3661
3662 return new_sfi["port_pair"]["id"]
3663 except (
3664 neExceptions.ConnectionFailed,
3665 ksExceptions.ClientException,
3666 neExceptions.NeutronException,
3667 ConnectionError,
3668 ) as e:
3669 if new_sfi:
3670 try:
3671 self.neutron.delete_sfc_port_pair(new_sfi["port_pair"]["id"])
3672 except Exception:
3673 self.logger.error(
3674 "Creation of Service Function Instance failed, with "
3675 "subsequent deletion failure as well."
3676 )
3677
3678 self._format_exception(e)
3679
3680 def get_sfi(self, sfi_id):
3681 self.logger.debug("Getting Service Function Instance %s from VIM", sfi_id)
3682 filter_dict = {"id": sfi_id}
3683 sfi_list = self.get_sfi_list(filter_dict)
3684
3685 if len(sfi_list) == 0:
3686 raise vimconn.VimConnNotFoundException(
3687 "Service Function Instance '{}' not found".format(sfi_id)
3688 )
3689 elif len(sfi_list) > 1:
3690 raise vimconn.VimConnConflictException(
3691 "Found more than one Service Function Instance with this criteria"
3692 )
3693
3694 sfi = sfi_list[0]
3695
3696 return sfi
3697
3698 def get_sfi_list(self, filter_dict={}):
3699 self.logger.debug(
3700 "Getting Service Function Instances from VIM filter: '%s'", str(filter_dict)
3701 )
3702
3703 try:
3704 self._reload_connection()
3705 filter_dict_os = filter_dict.copy()
3706
3707 if self.api_version3 and "tenant_id" in filter_dict_os:
3708 filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")
3709
3710 sfi_dict = self.neutron.list_sfc_port_pairs(**filter_dict_os)
3711 sfi_list = sfi_dict["port_pairs"]
3712 self.__sfi_os2mano(sfi_list)
3713
3714 return sfi_list
3715 except (
3716 neExceptions.ConnectionFailed,
3717 ksExceptions.ClientException,
3718 neExceptions.NeutronException,
3719 ConnectionError,
3720 ) as e:
3721 self._format_exception(e)
3722
3723 def delete_sfi(self, sfi_id):
3724 self.logger.debug("Deleting Service Function Instance '%s' from VIM", sfi_id)
3725
3726 try:
3727 self._reload_connection()
3728 self.neutron.delete_sfc_port_pair(sfi_id)
3729
3730 return sfi_id
3731 except (
3732 neExceptions.ConnectionFailed,
3733 neExceptions.NeutronException,
3734 ksExceptions.ClientException,
3735 neExceptions.NeutronException,
3736 ConnectionError,
3737 ) as e:
3738 self._format_exception(e)
3739
3740 def new_sf(self, name, sfis, sfc_encap=True):
3741 self.logger.debug("Adding a new Service Function to VIM, named '%s'", name)
3742
3743 try:
3744 new_sf = None
3745 self._reload_connection()
3746 # correlation = None
3747 # if sfc_encap:
3748 # correlation = "nsh"
3749
3750 for instance in sfis:
3751 sfi = self.get_sfi(instance)
3752
3753 if sfi.get("sfc_encap") != sfc_encap:
3754 raise vimconn.VimConnNotSupportedException(
3755 "OpenStack VIM connector requires all SFIs of the "
3756 "same SF to share the same SFC Encapsulation"
3757 )
3758
3759 sf_dict = {"name": name, "port_pairs": sfis}
3760 new_sf = self.neutron.create_sfc_port_pair_group(
3761 {"port_pair_group": sf_dict}
3762 )
3763
3764 return new_sf["port_pair_group"]["id"]
3765 except (
3766 neExceptions.ConnectionFailed,
3767 ksExceptions.ClientException,
3768 neExceptions.NeutronException,
3769 ConnectionError,
3770 ) as e:
3771 if new_sf:
3772 try:
3773 self.neutron.delete_sfc_port_pair_group(
3774 new_sf["port_pair_group"]["id"]
3775 )
3776 except Exception:
3777 self.logger.error(
3778 "Creation of Service Function failed, with "
3779 "subsequent deletion failure as well."
3780 )
3781
3782 self._format_exception(e)
3783
3784 def get_sf(self, sf_id):
3785 self.logger.debug("Getting Service Function %s from VIM", sf_id)
3786 filter_dict = {"id": sf_id}
3787 sf_list = self.get_sf_list(filter_dict)
3788
3789 if len(sf_list) == 0:
3790 raise vimconn.VimConnNotFoundException(
3791 "Service Function '{}' not found".format(sf_id)
3792 )
3793 elif len(sf_list) > 1:
3794 raise vimconn.VimConnConflictException(
3795 "Found more than one Service Function with this criteria"
3796 )
3797
3798 sf = sf_list[0]
3799
3800 return sf
3801
3802 def get_sf_list(self, filter_dict={}):
3803 self.logger.debug(
3804 "Getting Service Function from VIM filter: '%s'", str(filter_dict)
3805 )
3806
3807 try:
3808 self._reload_connection()
3809 filter_dict_os = filter_dict.copy()
3810
3811 if self.api_version3 and "tenant_id" in filter_dict_os:
3812 filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")
3813
3814 sf_dict = self.neutron.list_sfc_port_pair_groups(**filter_dict_os)
3815 sf_list = sf_dict["port_pair_groups"]
3816 self.__sf_os2mano(sf_list)
3817
3818 return sf_list
3819 except (
3820 neExceptions.ConnectionFailed,
3821 ksExceptions.ClientException,
3822 neExceptions.NeutronException,
3823 ConnectionError,
3824 ) as e:
3825 self._format_exception(e)
3826
3827 def delete_sf(self, sf_id):
3828 self.logger.debug("Deleting Service Function '%s' from VIM", sf_id)
3829
3830 try:
3831 self._reload_connection()
3832 self.neutron.delete_sfc_port_pair_group(sf_id)
3833
3834 return sf_id
3835 except (
3836 neExceptions.ConnectionFailed,
3837 neExceptions.NeutronException,
3838 ksExceptions.ClientException,
3839 neExceptions.NeutronException,
3840 ConnectionError,
3841 ) as e:
3842 self._format_exception(e)
3843
3844 def new_sfp(self, name, classifications, sfs, sfc_encap=True, spi=None):
3845 self.logger.debug("Adding a new Service Function Path to VIM, named '%s'", name)
3846
3847 try:
3848 new_sfp = None
3849 self._reload_connection()
3850 # In networking-sfc the MPLS encapsulation is legacy
3851 # should be used when no full SFC Encapsulation is intended
3852 correlation = "mpls"
3853
3854 if sfc_encap:
3855 correlation = "nsh"
3856
3857 sfp_dict = {
3858 "name": name,
3859 "flow_classifiers": classifications,
3860 "port_pair_groups": sfs,
3861 "chain_parameters": {"correlation": correlation},
3862 }
3863
3864 if spi:
3865 sfp_dict["chain_id"] = spi
3866
3867 new_sfp = self.neutron.create_sfc_port_chain({"port_chain": sfp_dict})
3868
3869 return new_sfp["port_chain"]["id"]
3870 except (
3871 neExceptions.ConnectionFailed,
3872 ksExceptions.ClientException,
3873 neExceptions.NeutronException,
3874 ConnectionError,
3875 ) as e:
3876 if new_sfp:
3877 try:
3878 self.neutron.delete_sfc_port_chain(new_sfp["port_chain"]["id"])
3879 except Exception:
3880 self.logger.error(
3881 "Creation of Service Function Path failed, with "
3882 "subsequent deletion failure as well."
3883 )
3884
3885 self._format_exception(e)
3886
3887 def get_sfp(self, sfp_id):
3888 self.logger.debug(" Getting Service Function Path %s from VIM", sfp_id)
3889
3890 filter_dict = {"id": sfp_id}
3891 sfp_list = self.get_sfp_list(filter_dict)
3892
3893 if len(sfp_list) == 0:
3894 raise vimconn.VimConnNotFoundException(
3895 "Service Function Path '{}' not found".format(sfp_id)
3896 )
3897 elif len(sfp_list) > 1:
3898 raise vimconn.VimConnConflictException(
3899 "Found more than one Service Function Path with this criteria"
3900 )
3901
3902 sfp = sfp_list[0]
3903
3904 return sfp
3905
3906 def get_sfp_list(self, filter_dict={}):
3907 self.logger.debug(
3908 "Getting Service Function Paths from VIM filter: '%s'", str(filter_dict)
3909 )
3910
3911 try:
3912 self._reload_connection()
3913 filter_dict_os = filter_dict.copy()
3914
3915 if self.api_version3 and "tenant_id" in filter_dict_os:
3916 filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")
3917
3918 sfp_dict = self.neutron.list_sfc_port_chains(**filter_dict_os)
3919 sfp_list = sfp_dict["port_chains"]
3920 self.__sfp_os2mano(sfp_list)
3921
3922 return sfp_list
3923 except (
3924 neExceptions.ConnectionFailed,
3925 ksExceptions.ClientException,
3926 neExceptions.NeutronException,
3927 ConnectionError,
3928 ) as e:
3929 self._format_exception(e)
3930
3931 def delete_sfp(self, sfp_id):
3932 self.logger.debug("Deleting Service Function Path '%s' from VIM", sfp_id)
3933
3934 try:
3935 self._reload_connection()
3936 self.neutron.delete_sfc_port_chain(sfp_id)
3937
3938 return sfp_id
3939 except (
3940 neExceptions.ConnectionFailed,
3941 neExceptions.NeutronException,
3942 ksExceptions.ClientException,
3943 neExceptions.NeutronException,
3944 ConnectionError,
3945 ) as e:
3946 self._format_exception(e)
3947
3948 def refresh_sfps_status(self, sfp_list):
3949 """Get the status of the service function path
3950 Params: the list of sfp identifiers
3951 Returns a dictionary with:
3952 vm_id: #VIM id of this service function path
3953 status: #Mandatory. Text with one of:
3954 # DELETED (not found at vim)
3955 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
3956 # OTHER (Vim reported other status not understood)
3957 # ERROR (VIM indicates an ERROR status)
3958 # ACTIVE,
3959 # CREATING (on building process)
3960 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
3961 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)F
3962 """
3963 sfp_dict = {}
3964 self.logger.debug(
3965 "refresh_sfps status: Getting tenant SFP information from VIM"
3966 )
3967
3968 for sfp_id in sfp_list:
3969 sfp = {}
3970
3971 try:
3972 sfp_vim = self.get_sfp(sfp_id)
3973
3974 if sfp_vim["spi"]:
3975 sfp["status"] = vmStatus2manoFormat["ACTIVE"]
3976 else:
3977 sfp["status"] = "OTHER"
3978 sfp["error_msg"] = "VIM status reported " + sfp["status"]
3979
3980 sfp["vim_info"] = self.serialize(sfp_vim)
3981
3982 if sfp_vim.get("fault"):
3983 sfp["error_msg"] = str(sfp_vim["fault"])
3984 except vimconn.VimConnNotFoundException as e:
3985 self.logger.error("Exception getting sfp status: %s", str(e))
3986 sfp["status"] = "DELETED"
3987 sfp["error_msg"] = str(e)
3988 except vimconn.VimConnException as e:
3989 self.logger.error("Exception getting sfp status: %s", str(e))
3990 sfp["status"] = "VIM_ERROR"
3991 sfp["error_msg"] = str(e)
3992
3993 sfp_dict[sfp_id] = sfp
3994
3995 return sfp_dict
3996
3997 def refresh_sfis_status(self, sfi_list):
3998 """Get the status of the service function instances
3999 Params: the list of sfi identifiers
4000 Returns a dictionary with:
4001 vm_id: #VIM id of this service function instance
4002 status: #Mandatory. Text with one of:
4003 # DELETED (not found at vim)
4004 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
4005 # OTHER (Vim reported other status not understood)
4006 # ERROR (VIM indicates an ERROR status)
4007 # ACTIVE,
4008 # CREATING (on building process)
4009 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
4010 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
4011 """
4012 sfi_dict = {}
4013 self.logger.debug(
4014 "refresh_sfis status: Getting tenant sfi information from VIM"
4015 )
4016
4017 for sfi_id in sfi_list:
4018 sfi = {}
4019
4020 try:
4021 sfi_vim = self.get_sfi(sfi_id)
4022
4023 if sfi_vim:
4024 sfi["status"] = vmStatus2manoFormat["ACTIVE"]
4025 else:
4026 sfi["status"] = "OTHER"
4027 sfi["error_msg"] = "VIM status reported " + sfi["status"]
4028
4029 sfi["vim_info"] = self.serialize(sfi_vim)
4030
4031 if sfi_vim.get("fault"):
4032 sfi["error_msg"] = str(sfi_vim["fault"])
4033 except vimconn.VimConnNotFoundException as e:
4034 self.logger.error("Exception getting sfi status: %s", str(e))
4035 sfi["status"] = "DELETED"
4036 sfi["error_msg"] = str(e)
4037 except vimconn.VimConnException as e:
4038 self.logger.error("Exception getting sfi status: %s", str(e))
4039 sfi["status"] = "VIM_ERROR"
4040 sfi["error_msg"] = str(e)
4041
4042 sfi_dict[sfi_id] = sfi
4043
4044 return sfi_dict
4045
4046 def refresh_sfs_status(self, sf_list):
4047 """Get the status of the service functions
4048 Params: the list of sf identifiers
4049 Returns a dictionary with:
4050 vm_id: #VIM id of this service function
4051 status: #Mandatory. Text with one of:
4052 # DELETED (not found at vim)
4053 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
4054 # OTHER (Vim reported other status not understood)
4055 # ERROR (VIM indicates an ERROR status)
4056 # ACTIVE,
4057 # CREATING (on building process)
4058 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
4059 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
4060 """
4061 sf_dict = {}
4062 self.logger.debug("refresh_sfs status: Getting tenant sf information from VIM")
4063
4064 for sf_id in sf_list:
4065 sf = {}
4066
4067 try:
4068 sf_vim = self.get_sf(sf_id)
4069
4070 if sf_vim:
4071 sf["status"] = vmStatus2manoFormat["ACTIVE"]
4072 else:
4073 sf["status"] = "OTHER"
4074 sf["error_msg"] = "VIM status reported " + sf_vim["status"]
4075
4076 sf["vim_info"] = self.serialize(sf_vim)
4077
4078 if sf_vim.get("fault"):
4079 sf["error_msg"] = str(sf_vim["fault"])
4080 except vimconn.VimConnNotFoundException as e:
4081 self.logger.error("Exception getting sf status: %s", str(e))
4082 sf["status"] = "DELETED"
4083 sf["error_msg"] = str(e)
4084 except vimconn.VimConnException as e:
4085 self.logger.error("Exception getting sf status: %s", str(e))
4086 sf["status"] = "VIM_ERROR"
4087 sf["error_msg"] = str(e)
4088
4089 sf_dict[sf_id] = sf
4090
4091 return sf_dict
4092
4093 def refresh_classifications_status(self, classification_list):
4094 """Get the status of the classifications
4095 Params: the list of classification identifiers
4096 Returns a dictionary with:
4097 vm_id: #VIM id of this classifier
4098 status: #Mandatory. Text with one of:
4099 # DELETED (not found at vim)
4100 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
4101 # OTHER (Vim reported other status not understood)
4102 # ERROR (VIM indicates an ERROR status)
4103 # ACTIVE,
4104 # CREATING (on building process)
4105 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
4106 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
4107 """
4108 classification_dict = {}
4109 self.logger.debug(
4110 "refresh_classifications status: Getting tenant classification information from VIM"
4111 )
4112
4113 for classification_id in classification_list:
4114 classification = {}
4115
4116 try:
4117 classification_vim = self.get_classification(classification_id)
4118
4119 if classification_vim:
4120 classification["status"] = vmStatus2manoFormat["ACTIVE"]
4121 else:
4122 classification["status"] = "OTHER"
4123 classification["error_msg"] = (
4124 "VIM status reported " + classification["status"]
4125 )
4126
4127 classification["vim_info"] = self.serialize(classification_vim)
4128
4129 if classification_vim.get("fault"):
4130 classification["error_msg"] = str(classification_vim["fault"])
4131 except vimconn.VimConnNotFoundException as e:
4132 self.logger.error("Exception getting classification status: %s", str(e))
4133 classification["status"] = "DELETED"
4134 classification["error_msg"] = str(e)
4135 except vimconn.VimConnException as e:
4136 self.logger.error("Exception getting classification status: %s", str(e))
4137 classification["status"] = "VIM_ERROR"
4138 classification["error_msg"] = str(e)
4139
4140 classification_dict[classification_id] = classification
4141
4142 return classification_dict
4143
4144 def new_affinity_group(self, affinity_group_data):
4145 """Adds a server group to VIM
4146 affinity_group_data contains a dictionary with information, keys:
4147 name: name in VIM for the server group
4148 type: affinity or anti-affinity
4149 scope: Only nfvi-node allowed
4150 Returns the server group identifier"""
4151 self.logger.debug("Adding Server Group '%s'", str(affinity_group_data))
4152
4153 try:
4154 name = affinity_group_data["name"]
4155 policy = affinity_group_data["type"]
4156
4157 self._reload_connection()
4158 new_server_group = self.nova.server_groups.create(name, policy)
4159
4160 return new_server_group.id
4161 except (
4162 ksExceptions.ClientException,
4163 nvExceptions.ClientException,
4164 ConnectionError,
4165 KeyError,
4166 ) as e:
4167 self._format_exception(e)
4168
4169 def get_affinity_group(self, affinity_group_id):
4170 """Obtain server group details from the VIM. Returns the server group detais as a dict"""
4171 self.logger.debug("Getting flavor '%s'", affinity_group_id)
4172 try:
4173 self._reload_connection()
4174 server_group = self.nova.server_groups.find(id=affinity_group_id)
4175
4176 return server_group.to_dict()
4177 except (
4178 nvExceptions.NotFound,
4179 nvExceptions.ClientException,
4180 ksExceptions.ClientException,
4181 ConnectionError,
4182 ) as e:
4183 self._format_exception(e)
4184
4185 def delete_affinity_group(self, affinity_group_id):
4186 """Deletes a server group from the VIM. Returns the old affinity_group_id"""
4187 self.logger.debug("Getting server group '%s'", affinity_group_id)
4188 try:
4189 self._reload_connection()
4190 self.nova.server_groups.delete(affinity_group_id)
4191
4192 return affinity_group_id
4193 except (
4194 nvExceptions.NotFound,
4195 ksExceptions.ClientException,
4196 nvExceptions.ClientException,
4197 ConnectionError,
4198 ) as e:
4199 self._format_exception(e)
4200
4201 def get_vdu_state(self, vm_id):
4202 """
4203 Getting the state of a vdu
4204 param:
4205 vm_id: ID of an instance
4206 """
4207 self.logger.debug("Getting the status of VM")
4208 self.logger.debug("VIM VM ID %s", vm_id)
4209 self._reload_connection()
4210 server = self.nova.servers.find(id=vm_id)
4211 server_dict = server.to_dict()
4212 vdu_data = [
4213 server_dict["status"],
4214 server_dict["flavor"]["id"],
4215 server_dict["OS-EXT-SRV-ATTR:host"],
4216 server_dict["OS-EXT-AZ:availability_zone"],
4217 ]
4218 self.logger.debug("vdu_data %s", vdu_data)
4219 return vdu_data
4220
4221 def check_compute_availability(self, host, server_flavor_details):
4222 self._reload_connection()
4223 hypervisor_search = self.nova.hypervisors.search(
4224 hypervisor_match=host, servers=True
4225 )
4226 for hypervisor in hypervisor_search:
4227 hypervisor_id = hypervisor.to_dict()["id"]
4228 hypervisor_details = self.nova.hypervisors.get(hypervisor=hypervisor_id)
4229 hypervisor_dict = hypervisor_details.to_dict()
4230 hypervisor_temp = json.dumps(hypervisor_dict)
4231 hypervisor_json = json.loads(hypervisor_temp)
4232 resources_available = [
4233 hypervisor_json["free_ram_mb"],
4234 hypervisor_json["disk_available_least"],
4235 hypervisor_json["vcpus"] - hypervisor_json["vcpus_used"],
4236 ]
4237 compute_available = all(
4238 x > y for x, y in zip(resources_available, server_flavor_details)
4239 )
4240 if compute_available:
4241 return host
4242
    def check_availability_zone(
        self, old_az, server_flavor_details, old_host, host=None
    ):
        """Check whether the instance can stay in its availability zone.

        param:
            old_az: availability zone the instance currently runs in
            server_flavor_details: [ram, disk, vcpus] required by the flavor
            old_host: hypervisor the instance currently runs on
            host: optional target host; when given, it must belong to old_az
        Returns a dict:
            zone_check:           True when a suitable host exists in old_az
            compute_availability: host with enough free capacity, or None
        """
        self._reload_connection()
        az_check = {"zone_check": False, "compute_availability": None}
        aggregates_list = self.nova.aggregates.list()
        for aggregate in aggregates_list:
            aggregate_details = aggregate.to_dict()
            # NOTE(review): dumps/loads round-trip of an already-plain dict;
            # kept as-is to avoid behavior changes in this doc-only pass.
            aggregate_temp = json.dumps(aggregate_details)
            aggregate_json = json.loads(aggregate_temp)
            if aggregate_json["availability_zone"] == old_az:
                hosts_list = aggregate_json["hosts"]
                if host is not None:
                    # Explicit target: it must be a member of the old AZ and
                    # have enough free capacity for the flavor.
                    if host in hosts_list:
                        az_check["zone_check"] = True
                        available_compute_id = self.check_compute_availability(
                            host, server_flavor_details
                        )
                        if available_compute_id is not None:
                            az_check["compute_availability"] = available_compute_id
                else:
                    # No target given: scan the AZ for any other host with
                    # enough free capacity, stopping at the first match.
                    for check_host in hosts_list:
                        if check_host != old_host:
                            available_compute_id = self.check_compute_availability(
                                check_host, server_flavor_details
                            )
                            if available_compute_id is not None:
                                az_check["zone_check"] = True
                                az_check["compute_availability"] = available_compute_id
                                break
                    else:
                        # for/else: runs only when the loop did NOT break,
                        # i.e. no alternative host had capacity. The zone is
                        # still marked as valid (compute_availability stays
                        # None) — presumably intentional; confirm with callers.
                        az_check["zone_check"] = True
        return az_check
4276
    def migrate_instance(self, vm_id, compute_host=None):
        """
        Migrate a vdu
        param:
            vm_id: ID of an instance
            compute_host: Host to migrate the vdu to
        Returns a tuple (state, target_host) on success; raises
        vimconn.VimConnException (or a formatted VIM error) otherwise.
        """
        self._reload_connection()
        vm_state = False
        # get_vdu_state returns [status, flavor_id, host, availability_zone]
        instance_state = self.get_vdu_state(vm_id)
        server_flavor_id = instance_state[1]
        server_hypervisor_name = instance_state[2]
        server_availability_zone = instance_state[3]
        try:
            # Resource needs of the instance: [ram, disk, vcpus]
            server_flavor = self.nova.flavors.find(id=server_flavor_id).to_dict()
            server_flavor_details = [
                server_flavor["ram"],
                server_flavor["disk"],
                server_flavor["vcpus"],
            ]
            # Migrating onto the host the VM already runs on is rejected
            if compute_host == server_hypervisor_name:
                raise vimconn.VimConnException(
                    "Unable to migrate instance '{}' to the same host '{}'".format(
                        vm_id, compute_host
                    ),
                    http_code=vimconn.HTTP_Bad_Request,
                )
            # The target must stay in the same availability zone and have
            # enough free capacity (see check_availability_zone)
            az_status = self.check_availability_zone(
                server_availability_zone,
                server_flavor_details,
                server_hypervisor_name,
                compute_host,
            )
            availability_zone_check = az_status["zone_check"]
            available_compute_id = az_status.get("compute_availability")

            if availability_zone_check is False:
                raise vimconn.VimConnException(
                    "Unable to migrate instance '{}' to a different availability zone".format(
                        vm_id
                    ),
                    http_code=vimconn.HTTP_Bad_Request,
                )
            if available_compute_id is not None:
                # Live-migrate and then wait for the VM to become ACTIVE
                self.nova.servers.live_migrate(
                    server=vm_id,
                    host=available_compute_id,
                    block_migration=True,
                    disk_over_commit=False,
                )
                state = "MIGRATING"
                changed_compute_host = ""
                if state == "MIGRATING":
                    vm_state = self.__wait_for_vm(vm_id, "ACTIVE")
                    changed_compute_host = self.get_vdu_state(vm_id)[2]
                # Success only if the VM is ACTIVE *and* actually moved to
                # the selected host
                if vm_state and changed_compute_host == available_compute_id:
                    self.logger.debug(
                        "Instance '{}' migrated to the new compute host '{}'".format(
                            vm_id, changed_compute_host
                        )
                    )
                    return state, available_compute_id
                else:
                    raise vimconn.VimConnException(
                        "Migration Failed. Instance '{}' not moved to the new host {}".format(
                            vm_id, available_compute_id
                        ),
                        http_code=vimconn.HTTP_Bad_Request,
                    )
            else:
                # NOTE(review): available_compute_id is None here, so the
                # message renders "Compute 'None' ..."
                raise vimconn.VimConnException(
                    "Compute '{}' not available or does not have enough resources to migrate the instance".format(
                        available_compute_id
                    ),
                    http_code=vimconn.HTTP_Bad_Request,
                )
        except (
            nvExceptions.BadRequest,
            nvExceptions.ClientException,
            nvExceptions.NotFound,
        ) as e:
            self._format_exception(e)
4359
4360 def resize_instance(self, vm_id, new_flavor_id):
4361 """
4362 For resizing the vm based on the given
4363 flavor details
4364 param:
4365 vm_id : ID of an instance
4366 new_flavor_id : Flavor id to be resized
4367 Return the status of a resized instance
4368 """
4369 self._reload_connection()
4370 self.logger.debug("resize the flavor of an instance")
4371 instance_status, old_flavor_id, compute_host, az = self.get_vdu_state(vm_id)
4372 old_flavor_disk = self.nova.flavors.find(id=old_flavor_id).to_dict()["disk"]
4373 new_flavor_disk = self.nova.flavors.find(id=new_flavor_id).to_dict()["disk"]
4374 try:
4375 if instance_status == "ACTIVE" or instance_status == "SHUTOFF":
4376 if old_flavor_disk > new_flavor_disk:
4377 raise nvExceptions.BadRequest(
4378 400,
4379 message="Server disk resize failed. Resize to lower disk flavor is not allowed",
4380 )
4381 else:
4382 self.nova.servers.resize(server=vm_id, flavor=new_flavor_id)
4383 vm_state = self.__wait_for_vm(vm_id, "VERIFY_RESIZE")
4384 if vm_state:
4385 instance_resized_status = self.confirm_resize(vm_id)
4386 return instance_resized_status
4387 else:
4388 raise nvExceptions.BadRequest(
4389 409,
4390 message="Cannot 'resize' vm_state is in ERROR",
4391 )
4392
4393 else:
4394 self.logger.debug("ERROR : Instance is not in ACTIVE or SHUTOFF state")
4395 raise nvExceptions.BadRequest(
4396 409,
4397 message="Cannot 'resize' instance while it is in vm_state resized",
4398 )
4399 except (
4400 nvExceptions.BadRequest,
4401 nvExceptions.ClientException,
4402 nvExceptions.NotFound,
4403 ) as e:
4404 self._format_exception(e)
4405
4406 def confirm_resize(self, vm_id):
4407 """
4408 Confirm the resize of an instance
4409 param:
4410 vm_id: ID of an instance
4411 """
4412 self._reload_connection()
4413 self.nova.servers.confirm_resize(server=vm_id)
4414 if self.get_vdu_state(vm_id)[0] == "VERIFY_RESIZE":
4415 self.__wait_for_vm(vm_id, "ACTIVE")
4416 instance_status = self.get_vdu_state(vm_id)[0]
4417 return instance_status