Feature 10960 Performance optimizations for the polling of VM status in RO
[osm/RO.git] / RO-VIM-openstack / osm_rovim_openstack / vimconn_openstack.py
1 # -*- coding: utf-8 -*-
2
3 ##
4 # Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
5 # This file is part of openmano
6 # All Rights Reserved.
7 #
8 # Licensed under the Apache License, Version 2.0 (the "License"); you may
9 # not use this file except in compliance with the License. You may obtain
10 # a copy of the License at
11 #
12 # http://www.apache.org/licenses/LICENSE-2.0
13 #
14 # Unless required by applicable law or agreed to in writing, software
15 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
16 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
17 # License for the specific language governing permissions and limitations
18 # under the License.
19 ##
20
21 """
osconnector implements all the methods to interact with openstack using the python clients (keystoneclient, novaclient, neutronclient, cinderclient and glanceclient).
23
24 For the VNF forwarding graph, The OpenStack VIM connector calls the
25 networking-sfc Neutron extension methods, whose resources are mapped
26 to the VIM connector's SFC resources as follows:
27 - Classification (OSM) -> Flow Classifier (Neutron)
28 - Service Function Instance (OSM) -> Port Pair (Neutron)
29 - Service Function (OSM) -> Port Pair Group (Neutron)
30 - Service Function Path (OSM) -> Port Chain (Neutron)
31 """
32
33 import copy
34 from http.client import HTTPException
35 import json
36 import logging
37 from pprint import pformat
38 import random
39 import re
40 import time
41 from typing import Dict, List, Optional, Tuple
42
43 from cinderclient import client as cClient
44 from glanceclient import client as glClient
45 import glanceclient.exc as gl1Exceptions
46 from keystoneauth1 import session
47 from keystoneauth1.identity import v2, v3
48 import keystoneclient.exceptions as ksExceptions
49 import keystoneclient.v2_0.client as ksClient_v2
50 import keystoneclient.v3.client as ksClient_v3
51 import netaddr
52 from neutronclient.common import exceptions as neExceptions
53 from neutronclient.neutron import client as neClient
54 from novaclient import client as nClient, exceptions as nvExceptions
55 from osm_ro_plugin import vimconn
56 from requests.exceptions import ConnectionError
57 import yaml
58
59 __author__ = "Alfonso Tierno, Gerardo Garcia, Pablo Montes, xFlow Research, Igor D.C., Eduardo Sousa"
60 __date__ = "$22-sep-2017 23:59:59$"
61
"""contain the openstack virtual machine status to openmano status"""
# Mapping of nova server status -> openmano (RO) VM status.
vmStatus2manoFormat = {
    "ACTIVE": "ACTIVE",
    "PAUSED": "PAUSED",
    "SUSPENDED": "SUSPENDED",
    "SHUTOFF": "INACTIVE",
    "BUILD": "BUILD",
    "ERROR": "ERROR",
    "DELETED": "DELETED",
}
# Mapping of neutron network status -> openmano (RO) network status.
netStatus2manoFormat = {
    "ACTIVE": "ACTIVE",
    "PAUSED": "PAUSED",
    "INACTIVE": "INACTIVE",
    "BUILD": "BUILD",
    "ERROR": "ERROR",
    "DELETED": "DELETED",
}

# SFC classification types this connector accepts (networking-sfc flow classifier)
supportedClassificationTypes = ["legacy_flow_classifier"]

# global var to have a timeout creating and deleting volumes
volume_timeout = 1800
# timeout (seconds) for server (VM) create/delete operations
server_timeout = 1800
86
87
class SafeDumper(yaml.SafeDumper):
    """YAML safe dumper that tolerates the dict subclasses returned by OpenStack APIs."""

    def represent_data(self, data):
        # PyYAML's safe dumper deliberately refuses custom subclasses of dict
        # (see pyyaml issue 142); normalize them back to plain dicts so the
        # OpenStack API objects serialize cleanly.
        if isinstance(data, dict) and data.__class__ != dict:
            data = dict(data.items())

        return super(SafeDumper, self).represent_data(data)
97
98
99 class vimconnector(vimconn.VimConnector):
100 def __init__(
101 self,
102 uuid,
103 name,
104 tenant_id,
105 tenant_name,
106 url,
107 url_admin=None,
108 user=None,
109 passwd=None,
110 log_level=None,
111 config={},
112 persistent_info={},
113 ):
114 """using common constructor parameters. In this case
115 'url' is the keystone authorization url,
116 'url_admin' is not use
117 """
118 api_version = config.get("APIversion")
119
120 if api_version and api_version not in ("v3.3", "v2.0", "2", "3"):
121 raise vimconn.VimConnException(
122 "Invalid value '{}' for config:APIversion. "
123 "Allowed values are 'v3.3', 'v2.0', '2' or '3'".format(api_version)
124 )
125
126 vim_type = config.get("vim_type")
127
128 if vim_type and vim_type not in ("vio", "VIO"):
129 raise vimconn.VimConnException(
130 "Invalid value '{}' for config:vim_type."
131 "Allowed values are 'vio' or 'VIO'".format(vim_type)
132 )
133
134 if config.get("dataplane_net_vlan_range") is not None:
135 # validate vlan ranges provided by user
136 self._validate_vlan_ranges(
137 config.get("dataplane_net_vlan_range"), "dataplane_net_vlan_range"
138 )
139
140 if config.get("multisegment_vlan_range") is not None:
141 # validate vlan ranges provided by user
142 self._validate_vlan_ranges(
143 config.get("multisegment_vlan_range"), "multisegment_vlan_range"
144 )
145
146 vimconn.VimConnector.__init__(
147 self,
148 uuid,
149 name,
150 tenant_id,
151 tenant_name,
152 url,
153 url_admin,
154 user,
155 passwd,
156 log_level,
157 config,
158 )
159
160 if self.config.get("insecure") and self.config.get("ca_cert"):
161 raise vimconn.VimConnException(
162 "options insecure and ca_cert are mutually exclusive"
163 )
164
165 self.verify = True
166
167 if self.config.get("insecure"):
168 self.verify = False
169
170 if self.config.get("ca_cert"):
171 self.verify = self.config.get("ca_cert")
172
173 if not url:
174 raise TypeError("url param can not be NoneType")
175
176 self.persistent_info = persistent_info
177 self.availability_zone = persistent_info.get("availability_zone", None)
178 self.session = persistent_info.get("session", {"reload_client": True})
179 self.my_tenant_id = self.session.get("my_tenant_id")
180 self.nova = self.session.get("nova")
181 self.neutron = self.session.get("neutron")
182 self.cinder = self.session.get("cinder")
183 self.glance = self.session.get("glance")
184 # self.glancev1 = self.session.get("glancev1")
185 self.keystone = self.session.get("keystone")
186 self.api_version3 = self.session.get("api_version3")
187 self.vim_type = self.config.get("vim_type")
188
189 if self.vim_type:
190 self.vim_type = self.vim_type.upper()
191
192 if self.config.get("use_internal_endpoint"):
193 self.endpoint_type = "internalURL"
194 else:
195 self.endpoint_type = None
196
197 logging.getLogger("urllib3").setLevel(logging.WARNING)
198 logging.getLogger("keystoneauth").setLevel(logging.WARNING)
199 logging.getLogger("novaclient").setLevel(logging.WARNING)
200 self.logger = logging.getLogger("ro.vim.openstack")
201
202 # allow security_groups to be a list or a single string
203 if isinstance(self.config.get("security_groups"), str):
204 self.config["security_groups"] = [self.config["security_groups"]]
205
206 self.security_groups_id = None
207
208 # ###### VIO Specific Changes #########
209 if self.vim_type == "VIO":
210 self.logger = logging.getLogger("ro.vim.vio")
211
212 if log_level:
213 self.logger.setLevel(getattr(logging, log_level))
214
215 def __getitem__(self, index):
216 """Get individuals parameters.
217 Throw KeyError"""
218 if index == "project_domain_id":
219 return self.config.get("project_domain_id")
220 elif index == "user_domain_id":
221 return self.config.get("user_domain_id")
222 else:
223 return vimconn.VimConnector.__getitem__(self, index)
224
225 def __setitem__(self, index, value):
226 """Set individuals parameters and it is marked as dirty so to force connection reload.
227 Throw KeyError"""
228 if index == "project_domain_id":
229 self.config["project_domain_id"] = value
230 elif index == "user_domain_id":
231 self.config["user_domain_id"] = value
232 else:
233 vimconn.VimConnector.__setitem__(self, index, value)
234
235 self.session["reload_client"] = True
236
237 def serialize(self, value):
238 """Serialization of python basic types.
239
240 In the case value is not serializable a message will be logged and a
241 simple representation of the data that cannot be converted back to
242 python is returned.
243 """
244 if isinstance(value, str):
245 return value
246
247 try:
248 return yaml.dump(
249 value, Dumper=SafeDumper, default_flow_style=True, width=256
250 )
251 except yaml.representer.RepresenterError:
252 self.logger.debug(
253 "The following entity cannot be serialized in YAML:\n\n%s\n\n",
254 pformat(value),
255 exc_info=True,
256 )
257
258 return str(value)
259
    def _reload_connection(self):
        """Called before any operation, it check if credentials has changed
        Throw keystoneclient.apiclient.exceptions.AuthorizationFailure

        Rebuilds the keystone/nova/neutron/cinder/glance clients only when
        self.session["reload_client"] is set (by __init__ on a fresh
        persistent_info, or by __setitem__ after a parameter change); the
        rebuilt clients are cached back into self.session and
        self.persistent_info for reuse.
        """
        # TODO control the timing and possible token timeout, but it seams that python client does this task for us :-)
        if self.session["reload_client"]:
            # decide identity API version: explicit config wins, otherwise
            # infer it from the auth_url suffix
            if self.config.get("APIversion"):
                self.api_version3 = (
                    self.config["APIversion"] == "v3.3"
                    or self.config["APIversion"] == "3"
                )
            else:  # get from ending auth_url that end with v3 or with v2.0
                self.api_version3 = self.url.endswith("/v3") or self.url.endswith(
                    "/v3/"
                )

            self.session["api_version3"] = self.api_version3

            if self.api_version3:
                # only fall back to the "default" domain when the user gave
                # neither a domain id nor a domain name
                if self.config.get("project_domain_id") or self.config.get(
                    "project_domain_name"
                ):
                    project_domain_id_default = None
                else:
                    project_domain_id_default = "default"

                if self.config.get("user_domain_id") or self.config.get(
                    "user_domain_name"
                ):
                    user_domain_id_default = None
                else:
                    user_domain_id_default = "default"
                auth = v3.Password(
                    auth_url=self.url,
                    username=self.user,
                    password=self.passwd,
                    project_name=self.tenant_name,
                    project_id=self.tenant_id,
                    project_domain_id=self.config.get(
                        "project_domain_id", project_domain_id_default
                    ),
                    user_domain_id=self.config.get(
                        "user_domain_id", user_domain_id_default
                    ),
                    project_domain_name=self.config.get("project_domain_name"),
                    user_domain_name=self.config.get("user_domain_name"),
                )
            else:
                auth = v2.Password(
                    auth_url=self.url,
                    username=self.user,
                    password=self.passwd,
                    tenant_name=self.tenant_name,
                    tenant_id=self.tenant_id,
                )

            # self.verify is True, False (insecure) or a CA bundle path
            sess = session.Session(auth=auth, verify=self.verify)
            # addedd region_name to keystone, nova, neutron and cinder to support distributed cloud for Wind River
            # Titanium cloud and StarlingX
            region_name = self.config.get("region_name")

            if self.api_version3:
                self.keystone = ksClient_v3.Client(
                    session=sess,
                    endpoint_type=self.endpoint_type,
                    region_name=region_name,
                )
            else:
                self.keystone = ksClient_v2.Client(
                    session=sess, endpoint_type=self.endpoint_type
                )

            self.session["keystone"] = self.keystone
            # In order to enable microversion functionality an explicit microversion must be specified in "config".
            # This implementation approach is due to the warning message in
            # https://developer.openstack.org/api-guide/compute/microversions.html
            # where it is stated that microversion backwards compatibility is not guaranteed and clients should
            # always require an specific microversion.
            # To be able to use "device role tagging" functionality define "microversion: 2.32" in datacenter config
            version = self.config.get("microversion")

            if not version:
                version = "2.1"

            # addedd region_name to keystone, nova, neutron and cinder to support distributed cloud for Wind River
            # Titanium cloud and StarlingX
            self.nova = self.session["nova"] = nClient.Client(
                str(version),
                session=sess,
                endpoint_type=self.endpoint_type,
                region_name=region_name,
            )
            self.neutron = self.session["neutron"] = neClient.Client(
                "2.0",
                session=sess,
                endpoint_type=self.endpoint_type,
                region_name=region_name,
            )
            self.cinder = self.session["cinder"] = cClient.Client(
                2,
                session=sess,
                endpoint_type=self.endpoint_type,
                region_name=region_name,
            )

            # best effort: a missing project id is logged but does not abort the reload
            try:
                self.my_tenant_id = self.session["my_tenant_id"] = sess.get_project_id()
            except Exception:
                self.logger.error("Cannot get project_id from session", exc_info=True)

            # glanceclient takes an explicit endpoint, so resolve the internal
            # one from the keystone catalog when internal endpoints are requested
            if self.endpoint_type == "internalURL":
                glance_service_id = self.keystone.services.list(name="glance")[0].id
                glance_endpoint = self.keystone.endpoints.list(
                    glance_service_id, interface="internal"
                )[0].url
            else:
                glance_endpoint = None

            self.glance = self.session["glance"] = glClient.Client(
                2, session=sess, endpoint=glance_endpoint
            )
            # using version 1 of glance client in new_image()
            # self.glancev1 = self.session["glancev1"] = glClient.Client("1", session=sess,
            #     endpoint=glance_endpoint)
            self.session["reload_client"] = False
            self.persistent_info["session"] = self.session
            # add availablity zone info inside self.persistent_info
            self._set_availablity_zones()
            self.persistent_info["availability_zone"] = self.availability_zone
            # force to get again security_groups_ids next time they are needed
            self.security_groups_id = None
391
392 def __net_os2mano(self, net_list_dict):
393 """Transform the net openstack format to mano format
394 net_list_dict can be a list of dict or a single dict"""
395 if type(net_list_dict) is dict:
396 net_list_ = (net_list_dict,)
397 elif type(net_list_dict) is list:
398 net_list_ = net_list_dict
399 else:
400 raise TypeError("param net_list_dict must be a list or a dictionary")
401 for net in net_list_:
402 if net.get("provider:network_type") == "vlan":
403 net["type"] = "data"
404 else:
405 net["type"] = "bridge"
406
407 def __classification_os2mano(self, class_list_dict):
408 """Transform the openstack format (Flow Classifier) to mano format
409 (Classification) class_list_dict can be a list of dict or a single dict
410 """
411 if isinstance(class_list_dict, dict):
412 class_list_ = [class_list_dict]
413 elif isinstance(class_list_dict, list):
414 class_list_ = class_list_dict
415 else:
416 raise TypeError("param class_list_dict must be a list or a dictionary")
417 for classification in class_list_:
418 id = classification.pop("id")
419 name = classification.pop("name")
420 description = classification.pop("description")
421 project_id = classification.pop("project_id")
422 tenant_id = classification.pop("tenant_id")
423 original_classification = copy.deepcopy(classification)
424 classification.clear()
425 classification["ctype"] = "legacy_flow_classifier"
426 classification["definition"] = original_classification
427 classification["id"] = id
428 classification["name"] = name
429 classification["description"] = description
430 classification["project_id"] = project_id
431 classification["tenant_id"] = tenant_id
432
433 def __sfi_os2mano(self, sfi_list_dict):
434 """Transform the openstack format (Port Pair) to mano format (SFI)
435 sfi_list_dict can be a list of dict or a single dict
436 """
437 if isinstance(sfi_list_dict, dict):
438 sfi_list_ = [sfi_list_dict]
439 elif isinstance(sfi_list_dict, list):
440 sfi_list_ = sfi_list_dict
441 else:
442 raise TypeError("param sfi_list_dict must be a list or a dictionary")
443
444 for sfi in sfi_list_:
445 sfi["ingress_ports"] = []
446 sfi["egress_ports"] = []
447
448 if sfi.get("ingress"):
449 sfi["ingress_ports"].append(sfi["ingress"])
450
451 if sfi.get("egress"):
452 sfi["egress_ports"].append(sfi["egress"])
453
454 del sfi["ingress"]
455 del sfi["egress"]
456 params = sfi.get("service_function_parameters")
457 sfc_encap = False
458
459 if params:
460 correlation = params.get("correlation")
461
462 if correlation:
463 sfc_encap = True
464
465 sfi["sfc_encap"] = sfc_encap
466 del sfi["service_function_parameters"]
467
468 def __sf_os2mano(self, sf_list_dict):
469 """Transform the openstack format (Port Pair Group) to mano format (SF)
470 sf_list_dict can be a list of dict or a single dict
471 """
472 if isinstance(sf_list_dict, dict):
473 sf_list_ = [sf_list_dict]
474 elif isinstance(sf_list_dict, list):
475 sf_list_ = sf_list_dict
476 else:
477 raise TypeError("param sf_list_dict must be a list or a dictionary")
478
479 for sf in sf_list_:
480 del sf["port_pair_group_parameters"]
481 sf["sfis"] = sf["port_pairs"]
482 del sf["port_pairs"]
483
484 def __sfp_os2mano(self, sfp_list_dict):
485 """Transform the openstack format (Port Chain) to mano format (SFP)
486 sfp_list_dict can be a list of dict or a single dict
487 """
488 if isinstance(sfp_list_dict, dict):
489 sfp_list_ = [sfp_list_dict]
490 elif isinstance(sfp_list_dict, list):
491 sfp_list_ = sfp_list_dict
492 else:
493 raise TypeError("param sfp_list_dict must be a list or a dictionary")
494
495 for sfp in sfp_list_:
496 params = sfp.pop("chain_parameters")
497 sfc_encap = False
498
499 if params:
500 correlation = params.get("correlation")
501
502 if correlation:
503 sfc_encap = True
504
505 sfp["sfc_encap"] = sfc_encap
506 sfp["spi"] = sfp.pop("chain_id")
507 sfp["classifications"] = sfp.pop("flow_classifiers")
508 sfp["service_functions"] = sfp.pop("port_pair_groups")
509
510 # placeholder for now; read TODO note below
511 def _validate_classification(self, type, definition):
512 # only legacy_flow_classifier Type is supported at this point
513 return True
514 # TODO(igordcard): this method should be an abstract method of an
515 # abstract Classification class to be implemented by the specific
516 # Types. Also, abstract vimconnector should call the validation
517 # method before the implemented VIM connectors are called.
518
519 def _format_exception(self, exception):
520 """Transform a keystone, nova, neutron exception into a vimconn exception discovering the cause"""
521 message_error = str(exception)
522 tip = ""
523
524 if isinstance(
525 exception,
526 (
527 neExceptions.NetworkNotFoundClient,
528 nvExceptions.NotFound,
529 ksExceptions.NotFound,
530 gl1Exceptions.HTTPNotFound,
531 ),
532 ):
533 raise vimconn.VimConnNotFoundException(
534 type(exception).__name__ + ": " + message_error
535 )
536 elif isinstance(
537 exception,
538 (
539 HTTPException,
540 gl1Exceptions.HTTPException,
541 gl1Exceptions.CommunicationError,
542 ConnectionError,
543 ksExceptions.ConnectionError,
544 neExceptions.ConnectionFailed,
545 ),
546 ):
547 if type(exception).__name__ == "SSLError":
548 tip = " (maybe option 'insecure' must be added to the VIM)"
549
550 raise vimconn.VimConnConnectionException(
551 "Invalid URL or credentials{}: {}".format(tip, message_error)
552 )
553 elif isinstance(
554 exception,
555 (
556 KeyError,
557 nvExceptions.BadRequest,
558 ksExceptions.BadRequest,
559 ),
560 ):
561 raise vimconn.VimConnException(
562 type(exception).__name__ + ": " + message_error
563 )
564 elif isinstance(
565 exception,
566 (
567 nvExceptions.ClientException,
568 ksExceptions.ClientException,
569 neExceptions.NeutronException,
570 ),
571 ):
572 raise vimconn.VimConnUnexpectedResponse(
573 type(exception).__name__ + ": " + message_error
574 )
575 elif isinstance(exception, nvExceptions.Conflict):
576 raise vimconn.VimConnConflictException(
577 type(exception).__name__ + ": " + message_error
578 )
579 elif isinstance(exception, vimconn.VimConnException):
580 raise exception
581 else: # ()
582 self.logger.error("General Exception " + message_error, exc_info=True)
583
584 raise vimconn.VimConnConnectionException(
585 type(exception).__name__ + ": " + message_error
586 )
587
588 def _get_ids_from_name(self):
589 """
590 Obtain ids from name of tenant and security_groups. Store at self .security_groups_id"
591 :return: None
592 """
593 # get tenant_id if only tenant_name is supplied
594 self._reload_connection()
595
596 if not self.my_tenant_id:
597 raise vimconn.VimConnConnectionException(
598 "Error getting tenant information from name={} id={}".format(
599 self.tenant_name, self.tenant_id
600 )
601 )
602
603 if self.config.get("security_groups") and not self.security_groups_id:
604 # convert from name to id
605 neutron_sg_list = self.neutron.list_security_groups(
606 tenant_id=self.my_tenant_id
607 )["security_groups"]
608
609 self.security_groups_id = []
610 for sg in self.config.get("security_groups"):
611 for neutron_sg in neutron_sg_list:
612 if sg in (neutron_sg["id"], neutron_sg["name"]):
613 self.security_groups_id.append(neutron_sg["id"])
614 break
615 else:
616 self.security_groups_id = None
617
618 raise vimconn.VimConnConnectionException(
619 "Not found security group {} for this tenant".format(sg)
620 )
621
    def check_vim_connectivity(self):
        """Probe the VIM; raises (via the list-call error handling) when unreachable or credentials are bad."""
        # just get network list to check connectivity and credentials
        self.get_network_list(filter_dict={})
625
626 def get_tenant_list(self, filter_dict={}):
627 """Obtain tenants of VIM
628 filter_dict can contain the following keys:
629 name: filter by tenant name
630 id: filter by tenant uuid/id
631 <other VIM specific>
632 Returns the tenant list of dictionaries: [{'name':'<name>, 'id':'<id>, ...}, ...]
633 """
634 self.logger.debug("Getting tenants from VIM filter: '%s'", str(filter_dict))
635
636 try:
637 self._reload_connection()
638
639 if self.api_version3:
640 project_class_list = self.keystone.projects.list(
641 name=filter_dict.get("name")
642 )
643 else:
644 project_class_list = self.keystone.tenants.findall(**filter_dict)
645
646 project_list = []
647
648 for project in project_class_list:
649 if filter_dict.get("id") and filter_dict["id"] != project.id:
650 continue
651
652 project_list.append(project.to_dict())
653
654 return project_list
655 except (
656 ksExceptions.ConnectionError,
657 ksExceptions.ClientException,
658 ConnectionError,
659 ) as e:
660 self._format_exception(e)
661
662 def new_tenant(self, tenant_name, tenant_description):
663 """Adds a new tenant to openstack VIM. Returns the tenant identifier"""
664 self.logger.debug("Adding a new tenant name: %s", tenant_name)
665
666 try:
667 self._reload_connection()
668
669 if self.api_version3:
670 project = self.keystone.projects.create(
671 tenant_name,
672 self.config.get("project_domain_id", "default"),
673 description=tenant_description,
674 is_domain=False,
675 )
676 else:
677 project = self.keystone.tenants.create(tenant_name, tenant_description)
678
679 return project.id
680 except (
681 ksExceptions.ConnectionError,
682 ksExceptions.ClientException,
683 ksExceptions.BadRequest,
684 ConnectionError,
685 ) as e:
686 self._format_exception(e)
687
688 def delete_tenant(self, tenant_id):
689 """Delete a tenant from openstack VIM. Returns the old tenant identifier"""
690 self.logger.debug("Deleting tenant %s from VIM", tenant_id)
691
692 try:
693 self._reload_connection()
694
695 if self.api_version3:
696 self.keystone.projects.delete(tenant_id)
697 else:
698 self.keystone.tenants.delete(tenant_id)
699
700 return tenant_id
701 except (
702 ksExceptions.ConnectionError,
703 ksExceptions.ClientException,
704 ksExceptions.NotFound,
705 ConnectionError,
706 ) as e:
707 self._format_exception(e)
708
709 def new_network(
710 self,
711 net_name,
712 net_type,
713 ip_profile=None,
714 shared=False,
715 provider_network_profile=None,
716 ):
717 """Adds a tenant network to VIM
718 Params:
719 'net_name': name of the network
720 'net_type': one of:
721 'bridge': overlay isolated network
722 'data': underlay E-LAN network for Passthrough and SRIOV interfaces
723 'ptp': underlay E-LINE network for Passthrough and SRIOV interfaces.
724 'ip_profile': is a dict containing the IP parameters of the network
725 'ip_version': can be "IPv4" or "IPv6" (Currently only IPv4 is implemented)
726 'subnet_address': ip_prefix_schema, that is X.X.X.X/Y
727 'gateway_address': (Optional) ip_schema, that is X.X.X.X
728 'dns_address': (Optional) comma separated list of ip_schema, e.g. X.X.X.X[,X,X,X,X]
729 'dhcp_enabled': True or False
730 'dhcp_start_address': ip_schema, first IP to grant
731 'dhcp_count': number of IPs to grant.
732 'shared': if this network can be seen/use by other tenants/organization
733 'provider_network_profile': (optional) contains {segmentation-id: vlan, network-type: vlan|vxlan,
734 physical-network: physnet-label}
735 Returns a tuple with the network identifier and created_items, or raises an exception on error
736 created_items can be None or a dictionary where this method can include key-values that will be passed to
737 the method delete_network. Can be used to store created segments, created l2gw connections, etc.
738 Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
739 as not present.
740 """
741 self.logger.debug(
742 "Adding a new network to VIM name '%s', type '%s'", net_name, net_type
743 )
744 # self.logger.debug(">>>>>>>>>>>>>>>>>> IP profile %s", str(ip_profile))
745
746 try:
747 vlan = None
748
749 if provider_network_profile:
750 vlan = provider_network_profile.get("segmentation-id")
751
752 new_net = None
753 created_items = {}
754 self._reload_connection()
755 network_dict = {"name": net_name, "admin_state_up": True}
756
757 if net_type in ("data", "ptp") or provider_network_profile:
758 provider_physical_network = None
759
760 if provider_network_profile and provider_network_profile.get(
761 "physical-network"
762 ):
763 provider_physical_network = provider_network_profile.get(
764 "physical-network"
765 )
766
767 # provider-network must be one of the dataplane_physcial_netowrk if this is a list. If it is string
768 # or not declared, just ignore the checking
769 if (
770 isinstance(
771 self.config.get("dataplane_physical_net"), (tuple, list)
772 )
773 and provider_physical_network
774 not in self.config["dataplane_physical_net"]
775 ):
776 raise vimconn.VimConnConflictException(
777 "Invalid parameter 'provider-network:physical-network' "
778 "for network creation. '{}' is not one of the declared "
779 "list at VIM_config:dataplane_physical_net".format(
780 provider_physical_network
781 )
782 )
783
784 # use the default dataplane_physical_net
785 if not provider_physical_network:
786 provider_physical_network = self.config.get(
787 "dataplane_physical_net"
788 )
789
790 # if it is non empty list, use the first value. If it is a string use the value directly
791 if (
792 isinstance(provider_physical_network, (tuple, list))
793 and provider_physical_network
794 ):
795 provider_physical_network = provider_physical_network[0]
796
797 if not provider_physical_network:
798 raise vimconn.VimConnConflictException(
799 "missing information needed for underlay networks. Provide "
800 "'dataplane_physical_net' configuration at VIM or use the NS "
801 "instantiation parameter 'provider-network.physical-network'"
802 " for the VLD"
803 )
804
805 if not self.config.get("multisegment_support"):
806 network_dict[
807 "provider:physical_network"
808 ] = provider_physical_network
809
810 if (
811 provider_network_profile
812 and "network-type" in provider_network_profile
813 ):
814 network_dict[
815 "provider:network_type"
816 ] = provider_network_profile["network-type"]
817 else:
818 network_dict["provider:network_type"] = self.config.get(
819 "dataplane_network_type", "vlan"
820 )
821
822 if vlan:
823 network_dict["provider:segmentation_id"] = vlan
824 else:
825 # Multi-segment case
826 segment_list = []
827 segment1_dict = {
828 "provider:physical_network": "",
829 "provider:network_type": "vxlan",
830 }
831 segment_list.append(segment1_dict)
832 segment2_dict = {
833 "provider:physical_network": provider_physical_network,
834 "provider:network_type": "vlan",
835 }
836
837 if vlan:
838 segment2_dict["provider:segmentation_id"] = vlan
839 elif self.config.get("multisegment_vlan_range"):
840 vlanID = self._generate_multisegment_vlanID()
841 segment2_dict["provider:segmentation_id"] = vlanID
842
843 # else
844 # raise vimconn.VimConnConflictException(
845 # "You must provide "multisegment_vlan_range" at config dict before creating a multisegment
846 # network")
847 segment_list.append(segment2_dict)
848 network_dict["segments"] = segment_list
849
850 # VIO Specific Changes. It needs a concrete VLAN
851 if self.vim_type == "VIO" and vlan is None:
852 if self.config.get("dataplane_net_vlan_range") is None:
853 raise vimconn.VimConnConflictException(
854 "You must provide 'dataplane_net_vlan_range' in format "
855 "[start_ID - end_ID] at VIM_config for creating underlay "
856 "networks"
857 )
858
859 network_dict["provider:segmentation_id"] = self._generate_vlanID()
860
861 network_dict["shared"] = shared
862
863 if self.config.get("disable_network_port_security"):
864 network_dict["port_security_enabled"] = False
865
866 if self.config.get("neutron_availability_zone_hints"):
867 hints = self.config.get("neutron_availability_zone_hints")
868
869 if isinstance(hints, str):
870 hints = [hints]
871
872 network_dict["availability_zone_hints"] = hints
873
874 new_net = self.neutron.create_network({"network": network_dict})
875 # print new_net
876 # create subnetwork, even if there is no profile
877
878 if not ip_profile:
879 ip_profile = {}
880
881 if not ip_profile.get("subnet_address"):
882 # Fake subnet is required
883 subnet_rand = random.randint(0, 255)
884 ip_profile["subnet_address"] = "192.168.{}.0/24".format(subnet_rand)
885
886 if "ip_version" not in ip_profile:
887 ip_profile["ip_version"] = "IPv4"
888
889 subnet = {
890 "name": net_name + "-subnet",
891 "network_id": new_net["network"]["id"],
892 "ip_version": 4 if ip_profile["ip_version"] == "IPv4" else 6,
893 "cidr": ip_profile["subnet_address"],
894 }
895
896 # Gateway should be set to None if not needed. Otherwise openstack assigns one by default
897 if ip_profile.get("gateway_address"):
898 subnet["gateway_ip"] = ip_profile["gateway_address"]
899 else:
900 subnet["gateway_ip"] = None
901
902 if ip_profile.get("dns_address"):
903 subnet["dns_nameservers"] = ip_profile["dns_address"].split(";")
904
905 if "dhcp_enabled" in ip_profile:
906 subnet["enable_dhcp"] = (
907 False
908 if ip_profile["dhcp_enabled"] == "false"
909 or ip_profile["dhcp_enabled"] is False
910 else True
911 )
912
913 if ip_profile.get("dhcp_start_address"):
914 subnet["allocation_pools"] = []
915 subnet["allocation_pools"].append(dict())
916 subnet["allocation_pools"][0]["start"] = ip_profile[
917 "dhcp_start_address"
918 ]
919
920 if ip_profile.get("dhcp_count"):
921 # parts = ip_profile["dhcp_start_address"].split(".")
922 # ip_int = (int(parts[0]) << 24) + (int(parts[1]) << 16) + (int(parts[2]) << 8) + int(parts[3])
923 ip_int = int(netaddr.IPAddress(ip_profile["dhcp_start_address"]))
924 ip_int += ip_profile["dhcp_count"] - 1
925 ip_str = str(netaddr.IPAddress(ip_int))
926 subnet["allocation_pools"][0]["end"] = ip_str
927
928 # self.logger.debug(">>>>>>>>>>>>>>>>>> Subnet: %s", str(subnet))
929 self.neutron.create_subnet({"subnet": subnet})
930
931 if net_type == "data" and self.config.get("multisegment_support"):
932 if self.config.get("l2gw_support"):
933 l2gw_list = self.neutron.list_l2_gateways().get("l2_gateways", ())
934 for l2gw in l2gw_list:
935 l2gw_conn = {
936 "l2_gateway_id": l2gw["id"],
937 "network_id": new_net["network"]["id"],
938 "segmentation_id": str(vlanID),
939 }
940 new_l2gw_conn = self.neutron.create_l2_gateway_connection(
941 {"l2_gateway_connection": l2gw_conn}
942 )
943 created_items[
944 "l2gwconn:"
945 + str(new_l2gw_conn["l2_gateway_connection"]["id"])
946 ] = True
947
948 return new_net["network"]["id"], created_items
949 except Exception as e:
950 # delete l2gw connections (if any) before deleting the network
951 for k, v in created_items.items():
952 if not v: # skip already deleted
953 continue
954
955 try:
956 k_item, _, k_id = k.partition(":")
957
958 if k_item == "l2gwconn":
959 self.neutron.delete_l2_gateway_connection(k_id)
960 except Exception as e2:
961 self.logger.error(
962 "Error deleting l2 gateway connection: {}: {}".format(
963 type(e2).__name__, e2
964 )
965 )
966
967 if new_net:
968 self.neutron.delete_network(new_net["network"]["id"])
969
970 self._format_exception(e)
971
972 def get_network_list(self, filter_dict={}):
973 """Obtain tenant networks of VIM
974 Filter_dict can be:
975 name: network name
976 id: network uuid
977 shared: boolean
978 tenant_id: tenant
979 admin_state_up: boolean
980 status: 'ACTIVE'
981 Returns the network list of dictionaries
982 """
983 self.logger.debug("Getting network from VIM filter: '%s'", str(filter_dict))
984
985 try:
986 self._reload_connection()
987 filter_dict_os = filter_dict.copy()
988
989 if self.api_version3 and "tenant_id" in filter_dict_os:
990 # TODO check
991 filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")
992
993 net_dict = self.neutron.list_networks(**filter_dict_os)
994 net_list = net_dict["networks"]
995 self.__net_os2mano(net_list)
996
997 return net_list
998 except (
999 neExceptions.ConnectionFailed,
1000 ksExceptions.ClientException,
1001 neExceptions.NeutronException,
1002 ConnectionError,
1003 ) as e:
1004 self._format_exception(e)
1005
1006 def get_network(self, net_id):
1007 """Obtain details of network from VIM
1008 Returns the network information from a network id"""
1009 self.logger.debug(" Getting tenant network %s from VIM", net_id)
1010 filter_dict = {"id": net_id}
1011 net_list = self.get_network_list(filter_dict)
1012
1013 if len(net_list) == 0:
1014 raise vimconn.VimConnNotFoundException(
1015 "Network '{}' not found".format(net_id)
1016 )
1017 elif len(net_list) > 1:
1018 raise vimconn.VimConnConflictException(
1019 "Found more than one network with this criteria"
1020 )
1021
1022 net = net_list[0]
1023 subnets = []
1024 for subnet_id in net.get("subnets", ()):
1025 try:
1026 subnet = self.neutron.show_subnet(subnet_id)
1027 except Exception as e:
1028 self.logger.error(
1029 "osconnector.get_network(): Error getting subnet %s %s"
1030 % (net_id, str(e))
1031 )
1032 subnet = {"id": subnet_id, "fault": str(e)}
1033
1034 subnets.append(subnet)
1035
1036 net["subnets"] = subnets
1037 net["encapsulation"] = net.get("provider:network_type")
1038 net["encapsulation_type"] = net.get("provider:network_type")
1039 net["segmentation_id"] = net.get("provider:segmentation_id")
1040 net["encapsulation_id"] = net.get("provider:segmentation_id")
1041
1042 return net
1043
1044 def delete_network(self, net_id, created_items=None):
1045 """
1046 Removes a tenant network from VIM and its associated elements
1047 :param net_id: VIM identifier of the network, provided by method new_network
1048 :param created_items: dictionary with extra items to be deleted. provided by method new_network
1049 Returns the network identifier or raises an exception upon error or when network is not found
1050 """
1051 self.logger.debug("Deleting network '%s' from VIM", net_id)
1052
1053 if created_items is None:
1054 created_items = {}
1055
1056 try:
1057 self._reload_connection()
1058 # delete l2gw connections (if any) before deleting the network
1059 for k, v in created_items.items():
1060 if not v: # skip already deleted
1061 continue
1062
1063 try:
1064 k_item, _, k_id = k.partition(":")
1065 if k_item == "l2gwconn":
1066 self.neutron.delete_l2_gateway_connection(k_id)
1067 except Exception as e:
1068 self.logger.error(
1069 "Error deleting l2 gateway connection: {}: {}".format(
1070 type(e).__name__, e
1071 )
1072 )
1073
1074 # delete VM ports attached to this networks before the network
1075 ports = self.neutron.list_ports(network_id=net_id)
1076 for p in ports["ports"]:
1077 try:
1078 self.neutron.delete_port(p["id"])
1079 except Exception as e:
1080 self.logger.error("Error deleting port %s: %s", p["id"], str(e))
1081
1082 self.neutron.delete_network(net_id)
1083
1084 return net_id
1085 except (
1086 neExceptions.ConnectionFailed,
1087 neExceptions.NetworkNotFoundClient,
1088 neExceptions.NeutronException,
1089 ksExceptions.ClientException,
1090 neExceptions.NeutronException,
1091 ConnectionError,
1092 ) as e:
1093 self._format_exception(e)
1094
1095 def refresh_nets_status(self, net_list):
1096 """Get the status of the networks
1097 Params: the list of network identifiers
1098 Returns a dictionary with:
1099 net_id: #VIM id of this network
1100 status: #Mandatory. Text with one of:
1101 # DELETED (not found at vim)
1102 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
1103 # OTHER (Vim reported other status not understood)
1104 # ERROR (VIM indicates an ERROR status)
1105 # ACTIVE, INACTIVE, DOWN (admin down),
1106 # BUILD (on building process)
1107 #
1108 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
1109 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
1110 """
1111 net_dict = {}
1112
1113 for net_id in net_list:
1114 net = {}
1115
1116 try:
1117 net_vim = self.get_network(net_id)
1118
1119 if net_vim["status"] in netStatus2manoFormat:
1120 net["status"] = netStatus2manoFormat[net_vim["status"]]
1121 else:
1122 net["status"] = "OTHER"
1123 net["error_msg"] = "VIM status reported " + net_vim["status"]
1124
1125 if net["status"] == "ACTIVE" and not net_vim["admin_state_up"]:
1126 net["status"] = "DOWN"
1127
1128 net["vim_info"] = self.serialize(net_vim)
1129
1130 if net_vim.get("fault"): # TODO
1131 net["error_msg"] = str(net_vim["fault"])
1132 except vimconn.VimConnNotFoundException as e:
1133 self.logger.error("Exception getting net status: %s", str(e))
1134 net["status"] = "DELETED"
1135 net["error_msg"] = str(e)
1136 except vimconn.VimConnException as e:
1137 self.logger.error("Exception getting net status: %s", str(e))
1138 net["status"] = "VIM_ERROR"
1139 net["error_msg"] = str(e)
1140 net_dict[net_id] = net
1141 return net_dict
1142
1143 def get_flavor(self, flavor_id):
1144 """Obtain flavor details from the VIM. Returns the flavor dict details"""
1145 self.logger.debug("Getting flavor '%s'", flavor_id)
1146
1147 try:
1148 self._reload_connection()
1149 flavor = self.nova.flavors.find(id=flavor_id)
1150 # TODO parse input and translate to VIM format (openmano_schemas.new_vminstance_response_schema)
1151
1152 return flavor.to_dict()
1153 except (
1154 nvExceptions.NotFound,
1155 nvExceptions.ClientException,
1156 ksExceptions.ClientException,
1157 ConnectionError,
1158 ) as e:
1159 self._format_exception(e)
1160
    def get_flavor_id_from_data(self, flavor_dict):
        """Obtain flavor id that match the flavor description
        Returns the flavor_id or raises a vimconnNotFoundException
        flavor_dict: contains the required ram, vcpus, disk
        If 'use_existing_flavors' is set to True at config, the closer flavor that provides same or more ram, vcpus
        and disk is returned. Otherwise a flavor with exactly same ram, vcpus and disk is returned or a
        vimconnNotFoundException is raised
        """
        # exact_match=False allows returning a "close enough" candidate flavor
        exact_match = False if self.config.get("use_existing_flavors") else True

        try:
            self._reload_connection()
            flavor_candidate_id = None
            # Sentinel "worst" candidate; assumes real flavors compare smaller
            # (lexicographic tuple comparison, see NOTE below)
            flavor_candidate_data = (10000, 10000, 10000)
            # Target described as (ram, vcpus, disk, ephemeral, swap)
            flavor_target = (
                flavor_dict["ram"],
                flavor_dict["vcpus"],
                flavor_dict["disk"],
                flavor_dict.get("ephemeral", 0),
                flavor_dict.get("swap", 0),
            )
            # numa=None
            extended = flavor_dict.get("extended", {})
            if extended:
                # TODO
                raise vimconn.VimConnNotFoundException(
                    "Flavor with EPA still not implemented"
                )
            # if len(numas) > 1:
            #     raise vimconn.VimConnNotFoundException("Cannot find any flavor with more than one numa")
            # numa=numas[0]
            # numas = extended.get("numas")
            for flavor in self.nova.flavors.list():
                epa = flavor.get_keys()

                # flavors carrying any extra specs (EPA) are skipped entirely
                if epa:
                    continue
                # TODO

                # flavor.swap is "" when unset, hence the isinstance guard
                flavor_data = (
                    flavor.ram,
                    flavor.vcpus,
                    flavor.disk,
                    flavor.ephemeral,
                    flavor.swap if isinstance(flavor.swap, int) else 0,
                )
                if flavor_data == flavor_target:
                    return flavor.id
                elif (
                    not exact_match
                    and flavor_target < flavor_data < flavor_candidate_data
                ):
                    # NOTE(review): these are lexicographic tuple comparisons,
                    # not component-wise ">= requested on every resource";
                    # e.g. more ram but less disk still ranks "bigger".
                    # Confirm this is the intended matching semantics.
                    flavor_candidate_id = flavor.id
                    flavor_candidate_data = flavor_data

            if not exact_match and flavor_candidate_id:
                return flavor_candidate_id

            raise vimconn.VimConnNotFoundException(
                "Cannot find any flavor matching '{}'".format(flavor_dict)
            )
        except (
            nvExceptions.NotFound,
            nvExceptions.ClientException,
            ksExceptions.ClientException,
            ConnectionError,
        ) as e:
            self._format_exception(e)
1229
1230 @staticmethod
1231 def process_resource_quota(quota: dict, prefix: str, extra_specs: dict) -> None:
1232 """Process resource quota and fill up extra_specs.
1233 Args:
1234 quota (dict): Keeping the quota of resurces
1235 prefix (str) Prefix
1236 extra_specs (dict) Dict to be filled to be used during flavor creation
1237
1238 """
1239 if "limit" in quota:
1240 extra_specs["quota:" + prefix + "_limit"] = quota["limit"]
1241
1242 if "reserve" in quota:
1243 extra_specs["quota:" + prefix + "_reservation"] = quota["reserve"]
1244
1245 if "shares" in quota:
1246 extra_specs["quota:" + prefix + "_shares_level"] = "custom"
1247 extra_specs["quota:" + prefix + "_shares_share"] = quota["shares"]
1248
1249 @staticmethod
1250 def process_numa_memory(
1251 numa: dict, node_id: Optional[int], extra_specs: dict
1252 ) -> None:
1253 """Set the memory in extra_specs.
1254 Args:
1255 numa (dict): A dictionary which includes numa information
1256 node_id (int): ID of numa node
1257 extra_specs (dict): To be filled.
1258
1259 """
1260 if not numa.get("memory"):
1261 return
1262 memory_mb = numa["memory"] * 1024
1263 memory = "hw:numa_mem.{}".format(node_id)
1264 extra_specs[memory] = int(memory_mb)
1265
1266 @staticmethod
1267 def process_numa_vcpu(numa: dict, node_id: int, extra_specs: dict) -> None:
1268 """Set the cpu in extra_specs.
1269 Args:
1270 numa (dict): A dictionary which includes numa information
1271 node_id (int): ID of numa node
1272 extra_specs (dict): To be filled.
1273
1274 """
1275 if not numa.get("vcpu"):
1276 return
1277 vcpu = numa["vcpu"]
1278 cpu = "hw:numa_cpus.{}".format(node_id)
1279 vcpu = ",".join(map(str, vcpu))
1280 extra_specs[cpu] = vcpu
1281
1282 @staticmethod
1283 def process_numa_paired_threads(numa: dict, extra_specs: dict) -> Optional[int]:
1284 """Fill up extra_specs if numa has paired-threads.
1285 Args:
1286 numa (dict): A dictionary which includes numa information
1287 extra_specs (dict): To be filled.
1288
1289 Returns:
1290 threads (int) Number of virtual cpus
1291
1292 """
1293 if not numa.get("paired-threads"):
1294 return
1295
1296 # cpu_thread_policy "require" implies that compute node must have an STM architecture
1297 threads = numa["paired-threads"] * 2
1298 extra_specs["hw:cpu_thread_policy"] = "require"
1299 extra_specs["hw:cpu_policy"] = "dedicated"
1300 return threads
1301
1302 @staticmethod
1303 def process_numa_cores(numa: dict, extra_specs: dict) -> Optional[int]:
1304 """Fill up extra_specs if numa has cores.
1305 Args:
1306 numa (dict): A dictionary which includes numa information
1307 extra_specs (dict): To be filled.
1308
1309 Returns:
1310 cores (int) Number of virtual cpus
1311
1312 """
1313 # cpu_thread_policy "isolate" implies that the host must not have an SMT
1314 # architecture, or a non-SMT architecture will be emulated
1315 if not numa.get("cores"):
1316 return
1317 cores = numa["cores"]
1318 extra_specs["hw:cpu_thread_policy"] = "isolate"
1319 extra_specs["hw:cpu_policy"] = "dedicated"
1320 return cores
1321
1322 @staticmethod
1323 def process_numa_threads(numa: dict, extra_specs: dict) -> Optional[int]:
1324 """Fill up extra_specs if numa has threads.
1325 Args:
1326 numa (dict): A dictionary which includes numa information
1327 extra_specs (dict): To be filled.
1328
1329 Returns:
1330 threads (int) Number of virtual cpus
1331
1332 """
1333 # cpu_thread_policy "prefer" implies that the host may or may not have an SMT architecture
1334 if not numa.get("threads"):
1335 return
1336 threads = numa["threads"]
1337 extra_specs["hw:cpu_thread_policy"] = "prefer"
1338 extra_specs["hw:cpu_policy"] = "dedicated"
1339 return threads
1340
1341 def _process_numa_parameters_of_flavor(
1342 self, numas: List, extra_specs: Dict
1343 ) -> None:
1344 """Process numa parameters and fill up extra_specs.
1345
1346 Args:
1347 numas (list): List of dictionary which includes numa information
1348 extra_specs (dict): To be filled.
1349
1350 """
1351 numa_nodes = len(numas)
1352 extra_specs["hw:numa_nodes"] = str(numa_nodes)
1353 cpu_cores, cpu_threads = 0, 0
1354
1355 if self.vim_type == "VIO":
1356 self.process_vio_numa_nodes(numa_nodes, extra_specs)
1357
1358 for numa in numas:
1359 if "id" in numa:
1360 node_id = numa["id"]
1361 # overwrite ram and vcpus
1362 # check if key "memory" is present in numa else use ram value at flavor
1363 self.process_numa_memory(numa, node_id, extra_specs)
1364 self.process_numa_vcpu(numa, node_id, extra_specs)
1365
1366 # See for reference: https://specs.openstack.org/openstack/nova-specs/specs/mitaka/implemented/virt-driver-cpu-thread-pinning.html
1367 extra_specs["hw:cpu_sockets"] = str(numa_nodes)
1368
1369 if "paired-threads" in numa:
1370 threads = self.process_numa_paired_threads(numa, extra_specs)
1371 cpu_threads += threads
1372
1373 elif "cores" in numa:
1374 cores = self.process_numa_cores(numa, extra_specs)
1375 cpu_cores += cores
1376
1377 elif "threads" in numa:
1378 threads = self.process_numa_threads(numa, extra_specs)
1379 cpu_threads += threads
1380
1381 if cpu_cores:
1382 extra_specs["hw:cpu_cores"] = str(cpu_cores)
1383 if cpu_threads:
1384 extra_specs["hw:cpu_threads"] = str(cpu_threads)
1385
1386 @staticmethod
1387 def process_vio_numa_nodes(numa_nodes: int, extra_specs: Dict) -> None:
1388 """According to number of numa nodes, updates the extra_specs for VIO.
1389
1390 Args:
1391
1392 numa_nodes (int): List keeps the numa node numbers
1393 extra_specs (dict): Extra specs dict to be updated
1394
1395 """
1396 # If there are several numas, we do not define specific affinity.
1397 extra_specs["vmware:latency_sensitivity_level"] = "high"
1398
1399 def _change_flavor_name(
1400 self, name: str, name_suffix: int, flavor_data: dict
1401 ) -> str:
1402 """Change the flavor name if the name already exists.
1403
1404 Args:
1405 name (str): Flavor name to be checked
1406 name_suffix (int): Suffix to be appended to name
1407 flavor_data (dict): Flavor dict
1408
1409 Returns:
1410 name (str): New flavor name to be used
1411
1412 """
1413 # Get used names
1414 fl = self.nova.flavors.list()
1415 fl_names = [f.name for f in fl]
1416
1417 while name in fl_names:
1418 name_suffix += 1
1419 name = flavor_data["name"] + "-" + str(name_suffix)
1420
1421 return name
1422
1423 def _process_extended_config_of_flavor(
1424 self, extended: dict, extra_specs: dict
1425 ) -> None:
1426 """Process the extended dict to fill up extra_specs.
1427 Args:
1428
1429 extended (dict): Keeping the extra specification of flavor
1430 extra_specs (dict) Dict to be filled to be used during flavor creation
1431
1432 """
1433 quotas = {
1434 "cpu-quota": "cpu",
1435 "mem-quota": "memory",
1436 "vif-quota": "vif",
1437 "disk-io-quota": "disk_io",
1438 }
1439
1440 page_sizes = {
1441 "LARGE": "large",
1442 "SMALL": "small",
1443 "SIZE_2MB": "2MB",
1444 "SIZE_1GB": "1GB",
1445 "PREFER_LARGE": "any",
1446 }
1447
1448 policies = {
1449 "cpu-pinning-policy": "hw:cpu_policy",
1450 "cpu-thread-pinning-policy": "hw:cpu_thread_policy",
1451 "mem-policy": "hw:numa_mempolicy",
1452 }
1453
1454 numas = extended.get("numas")
1455 if numas:
1456 self._process_numa_parameters_of_flavor(numas, extra_specs)
1457
1458 for quota, item in quotas.items():
1459 if quota in extended.keys():
1460 self.process_resource_quota(extended.get(quota), item, extra_specs)
1461
1462 # Set the mempage size as specified in the descriptor
1463 if extended.get("mempage-size"):
1464 if extended["mempage-size"] in page_sizes.keys():
1465 extra_specs["hw:mem_page_size"] = page_sizes[extended["mempage-size"]]
1466 else:
1467 # Normally, validations in NBI should not allow to this condition.
1468 self.logger.debug(
1469 "Invalid mempage-size %s. Will be ignored",
1470 extended.get("mempage-size"),
1471 )
1472
1473 for policy, hw_policy in policies.items():
1474 if extended.get(policy):
1475 extra_specs[hw_policy] = extended[policy].lower()
1476
1477 @staticmethod
1478 def _get_flavor_details(flavor_data: dict) -> Tuple:
1479 """Returns the details of flavor
1480 Args:
1481 flavor_data (dict): Dictionary that includes required flavor details
1482
1483 Returns:
1484 ram, vcpus, extra_specs, extended (tuple): Main items of required flavor
1485
1486 """
1487 return (
1488 flavor_data.get("ram", 64),
1489 flavor_data.get("vcpus", 1),
1490 {},
1491 flavor_data.get("extended"),
1492 )
1493
1494 def new_flavor(self, flavor_data: dict, change_name_if_used: bool = True) -> str:
1495 """Adds a tenant flavor to openstack VIM.
1496 if change_name_if_used is True, it will change name in case of conflict,
1497 because it is not supported name repetition.
1498
1499 Args:
1500 flavor_data (dict): Flavor details to be processed
1501 change_name_if_used (bool): Change name in case of conflict
1502
1503 Returns:
1504 flavor_id (str): flavor identifier
1505
1506 """
1507 self.logger.debug("Adding flavor '%s'", str(flavor_data))
1508 retry = 0
1509 max_retries = 3
1510 name_suffix = 0
1511
1512 try:
1513 name = flavor_data["name"]
1514 while retry < max_retries:
1515 retry += 1
1516 try:
1517 self._reload_connection()
1518
1519 if change_name_if_used:
1520 name = self._change_flavor_name(name, name_suffix, flavor_data)
1521
1522 ram, vcpus, extra_specs, extended = self._get_flavor_details(
1523 flavor_data
1524 )
1525 if extended:
1526 self._process_extended_config_of_flavor(extended, extra_specs)
1527
1528 # Create flavor
1529
1530 new_flavor = self.nova.flavors.create(
1531 name=name,
1532 ram=ram,
1533 vcpus=vcpus,
1534 disk=flavor_data.get("disk", 0),
1535 ephemeral=flavor_data.get("ephemeral", 0),
1536 swap=flavor_data.get("swap", 0),
1537 is_public=flavor_data.get("is_public", True),
1538 )
1539
1540 # Add metadata
1541 if extra_specs:
1542 new_flavor.set_keys(extra_specs)
1543
1544 return new_flavor.id
1545
1546 except nvExceptions.Conflict as e:
1547 if change_name_if_used and retry < max_retries:
1548 continue
1549
1550 self._format_exception(e)
1551
1552 except (
1553 ksExceptions.ClientException,
1554 nvExceptions.ClientException,
1555 ConnectionError,
1556 KeyError,
1557 ) as e:
1558 self._format_exception(e)
1559
1560 def delete_flavor(self, flavor_id):
1561 """Deletes a tenant flavor from openstack VIM. Returns the old flavor_id"""
1562 try:
1563 self._reload_connection()
1564 self.nova.flavors.delete(flavor_id)
1565
1566 return flavor_id
1567 # except nvExceptions.BadRequest as e:
1568 except (
1569 nvExceptions.NotFound,
1570 ksExceptions.ClientException,
1571 nvExceptions.ClientException,
1572 ConnectionError,
1573 ) as e:
1574 self._format_exception(e)
1575
1576 def new_image(self, image_dict):
1577 """
1578 Adds a tenant image to VIM. imge_dict is a dictionary with:
1579 name: name
1580 disk_format: qcow2, vhd, vmdk, raw (by default), ...
1581 location: path or URI
1582 public: "yes" or "no"
1583 metadata: metadata of the image
1584 Returns the image_id
1585 """
1586 retry = 0
1587 max_retries = 3
1588
1589 while retry < max_retries:
1590 retry += 1
1591 try:
1592 self._reload_connection()
1593
1594 # determine format http://docs.openstack.org/developer/glance/formats.html
1595 if "disk_format" in image_dict:
1596 disk_format = image_dict["disk_format"]
1597 else: # autodiscover based on extension
1598 if image_dict["location"].endswith(".qcow2"):
1599 disk_format = "qcow2"
1600 elif image_dict["location"].endswith(".vhd"):
1601 disk_format = "vhd"
1602 elif image_dict["location"].endswith(".vmdk"):
1603 disk_format = "vmdk"
1604 elif image_dict["location"].endswith(".vdi"):
1605 disk_format = "vdi"
1606 elif image_dict["location"].endswith(".iso"):
1607 disk_format = "iso"
1608 elif image_dict["location"].endswith(".aki"):
1609 disk_format = "aki"
1610 elif image_dict["location"].endswith(".ari"):
1611 disk_format = "ari"
1612 elif image_dict["location"].endswith(".ami"):
1613 disk_format = "ami"
1614 else:
1615 disk_format = "raw"
1616
1617 self.logger.debug(
1618 "new_image: '%s' loading from '%s'",
1619 image_dict["name"],
1620 image_dict["location"],
1621 )
1622 if self.vim_type == "VIO":
1623 container_format = "bare"
1624 if "container_format" in image_dict:
1625 container_format = image_dict["container_format"]
1626
1627 new_image = self.glance.images.create(
1628 name=image_dict["name"],
1629 container_format=container_format,
1630 disk_format=disk_format,
1631 )
1632 else:
1633 new_image = self.glance.images.create(name=image_dict["name"])
1634
1635 if image_dict["location"].startswith("http"):
1636 # TODO there is not a method to direct download. It must be downloaded locally with requests
1637 raise vimconn.VimConnNotImplemented("Cannot create image from URL")
1638 else: # local path
1639 with open(image_dict["location"]) as fimage:
1640 self.glance.images.upload(new_image.id, fimage)
1641 # new_image = self.glancev1.images.create(name=image_dict["name"], is_public=
1642 # image_dict.get("public","yes")=="yes",
1643 # container_format="bare", data=fimage, disk_format=disk_format)
1644
1645 metadata_to_load = image_dict.get("metadata")
1646
1647 # TODO location is a reserved word for current openstack versions. fixed for VIO please check
1648 # for openstack
1649 if self.vim_type == "VIO":
1650 metadata_to_load["upload_location"] = image_dict["location"]
1651 else:
1652 metadata_to_load["location"] = image_dict["location"]
1653
1654 self.glance.images.update(new_image.id, **metadata_to_load)
1655
1656 return new_image.id
1657 except (
1658 nvExceptions.Conflict,
1659 ksExceptions.ClientException,
1660 nvExceptions.ClientException,
1661 ) as e:
1662 self._format_exception(e)
1663 except (
1664 HTTPException,
1665 gl1Exceptions.HTTPException,
1666 gl1Exceptions.CommunicationError,
1667 ConnectionError,
1668 ) as e:
1669 if retry == max_retries:
1670 continue
1671
1672 self._format_exception(e)
1673 except IOError as e: # can not open the file
1674 raise vimconn.VimConnConnectionException(
1675 "{}: {} for {}".format(type(e).__name__, e, image_dict["location"]),
1676 http_code=vimconn.HTTP_Bad_Request,
1677 )
1678
1679 def delete_image(self, image_id):
1680 """Deletes a tenant image from openstack VIM. Returns the old id"""
1681 try:
1682 self._reload_connection()
1683 self.glance.images.delete(image_id)
1684
1685 return image_id
1686 except (
1687 nvExceptions.NotFound,
1688 ksExceptions.ClientException,
1689 nvExceptions.ClientException,
1690 gl1Exceptions.CommunicationError,
1691 gl1Exceptions.HTTPNotFound,
1692 ConnectionError,
1693 ) as e: # TODO remove
1694 self._format_exception(e)
1695
1696 def get_image_id_from_path(self, path):
1697 """Get the image id from image path in the VIM database. Returns the image_id"""
1698 try:
1699 self._reload_connection()
1700 images = self.glance.images.list()
1701
1702 for image in images:
1703 if image.metadata.get("location") == path:
1704 return image.id
1705
1706 raise vimconn.VimConnNotFoundException(
1707 "image with location '{}' not found".format(path)
1708 )
1709 except (
1710 ksExceptions.ClientException,
1711 nvExceptions.ClientException,
1712 gl1Exceptions.CommunicationError,
1713 ConnectionError,
1714 ) as e:
1715 self._format_exception(e)
1716
1717 def get_image_list(self, filter_dict={}):
1718 """Obtain tenant images from VIM
1719 Filter_dict can be:
1720 id: image id
1721 name: image name
1722 checksum: image checksum
1723 Returns the image list of dictionaries:
1724 [{<the fields at Filter_dict plus some VIM specific>}, ...]
1725 List can be empty
1726 """
1727 self.logger.debug("Getting image list from VIM filter: '%s'", str(filter_dict))
1728
1729 try:
1730 self._reload_connection()
1731 # filter_dict_os = filter_dict.copy()
1732 # First we filter by the available filter fields: name, id. The others are removed.
1733 image_list = self.glance.images.list()
1734 filtered_list = []
1735
1736 for image in image_list:
1737 try:
1738 if filter_dict.get("name") and image["name"] != filter_dict["name"]:
1739 continue
1740
1741 if filter_dict.get("id") and image["id"] != filter_dict["id"]:
1742 continue
1743
1744 if (
1745 filter_dict.get("checksum")
1746 and image["checksum"] != filter_dict["checksum"]
1747 ):
1748 continue
1749
1750 filtered_list.append(image.copy())
1751 except gl1Exceptions.HTTPNotFound:
1752 pass
1753
1754 return filtered_list
1755 except (
1756 ksExceptions.ClientException,
1757 nvExceptions.ClientException,
1758 gl1Exceptions.CommunicationError,
1759 ConnectionError,
1760 ) as e:
1761 self._format_exception(e)
1762
1763 def __wait_for_vm(self, vm_id, status):
1764 """wait until vm is in the desired status and return True.
1765 If the VM gets in ERROR status, return false.
1766 If the timeout is reached generate an exception"""
1767 elapsed_time = 0
1768 while elapsed_time < server_timeout:
1769 vm_status = self.nova.servers.get(vm_id).status
1770
1771 if vm_status == status:
1772 return True
1773
1774 if vm_status == "ERROR":
1775 return False
1776
1777 time.sleep(5)
1778 elapsed_time += 5
1779
1780 # if we exceeded the timeout rollback
1781 if elapsed_time >= server_timeout:
1782 raise vimconn.VimConnException(
1783 "Timeout waiting for instance " + vm_id + " to get " + status,
1784 http_code=vimconn.HTTP_Request_Timeout,
1785 )
1786
1787 def _get_openstack_availablity_zones(self):
1788 """
1789 Get from openstack availability zones available
1790 :return:
1791 """
1792 try:
1793 openstack_availability_zone = self.nova.availability_zones.list()
1794 openstack_availability_zone = [
1795 str(zone.zoneName)
1796 for zone in openstack_availability_zone
1797 if zone.zoneName != "internal"
1798 ]
1799
1800 return openstack_availability_zone
1801 except Exception:
1802 return None
1803
1804 def _set_availablity_zones(self):
1805 """
1806 Set vim availablity zone
1807 :return:
1808 """
1809 if "availability_zone" in self.config:
1810 vim_availability_zones = self.config.get("availability_zone")
1811
1812 if isinstance(vim_availability_zones, str):
1813 self.availability_zone = [vim_availability_zones]
1814 elif isinstance(vim_availability_zones, list):
1815 self.availability_zone = vim_availability_zones
1816 else:
1817 self.availability_zone = self._get_openstack_availablity_zones()
1818
1819 def _get_vm_availability_zone(
1820 self, availability_zone_index, availability_zone_list
1821 ):
1822 """
1823 Return thge availability zone to be used by the created VM.
1824 :return: The VIM availability zone to be used or None
1825 """
1826 if availability_zone_index is None:
1827 if not self.config.get("availability_zone"):
1828 return None
1829 elif isinstance(self.config.get("availability_zone"), str):
1830 return self.config["availability_zone"]
1831 else:
1832 # TODO consider using a different parameter at config for default AV and AV list match
1833 return self.config["availability_zone"][0]
1834
1835 vim_availability_zones = self.availability_zone
1836 # check if VIM offer enough availability zones describe in the VNFD
1837 if vim_availability_zones and len(availability_zone_list) <= len(
1838 vim_availability_zones
1839 ):
1840 # check if all the names of NFV AV match VIM AV names
1841 match_by_index = False
1842 for av in availability_zone_list:
1843 if av not in vim_availability_zones:
1844 match_by_index = True
1845 break
1846
1847 if match_by_index:
1848 return vim_availability_zones[availability_zone_index]
1849 else:
1850 return availability_zone_list[availability_zone_index]
1851 else:
1852 raise vimconn.VimConnConflictException(
1853 "No enough availability zones at VIM for this deployment"
1854 )
1855
1856 def _prepare_port_dict_security_groups(self, net: dict, port_dict: dict) -> None:
1857 """Fill up the security_groups in the port_dict.
1858
1859 Args:
1860 net (dict): Network details
1861 port_dict (dict): Port details
1862
1863 """
1864 if (
1865 self.config.get("security_groups")
1866 and net.get("port_security") is not False
1867 and not self.config.get("no_port_security_extension")
1868 ):
1869 if not self.security_groups_id:
1870 self._get_ids_from_name()
1871
1872 port_dict["security_groups"] = self.security_groups_id
1873
1874 def _prepare_port_dict_binding(self, net: dict, port_dict: dict) -> None:
1875 """Fill up the network binding depending on network type in the port_dict.
1876
1877 Args:
1878 net (dict): Network details
1879 port_dict (dict): Port details
1880
1881 """
1882 if not net.get("type"):
1883 raise vimconn.VimConnException("Type is missing in the network details.")
1884
1885 if net["type"] == "virtual":
1886 pass
1887
1888 # For VF
1889 elif net["type"] == "VF" or net["type"] == "SR-IOV":
1890 port_dict["binding:vnic_type"] = "direct"
1891
1892 # VIO specific Changes
1893 if self.vim_type == "VIO":
1894 # Need to create port with port_security_enabled = False and no-security-groups
1895 port_dict["port_security_enabled"] = False
1896 port_dict["provider_security_groups"] = []
1897 port_dict["security_groups"] = []
1898
1899 else:
1900 # For PT PCI-PASSTHROUGH
1901 port_dict["binding:vnic_type"] = "direct-physical"
1902
1903 @staticmethod
1904 def _set_fixed_ip(new_port: dict, net: dict) -> None:
1905 """Set the "ip" parameter in net dictionary.
1906
1907 Args:
1908 new_port (dict): New created port
1909 net (dict): Network details
1910
1911 """
1912 fixed_ips = new_port["port"].get("fixed_ips")
1913
1914 if fixed_ips:
1915 net["ip"] = fixed_ips[0].get("ip_address")
1916 else:
1917 net["ip"] = None
1918
1919 @staticmethod
1920 def _prepare_port_dict_mac_ip_addr(net: dict, port_dict: dict) -> None:
1921 """Fill up the mac_address and fixed_ips in port_dict.
1922
1923 Args:
1924 net (dict): Network details
1925 port_dict (dict): Port details
1926
1927 """
1928 if net.get("mac_address"):
1929 port_dict["mac_address"] = net["mac_address"]
1930
1931 if net.get("ip_address"):
1932 port_dict["fixed_ips"] = [{"ip_address": net["ip_address"]}]
1933 # TODO add "subnet_id": <subnet_id>
1934
1935 def _create_new_port(self, port_dict: dict, created_items: dict, net: dict) -> Dict:
1936 """Create new port using neutron.
1937
1938 Args:
1939 port_dict (dict): Port details
1940 created_items (dict): All created items
1941 net (dict): Network details
1942
1943 Returns:
1944 new_port (dict): New created port
1945
1946 """
1947 new_port = self.neutron.create_port({"port": port_dict})
1948 created_items["port:" + str(new_port["port"]["id"])] = True
1949 net["mac_adress"] = new_port["port"]["mac_address"]
1950 net["vim_id"] = new_port["port"]["id"]
1951
1952 return new_port
1953
1954 def _create_port(
1955 self, net: dict, name: str, created_items: dict
1956 ) -> Tuple[dict, dict]:
1957 """Create port using net details.
1958
1959 Args:
1960 net (dict): Network details
1961 name (str): Name to be used as network name if net dict does not include name
1962 created_items (dict): All created items
1963
1964 Returns:
1965 new_port, port New created port, port dictionary
1966
1967 """
1968
1969 port_dict = {
1970 "network_id": net["net_id"],
1971 "name": net.get("name"),
1972 "admin_state_up": True,
1973 }
1974
1975 if not port_dict["name"]:
1976 port_dict["name"] = name
1977
1978 self._prepare_port_dict_security_groups(net, port_dict)
1979
1980 self._prepare_port_dict_binding(net, port_dict)
1981
1982 vimconnector._prepare_port_dict_mac_ip_addr(net, port_dict)
1983
1984 new_port = self._create_new_port(port_dict, created_items, net)
1985
1986 vimconnector._set_fixed_ip(new_port, net)
1987
1988 port = {"port-id": new_port["port"]["id"]}
1989
1990 if float(self.nova.api_version.get_string()) >= 2.32:
1991 port["tag"] = new_port["port"]["name"]
1992
1993 return new_port, port
1994
1995 def _prepare_network_for_vminstance(
1996 self,
1997 name: str,
1998 net_list: list,
1999 created_items: dict,
2000 net_list_vim: list,
2001 external_network: list,
2002 no_secured_ports: list,
2003 ) -> None:
2004 """Create port and fill up net dictionary for new VM instance creation.
2005
2006 Args:
2007 name (str): Name of network
2008 net_list (list): List of networks
2009 created_items (dict): All created items belongs to a VM
2010 net_list_vim (list): List of ports
2011 external_network (list): List of external-networks
2012 no_secured_ports (list): Port security disabled ports
2013 """
2014
2015 self._reload_connection()
2016
2017 for net in net_list:
2018 # Skip non-connected iface
2019 if not net.get("net_id"):
2020 continue
2021
2022 new_port, port = self._create_port(net, name, created_items)
2023
2024 net_list_vim.append(port)
2025
2026 if net.get("floating_ip", False):
2027 net["exit_on_floating_ip_error"] = True
2028 external_network.append(net)
2029
2030 elif net["use"] == "mgmt" and self.config.get("use_floating_ip"):
2031 net["exit_on_floating_ip_error"] = False
2032 external_network.append(net)
2033 net["floating_ip"] = self.config.get("use_floating_ip")
2034
2035 # If port security is disabled when the port has not yet been attached to the VM, then all vm traffic
2036 # is dropped. As a workaround we wait until the VM is active and then disable the port-security
2037 if net.get("port_security") is False and not self.config.get(
2038 "no_port_security_extension"
2039 ):
2040 no_secured_ports.append(
2041 (
2042 new_port["port"]["id"],
2043 net.get("port_security_disable_strategy"),
2044 )
2045 )
2046
2047 def _prepare_persistent_root_volumes(
2048 self,
2049 name: str,
2050 vm_av_zone: list,
2051 disk: dict,
2052 base_disk_index: int,
2053 block_device_mapping: dict,
2054 existing_vim_volumes: list,
2055 created_items: dict,
2056 ) -> Optional[str]:
2057 """Prepare persistent root volumes for new VM instance.
2058
2059 Args:
2060 name (str): Name of VM instance
2061 vm_av_zone (list): List of availability zones
2062 disk (dict): Disk details
2063 base_disk_index (int): Disk index
2064 block_device_mapping (dict): Block device details
2065 existing_vim_volumes (list): Existing disk details
2066 created_items (dict): All created items belongs to VM
2067
2068 Returns:
2069 boot_volume_id (str): ID of boot volume
2070
2071 """
2072 # Disk may include only vim_volume_id or only vim_id."
2073 # Use existing persistent root volume finding with volume_id or vim_id
2074 key_id = "vim_volume_id" if "vim_volume_id" in disk.keys() else "vim_id"
2075
2076 if disk.get(key_id):
2077 block_device_mapping["vd" + chr(base_disk_index)] = disk[key_id]
2078 existing_vim_volumes.append({"id": disk[key_id]})
2079
2080 else:
2081 # Create persistent root volume
2082 volume = self.cinder.volumes.create(
2083 size=disk["size"],
2084 name=name + "vd" + chr(base_disk_index),
2085 imageRef=disk["image_id"],
2086 # Make sure volume is in the same AZ as the VM to be attached to
2087 availability_zone=vm_av_zone,
2088 )
2089 boot_volume_id = volume.id
2090 created_items["volume:" + str(volume.id)] = True
2091 block_device_mapping["vd" + chr(base_disk_index)] = volume.id
2092
2093 return boot_volume_id
2094
2095 def _prepare_non_root_persistent_volumes(
2096 self,
2097 name: str,
2098 disk: dict,
2099 vm_av_zone: list,
2100 block_device_mapping: dict,
2101 base_disk_index: int,
2102 existing_vim_volumes: list,
2103 created_items: dict,
2104 ) -> None:
2105 """Prepare persistent volumes for new VM instance.
2106
2107 Args:
2108 name (str): Name of VM instance
2109 disk (dict): Disk details
2110 vm_av_zone (list): List of availability zones
2111 block_device_mapping (dict): Block device details
2112 base_disk_index (int): Disk index
2113 existing_vim_volumes (list): Existing disk details
2114 created_items (dict): All created items belongs to VM
2115 """
2116 # Non-root persistent volumes
2117 # Disk may include only vim_volume_id or only vim_id."
2118 key_id = "vim_volume_id" if "vim_volume_id" in disk.keys() else "vim_id"
2119
2120 if disk.get(key_id):
2121 # Use existing persistent volume
2122 block_device_mapping["vd" + chr(base_disk_index)] = disk[key_id]
2123 existing_vim_volumes.append({"id": disk[key_id]})
2124
2125 else:
2126 # Create persistent volume
2127 volume = self.cinder.volumes.create(
2128 size=disk["size"],
2129 name=name + "vd" + chr(base_disk_index),
2130 # Make sure volume is in the same AZ as the VM to be attached to
2131 availability_zone=vm_av_zone,
2132 )
2133 created_items["volume:" + str(volume.id)] = True
2134 block_device_mapping["vd" + chr(base_disk_index)] = volume.id
2135
2136 def _wait_for_created_volumes_availability(
2137 self, elapsed_time: int, created_items: dict
2138 ) -> Optional[int]:
2139 """Wait till created volumes become available.
2140
2141 Args:
2142 elapsed_time (int): Passed time while waiting
2143 created_items (dict): All created items belongs to VM
2144
2145 Returns:
2146 elapsed_time (int): Time spent while waiting
2147
2148 """
2149
2150 while elapsed_time < volume_timeout:
2151 for created_item in created_items:
2152 v, _, volume_id = created_item.partition(":")
2153 if v == "volume":
2154 if self.cinder.volumes.get(volume_id).status != "available":
2155 break
2156 else:
2157 # All ready: break from while
2158 break
2159
2160 time.sleep(5)
2161 elapsed_time += 5
2162
2163 return elapsed_time
2164
2165 def _wait_for_existing_volumes_availability(
2166 self, elapsed_time: int, existing_vim_volumes: list
2167 ) -> Optional[int]:
2168 """Wait till existing volumes become available.
2169
2170 Args:
2171 elapsed_time (int): Passed time while waiting
2172 existing_vim_volumes (list): Existing volume details
2173
2174 Returns:
2175 elapsed_time (int): Time spent while waiting
2176
2177 """
2178
2179 while elapsed_time < volume_timeout:
2180 for volume in existing_vim_volumes:
2181 if self.cinder.volumes.get(volume["id"]).status != "available":
2182 break
2183 else: # all ready: break from while
2184 break
2185
2186 time.sleep(5)
2187 elapsed_time += 5
2188
2189 return elapsed_time
2190
2191 def _prepare_disk_for_vminstance(
2192 self,
2193 name: str,
2194 existing_vim_volumes: list,
2195 created_items: dict,
2196 vm_av_zone: list,
2197 block_device_mapping: dict,
2198 disk_list: list = None,
2199 ) -> None:
2200 """Prepare all volumes for new VM instance.
2201
2202 Args:
2203 name (str): Name of Instance
2204 existing_vim_volumes (list): List of existing volumes
2205 created_items (dict): All created items belongs to VM
2206 vm_av_zone (list): VM availability zone
2207 block_device_mapping (dict): Block devices to be attached to VM
2208 disk_list (list): List of disks
2209
2210 """
2211 # Create additional volumes in case these are present in disk_list
2212 base_disk_index = ord("b")
2213 boot_volume_id = None
2214 elapsed_time = 0
2215
2216 for disk in disk_list:
2217 if "image_id" in disk:
2218 # Root persistent volume
2219 base_disk_index = ord("a")
2220 boot_volume_id = self._prepare_persistent_root_volumes(
2221 name=name,
2222 vm_av_zone=vm_av_zone,
2223 disk=disk,
2224 base_disk_index=base_disk_index,
2225 block_device_mapping=block_device_mapping,
2226 existing_vim_volumes=existing_vim_volumes,
2227 created_items=created_items,
2228 )
2229 else:
2230 # Non-root persistent volume
2231 self._prepare_non_root_persistent_volumes(
2232 name=name,
2233 disk=disk,
2234 vm_av_zone=vm_av_zone,
2235 block_device_mapping=block_device_mapping,
2236 base_disk_index=base_disk_index,
2237 existing_vim_volumes=existing_vim_volumes,
2238 created_items=created_items,
2239 )
2240 base_disk_index += 1
2241
2242 # Wait until created volumes are with status available
2243 elapsed_time = self._wait_for_created_volumes_availability(
2244 elapsed_time, created_items
2245 )
2246 # Wait until existing volumes in vim are with status available
2247 elapsed_time = self._wait_for_existing_volumes_availability(
2248 elapsed_time, existing_vim_volumes
2249 )
2250 # If we exceeded the timeout rollback
2251 if elapsed_time >= volume_timeout:
2252 raise vimconn.VimConnException(
2253 "Timeout creating volumes for instance " + name,
2254 http_code=vimconn.HTTP_Request_Timeout,
2255 )
2256 if boot_volume_id:
2257 self.cinder.volumes.set_bootable(boot_volume_id, True)
2258
2259 def _find_the_external_network_for_floating_ip(self):
2260 """Get the external network ip in order to create floating IP.
2261
2262 Returns:
2263 pool_id (str): External network pool ID
2264
2265 """
2266
2267 # Find the external network
2268 external_nets = list()
2269
2270 for net in self.neutron.list_networks()["networks"]:
2271 if net["router:external"]:
2272 external_nets.append(net)
2273
2274 if len(external_nets) == 0:
2275 raise vimconn.VimConnException(
2276 "Cannot create floating_ip automatically since "
2277 "no external network is present",
2278 http_code=vimconn.HTTP_Conflict,
2279 )
2280
2281 if len(external_nets) > 1:
2282 raise vimconn.VimConnException(
2283 "Cannot create floating_ip automatically since "
2284 "multiple external networks are present",
2285 http_code=vimconn.HTTP_Conflict,
2286 )
2287
2288 # Pool ID
2289 return external_nets[0].get("id")
2290
2291 def _neutron_create_float_ip(self, param: dict, created_items: dict) -> None:
2292 """Trigger neutron to create a new floating IP using external network ID.
2293
2294 Args:
2295 param (dict): Input parameters to create a floating IP
2296 created_items (dict): All created items belongs to new VM instance
2297
2298 Raises:
2299
2300 VimConnException
2301 """
2302 try:
2303 self.logger.debug("Creating floating IP")
2304 new_floating_ip = self.neutron.create_floatingip(param)
2305 free_floating_ip = new_floating_ip["floatingip"]["id"]
2306 created_items["floating_ip:" + str(free_floating_ip)] = True
2307
2308 except Exception as e:
2309 raise vimconn.VimConnException(
2310 type(e).__name__ + ": Cannot create new floating_ip " + str(e),
2311 http_code=vimconn.HTTP_Conflict,
2312 )
2313
2314 def _create_floating_ip(
2315 self, floating_network: dict, server: object, created_items: dict
2316 ) -> None:
2317 """Get the available Pool ID and create a new floating IP.
2318
2319 Args:
2320 floating_network (dict): Dict including external network ID
2321 server (object): Server object
2322 created_items (dict): All created items belongs to new VM instance
2323
2324 """
2325
2326 # Pool_id is available
2327 if (
2328 isinstance(floating_network["floating_ip"], str)
2329 and floating_network["floating_ip"].lower() != "true"
2330 ):
2331 pool_id = floating_network["floating_ip"]
2332
2333 # Find the Pool_id
2334 else:
2335 pool_id = self._find_the_external_network_for_floating_ip()
2336
2337 param = {
2338 "floatingip": {
2339 "floating_network_id": pool_id,
2340 "tenant_id": server.tenant_id,
2341 }
2342 }
2343
2344 self._neutron_create_float_ip(param, created_items)
2345
2346 def _find_floating_ip(
2347 self,
2348 server: object,
2349 floating_ips: list,
2350 floating_network: dict,
2351 ) -> Optional[str]:
2352 """Find the available free floating IPs if there are.
2353
2354 Args:
2355 server (object): Server object
2356 floating_ips (list): List of floating IPs
2357 floating_network (dict): Details of floating network such as ID
2358
2359 Returns:
2360 free_floating_ip (str): Free floating ip address
2361
2362 """
2363 for fip in floating_ips:
2364 if fip.get("port_id") or fip.get("tenant_id") != server.tenant_id:
2365 continue
2366
2367 if isinstance(floating_network["floating_ip"], str):
2368 if fip.get("floating_network_id") != floating_network["floating_ip"]:
2369 continue
2370
2371 return fip["id"]
2372
2373 def _assign_floating_ip(
2374 self, free_floating_ip: str, floating_network: dict
2375 ) -> Dict:
2376 """Assign the free floating ip address to port.
2377
2378 Args:
2379 free_floating_ip (str): Floating IP to be assigned
2380 floating_network (dict): ID of floating network
2381
2382 Returns:
2383 fip (dict) (dict): Floating ip details
2384
2385 """
2386 # The vim_id key contains the neutron.port_id
2387 self.neutron.update_floatingip(
2388 free_floating_ip,
2389 {"floatingip": {"port_id": floating_network["vim_id"]}},
2390 )
2391 # For race condition ensure not re-assigned to other VM after 5 seconds
2392 time.sleep(5)
2393
2394 return self.neutron.show_floatingip(free_floating_ip)
2395
2396 def _get_free_floating_ip(
2397 self, server: object, floating_network: dict
2398 ) -> Optional[str]:
2399 """Get the free floating IP address.
2400
2401 Args:
2402 server (object): Server Object
2403 floating_network (dict): Floating network details
2404
2405 Returns:
2406 free_floating_ip (str): Free floating ip addr
2407
2408 """
2409
2410 floating_ips = self.neutron.list_floatingips().get("floatingips", ())
2411
2412 # Randomize
2413 random.shuffle(floating_ips)
2414
2415 return self._find_floating_ip(server, floating_ips, floating_network)
2416
    def _prepare_external_network_for_vminstance(
        self,
        external_network: list,
        server: object,
        created_items: dict,
        vm_start_time: float,
    ) -> None:
        """Assign floating IP address for VM instance.

        Args:
            external_network (list): ID of External network
            server (object): Server Object
            created_items (dict): All created items belongs to new VM instance
            vm_start_time (float): Time as a floating point number expressed in seconds since the epoch, in UTC

        Raises:
            VimConnException

        """
        for floating_network in external_network:
            try:
                assigned = False
                floating_ip_retries = 3
                # In case of RO in HA there can be conflicts, two RO trying to assign same floating IP, so retry
                # several times
                while not assigned:
                    free_floating_ip = self._get_free_floating_ip(
                        server, floating_network
                    )

                    if not free_floating_ip:
                        # NOTE(review): the freshly created floating IP is not
                        # re-read here; free_floating_ip stays None, so the
                        # show_floatingip() below raises and the except path
                        # retries, picking the new IP up on the next pass —
                        # confirm this indirection is intended.
                        self._create_floating_ip(
                            floating_network, server, created_items
                        )

                    try:
                        # For race condition ensure not already assigned
                        fip = self.neutron.show_floatingip(free_floating_ip)

                        if fip["floatingip"].get("port_id"):
                            # Taken by someone else in the meantime: pick another
                            continue

                        # Assign floating ip
                        fip = self._assign_floating_ip(
                            free_floating_ip, floating_network
                        )

                        if fip["floatingip"]["port_id"] != floating_network["vim_id"]:
                            self.logger.warning(
                                "floating_ip {} re-assigned to other port".format(
                                    free_floating_ip
                                )
                            )
                            continue

                        self.logger.debug(
                            "Assigned floating_ip {} to VM {}".format(
                                free_floating_ip, server.id
                            )
                        )

                        assigned = True

                    except Exception as e:
                        # Openstack need some time after VM creation to assign an IP. So retry if fails
                        vm_status = self.nova.servers.get(server.id).status

                        if vm_status not in ("ACTIVE", "ERROR"):
                            # VM still booting: wait (up to server_timeout) and retry
                            if time.time() - vm_start_time < server_timeout:
                                time.sleep(5)
                                continue
                        elif floating_ip_retries > 0:
                            floating_ip_retries -= 1
                            continue

                        raise vimconn.VimConnException(
                            "Cannot create floating_ip: {} {}".format(
                                type(e).__name__, e
                            ),
                            http_code=vimconn.HTTP_Conflict,
                        )

            except Exception as e:
                # Best-effort networks (exit_on_floating_ip_error=False, set for
                # mgmt interfaces) only log the failure and move on
                if not floating_network["exit_on_floating_ip_error"]:
                    self.logger.error("Cannot create floating_ip. %s", str(e))
                    continue

                raise
2505
2506 def _update_port_security_for_vminstance(
2507 self,
2508 no_secured_ports: list,
2509 server: object,
2510 ) -> None:
2511 """Updates the port security according to no_secured_ports list.
2512
2513 Args:
2514 no_secured_ports (list): List of ports that security will be disabled
2515 server (object): Server Object
2516
2517 Raises:
2518 VimConnException
2519
2520 """
2521 # Wait until the VM is active and then disable the port-security
2522 if no_secured_ports:
2523 self.__wait_for_vm(server.id, "ACTIVE")
2524
2525 for port in no_secured_ports:
2526 port_update = {
2527 "port": {"port_security_enabled": False, "security_groups": None}
2528 }
2529
2530 if port[1] == "allow-address-pairs":
2531 port_update = {
2532 "port": {"allowed_address_pairs": [{"ip_address": "0.0.0.0/0"}]}
2533 }
2534
2535 try:
2536 self.neutron.update_port(port[0], port_update)
2537
2538 except Exception:
2539 raise vimconn.VimConnException(
2540 "It was not possible to disable port security for port {}".format(
2541 port[0]
2542 )
2543 )
2544
    def new_vminstance(
        self,
        name: str,
        description: str,
        start: bool,
        image_id: str,
        flavor_id: str,
        affinity_group_list: list,
        net_list: list,
        cloud_config=None,
        disk_list=None,
        availability_zone_index=None,
        availability_zone_list=None,
    ) -> tuple:
        """Adds a VM instance to VIM.

        Args:
            name (str): name of VM
            description (str): description
            start (bool): indicates if VM must start or boot in pause mode. Ignored
            image_id (str) image uuid
            flavor_id (str) flavor uuid
            affinity_group_list (list): list of affinity groups, each one is a dictionary.Ignore if empty.
            net_list (list): list of interfaces, each one is a dictionary with:
                name: name of network
                net_id: network uuid to connect
                vpci: virtual vcpi to assign, ignored because openstack lack #TODO
                model: interface model, ignored #TODO
                mac_address: used for SR-IOV ifaces #TODO for other types
                use: 'data', 'bridge',  'mgmt'
                type: 'virtual', 'PCI-PASSTHROUGH'('PF'), 'SR-IOV'('VF'), 'VFnotShared'
                vim_id: filled/added by this function
                floating_ip: True/False (or it can be None)
                port_security: True/False
            cloud_config (dict): (optional) dictionary with:
                key-pairs: (optional) list of strings with the public key to be inserted to the default user
                users: (optional) list of users to be inserted, each item is a dict with:
                    name: (mandatory) user name,
                    key-pairs: (optional) list of strings with the public key to be inserted to the user
                user-data: (optional) string is a text script to be passed directly to cloud-init
                config-files: (optional). List of files to be transferred. Each item is a dict with:
                    dest: (mandatory) string with the destination absolute path
                    encoding: (optional, by default text). Can be one of:
                        'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
                    content : (mandatory) string with the content of the file
                    permissions: (optional) string with file permissions, typically octal notation '0644'
                    owner: (optional) file owner, string with the format 'owner:group'
                boot-data-drive: boolean to indicate if user-data must be passed using a boot drive (hard disk)
            disk_list: (optional) list with additional disks to the VM. Each item is a dict with:
                image_id: (optional). VIM id of an existing image. If not provided an empty disk must be mounted
                size: (mandatory) string with the size of the disk in GB
                vim_id: (optional) should use this existing volume id
            availability_zone_index: Index of availability_zone_list to use for this this VM. None if not AV required
            availability_zone_list: list of availability zones given by user in the VNFD descriptor.  Ignore if
                availability_zone_index is None
                #TODO ip, security groups

        Returns:
            A tuple with the instance identifier and created_items or raises an exception on error
            created_items can be None or a dictionary where this method can include key-values that will be passed to
            the method delete_vminstance and action_vminstance. Can be used to store created ports, volumes, etc.
            Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
            as not present.
        """
        self.logger.debug(
            "new_vminstance input: image='%s' flavor='%s' nics='%s'",
            image_id,
            flavor_id,
            str(net_list),
        )

        try:
            # server is kept outside the helpers so the rollback in the except
            # clause can tell whether nova.servers.create() already succeeded
            server = None
            created_items = {}
            net_list_vim = []
            # list of external networks to be connected to instance, later on used to create floating_ip
            external_network = []
            # List of ports with port-security disabled
            no_secured_ports = []
            block_device_mapping = {}
            existing_vim_volumes = []
            server_group_id = None
            scheduller_hints = {}

            # Check the Openstack Connection
            self._reload_connection()

            # Prepare network list: creates the neutron ports and fills
            # net_list_vim / external_network / no_secured_ports
            self._prepare_network_for_vminstance(
                name=name,
                net_list=net_list,
                created_items=created_items,
                net_list_vim=net_list_vim,
                external_network=external_network,
                no_secured_ports=no_secured_ports,
            )

            # Cloud config
            config_drive, userdata = self._create_user_data(cloud_config)

            # Get availability Zone
            vm_av_zone = self._get_vm_availability_zone(
                availability_zone_index, availability_zone_list
            )

            if disk_list:
                # Prepare disks: creates/reuses cinder volumes and waits until
                # they are available, filling block_device_mapping
                self._prepare_disk_for_vminstance(
                    name=name,
                    existing_vim_volumes=existing_vim_volumes,
                    created_items=created_items,
                    vm_av_zone=vm_av_zone,
                    block_device_mapping=block_device_mapping,
                    disk_list=disk_list,
                )

            if affinity_group_list:
                # Only first id on the list will be used. Openstack restriction
                server_group_id = affinity_group_list[0]["affinity_group_id"]
                scheduller_hints["group"] = server_group_id

            self.logger.debug(
                "nova.servers.create({}, {}, {}, nics={}, security_groups={}, "
                "availability_zone={}, key_name={}, userdata={}, config_drive={}, "
                "block_device_mapping={}, server_group={})".format(
                    name,
                    image_id,
                    flavor_id,
                    net_list_vim,
                    self.config.get("security_groups"),
                    vm_av_zone,
                    self.config.get("keypair"),
                    userdata,
                    config_drive,
                    block_device_mapping,
                    server_group_id,
                )
            )

            # Create VM
            server = self.nova.servers.create(
                name=name,
                image=image_id,
                flavor=flavor_id,
                nics=net_list_vim,
                security_groups=self.config.get("security_groups"),
                # TODO remove security_groups in future versions. Already at neutron port
                availability_zone=vm_av_zone,
                key_name=self.config.get("keypair"),
                userdata=userdata,
                config_drive=config_drive,
                block_device_mapping=block_device_mapping,
                scheduler_hints=scheduller_hints,
            )

            # Used by the floating-IP retry loop to bound how long to wait for boot
            vm_start_time = time.time()

            self._update_port_security_for_vminstance(no_secured_ports, server)

            self._prepare_external_network_for_vminstance(
                external_network=external_network,
                server=server,
                created_items=created_items,
                vm_start_time=vm_start_time,
            )

            return server.id, created_items

        except Exception as e:
            # Rollback: best-effort deletion of the partially created VM and
            # every port/volume/floating-ip recorded in created_items
            server_id = None
            if server:
                server_id = server.id

            try:
                self.delete_vminstance(server_id, created_items)

            except Exception as e2:
                self.logger.error("new_vminstance rollback fail {}".format(e2))

            self._format_exception(e)
2725
2726 def get_vminstance(self, vm_id):
2727 """Returns the VM instance information from VIM"""
2728 # self.logger.debug("Getting VM from VIM")
2729 try:
2730 self._reload_connection()
2731 server = self.nova.servers.find(id=vm_id)
2732 # TODO parse input and translate to VIM format (openmano_schemas.new_vminstance_response_schema)
2733
2734 return server.to_dict()
2735 except (
2736 ksExceptions.ClientException,
2737 nvExceptions.ClientException,
2738 nvExceptions.NotFound,
2739 ConnectionError,
2740 ) as e:
2741 self._format_exception(e)
2742
2743 def get_vminstance_console(self, vm_id, console_type="vnc"):
2744 """
2745 Get a console for the virtual machine
2746 Params:
2747 vm_id: uuid of the VM
2748 console_type, can be:
2749 "novnc" (by default), "xvpvnc" for VNC types,
2750 "rdp-html5" for RDP types, "spice-html5" for SPICE types
2751 Returns dict with the console parameters:
2752 protocol: ssh, ftp, http, https, ...
2753 server: usually ip address
2754 port: the http, ssh, ... port
2755 suffix: extra text, e.g. the http path and query string
2756 """
2757 self.logger.debug("Getting VM CONSOLE from VIM")
2758
2759 try:
2760 self._reload_connection()
2761 server = self.nova.servers.find(id=vm_id)
2762
2763 if console_type is None or console_type == "novnc":
2764 console_dict = server.get_vnc_console("novnc")
2765 elif console_type == "xvpvnc":
2766 console_dict = server.get_vnc_console(console_type)
2767 elif console_type == "rdp-html5":
2768 console_dict = server.get_rdp_console(console_type)
2769 elif console_type == "spice-html5":
2770 console_dict = server.get_spice_console(console_type)
2771 else:
2772 raise vimconn.VimConnException(
2773 "console type '{}' not allowed".format(console_type),
2774 http_code=vimconn.HTTP_Bad_Request,
2775 )
2776
2777 console_dict1 = console_dict.get("console")
2778
2779 if console_dict1:
2780 console_url = console_dict1.get("url")
2781
2782 if console_url:
2783 # parse console_url
2784 protocol_index = console_url.find("//")
2785 suffix_index = (
2786 console_url[protocol_index + 2 :].find("/") + protocol_index + 2
2787 )
2788 port_index = (
2789 console_url[protocol_index + 2 : suffix_index].find(":")
2790 + protocol_index
2791 + 2
2792 )
2793
2794 if protocol_index < 0 or port_index < 0 or suffix_index < 0:
2795 return (
2796 -vimconn.HTTP_Internal_Server_Error,
2797 "Unexpected response from VIM",
2798 )
2799
2800 console_dict = {
2801 "protocol": console_url[0:protocol_index],
2802 "server": console_url[protocol_index + 2 : port_index],
2803 "port": console_url[port_index:suffix_index],
2804 "suffix": console_url[suffix_index + 1 :],
2805 }
2806 protocol_index += 2
2807
2808 return console_dict
2809 raise vimconn.VimConnUnexpectedResponse("Unexpected response from VIM")
2810 except (
2811 nvExceptions.NotFound,
2812 ksExceptions.ClientException,
2813 nvExceptions.ClientException,
2814 nvExceptions.BadRequest,
2815 ConnectionError,
2816 ) as e:
2817 self._format_exception(e)
2818
2819 def _delete_ports_by_id_wth_neutron(self, k_id: str) -> None:
2820 """Neutron delete ports by id.
2821 Args:
2822 k_id (str): Port id in the VIM
2823 """
2824 try:
2825 port_dict = self.neutron.list_ports()
2826 existing_ports = [port["id"] for port in port_dict["ports"] if port_dict]
2827
2828 if k_id in existing_ports:
2829 self.neutron.delete_port(k_id)
2830
2831 except Exception as e:
2832 self.logger.error("Error deleting port: {}: {}".format(type(e).__name__, e))
2833
2834 def _delete_volumes_by_id_wth_cinder(
2835 self, k: str, k_id: str, volumes_to_hold: list, created_items: dict
2836 ) -> bool:
2837 """Cinder delete volume by id.
2838 Args:
2839 k (str): Full item name in created_items
2840 k_id (str): ID of floating ip in VIM
2841 volumes_to_hold (list): Volumes not to delete
2842 created_items (dict): All created items belongs to VM
2843 """
2844 try:
2845 if k_id in volumes_to_hold:
2846 return
2847
2848 if self.cinder.volumes.get(k_id).status != "available":
2849 return True
2850
2851 else:
2852 self.cinder.volumes.delete(k_id)
2853 created_items[k] = None
2854
2855 except Exception as e:
2856 self.logger.error(
2857 "Error deleting volume: {}: {}".format(type(e).__name__, e)
2858 )
2859
2860 def _delete_floating_ip_by_id(self, k: str, k_id: str, created_items: dict) -> None:
2861 """Neutron delete floating ip by id.
2862 Args:
2863 k (str): Full item name in created_items
2864 k_id (str): ID of floating ip in VIM
2865 created_items (dict): All created items belongs to VM
2866 """
2867 try:
2868 self.neutron.delete_floatingip(k_id)
2869 created_items[k] = None
2870
2871 except Exception as e:
2872 self.logger.error(
2873 "Error deleting floating ip: {}: {}".format(type(e).__name__, e)
2874 )
2875
2876 @staticmethod
2877 def _get_item_name_id(k: str) -> Tuple[str, str]:
2878 k_item, _, k_id = k.partition(":")
2879 return k_item, k_id
2880
2881 def _delete_vm_ports_attached_to_network(self, created_items: dict) -> None:
2882 """Delete VM ports attached to the networks before deleting virtual machine.
2883 Args:
2884 created_items (dict): All created items belongs to VM
2885 """
2886
2887 for k, v in created_items.items():
2888 if not v: # skip already deleted
2889 continue
2890
2891 try:
2892 k_item, k_id = self._get_item_name_id(k)
2893 if k_item == "port":
2894 self._delete_ports_by_id_wth_neutron(k_id)
2895
2896 except Exception as e:
2897 self.logger.error(
2898 "Error deleting port: {}: {}".format(type(e).__name__, e)
2899 )
2900
2901 def _delete_created_items(
2902 self, created_items: dict, volumes_to_hold: list, keep_waiting: bool
2903 ) -> bool:
2904 """Delete Volumes and floating ip if they exist in created_items."""
2905 for k, v in created_items.items():
2906 if not v: # skip already deleted
2907 continue
2908
2909 try:
2910 k_item, k_id = self._get_item_name_id(k)
2911
2912 if k_item == "volume":
2913 unavailable_vol = self._delete_volumes_by_id_wth_cinder(
2914 k, k_id, volumes_to_hold, created_items
2915 )
2916
2917 if unavailable_vol:
2918 keep_waiting = True
2919
2920 elif k_item == "floating_ip":
2921 self._delete_floating_ip_by_id(k, k_id, created_items)
2922
2923 except Exception as e:
2924 self.logger.error("Error deleting {}: {}".format(k, e))
2925
2926 return keep_waiting
2927
2928 def delete_vminstance(
2929 self, vm_id: str, created_items: dict = None, volumes_to_hold: list = None
2930 ) -> None:
2931 """Removes a VM instance from VIM. Returns the old identifier.
2932 Args:
2933 vm_id (str): Identifier of VM instance
2934 created_items (dict): All created items belongs to VM
2935 volumes_to_hold (list): Volumes_to_hold
2936 """
2937 if created_items is None:
2938 created_items = {}
2939 if volumes_to_hold is None:
2940 volumes_to_hold = []
2941
2942 try:
2943 self._reload_connection()
2944
2945 # Delete VM ports attached to the networks before the virtual machine
2946 if created_items:
2947 self._delete_vm_ports_attached_to_network(created_items)
2948
2949 if vm_id:
2950 self.nova.servers.delete(vm_id)
2951
2952 # Although having detached, volumes should have in active status before deleting.
2953 # We ensure in this loop
2954 keep_waiting = True
2955 elapsed_time = 0
2956
2957 while keep_waiting and elapsed_time < volume_timeout:
2958 keep_waiting = False
2959
2960 # Delete volumes and floating IP.
2961 keep_waiting = self._delete_created_items(
2962 created_items, volumes_to_hold, keep_waiting
2963 )
2964
2965 if keep_waiting:
2966 time.sleep(1)
2967 elapsed_time += 1
2968
2969 except (
2970 nvExceptions.NotFound,
2971 ksExceptions.ClientException,
2972 nvExceptions.ClientException,
2973 ConnectionError,
2974 ) as e:
2975 self._format_exception(e)
2976
2977 def refresh_vms_status(self, vm_list):
2978 """Get the status of the virtual machines and their interfaces/ports
2979 Params: the list of VM identifiers
2980 Returns a dictionary with:
2981 vm_id: #VIM id of this Virtual Machine
2982 status: #Mandatory. Text with one of:
2983 # DELETED (not found at vim)
2984 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
2985 # OTHER (Vim reported other status not understood)
2986 # ERROR (VIM indicates an ERROR status)
2987 # ACTIVE, PAUSED, SUSPENDED, INACTIVE (not running),
2988 # CREATING (on building process), ERROR
2989 # ACTIVE:NoMgmtIP (Active but any of its interface has an IP address
2990 #
2991 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
2992 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
2993 interfaces:
2994 - vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
2995 mac_address: #Text format XX:XX:XX:XX:XX:XX
2996 vim_net_id: #network id where this interface is connected
2997 vim_interface_id: #interface/port VIM id
2998 ip_address: #null, or text with IPv4, IPv6 address
2999 compute_node: #identification of compute node where PF,VF interface is allocated
3000 pci: #PCI address of the NIC that hosts the PF,VF
3001 vlan: #physical VLAN used for VF
3002 """
3003 vm_dict = {}
3004 self.logger.debug(
3005 "refresh_vms status: Getting tenant VM instance information from VIM"
3006 )
3007
3008 for vm_id in vm_list:
3009 vm = {}
3010
3011 try:
3012 vm_vim = self.get_vminstance(vm_id)
3013
3014 if vm_vim["status"] in vmStatus2manoFormat:
3015 vm["status"] = vmStatus2manoFormat[vm_vim["status"]]
3016 else:
3017 vm["status"] = "OTHER"
3018 vm["error_msg"] = "VIM status reported " + vm_vim["status"]
3019
3020 vm_vim.pop("OS-EXT-SRV-ATTR:user_data", None)
3021 vm_vim.pop("user_data", None)
3022 vm["vim_info"] = self.serialize(vm_vim)
3023
3024 vm["interfaces"] = []
3025 if vm_vim.get("fault"):
3026 vm["error_msg"] = str(vm_vim["fault"])
3027
3028 # get interfaces
3029 try:
3030 self._reload_connection()
3031 port_dict = self.neutron.list_ports(device_id=vm_id)
3032
3033 for port in port_dict["ports"]:
3034 interface = {}
3035 interface["vim_info"] = self.serialize(port)
3036 interface["mac_address"] = port.get("mac_address")
3037 interface["vim_net_id"] = port["network_id"]
3038 interface["vim_interface_id"] = port["id"]
3039 # check if OS-EXT-SRV-ATTR:host is there,
3040 # in case of non-admin credentials, it will be missing
3041
3042 if vm_vim.get("OS-EXT-SRV-ATTR:host"):
3043 interface["compute_node"] = vm_vim["OS-EXT-SRV-ATTR:host"]
3044
3045 interface["pci"] = None
3046
3047 # check if binding:profile is there,
3048 # in case of non-admin credentials, it will be missing
3049 if port.get("binding:profile"):
3050 if port["binding:profile"].get("pci_slot"):
3051 # TODO: At the moment sr-iov pci addresses are converted to PF pci addresses by setting
3052 # the slot to 0x00
3053 # TODO: This is just a workaround valid for niantinc. Find a better way to do so
3054 # CHANGE DDDD:BB:SS.F to DDDD:BB:00.(F%2) assuming there are 2 ports per nic
3055 pci = port["binding:profile"]["pci_slot"]
3056 # interface["pci"] = pci[:-4] + "00." + str(int(pci[-1]) % 2)
3057 interface["pci"] = pci
3058
3059 interface["vlan"] = None
3060
3061 if port.get("binding:vif_details"):
3062 interface["vlan"] = port["binding:vif_details"].get("vlan")
3063
3064 # Get vlan from network in case not present in port for those old openstacks and cases where
3065 # it is needed vlan at PT
3066 if not interface["vlan"]:
3067 # if network is of type vlan and port is of type direct (sr-iov) then set vlan id
3068 network = self.neutron.show_network(port["network_id"])
3069
3070 if (
3071 network["network"].get("provider:network_type")
3072 == "vlan"
3073 ):
3074 # and port.get("binding:vnic_type") in ("direct", "direct-physical"):
3075 interface["vlan"] = network["network"].get(
3076 "provider:segmentation_id"
3077 )
3078
3079 ips = []
3080 # look for floating ip address
3081 try:
3082 floating_ip_dict = self.neutron.list_floatingips(
3083 port_id=port["id"]
3084 )
3085
3086 if floating_ip_dict.get("floatingips"):
3087 ips.append(
3088 floating_ip_dict["floatingips"][0].get(
3089 "floating_ip_address"
3090 )
3091 )
3092 except Exception:
3093 pass
3094
3095 for subnet in port["fixed_ips"]:
3096 ips.append(subnet["ip_address"])
3097
3098 interface["ip_address"] = ";".join(ips)
3099 vm["interfaces"].append(interface)
3100 except Exception as e:
3101 self.logger.error(
3102 "Error getting vm interface information {}: {}".format(
3103 type(e).__name__, e
3104 ),
3105 exc_info=True,
3106 )
3107 except vimconn.VimConnNotFoundException as e:
3108 self.logger.error("Exception getting vm status: %s", str(e))
3109 vm["status"] = "DELETED"
3110 vm["error_msg"] = str(e)
3111 except vimconn.VimConnException as e:
3112 self.logger.error("Exception getting vm status: %s", str(e))
3113 vm["status"] = "VIM_ERROR"
3114 vm["error_msg"] = str(e)
3115
3116 vm_dict[vm_id] = vm
3117
3118 return vm_dict
3119
    def action_vminstance(self, vm_id, action_dict, created_items={}):
        """Send and action over a VM instance from VIM
        Returns None or the console dict if the action was successfully sent to the VIM

        :param vm_id: VIM identifier of the server
        :param action_dict: dict with exactly one action key among: "start"
            (value may be "rebuild"), "pause", "resume", "shutoff"/"shutdown",
            "forceOff", "terminate", "createImage", "rebuild", "reboot",
            "console" (value is the console type or None)
        :param created_items: not used by this method; kept for interface
            compatibility with the vimconn base signature
        :raises VimConnException: when the requested action is not valid for
            the current server status, or for an unknown console type
        """
        self.logger.debug("Action over VM '%s': %s", vm_id, str(action_dict))

        try:
            self._reload_connection()
            server = self.nova.servers.find(id=vm_id)

            if "start" in action_dict:
                if action_dict["start"] == "rebuild":
                    server.rebuild()
                else:
                    # "start" maps to a different nova call depending on the
                    # current server status
                    if server.status == "PAUSED":
                        server.unpause()
                    elif server.status == "SUSPENDED":
                        server.resume()
                    elif server.status == "SHUTOFF":
                        server.start()
                    else:
                        self.logger.debug(
                            "ERROR : Instance is not in SHUTOFF/PAUSE/SUSPEND state"
                        )
                        raise vimconn.VimConnException(
                            "Cannot 'start' instance while it is in active state",
                            http_code=vimconn.HTTP_Bad_Request,
                        )

            elif "pause" in action_dict:
                server.pause()
            elif "resume" in action_dict:
                server.resume()
            elif "shutoff" in action_dict or "shutdown" in action_dict:
                self.logger.debug("server status %s", server.status)
                if server.status == "ACTIVE":
                    server.stop()
                else:
                    self.logger.debug("ERROR: VM is not in Active state")
                    raise vimconn.VimConnException(
                        "VM is not in active state, stop operation is not allowed",
                        http_code=vimconn.HTTP_Bad_Request,
                    )
            elif "forceOff" in action_dict:
                server.stop()  # TODO
            elif "terminate" in action_dict:
                server.delete()
            elif "createImage" in action_dict:
                server.create_image()
                # "path":path_schema,
                # "description":description_schema,
                # "name":name_schema,
                # "metadata":metadata_schema,
                # "imageRef": id_schema,
                # "disk": {"oneOf":[{"type": "null"}, {"type":"string"}] },
            elif "rebuild" in action_dict:
                server.rebuild(server.image["id"])
            elif "reboot" in action_dict:
                server.reboot()  # reboot_type="SOFT"
            elif "console" in action_dict:
                console_type = action_dict["console"]

                # novnc is the default console type when none is given
                if console_type is None or console_type == "novnc":
                    console_dict = server.get_vnc_console("novnc")
                elif console_type == "xvpvnc":
                    console_dict = server.get_vnc_console(console_type)
                elif console_type == "rdp-html5":
                    console_dict = server.get_rdp_console(console_type)
                elif console_type == "spice-html5":
                    console_dict = server.get_spice_console(console_type)
                else:
                    raise vimconn.VimConnException(
                        "console type '{}' not allowed".format(console_type),
                        http_code=vimconn.HTTP_Bad_Request,
                    )

                try:
                    # Split the returned URL into protocol://server:port/suffix
                    # by locating "//", the first "/" after it, and the ":"
                    # between them.
                    console_url = console_dict["console"]["url"]
                    # parse console_url
                    protocol_index = console_url.find("//")
                    suffix_index = (
                        console_url[protocol_index + 2 :].find("/") + protocol_index + 2
                    )
                    port_index = (
                        console_url[protocol_index + 2 : suffix_index].find(":")
                        + protocol_index
                        + 2
                    )

                    # NOTE(review): suffix_index/port_index already include the
                    # "+ protocol_index + 2" offset, so a failed find() (-1)
                    # does not necessarily make them negative here; malformed
                    # URLs are instead caught by the except below (e.g. int("")).
                    if protocol_index < 0 or port_index < 0 or suffix_index < 0:
                        raise vimconn.VimConnException(
                            "Unexpected response from VIM " + str(console_dict)
                        )

                    console_dict2 = {
                        "protocol": console_url[0:protocol_index],
                        "server": console_url[protocol_index + 2 : port_index],
                        "port": int(console_url[port_index + 1 : suffix_index]),
                        "suffix": console_url[suffix_index + 1 :],
                    }

                    return console_dict2
                except Exception:
                    raise vimconn.VimConnException(
                        "Unexpected response from VIM " + str(console_dict)
                    )

            return None
        except (
            ksExceptions.ClientException,
            nvExceptions.ClientException,
            nvExceptions.NotFound,
            ConnectionError,
        ) as e:
            self._format_exception(e)
        # TODO insert exception vimconn.HTTP_Unauthorized
3236
3237 # ###### VIO Specific Changes #########
3238 def _generate_vlanID(self):
3239 """
3240 Method to get unused vlanID
3241 Args:
3242 None
3243 Returns:
3244 vlanID
3245 """
3246 # Get used VLAN IDs
3247 usedVlanIDs = []
3248 networks = self.get_network_list()
3249
3250 for net in networks:
3251 if net.get("provider:segmentation_id"):
3252 usedVlanIDs.append(net.get("provider:segmentation_id"))
3253
3254 used_vlanIDs = set(usedVlanIDs)
3255
3256 # find unused VLAN ID
3257 for vlanID_range in self.config.get("dataplane_net_vlan_range"):
3258 try:
3259 start_vlanid, end_vlanid = map(
3260 int, vlanID_range.replace(" ", "").split("-")
3261 )
3262
3263 for vlanID in range(start_vlanid, end_vlanid + 1):
3264 if vlanID not in used_vlanIDs:
3265 return vlanID
3266 except Exception as exp:
3267 raise vimconn.VimConnException(
3268 "Exception {} occurred while generating VLAN ID.".format(exp)
3269 )
3270 else:
3271 raise vimconn.VimConnConflictException(
3272 "Unable to create the SRIOV VLAN network. All given Vlan IDs {} are in use.".format(
3273 self.config.get("dataplane_net_vlan_range")
3274 )
3275 )
3276
3277 def _generate_multisegment_vlanID(self):
3278 """
3279 Method to get unused vlanID
3280 Args:
3281 None
3282 Returns:
3283 vlanID
3284 """
3285 # Get used VLAN IDs
3286 usedVlanIDs = []
3287 networks = self.get_network_list()
3288 for net in networks:
3289 if net.get("provider:network_type") == "vlan" and net.get(
3290 "provider:segmentation_id"
3291 ):
3292 usedVlanIDs.append(net.get("provider:segmentation_id"))
3293 elif net.get("segments"):
3294 for segment in net.get("segments"):
3295 if segment.get("provider:network_type") == "vlan" and segment.get(
3296 "provider:segmentation_id"
3297 ):
3298 usedVlanIDs.append(segment.get("provider:segmentation_id"))
3299
3300 used_vlanIDs = set(usedVlanIDs)
3301
3302 # find unused VLAN ID
3303 for vlanID_range in self.config.get("multisegment_vlan_range"):
3304 try:
3305 start_vlanid, end_vlanid = map(
3306 int, vlanID_range.replace(" ", "").split("-")
3307 )
3308
3309 for vlanID in range(start_vlanid, end_vlanid + 1):
3310 if vlanID not in used_vlanIDs:
3311 return vlanID
3312 except Exception as exp:
3313 raise vimconn.VimConnException(
3314 "Exception {} occurred while generating VLAN ID.".format(exp)
3315 )
3316 else:
3317 raise vimconn.VimConnConflictException(
3318 "Unable to create the VLAN segment. All VLAN IDs {} are in use.".format(
3319 self.config.get("multisegment_vlan_range")
3320 )
3321 )
3322
3323 def _validate_vlan_ranges(self, input_vlan_range, text_vlan_range):
3324 """
3325 Method to validate user given vlanID ranges
3326 Args: None
3327 Returns: None
3328 """
3329 for vlanID_range in input_vlan_range:
3330 vlan_range = vlanID_range.replace(" ", "")
3331 # validate format
3332 vlanID_pattern = r"(\d)*-(\d)*$"
3333 match_obj = re.match(vlanID_pattern, vlan_range)
3334 if not match_obj:
3335 raise vimconn.VimConnConflictException(
3336 "Invalid VLAN range for {}: {}.You must provide "
3337 "'{}' in format [start_ID - end_ID].".format(
3338 text_vlan_range, vlanID_range, text_vlan_range
3339 )
3340 )
3341
3342 start_vlanid, end_vlanid = map(int, vlan_range.split("-"))
3343 if start_vlanid <= 0:
3344 raise vimconn.VimConnConflictException(
3345 "Invalid VLAN range for {}: {}. Start ID can not be zero. For VLAN "
3346 "networks valid IDs are 1 to 4094 ".format(
3347 text_vlan_range, vlanID_range
3348 )
3349 )
3350
3351 if end_vlanid > 4094:
3352 raise vimconn.VimConnConflictException(
3353 "Invalid VLAN range for {}: {}. End VLAN ID can not be "
3354 "greater than 4094. For VLAN networks valid IDs are 1 to 4094 ".format(
3355 text_vlan_range, vlanID_range
3356 )
3357 )
3358
3359 if start_vlanid > end_vlanid:
3360 raise vimconn.VimConnConflictException(
3361 "Invalid VLAN range for {}: {}. You must provide '{}'"
3362 " in format start_ID - end_ID and start_ID < end_ID ".format(
3363 text_vlan_range, vlanID_range, text_vlan_range
3364 )
3365 )
3366
3367 # NOT USED FUNCTIONS
3368
3369 def new_external_port(self, port_data):
3370 """Adds a external port to VIM
3371 Returns the port identifier"""
3372 # TODO openstack if needed
3373 return (
3374 -vimconn.HTTP_Internal_Server_Error,
3375 "osconnector.new_external_port() not implemented",
3376 )
3377
3378 def connect_port_network(self, port_id, network_id, admin=False):
3379 """Connects a external port to a network
3380 Returns status code of the VIM response"""
3381 # TODO openstack if needed
3382 return (
3383 -vimconn.HTTP_Internal_Server_Error,
3384 "osconnector.connect_port_network() not implemented",
3385 )
3386
3387 def new_user(self, user_name, user_passwd, tenant_id=None):
3388 """Adds a new user to openstack VIM
3389 Returns the user identifier"""
3390 self.logger.debug("osconnector: Adding a new user to VIM")
3391
3392 try:
3393 self._reload_connection()
3394 user = self.keystone.users.create(
3395 user_name, password=user_passwd, default_project=tenant_id
3396 )
3397 # self.keystone.tenants.add_user(self.k_creds["username"], #role)
3398
3399 return user.id
3400 except ksExceptions.ConnectionError as e:
3401 error_value = -vimconn.HTTP_Bad_Request
3402 error_text = (
3403 type(e).__name__
3404 + ": "
3405 + (str(e) if len(e.args) == 0 else str(e.args[0]))
3406 )
3407 except ksExceptions.ClientException as e: # TODO remove
3408 error_value = -vimconn.HTTP_Bad_Request
3409 error_text = (
3410 type(e).__name__
3411 + ": "
3412 + (str(e) if len(e.args) == 0 else str(e.args[0]))
3413 )
3414
3415 # TODO insert exception vimconn.HTTP_Unauthorized
3416 # if reaching here is because an exception
3417 self.logger.debug("new_user " + error_text)
3418
3419 return error_value, error_text
3420
3421 def delete_user(self, user_id):
3422 """Delete a user from openstack VIM
3423 Returns the user identifier"""
3424 if self.debug:
3425 print("osconnector: Deleting a user from VIM")
3426
3427 try:
3428 self._reload_connection()
3429 self.keystone.users.delete(user_id)
3430
3431 return 1, user_id
3432 except ksExceptions.ConnectionError as e:
3433 error_value = -vimconn.HTTP_Bad_Request
3434 error_text = (
3435 type(e).__name__
3436 + ": "
3437 + (str(e) if len(e.args) == 0 else str(e.args[0]))
3438 )
3439 except ksExceptions.NotFound as e:
3440 error_value = -vimconn.HTTP_Not_Found
3441 error_text = (
3442 type(e).__name__
3443 + ": "
3444 + (str(e) if len(e.args) == 0 else str(e.args[0]))
3445 )
3446 except ksExceptions.ClientException as e: # TODO remove
3447 error_value = -vimconn.HTTP_Bad_Request
3448 error_text = (
3449 type(e).__name__
3450 + ": "
3451 + (str(e) if len(e.args) == 0 else str(e.args[0]))
3452 )
3453
3454 # TODO insert exception vimconn.HTTP_Unauthorized
3455 # if reaching here is because an exception
3456 self.logger.debug("delete_tenant " + error_text)
3457
3458 return error_value, error_text
3459
3460 def get_hosts_info(self):
3461 """Get the information of deployed hosts
3462 Returns the hosts content"""
3463 if self.debug:
3464 print("osconnector: Getting Host info from VIM")
3465
3466 try:
3467 h_list = []
3468 self._reload_connection()
3469 hypervisors = self.nova.hypervisors.list()
3470
3471 for hype in hypervisors:
3472 h_list.append(hype.to_dict())
3473
3474 return 1, {"hosts": h_list}
3475 except nvExceptions.NotFound as e:
3476 error_value = -vimconn.HTTP_Not_Found
3477 error_text = str(e) if len(e.args) == 0 else str(e.args[0])
3478 except (ksExceptions.ClientException, nvExceptions.ClientException) as e:
3479 error_value = -vimconn.HTTP_Bad_Request
3480 error_text = (
3481 type(e).__name__
3482 + ": "
3483 + (str(e) if len(e.args) == 0 else str(e.args[0]))
3484 )
3485
3486 # TODO insert exception vimconn.HTTP_Unauthorized
3487 # if reaching here is because an exception
3488 self.logger.debug("get_hosts_info " + error_text)
3489
3490 return error_value, error_text
3491
3492 def get_hosts(self, vim_tenant):
3493 """Get the hosts and deployed instances
3494 Returns the hosts content"""
3495 r, hype_dict = self.get_hosts_info()
3496
3497 if r < 0:
3498 return r, hype_dict
3499
3500 hypervisors = hype_dict["hosts"]
3501
3502 try:
3503 servers = self.nova.servers.list()
3504 for hype in hypervisors:
3505 for server in servers:
3506 if (
3507 server.to_dict()["OS-EXT-SRV-ATTR:hypervisor_hostname"]
3508 == hype["hypervisor_hostname"]
3509 ):
3510 if "vm" in hype:
3511 hype["vm"].append(server.id)
3512 else:
3513 hype["vm"] = [server.id]
3514
3515 return 1, hype_dict
3516 except nvExceptions.NotFound as e:
3517 error_value = -vimconn.HTTP_Not_Found
3518 error_text = str(e) if len(e.args) == 0 else str(e.args[0])
3519 except (ksExceptions.ClientException, nvExceptions.ClientException) as e:
3520 error_value = -vimconn.HTTP_Bad_Request
3521 error_text = (
3522 type(e).__name__
3523 + ": "
3524 + (str(e) if len(e.args) == 0 else str(e.args[0]))
3525 )
3526
3527 # TODO insert exception vimconn.HTTP_Unauthorized
3528 # if reaching here is because an exception
3529 self.logger.debug("get_hosts " + error_text)
3530
3531 return error_value, error_text
3532
3533 def new_classification(self, name, ctype, definition):
3534 self.logger.debug(
3535 "Adding a new (Traffic) Classification to VIM, named %s", name
3536 )
3537
3538 try:
3539 new_class = None
3540 self._reload_connection()
3541
3542 if ctype not in supportedClassificationTypes:
3543 raise vimconn.VimConnNotSupportedException(
3544 "OpenStack VIM connector does not support provided "
3545 "Classification Type {}, supported ones are: {}".format(
3546 ctype, supportedClassificationTypes
3547 )
3548 )
3549
3550 if not self._validate_classification(ctype, definition):
3551 raise vimconn.VimConnException(
3552 "Incorrect Classification definition for the type specified."
3553 )
3554
3555 classification_dict = definition
3556 classification_dict["name"] = name
3557 new_class = self.neutron.create_sfc_flow_classifier(
3558 {"flow_classifier": classification_dict}
3559 )
3560
3561 return new_class["flow_classifier"]["id"]
3562 except (
3563 neExceptions.ConnectionFailed,
3564 ksExceptions.ClientException,
3565 neExceptions.NeutronException,
3566 ConnectionError,
3567 ) as e:
3568 self.logger.error("Creation of Classification failed.")
3569 self._format_exception(e)
3570
3571 def get_classification(self, class_id):
3572 self.logger.debug(" Getting Classification %s from VIM", class_id)
3573 filter_dict = {"id": class_id}
3574 class_list = self.get_classification_list(filter_dict)
3575
3576 if len(class_list) == 0:
3577 raise vimconn.VimConnNotFoundException(
3578 "Classification '{}' not found".format(class_id)
3579 )
3580 elif len(class_list) > 1:
3581 raise vimconn.VimConnConflictException(
3582 "Found more than one Classification with this criteria"
3583 )
3584
3585 classification = class_list[0]
3586
3587 return classification
3588
3589 def get_classification_list(self, filter_dict={}):
3590 self.logger.debug(
3591 "Getting Classifications from VIM filter: '%s'", str(filter_dict)
3592 )
3593
3594 try:
3595 filter_dict_os = filter_dict.copy()
3596 self._reload_connection()
3597
3598 if self.api_version3 and "tenant_id" in filter_dict_os:
3599 filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")
3600
3601 classification_dict = self.neutron.list_sfc_flow_classifiers(
3602 **filter_dict_os
3603 )
3604 classification_list = classification_dict["flow_classifiers"]
3605 self.__classification_os2mano(classification_list)
3606
3607 return classification_list
3608 except (
3609 neExceptions.ConnectionFailed,
3610 ksExceptions.ClientException,
3611 neExceptions.NeutronException,
3612 ConnectionError,
3613 ) as e:
3614 self._format_exception(e)
3615
3616 def delete_classification(self, class_id):
3617 self.logger.debug("Deleting Classification '%s' from VIM", class_id)
3618
3619 try:
3620 self._reload_connection()
3621 self.neutron.delete_sfc_flow_classifier(class_id)
3622
3623 return class_id
3624 except (
3625 neExceptions.ConnectionFailed,
3626 neExceptions.NeutronException,
3627 ksExceptions.ClientException,
3628 neExceptions.NeutronException,
3629 ConnectionError,
3630 ) as e:
3631 self._format_exception(e)
3632
3633 def new_sfi(self, name, ingress_ports, egress_ports, sfc_encap=True):
3634 self.logger.debug(
3635 "Adding a new Service Function Instance to VIM, named '%s'", name
3636 )
3637
3638 try:
3639 new_sfi = None
3640 self._reload_connection()
3641 correlation = None
3642
3643 if sfc_encap:
3644 correlation = "nsh"
3645
3646 if len(ingress_ports) != 1:
3647 raise vimconn.VimConnNotSupportedException(
3648 "OpenStack VIM connector can only have 1 ingress port per SFI"
3649 )
3650
3651 if len(egress_ports) != 1:
3652 raise vimconn.VimConnNotSupportedException(
3653 "OpenStack VIM connector can only have 1 egress port per SFI"
3654 )
3655
3656 sfi_dict = {
3657 "name": name,
3658 "ingress": ingress_ports[0],
3659 "egress": egress_ports[0],
3660 "service_function_parameters": {"correlation": correlation},
3661 }
3662 new_sfi = self.neutron.create_sfc_port_pair({"port_pair": sfi_dict})
3663
3664 return new_sfi["port_pair"]["id"]
3665 except (
3666 neExceptions.ConnectionFailed,
3667 ksExceptions.ClientException,
3668 neExceptions.NeutronException,
3669 ConnectionError,
3670 ) as e:
3671 if new_sfi:
3672 try:
3673 self.neutron.delete_sfc_port_pair(new_sfi["port_pair"]["id"])
3674 except Exception:
3675 self.logger.error(
3676 "Creation of Service Function Instance failed, with "
3677 "subsequent deletion failure as well."
3678 )
3679
3680 self._format_exception(e)
3681
3682 def get_sfi(self, sfi_id):
3683 self.logger.debug("Getting Service Function Instance %s from VIM", sfi_id)
3684 filter_dict = {"id": sfi_id}
3685 sfi_list = self.get_sfi_list(filter_dict)
3686
3687 if len(sfi_list) == 0:
3688 raise vimconn.VimConnNotFoundException(
3689 "Service Function Instance '{}' not found".format(sfi_id)
3690 )
3691 elif len(sfi_list) > 1:
3692 raise vimconn.VimConnConflictException(
3693 "Found more than one Service Function Instance with this criteria"
3694 )
3695
3696 sfi = sfi_list[0]
3697
3698 return sfi
3699
3700 def get_sfi_list(self, filter_dict={}):
3701 self.logger.debug(
3702 "Getting Service Function Instances from VIM filter: '%s'", str(filter_dict)
3703 )
3704
3705 try:
3706 self._reload_connection()
3707 filter_dict_os = filter_dict.copy()
3708
3709 if self.api_version3 and "tenant_id" in filter_dict_os:
3710 filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")
3711
3712 sfi_dict = self.neutron.list_sfc_port_pairs(**filter_dict_os)
3713 sfi_list = sfi_dict["port_pairs"]
3714 self.__sfi_os2mano(sfi_list)
3715
3716 return sfi_list
3717 except (
3718 neExceptions.ConnectionFailed,
3719 ksExceptions.ClientException,
3720 neExceptions.NeutronException,
3721 ConnectionError,
3722 ) as e:
3723 self._format_exception(e)
3724
3725 def delete_sfi(self, sfi_id):
3726 self.logger.debug("Deleting Service Function Instance '%s' from VIM", sfi_id)
3727
3728 try:
3729 self._reload_connection()
3730 self.neutron.delete_sfc_port_pair(sfi_id)
3731
3732 return sfi_id
3733 except (
3734 neExceptions.ConnectionFailed,
3735 neExceptions.NeutronException,
3736 ksExceptions.ClientException,
3737 neExceptions.NeutronException,
3738 ConnectionError,
3739 ) as e:
3740 self._format_exception(e)
3741
3742 def new_sf(self, name, sfis, sfc_encap=True):
3743 self.logger.debug("Adding a new Service Function to VIM, named '%s'", name)
3744
3745 try:
3746 new_sf = None
3747 self._reload_connection()
3748 # correlation = None
3749 # if sfc_encap:
3750 # correlation = "nsh"
3751
3752 for instance in sfis:
3753 sfi = self.get_sfi(instance)
3754
3755 if sfi.get("sfc_encap") != sfc_encap:
3756 raise vimconn.VimConnNotSupportedException(
3757 "OpenStack VIM connector requires all SFIs of the "
3758 "same SF to share the same SFC Encapsulation"
3759 )
3760
3761 sf_dict = {"name": name, "port_pairs": sfis}
3762 new_sf = self.neutron.create_sfc_port_pair_group(
3763 {"port_pair_group": sf_dict}
3764 )
3765
3766 return new_sf["port_pair_group"]["id"]
3767 except (
3768 neExceptions.ConnectionFailed,
3769 ksExceptions.ClientException,
3770 neExceptions.NeutronException,
3771 ConnectionError,
3772 ) as e:
3773 if new_sf:
3774 try:
3775 self.neutron.delete_sfc_port_pair_group(
3776 new_sf["port_pair_group"]["id"]
3777 )
3778 except Exception:
3779 self.logger.error(
3780 "Creation of Service Function failed, with "
3781 "subsequent deletion failure as well."
3782 )
3783
3784 self._format_exception(e)
3785
3786 def get_sf(self, sf_id):
3787 self.logger.debug("Getting Service Function %s from VIM", sf_id)
3788 filter_dict = {"id": sf_id}
3789 sf_list = self.get_sf_list(filter_dict)
3790
3791 if len(sf_list) == 0:
3792 raise vimconn.VimConnNotFoundException(
3793 "Service Function '{}' not found".format(sf_id)
3794 )
3795 elif len(sf_list) > 1:
3796 raise vimconn.VimConnConflictException(
3797 "Found more than one Service Function with this criteria"
3798 )
3799
3800 sf = sf_list[0]
3801
3802 return sf
3803
3804 def get_sf_list(self, filter_dict={}):
3805 self.logger.debug(
3806 "Getting Service Function from VIM filter: '%s'", str(filter_dict)
3807 )
3808
3809 try:
3810 self._reload_connection()
3811 filter_dict_os = filter_dict.copy()
3812
3813 if self.api_version3 and "tenant_id" in filter_dict_os:
3814 filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")
3815
3816 sf_dict = self.neutron.list_sfc_port_pair_groups(**filter_dict_os)
3817 sf_list = sf_dict["port_pair_groups"]
3818 self.__sf_os2mano(sf_list)
3819
3820 return sf_list
3821 except (
3822 neExceptions.ConnectionFailed,
3823 ksExceptions.ClientException,
3824 neExceptions.NeutronException,
3825 ConnectionError,
3826 ) as e:
3827 self._format_exception(e)
3828
3829 def delete_sf(self, sf_id):
3830 self.logger.debug("Deleting Service Function '%s' from VIM", sf_id)
3831
3832 try:
3833 self._reload_connection()
3834 self.neutron.delete_sfc_port_pair_group(sf_id)
3835
3836 return sf_id
3837 except (
3838 neExceptions.ConnectionFailed,
3839 neExceptions.NeutronException,
3840 ksExceptions.ClientException,
3841 neExceptions.NeutronException,
3842 ConnectionError,
3843 ) as e:
3844 self._format_exception(e)
3845
3846 def new_sfp(self, name, classifications, sfs, sfc_encap=True, spi=None):
3847 self.logger.debug("Adding a new Service Function Path to VIM, named '%s'", name)
3848
3849 try:
3850 new_sfp = None
3851 self._reload_connection()
3852 # In networking-sfc the MPLS encapsulation is legacy
3853 # should be used when no full SFC Encapsulation is intended
3854 correlation = "mpls"
3855
3856 if sfc_encap:
3857 correlation = "nsh"
3858
3859 sfp_dict = {
3860 "name": name,
3861 "flow_classifiers": classifications,
3862 "port_pair_groups": sfs,
3863 "chain_parameters": {"correlation": correlation},
3864 }
3865
3866 if spi:
3867 sfp_dict["chain_id"] = spi
3868
3869 new_sfp = self.neutron.create_sfc_port_chain({"port_chain": sfp_dict})
3870
3871 return new_sfp["port_chain"]["id"]
3872 except (
3873 neExceptions.ConnectionFailed,
3874 ksExceptions.ClientException,
3875 neExceptions.NeutronException,
3876 ConnectionError,
3877 ) as e:
3878 if new_sfp:
3879 try:
3880 self.neutron.delete_sfc_port_chain(new_sfp["port_chain"]["id"])
3881 except Exception:
3882 self.logger.error(
3883 "Creation of Service Function Path failed, with "
3884 "subsequent deletion failure as well."
3885 )
3886
3887 self._format_exception(e)
3888
3889 def get_sfp(self, sfp_id):
3890 self.logger.debug(" Getting Service Function Path %s from VIM", sfp_id)
3891
3892 filter_dict = {"id": sfp_id}
3893 sfp_list = self.get_sfp_list(filter_dict)
3894
3895 if len(sfp_list) == 0:
3896 raise vimconn.VimConnNotFoundException(
3897 "Service Function Path '{}' not found".format(sfp_id)
3898 )
3899 elif len(sfp_list) > 1:
3900 raise vimconn.VimConnConflictException(
3901 "Found more than one Service Function Path with this criteria"
3902 )
3903
3904 sfp = sfp_list[0]
3905
3906 return sfp
3907
3908 def get_sfp_list(self, filter_dict={}):
3909 self.logger.debug(
3910 "Getting Service Function Paths from VIM filter: '%s'", str(filter_dict)
3911 )
3912
3913 try:
3914 self._reload_connection()
3915 filter_dict_os = filter_dict.copy()
3916
3917 if self.api_version3 and "tenant_id" in filter_dict_os:
3918 filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")
3919
3920 sfp_dict = self.neutron.list_sfc_port_chains(**filter_dict_os)
3921 sfp_list = sfp_dict["port_chains"]
3922 self.__sfp_os2mano(sfp_list)
3923
3924 return sfp_list
3925 except (
3926 neExceptions.ConnectionFailed,
3927 ksExceptions.ClientException,
3928 neExceptions.NeutronException,
3929 ConnectionError,
3930 ) as e:
3931 self._format_exception(e)
3932
3933 def delete_sfp(self, sfp_id):
3934 self.logger.debug("Deleting Service Function Path '%s' from VIM", sfp_id)
3935
3936 try:
3937 self._reload_connection()
3938 self.neutron.delete_sfc_port_chain(sfp_id)
3939
3940 return sfp_id
3941 except (
3942 neExceptions.ConnectionFailed,
3943 neExceptions.NeutronException,
3944 ksExceptions.ClientException,
3945 neExceptions.NeutronException,
3946 ConnectionError,
3947 ) as e:
3948 self._format_exception(e)
3949
3950 def refresh_sfps_status(self, sfp_list):
3951 """Get the status of the service function path
3952 Params: the list of sfp identifiers
3953 Returns a dictionary with:
3954 vm_id: #VIM id of this service function path
3955 status: #Mandatory. Text with one of:
3956 # DELETED (not found at vim)
3957 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
3958 # OTHER (Vim reported other status not understood)
3959 # ERROR (VIM indicates an ERROR status)
3960 # ACTIVE,
3961 # CREATING (on building process)
3962 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
3963 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)F
3964 """
3965 sfp_dict = {}
3966 self.logger.debug(
3967 "refresh_sfps status: Getting tenant SFP information from VIM"
3968 )
3969
3970 for sfp_id in sfp_list:
3971 sfp = {}
3972
3973 try:
3974 sfp_vim = self.get_sfp(sfp_id)
3975
3976 if sfp_vim["spi"]:
3977 sfp["status"] = vmStatus2manoFormat["ACTIVE"]
3978 else:
3979 sfp["status"] = "OTHER"
3980 sfp["error_msg"] = "VIM status reported " + sfp["status"]
3981
3982 sfp["vim_info"] = self.serialize(sfp_vim)
3983
3984 if sfp_vim.get("fault"):
3985 sfp["error_msg"] = str(sfp_vim["fault"])
3986 except vimconn.VimConnNotFoundException as e:
3987 self.logger.error("Exception getting sfp status: %s", str(e))
3988 sfp["status"] = "DELETED"
3989 sfp["error_msg"] = str(e)
3990 except vimconn.VimConnException as e:
3991 self.logger.error("Exception getting sfp status: %s", str(e))
3992 sfp["status"] = "VIM_ERROR"
3993 sfp["error_msg"] = str(e)
3994
3995 sfp_dict[sfp_id] = sfp
3996
3997 return sfp_dict
3998
3999 def refresh_sfis_status(self, sfi_list):
4000 """Get the status of the service function instances
4001 Params: the list of sfi identifiers
4002 Returns a dictionary with:
4003 vm_id: #VIM id of this service function instance
4004 status: #Mandatory. Text with one of:
4005 # DELETED (not found at vim)
4006 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
4007 # OTHER (Vim reported other status not understood)
4008 # ERROR (VIM indicates an ERROR status)
4009 # ACTIVE,
4010 # CREATING (on building process)
4011 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
4012 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
4013 """
4014 sfi_dict = {}
4015 self.logger.debug(
4016 "refresh_sfis status: Getting tenant sfi information from VIM"
4017 )
4018
4019 for sfi_id in sfi_list:
4020 sfi = {}
4021
4022 try:
4023 sfi_vim = self.get_sfi(sfi_id)
4024
4025 if sfi_vim:
4026 sfi["status"] = vmStatus2manoFormat["ACTIVE"]
4027 else:
4028 sfi["status"] = "OTHER"
4029 sfi["error_msg"] = "VIM status reported " + sfi["status"]
4030
4031 sfi["vim_info"] = self.serialize(sfi_vim)
4032
4033 if sfi_vim.get("fault"):
4034 sfi["error_msg"] = str(sfi_vim["fault"])
4035 except vimconn.VimConnNotFoundException as e:
4036 self.logger.error("Exception getting sfi status: %s", str(e))
4037 sfi["status"] = "DELETED"
4038 sfi["error_msg"] = str(e)
4039 except vimconn.VimConnException as e:
4040 self.logger.error("Exception getting sfi status: %s", str(e))
4041 sfi["status"] = "VIM_ERROR"
4042 sfi["error_msg"] = str(e)
4043
4044 sfi_dict[sfi_id] = sfi
4045
4046 return sfi_dict
4047
4048 def refresh_sfs_status(self, sf_list):
4049 """Get the status of the service functions
4050 Params: the list of sf identifiers
4051 Returns a dictionary with:
4052 vm_id: #VIM id of this service function
4053 status: #Mandatory. Text with one of:
4054 # DELETED (not found at vim)
4055 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
4056 # OTHER (Vim reported other status not understood)
4057 # ERROR (VIM indicates an ERROR status)
4058 # ACTIVE,
4059 # CREATING (on building process)
4060 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
4061 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
4062 """
4063 sf_dict = {}
4064 self.logger.debug("refresh_sfs status: Getting tenant sf information from VIM")
4065
4066 for sf_id in sf_list:
4067 sf = {}
4068
4069 try:
4070 sf_vim = self.get_sf(sf_id)
4071
4072 if sf_vim:
4073 sf["status"] = vmStatus2manoFormat["ACTIVE"]
4074 else:
4075 sf["status"] = "OTHER"
4076 sf["error_msg"] = "VIM status reported " + sf_vim["status"]
4077
4078 sf["vim_info"] = self.serialize(sf_vim)
4079
4080 if sf_vim.get("fault"):
4081 sf["error_msg"] = str(sf_vim["fault"])
4082 except vimconn.VimConnNotFoundException as e:
4083 self.logger.error("Exception getting sf status: %s", str(e))
4084 sf["status"] = "DELETED"
4085 sf["error_msg"] = str(e)
4086 except vimconn.VimConnException as e:
4087 self.logger.error("Exception getting sf status: %s", str(e))
4088 sf["status"] = "VIM_ERROR"
4089 sf["error_msg"] = str(e)
4090
4091 sf_dict[sf_id] = sf
4092
4093 return sf_dict
4094
4095 def refresh_classifications_status(self, classification_list):
4096 """Get the status of the classifications
4097 Params: the list of classification identifiers
4098 Returns a dictionary with:
4099 vm_id: #VIM id of this classifier
4100 status: #Mandatory. Text with one of:
4101 # DELETED (not found at vim)
4102 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
4103 # OTHER (Vim reported other status not understood)
4104 # ERROR (VIM indicates an ERROR status)
4105 # ACTIVE,
4106 # CREATING (on building process)
4107 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
4108 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
4109 """
4110 classification_dict = {}
4111 self.logger.debug(
4112 "refresh_classifications status: Getting tenant classification information from VIM"
4113 )
4114
4115 for classification_id in classification_list:
4116 classification = {}
4117
4118 try:
4119 classification_vim = self.get_classification(classification_id)
4120
4121 if classification_vim:
4122 classification["status"] = vmStatus2manoFormat["ACTIVE"]
4123 else:
4124 classification["status"] = "OTHER"
4125 classification["error_msg"] = (
4126 "VIM status reported " + classification["status"]
4127 )
4128
4129 classification["vim_info"] = self.serialize(classification_vim)
4130
4131 if classification_vim.get("fault"):
4132 classification["error_msg"] = str(classification_vim["fault"])
4133 except vimconn.VimConnNotFoundException as e:
4134 self.logger.error("Exception getting classification status: %s", str(e))
4135 classification["status"] = "DELETED"
4136 classification["error_msg"] = str(e)
4137 except vimconn.VimConnException as e:
4138 self.logger.error("Exception getting classification status: %s", str(e))
4139 classification["status"] = "VIM_ERROR"
4140 classification["error_msg"] = str(e)
4141
4142 classification_dict[classification_id] = classification
4143
4144 return classification_dict
4145
4146 def new_affinity_group(self, affinity_group_data):
4147 """Adds a server group to VIM
4148 affinity_group_data contains a dictionary with information, keys:
4149 name: name in VIM for the server group
4150 type: affinity or anti-affinity
4151 scope: Only nfvi-node allowed
4152 Returns the server group identifier"""
4153 self.logger.debug("Adding Server Group '%s'", str(affinity_group_data))
4154
4155 try:
4156 name = affinity_group_data["name"]
4157 policy = affinity_group_data["type"]
4158
4159 self._reload_connection()
4160 new_server_group = self.nova.server_groups.create(name, policy)
4161
4162 return new_server_group.id
4163 except (
4164 ksExceptions.ClientException,
4165 nvExceptions.ClientException,
4166 ConnectionError,
4167 KeyError,
4168 ) as e:
4169 self._format_exception(e)
4170
4171 def get_affinity_group(self, affinity_group_id):
4172 """Obtain server group details from the VIM. Returns the server group detais as a dict"""
4173 self.logger.debug("Getting flavor '%s'", affinity_group_id)
4174 try:
4175 self._reload_connection()
4176 server_group = self.nova.server_groups.find(id=affinity_group_id)
4177
4178 return server_group.to_dict()
4179 except (
4180 nvExceptions.NotFound,
4181 nvExceptions.ClientException,
4182 ksExceptions.ClientException,
4183 ConnectionError,
4184 ) as e:
4185 self._format_exception(e)
4186
4187 def delete_affinity_group(self, affinity_group_id):
4188 """Deletes a server group from the VIM. Returns the old affinity_group_id"""
4189 self.logger.debug("Getting server group '%s'", affinity_group_id)
4190 try:
4191 self._reload_connection()
4192 self.nova.server_groups.delete(affinity_group_id)
4193
4194 return affinity_group_id
4195 except (
4196 nvExceptions.NotFound,
4197 ksExceptions.ClientException,
4198 nvExceptions.ClientException,
4199 ConnectionError,
4200 ) as e:
4201 self._format_exception(e)
4202
4203 def get_vdu_state(self, vm_id):
4204 """
4205 Getting the state of a vdu
4206 param:
4207 vm_id: ID of an instance
4208 """
4209 self.logger.debug("Getting the status of VM")
4210 self.logger.debug("VIM VM ID %s", vm_id)
4211 self._reload_connection()
4212 server = self.nova.servers.find(id=vm_id)
4213 server_dict = server.to_dict()
4214 vdu_data = [
4215 server_dict["status"],
4216 server_dict["flavor"]["id"],
4217 server_dict["OS-EXT-SRV-ATTR:host"],
4218 server_dict["OS-EXT-AZ:availability_zone"],
4219 ]
4220 self.logger.debug("vdu_data %s", vdu_data)
4221 return vdu_data
4222
4223 def check_compute_availability(self, host, server_flavor_details):
4224 self._reload_connection()
4225 hypervisor_search = self.nova.hypervisors.search(
4226 hypervisor_match=host, servers=True
4227 )
4228 for hypervisor in hypervisor_search:
4229 hypervisor_id = hypervisor.to_dict()["id"]
4230 hypervisor_details = self.nova.hypervisors.get(hypervisor=hypervisor_id)
4231 hypervisor_dict = hypervisor_details.to_dict()
4232 hypervisor_temp = json.dumps(hypervisor_dict)
4233 hypervisor_json = json.loads(hypervisor_temp)
4234 resources_available = [
4235 hypervisor_json["free_ram_mb"],
4236 hypervisor_json["disk_available_least"],
4237 hypervisor_json["vcpus"] - hypervisor_json["vcpus_used"],
4238 ]
4239 compute_available = all(
4240 x > y for x, y in zip(resources_available, server_flavor_details)
4241 )
4242 if compute_available:
4243 return host
4244
4245 def check_availability_zone(
4246 self, old_az, server_flavor_details, old_host, host=None
4247 ):
4248 self._reload_connection()
4249 az_check = {"zone_check": False, "compute_availability": None}
4250 aggregates_list = self.nova.aggregates.list()
4251 for aggregate in aggregates_list:
4252 aggregate_details = aggregate.to_dict()
4253 aggregate_temp = json.dumps(aggregate_details)
4254 aggregate_json = json.loads(aggregate_temp)
4255 if aggregate_json["availability_zone"] == old_az:
4256 hosts_list = aggregate_json["hosts"]
4257 if host is not None:
4258 if host in hosts_list:
4259 az_check["zone_check"] = True
4260 available_compute_id = self.check_compute_availability(
4261 host, server_flavor_details
4262 )
4263 if available_compute_id is not None:
4264 az_check["compute_availability"] = available_compute_id
4265 else:
4266 for check_host in hosts_list:
4267 if check_host != old_host:
4268 available_compute_id = self.check_compute_availability(
4269 check_host, server_flavor_details
4270 )
4271 if available_compute_id is not None:
4272 az_check["zone_check"] = True
4273 az_check["compute_availability"] = available_compute_id
4274 break
4275 else:
4276 az_check["zone_check"] = True
4277 return az_check
4278
4279 def migrate_instance(self, vm_id, compute_host=None):
4280 """
4281 Migrate a vdu
4282 param:
4283 vm_id: ID of an instance
4284 compute_host: Host to migrate the vdu to
4285 """
4286 self._reload_connection()
4287 vm_state = False
4288 instance_state = self.get_vdu_state(vm_id)
4289 server_flavor_id = instance_state[1]
4290 server_hypervisor_name = instance_state[2]
4291 server_availability_zone = instance_state[3]
4292 try:
4293 server_flavor = self.nova.flavors.find(id=server_flavor_id).to_dict()
4294 server_flavor_details = [
4295 server_flavor["ram"],
4296 server_flavor["disk"],
4297 server_flavor["vcpus"],
4298 ]
4299 if compute_host == server_hypervisor_name:
4300 raise vimconn.VimConnException(
4301 "Unable to migrate instance '{}' to the same host '{}'".format(
4302 vm_id, compute_host
4303 ),
4304 http_code=vimconn.HTTP_Bad_Request,
4305 )
4306 az_status = self.check_availability_zone(
4307 server_availability_zone,
4308 server_flavor_details,
4309 server_hypervisor_name,
4310 compute_host,
4311 )
4312 availability_zone_check = az_status["zone_check"]
4313 available_compute_id = az_status.get("compute_availability")
4314
4315 if availability_zone_check is False:
4316 raise vimconn.VimConnException(
4317 "Unable to migrate instance '{}' to a different availability zone".format(
4318 vm_id
4319 ),
4320 http_code=vimconn.HTTP_Bad_Request,
4321 )
4322 if available_compute_id is not None:
4323 self.nova.servers.live_migrate(
4324 server=vm_id,
4325 host=available_compute_id,
4326 block_migration=True,
4327 disk_over_commit=False,
4328 )
4329 state = "MIGRATING"
4330 changed_compute_host = ""
4331 if state == "MIGRATING":
4332 vm_state = self.__wait_for_vm(vm_id, "ACTIVE")
4333 changed_compute_host = self.get_vdu_state(vm_id)[2]
4334 if vm_state and changed_compute_host == available_compute_id:
4335 self.logger.debug(
4336 "Instance '{}' migrated to the new compute host '{}'".format(
4337 vm_id, changed_compute_host
4338 )
4339 )
4340 return state, available_compute_id
4341 else:
4342 raise vimconn.VimConnException(
4343 "Migration Failed. Instance '{}' not moved to the new host {}".format(
4344 vm_id, available_compute_id
4345 ),
4346 http_code=vimconn.HTTP_Bad_Request,
4347 )
4348 else:
4349 raise vimconn.VimConnException(
4350 "Compute '{}' not available or does not have enough resources to migrate the instance".format(
4351 available_compute_id
4352 ),
4353 http_code=vimconn.HTTP_Bad_Request,
4354 )
4355 except (
4356 nvExceptions.BadRequest,
4357 nvExceptions.ClientException,
4358 nvExceptions.NotFound,
4359 ) as e:
4360 self._format_exception(e)
4361
4362 def resize_instance(self, vm_id, new_flavor_id):
4363 """
4364 For resizing the vm based on the given
4365 flavor details
4366 param:
4367 vm_id : ID of an instance
4368 new_flavor_id : Flavor id to be resized
4369 Return the status of a resized instance
4370 """
4371 self._reload_connection()
4372 self.logger.debug("resize the flavor of an instance")
4373 instance_status, old_flavor_id, compute_host, az = self.get_vdu_state(vm_id)
4374 old_flavor_disk = self.nova.flavors.find(id=old_flavor_id).to_dict()["disk"]
4375 new_flavor_disk = self.nova.flavors.find(id=new_flavor_id).to_dict()["disk"]
4376 try:
4377 if instance_status == "ACTIVE" or instance_status == "SHUTOFF":
4378 if old_flavor_disk > new_flavor_disk:
4379 raise nvExceptions.BadRequest(
4380 400,
4381 message="Server disk resize failed. Resize to lower disk flavor is not allowed",
4382 )
4383 else:
4384 self.nova.servers.resize(server=vm_id, flavor=new_flavor_id)
4385 vm_state = self.__wait_for_vm(vm_id, "VERIFY_RESIZE")
4386 if vm_state:
4387 instance_resized_status = self.confirm_resize(vm_id)
4388 return instance_resized_status
4389 else:
4390 raise nvExceptions.BadRequest(
4391 409,
4392 message="Cannot 'resize' vm_state is in ERROR",
4393 )
4394
4395 else:
4396 self.logger.debug("ERROR : Instance is not in ACTIVE or SHUTOFF state")
4397 raise nvExceptions.BadRequest(
4398 409,
4399 message="Cannot 'resize' instance while it is in vm_state resized",
4400 )
4401 except (
4402 nvExceptions.BadRequest,
4403 nvExceptions.ClientException,
4404 nvExceptions.NotFound,
4405 ) as e:
4406 self._format_exception(e)
4407
4408 def confirm_resize(self, vm_id):
4409 """
4410 Confirm the resize of an instance
4411 param:
4412 vm_id: ID of an instance
4413 """
4414 self._reload_connection()
4415 self.nova.servers.confirm_resize(server=vm_id)
4416 if self.get_vdu_state(vm_id)[0] == "VERIFY_RESIZE":
4417 self.__wait_for_vm(vm_id, "ACTIVE")
4418 instance_status = self.get_vdu_state(vm_id)[0]
4419 return instance_status
4420
4421 def get_monitoring_data(self):
4422 try:
4423 self.logger.debug("Getting servers and ports data from Openstack VIMs.")
4424 self._reload_connection()
4425 all_servers = self.nova.servers.list(detailed=True)
4426 all_ports = self.neutron.list_ports()
4427 return all_servers, all_ports
4428 except (
4429 vimconn.VimConnException,
4430 vimconn.VimConnNotFoundException,
4431 vimconn.VimConnConnectionException,
4432 ) as e:
4433 raise vimconn.VimConnException(
4434 f"Exception in monitoring while getting VMs and ports status: {str(e)}"
4435 )