Fix bug 2216: remove hardcoded NUMA affinity in VIO
[osm/RO.git] / RO-VIM-openstack / osm_rovim_openstack / vimconn_openstack.py
1 # -*- coding: utf-8 -*-
2
3 ##
4 # Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
5 # This file is part of openmano
6 # All Rights Reserved.
7 #
8 # Licensed under the Apache License, Version 2.0 (the "License"); you may
9 # not use this file except in compliance with the License. You may obtain
10 # a copy of the License at
11 #
12 # http://www.apache.org/licenses/LICENSE-2.0
13 #
14 # Unless required by applicable law or agreed to in writing, software
15 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
16 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
17 # License for the specific language governing permissions and limitations
18 # under the License.
19 ##
20
21 """
22 osconnector implements all the methods to interact with openstack using the python-neutronclient.
23
24 For the VNF forwarding graph, The OpenStack VIM connector calls the
25 networking-sfc Neutron extension methods, whose resources are mapped
26 to the VIM connector's SFC resources as follows:
27 - Classification (OSM) -> Flow Classifier (Neutron)
28 - Service Function Instance (OSM) -> Port Pair (Neutron)
29 - Service Function (OSM) -> Port Pair Group (Neutron)
30 - Service Function Path (OSM) -> Port Chain (Neutron)
31 """
32
33 import copy
34 from http.client import HTTPException
35 import json
36 import logging
37 from pprint import pformat
38 import random
39 import re
40 import time
41 from typing import Dict, List, Optional, Tuple
42
43 from cinderclient import client as cClient
44 from glanceclient import client as glClient
45 import glanceclient.exc as gl1Exceptions
46 from keystoneauth1 import session
47 from keystoneauth1.identity import v2, v3
48 import keystoneclient.exceptions as ksExceptions
49 import keystoneclient.v2_0.client as ksClient_v2
50 import keystoneclient.v3.client as ksClient_v3
51 import netaddr
52 from neutronclient.common import exceptions as neExceptions
53 from neutronclient.neutron import client as neClient
54 from novaclient import client as nClient, exceptions as nvExceptions
55 from osm_ro_plugin import vimconn
56 from requests.exceptions import ConnectionError
57 import yaml
58
__author__ = "Alfonso Tierno, Gerardo Garcia, Pablo Montes, xFlow Research, Igor D.C., Eduardo Sousa"
__date__ = "$22-sep-2017 23:59:59$"

"""contain the openstack virtual machine status to openmano status"""
# Mapping of OpenStack server (VM) states to OSM/openmano states.
vmStatus2manoFormat = {
    "ACTIVE": "ACTIVE",
    "PAUSED": "PAUSED",
    "SUSPENDED": "SUSPENDED",
    "SHUTOFF": "INACTIVE",
    "BUILD": "BUILD",
    "ERROR": "ERROR",
    "DELETED": "DELETED",
}
# Mapping of OpenStack network states to OSM/openmano states.
netStatus2manoFormat = {
    "ACTIVE": "ACTIVE",
    "PAUSED": "PAUSED",
    "INACTIVE": "INACTIVE",
    "BUILD": "BUILD",
    "ERROR": "ERROR",
    "DELETED": "DELETED",
}

# SFC classification types this connector can map to Neutron flow classifiers.
supportedClassificationTypes = ["legacy_flow_classifier"]

# global var to have a timeout creating and deleting volumes
volume_timeout = 1800
# timeout (seconds) waiting for a server to reach the expected state
server_timeout = 1800
86
87
class SafeDumper(yaml.SafeDumper):
    """YAML safe dumper tolerant of the dict subclasses returned by OpenStack APIs."""

    def represent_data(self, data):
        # PyYAML's safe dumper refuses custom subclasses of dict (see pyyaml
        # issue 142), so downcast any dict-like object to a plain dict first.
        if isinstance(data, dict) and data.__class__ is not dict:
            data = dict(data.items())

        return super().represent_data(data)
97
98
99 class vimconnector(vimconn.VimConnector):
100 def __init__(
101 self,
102 uuid,
103 name,
104 tenant_id,
105 tenant_name,
106 url,
107 url_admin=None,
108 user=None,
109 passwd=None,
110 log_level=None,
111 config={},
112 persistent_info={},
113 ):
114 """using common constructor parameters. In this case
115 'url' is the keystone authorization url,
116 'url_admin' is not use
117 """
118 api_version = config.get("APIversion")
119
120 if api_version and api_version not in ("v3.3", "v2.0", "2", "3"):
121 raise vimconn.VimConnException(
122 "Invalid value '{}' for config:APIversion. "
123 "Allowed values are 'v3.3', 'v2.0', '2' or '3'".format(api_version)
124 )
125
126 vim_type = config.get("vim_type")
127
128 if vim_type and vim_type not in ("vio", "VIO"):
129 raise vimconn.VimConnException(
130 "Invalid value '{}' for config:vim_type."
131 "Allowed values are 'vio' or 'VIO'".format(vim_type)
132 )
133
134 if config.get("dataplane_net_vlan_range") is not None:
135 # validate vlan ranges provided by user
136 self._validate_vlan_ranges(
137 config.get("dataplane_net_vlan_range"), "dataplane_net_vlan_range"
138 )
139
140 if config.get("multisegment_vlan_range") is not None:
141 # validate vlan ranges provided by user
142 self._validate_vlan_ranges(
143 config.get("multisegment_vlan_range"), "multisegment_vlan_range"
144 )
145
146 vimconn.VimConnector.__init__(
147 self,
148 uuid,
149 name,
150 tenant_id,
151 tenant_name,
152 url,
153 url_admin,
154 user,
155 passwd,
156 log_level,
157 config,
158 )
159
160 if self.config.get("insecure") and self.config.get("ca_cert"):
161 raise vimconn.VimConnException(
162 "options insecure and ca_cert are mutually exclusive"
163 )
164
165 self.verify = True
166
167 if self.config.get("insecure"):
168 self.verify = False
169
170 if self.config.get("ca_cert"):
171 self.verify = self.config.get("ca_cert")
172
173 if not url:
174 raise TypeError("url param can not be NoneType")
175
176 self.persistent_info = persistent_info
177 self.availability_zone = persistent_info.get("availability_zone", None)
178 self.session = persistent_info.get("session", {"reload_client": True})
179 self.my_tenant_id = self.session.get("my_tenant_id")
180 self.nova = self.session.get("nova")
181 self.neutron = self.session.get("neutron")
182 self.cinder = self.session.get("cinder")
183 self.glance = self.session.get("glance")
184 # self.glancev1 = self.session.get("glancev1")
185 self.keystone = self.session.get("keystone")
186 self.api_version3 = self.session.get("api_version3")
187 self.vim_type = self.config.get("vim_type")
188
189 if self.vim_type:
190 self.vim_type = self.vim_type.upper()
191
192 if self.config.get("use_internal_endpoint"):
193 self.endpoint_type = "internalURL"
194 else:
195 self.endpoint_type = None
196
197 logging.getLogger("urllib3").setLevel(logging.WARNING)
198 logging.getLogger("keystoneauth").setLevel(logging.WARNING)
199 logging.getLogger("novaclient").setLevel(logging.WARNING)
200 self.logger = logging.getLogger("ro.vim.openstack")
201
202 # allow security_groups to be a list or a single string
203 if isinstance(self.config.get("security_groups"), str):
204 self.config["security_groups"] = [self.config["security_groups"]]
205
206 self.security_groups_id = None
207
208 # ###### VIO Specific Changes #########
209 if self.vim_type == "VIO":
210 self.logger = logging.getLogger("ro.vim.vio")
211
212 if log_level:
213 self.logger.setLevel(getattr(logging, log_level))
214
215 def __getitem__(self, index):
216 """Get individuals parameters.
217 Throw KeyError"""
218 if index == "project_domain_id":
219 return self.config.get("project_domain_id")
220 elif index == "user_domain_id":
221 return self.config.get("user_domain_id")
222 else:
223 return vimconn.VimConnector.__getitem__(self, index)
224
225 def __setitem__(self, index, value):
226 """Set individuals parameters and it is marked as dirty so to force connection reload.
227 Throw KeyError"""
228 if index == "project_domain_id":
229 self.config["project_domain_id"] = value
230 elif index == "user_domain_id":
231 self.config["user_domain_id"] = value
232 else:
233 vimconn.VimConnector.__setitem__(self, index, value)
234
235 self.session["reload_client"] = True
236
237 def serialize(self, value):
238 """Serialization of python basic types.
239
240 In the case value is not serializable a message will be logged and a
241 simple representation of the data that cannot be converted back to
242 python is returned.
243 """
244 if isinstance(value, str):
245 return value
246
247 try:
248 return yaml.dump(
249 value, Dumper=SafeDumper, default_flow_style=True, width=256
250 )
251 except yaml.representer.RepresenterError:
252 self.logger.debug(
253 "The following entity cannot be serialized in YAML:\n\n%s\n\n",
254 pformat(value),
255 exc_info=True,
256 )
257
258 return str(value)
259
    def _reload_connection(self):
        """Called before any operation, it check if credentials has changed
        Throw keystoneclient.apiclient.exceptions.AuthorizationFailure

        Rebuilds the keystone/nova/neutron/cinder/glance clients and caches
        them (plus the tenant id and availability zones) in self.session and
        self.persistent_info whenever self.session["reload_client"] is set.
        """
        # TODO control the timing and possible token timeout, but it seams that python client does this task for us :-)
        if self.session["reload_client"]:
            # Decide keystone API version: explicit config wins, otherwise it
            # is inferred from the auth URL suffix.
            if self.config.get("APIversion"):
                self.api_version3 = (
                    self.config["APIversion"] == "v3.3"
                    or self.config["APIversion"] == "3"
                )
            else:  # get from ending auth_url that end with v3 or with v2.0
                self.api_version3 = self.url.endswith("/v3") or self.url.endswith(
                    "/v3/"
                )

            self.session["api_version3"] = self.api_version3

            if self.api_version3:
                # Fall back to the "default" domain only when the user supplied
                # neither an id nor a name for that domain kind.
                if self.config.get("project_domain_id") or self.config.get(
                    "project_domain_name"
                ):
                    project_domain_id_default = None
                else:
                    project_domain_id_default = "default"

                if self.config.get("user_domain_id") or self.config.get(
                    "user_domain_name"
                ):
                    user_domain_id_default = None
                else:
                    user_domain_id_default = "default"
                auth = v3.Password(
                    auth_url=self.url,
                    username=self.user,
                    password=self.passwd,
                    project_name=self.tenant_name,
                    project_id=self.tenant_id,
                    project_domain_id=self.config.get(
                        "project_domain_id", project_domain_id_default
                    ),
                    user_domain_id=self.config.get(
                        "user_domain_id", user_domain_id_default
                    ),
                    project_domain_name=self.config.get("project_domain_name"),
                    user_domain_name=self.config.get("user_domain_name"),
                )
            else:
                auth = v2.Password(
                    auth_url=self.url,
                    username=self.user,
                    password=self.passwd,
                    tenant_name=self.tenant_name,
                    tenant_id=self.tenant_id,
                )

            # self.verify was set in __init__ (True, False or a CA bundle path).
            sess = session.Session(auth=auth, verify=self.verify)
            # addedd region_name to keystone, nova, neutron and cinder to support distributed cloud for Wind River
            # Titanium cloud and StarlingX
            region_name = self.config.get("region_name")

            if self.api_version3:
                self.keystone = ksClient_v3.Client(
                    session=sess,
                    endpoint_type=self.endpoint_type,
                    region_name=region_name,
                )
            else:
                self.keystone = ksClient_v2.Client(
                    session=sess, endpoint_type=self.endpoint_type
                )

            self.session["keystone"] = self.keystone
            # In order to enable microversion functionality an explicit microversion must be specified in "config".
            # This implementation approach is due to the warning message in
            # https://developer.openstack.org/api-guide/compute/microversions.html
            # where it is stated that microversion backwards compatibility is not guaranteed and clients should
            # always require an specific microversion.
            # To be able to use "device role tagging" functionality define "microversion: 2.32" in datacenter config
            version = self.config.get("microversion")

            if not version:
                version = "2.1"

            # addedd region_name to keystone, nova, neutron and cinder to support distributed cloud for Wind River
            # Titanium cloud and StarlingX
            self.nova = self.session["nova"] = nClient.Client(
                str(version),
                session=sess,
                endpoint_type=self.endpoint_type,
                region_name=region_name,
            )
            self.neutron = self.session["neutron"] = neClient.Client(
                "2.0",
                session=sess,
                endpoint_type=self.endpoint_type,
                region_name=region_name,
            )
            self.cinder = self.session["cinder"] = cClient.Client(
                2,
                session=sess,
                endpoint_type=self.endpoint_type,
                region_name=region_name,
            )

            try:
                self.my_tenant_id = self.session["my_tenant_id"] = sess.get_project_id()
            except Exception:
                # Best effort: continue without the project id; operations that
                # need it will fail later with a clearer error.
                self.logger.error("Cannot get project_id from session", exc_info=True)

            if self.endpoint_type == "internalURL":
                # Glance does not take endpoint_type, so resolve the internal
                # endpoint explicitly through the keystone catalog.
                glance_service_id = self.keystone.services.list(name="glance")[0].id
                glance_endpoint = self.keystone.endpoints.list(
                    glance_service_id, interface="internal"
                )[0].url
            else:
                glance_endpoint = None

            self.glance = self.session["glance"] = glClient.Client(
                2, session=sess, endpoint=glance_endpoint
            )
            # using version 1 of glance client in new_image()
            # self.glancev1 = self.session["glancev1"] = glClient.Client("1", session=sess,
            #     endpoint=glance_endpoint)
            self.session["reload_client"] = False
            self.persistent_info["session"] = self.session
            # add availablity zone info inside self.persistent_info
            self._set_availablity_zones()
            self.persistent_info["availability_zone"] = self.availability_zone
            # force to get again security_groups_ids next time they are needed
            self.security_groups_id = None
391
392 def __net_os2mano(self, net_list_dict):
393 """Transform the net openstack format to mano format
394 net_list_dict can be a list of dict or a single dict"""
395 if type(net_list_dict) is dict:
396 net_list_ = (net_list_dict,)
397 elif type(net_list_dict) is list:
398 net_list_ = net_list_dict
399 else:
400 raise TypeError("param net_list_dict must be a list or a dictionary")
401 for net in net_list_:
402 if net.get("provider:network_type") == "vlan":
403 net["type"] = "data"
404 else:
405 net["type"] = "bridge"
406
407 def __classification_os2mano(self, class_list_dict):
408 """Transform the openstack format (Flow Classifier) to mano format
409 (Classification) class_list_dict can be a list of dict or a single dict
410 """
411 if isinstance(class_list_dict, dict):
412 class_list_ = [class_list_dict]
413 elif isinstance(class_list_dict, list):
414 class_list_ = class_list_dict
415 else:
416 raise TypeError("param class_list_dict must be a list or a dictionary")
417 for classification in class_list_:
418 id = classification.pop("id")
419 name = classification.pop("name")
420 description = classification.pop("description")
421 project_id = classification.pop("project_id")
422 tenant_id = classification.pop("tenant_id")
423 original_classification = copy.deepcopy(classification)
424 classification.clear()
425 classification["ctype"] = "legacy_flow_classifier"
426 classification["definition"] = original_classification
427 classification["id"] = id
428 classification["name"] = name
429 classification["description"] = description
430 classification["project_id"] = project_id
431 classification["tenant_id"] = tenant_id
432
433 def __sfi_os2mano(self, sfi_list_dict):
434 """Transform the openstack format (Port Pair) to mano format (SFI)
435 sfi_list_dict can be a list of dict or a single dict
436 """
437 if isinstance(sfi_list_dict, dict):
438 sfi_list_ = [sfi_list_dict]
439 elif isinstance(sfi_list_dict, list):
440 sfi_list_ = sfi_list_dict
441 else:
442 raise TypeError("param sfi_list_dict must be a list or a dictionary")
443
444 for sfi in sfi_list_:
445 sfi["ingress_ports"] = []
446 sfi["egress_ports"] = []
447
448 if sfi.get("ingress"):
449 sfi["ingress_ports"].append(sfi["ingress"])
450
451 if sfi.get("egress"):
452 sfi["egress_ports"].append(sfi["egress"])
453
454 del sfi["ingress"]
455 del sfi["egress"]
456 params = sfi.get("service_function_parameters")
457 sfc_encap = False
458
459 if params:
460 correlation = params.get("correlation")
461
462 if correlation:
463 sfc_encap = True
464
465 sfi["sfc_encap"] = sfc_encap
466 del sfi["service_function_parameters"]
467
468 def __sf_os2mano(self, sf_list_dict):
469 """Transform the openstack format (Port Pair Group) to mano format (SF)
470 sf_list_dict can be a list of dict or a single dict
471 """
472 if isinstance(sf_list_dict, dict):
473 sf_list_ = [sf_list_dict]
474 elif isinstance(sf_list_dict, list):
475 sf_list_ = sf_list_dict
476 else:
477 raise TypeError("param sf_list_dict must be a list or a dictionary")
478
479 for sf in sf_list_:
480 del sf["port_pair_group_parameters"]
481 sf["sfis"] = sf["port_pairs"]
482 del sf["port_pairs"]
483
484 def __sfp_os2mano(self, sfp_list_dict):
485 """Transform the openstack format (Port Chain) to mano format (SFP)
486 sfp_list_dict can be a list of dict or a single dict
487 """
488 if isinstance(sfp_list_dict, dict):
489 sfp_list_ = [sfp_list_dict]
490 elif isinstance(sfp_list_dict, list):
491 sfp_list_ = sfp_list_dict
492 else:
493 raise TypeError("param sfp_list_dict must be a list or a dictionary")
494
495 for sfp in sfp_list_:
496 params = sfp.pop("chain_parameters")
497 sfc_encap = False
498
499 if params:
500 correlation = params.get("correlation")
501
502 if correlation:
503 sfc_encap = True
504
505 sfp["sfc_encap"] = sfc_encap
506 sfp["spi"] = sfp.pop("chain_id")
507 sfp["classifications"] = sfp.pop("flow_classifiers")
508 sfp["service_functions"] = sfp.pop("port_pair_groups")
509
510 # placeholder for now; read TODO note below
511 def _validate_classification(self, type, definition):
512 # only legacy_flow_classifier Type is supported at this point
513 return True
514 # TODO(igordcard): this method should be an abstract method of an
515 # abstract Classification class to be implemented by the specific
516 # Types. Also, abstract vimconnector should call the validation
517 # method before the implemented VIM connectors are called.
518
519 def _format_exception(self, exception):
520 """Transform a keystone, nova, neutron exception into a vimconn exception discovering the cause"""
521 message_error = str(exception)
522 tip = ""
523
524 if isinstance(
525 exception,
526 (
527 neExceptions.NetworkNotFoundClient,
528 nvExceptions.NotFound,
529 ksExceptions.NotFound,
530 gl1Exceptions.HTTPNotFound,
531 ),
532 ):
533 raise vimconn.VimConnNotFoundException(
534 type(exception).__name__ + ": " + message_error
535 )
536 elif isinstance(
537 exception,
538 (
539 HTTPException,
540 gl1Exceptions.HTTPException,
541 gl1Exceptions.CommunicationError,
542 ConnectionError,
543 ksExceptions.ConnectionError,
544 neExceptions.ConnectionFailed,
545 ),
546 ):
547 if type(exception).__name__ == "SSLError":
548 tip = " (maybe option 'insecure' must be added to the VIM)"
549
550 raise vimconn.VimConnConnectionException(
551 "Invalid URL or credentials{}: {}".format(tip, message_error)
552 )
553 elif isinstance(
554 exception,
555 (
556 KeyError,
557 nvExceptions.BadRequest,
558 ksExceptions.BadRequest,
559 ),
560 ):
561 raise vimconn.VimConnException(
562 type(exception).__name__ + ": " + message_error
563 )
564 elif isinstance(
565 exception,
566 (
567 nvExceptions.ClientException,
568 ksExceptions.ClientException,
569 neExceptions.NeutronException,
570 ),
571 ):
572 raise vimconn.VimConnUnexpectedResponse(
573 type(exception).__name__ + ": " + message_error
574 )
575 elif isinstance(exception, nvExceptions.Conflict):
576 raise vimconn.VimConnConflictException(
577 type(exception).__name__ + ": " + message_error
578 )
579 elif isinstance(exception, vimconn.VimConnException):
580 raise exception
581 else: # ()
582 self.logger.error("General Exception " + message_error, exc_info=True)
583
584 raise vimconn.VimConnConnectionException(
585 type(exception).__name__ + ": " + message_error
586 )
587
588 def _get_ids_from_name(self):
589 """
590 Obtain ids from name of tenant and security_groups. Store at self .security_groups_id"
591 :return: None
592 """
593 # get tenant_id if only tenant_name is supplied
594 self._reload_connection()
595
596 if not self.my_tenant_id:
597 raise vimconn.VimConnConnectionException(
598 "Error getting tenant information from name={} id={}".format(
599 self.tenant_name, self.tenant_id
600 )
601 )
602
603 if self.config.get("security_groups") and not self.security_groups_id:
604 # convert from name to id
605 neutron_sg_list = self.neutron.list_security_groups(
606 tenant_id=self.my_tenant_id
607 )["security_groups"]
608
609 self.security_groups_id = []
610 for sg in self.config.get("security_groups"):
611 for neutron_sg in neutron_sg_list:
612 if sg in (neutron_sg["id"], neutron_sg["name"]):
613 self.security_groups_id.append(neutron_sg["id"])
614 break
615 else:
616 self.security_groups_id = None
617
618 raise vimconn.VimConnConnectionException(
619 "Not found security group {} for this tenant".format(sg)
620 )
621
    def check_vim_connectivity(self):
        """Check that the VIM is reachable with the current credentials.

        Performs a cheap read-only request; any connectivity or credential
        problem surfaces as the corresponding vimconn exception.
        """
        # just get network list to check connectivity and credentials
        self.get_network_list(filter_dict={})
625
626 def get_tenant_list(self, filter_dict={}):
627 """Obtain tenants of VIM
628 filter_dict can contain the following keys:
629 name: filter by tenant name
630 id: filter by tenant uuid/id
631 <other VIM specific>
632 Returns the tenant list of dictionaries: [{'name':'<name>, 'id':'<id>, ...}, ...]
633 """
634 self.logger.debug("Getting tenants from VIM filter: '%s'", str(filter_dict))
635
636 try:
637 self._reload_connection()
638
639 if self.api_version3:
640 project_class_list = self.keystone.projects.list(
641 name=filter_dict.get("name")
642 )
643 else:
644 project_class_list = self.keystone.tenants.findall(**filter_dict)
645
646 project_list = []
647
648 for project in project_class_list:
649 if filter_dict.get("id") and filter_dict["id"] != project.id:
650 continue
651
652 project_list.append(project.to_dict())
653
654 return project_list
655 except (
656 ksExceptions.ConnectionError,
657 ksExceptions.ClientException,
658 ConnectionError,
659 ) as e:
660 self._format_exception(e)
661
662 def new_tenant(self, tenant_name, tenant_description):
663 """Adds a new tenant to openstack VIM. Returns the tenant identifier"""
664 self.logger.debug("Adding a new tenant name: %s", tenant_name)
665
666 try:
667 self._reload_connection()
668
669 if self.api_version3:
670 project = self.keystone.projects.create(
671 tenant_name,
672 self.config.get("project_domain_id", "default"),
673 description=tenant_description,
674 is_domain=False,
675 )
676 else:
677 project = self.keystone.tenants.create(tenant_name, tenant_description)
678
679 return project.id
680 except (
681 ksExceptions.ConnectionError,
682 ksExceptions.ClientException,
683 ksExceptions.BadRequest,
684 ConnectionError,
685 ) as e:
686 self._format_exception(e)
687
688 def delete_tenant(self, tenant_id):
689 """Delete a tenant from openstack VIM. Returns the old tenant identifier"""
690 self.logger.debug("Deleting tenant %s from VIM", tenant_id)
691
692 try:
693 self._reload_connection()
694
695 if self.api_version3:
696 self.keystone.projects.delete(tenant_id)
697 else:
698 self.keystone.tenants.delete(tenant_id)
699
700 return tenant_id
701 except (
702 ksExceptions.ConnectionError,
703 ksExceptions.ClientException,
704 ksExceptions.NotFound,
705 ConnectionError,
706 ) as e:
707 self._format_exception(e)
708
    def new_network(
        self,
        net_name,
        net_type,
        ip_profile=None,
        shared=False,
        provider_network_profile=None,
    ):
        """Adds a tenant network to VIM
        Params:
            'net_name': name of the network
            'net_type': one of:
                'bridge': overlay isolated network
                'data':   underlay E-LAN network for Passthrough and SRIOV interfaces
                'ptp':    underlay E-LINE network for Passthrough and SRIOV interfaces.
            'ip_profile': is a dict containing the IP parameters of the network
                'ip_version': can be "IPv4" or "IPv6" (Currently only IPv4 is implemented)
                'subnet_address': ip_prefix_schema, that is X.X.X.X/Y
                'gateway_address': (Optional) ip_schema, that is X.X.X.X
                'dns_address': (Optional) comma separated list of ip_schema, e.g. X.X.X.X[,X,X,X,X]
                'dhcp_enabled': True or False
                'dhcp_start_address': ip_schema, first IP to grant
                'dhcp_count': number of IPs to grant.
            'shared': if this network can be seen/use by other tenants/organization
            'provider_network_profile': (optional) contains {segmentation-id: vlan, network-type: vlan|vxlan,
                                                             physical-network: physnet-label}
        Returns a tuple with the network identifier and created_items, or raises an exception on error
            created_items can be None or a dictionary where this method can include key-values that will be passed to
            the method delete_network. Can be used to store created segments, created l2gw connections, etc.
            Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
            as not present.
        """
        self.logger.debug(
            "Adding a new network to VIM name '%s', type '%s'", net_name, net_type
        )
        # self.logger.debug(">>>>>>>>>>>>>>>>>> IP profile %s", str(ip_profile))

        try:
            vlan = None

            if provider_network_profile:
                vlan = provider_network_profile.get("segmentation-id")

            # NOTE(review): new_net/created_items are referenced by the except
            # handler below; an exception raised before these two assignments
            # would trigger a NameError there — confirm that is acceptable.
            new_net = None
            created_items = {}
            self._reload_connection()
            network_dict = {"name": net_name, "admin_state_up": True}

            # Underlay networks (and any explicit provider profile) need
            # provider:* attributes.
            if net_type in ("data", "ptp") or provider_network_profile:
                provider_physical_network = None

                if provider_network_profile and provider_network_profile.get(
                    "physical-network"
                ):
                    provider_physical_network = provider_network_profile.get(
                        "physical-network"
                    )

                # provider-network must be one of the dataplane_physical_network if this is a list. If it is string
                # or not declared, just ignore the checking
                if (
                    isinstance(
                        self.config.get("dataplane_physical_net"), (tuple, list)
                    )
                    and provider_physical_network
                    not in self.config["dataplane_physical_net"]
                ):
                    raise vimconn.VimConnConflictException(
                        "Invalid parameter 'provider-network:physical-network' "
                        "for network creation. '{}' is not one of the declared "
                        "list at VIM_config:dataplane_physical_net".format(
                            provider_physical_network
                        )
                    )

                # use the default dataplane_physical_net
                if not provider_physical_network:
                    provider_physical_network = self.config.get(
                        "dataplane_physical_net"
                    )

                    # if it is non empty list, use the first value. If it is a string use the value directly
                    if (
                        isinstance(provider_physical_network, (tuple, list))
                        and provider_physical_network
                    ):
                        provider_physical_network = provider_physical_network[0]

                if not provider_physical_network:
                    raise vimconn.VimConnConflictException(
                        "missing information needed for underlay networks. Provide "
                        "'dataplane_physical_net' configuration at VIM or use the NS "
                        "instantiation parameter 'provider-network.physical-network'"
                        " for the VLD"
                    )

                if not self.config.get("multisegment_support"):
                    # Single-segment case: set the provider attributes directly
                    # on the network.
                    network_dict[
                        "provider:physical_network"
                    ] = provider_physical_network

                    if (
                        provider_network_profile
                        and "network-type" in provider_network_profile
                    ):
                        network_dict[
                            "provider:network_type"
                        ] = provider_network_profile["network-type"]
                    else:
                        network_dict["provider:network_type"] = self.config.get(
                            "dataplane_network_type", "vlan"
                        )

                    if vlan:
                        network_dict["provider:segmentation_id"] = vlan
                else:
                    # Multi-segment case: one vxlan segment plus one vlan segment
                    # bound to the physical network.
                    segment_list = []
                    segment1_dict = {
                        "provider:physical_network": "",
                        "provider:network_type": "vxlan",
                    }
                    segment_list.append(segment1_dict)
                    segment2_dict = {
                        "provider:physical_network": provider_physical_network,
                        "provider:network_type": "vlan",
                    }

                    if vlan:
                        segment2_dict["provider:segmentation_id"] = vlan
                    elif self.config.get("multisegment_vlan_range"):
                        vlanID = self._generate_multisegment_vlanID()
                        segment2_dict["provider:segmentation_id"] = vlanID

                    # else
                    #     raise vimconn.VimConnConflictException(
                    #         "You must provide "multisegment_vlan_range" at config dict before creating a multisegment
                    #         network")
                    segment_list.append(segment2_dict)
                    network_dict["segments"] = segment_list

                # VIO Specific Changes. It needs a concrete VLAN
                if self.vim_type == "VIO" and vlan is None:
                    if self.config.get("dataplane_net_vlan_range") is None:
                        raise vimconn.VimConnConflictException(
                            "You must provide 'dataplane_net_vlan_range' in format "
                            "[start_ID - end_ID] at VIM_config for creating underlay "
                            "networks"
                        )

                    network_dict["provider:segmentation_id"] = self._generate_vlanID()

            network_dict["shared"] = shared

            if self.config.get("disable_network_port_security"):
                network_dict["port_security_enabled"] = False

            if self.config.get("neutron_availability_zone_hints"):
                hints = self.config.get("neutron_availability_zone_hints")

                # A single hint may be given as a plain string.
                if isinstance(hints, str):
                    hints = [hints]

                network_dict["availability_zone_hints"] = hints

            new_net = self.neutron.create_network({"network": network_dict})
            # print new_net
            # create subnetwork, even if there is no profile

            if not ip_profile:
                ip_profile = {}

            if not ip_profile.get("subnet_address"):
                # Fake subnet is required
                subnet_rand = random.randint(0, 255)
                ip_profile["subnet_address"] = "192.168.{}.0/24".format(subnet_rand)

            if "ip_version" not in ip_profile:
                ip_profile["ip_version"] = "IPv4"

            subnet = {
                "name": net_name + "-subnet",
                "network_id": new_net["network"]["id"],
                "ip_version": 4 if ip_profile["ip_version"] == "IPv4" else 6,
                "cidr": ip_profile["subnet_address"],
            }

            # Gateway should be set to None if not needed. Otherwise openstack assigns one by default
            if ip_profile.get("gateway_address"):
                subnet["gateway_ip"] = ip_profile["gateway_address"]
            else:
                subnet["gateway_ip"] = None

            if ip_profile.get("dns_address"):
                subnet["dns_nameservers"] = ip_profile["dns_address"].split(";")

            if "dhcp_enabled" in ip_profile:
                # Accept both the boolean False and the string "false".
                subnet["enable_dhcp"] = (
                    False
                    if ip_profile["dhcp_enabled"] == "false"
                    or ip_profile["dhcp_enabled"] is False
                    else True
                )

            if ip_profile.get("dhcp_start_address"):
                subnet["allocation_pools"] = []
                subnet["allocation_pools"].append(dict())
                subnet["allocation_pools"][0]["start"] = ip_profile[
                    "dhcp_start_address"
                ]

            if ip_profile.get("dhcp_count"):
                # Pool end = start address + dhcp_count - 1.
                # parts = ip_profile["dhcp_start_address"].split(".")
                # ip_int = (int(parts[0]) << 24) + (int(parts[1]) << 16) + (int(parts[2]) << 8) + int(parts[3])
                ip_int = int(netaddr.IPAddress(ip_profile["dhcp_start_address"]))
                ip_int += ip_profile["dhcp_count"] - 1
                ip_str = str(netaddr.IPAddress(ip_int))
                subnet["allocation_pools"][0]["end"] = ip_str

            # self.logger.debug(">>>>>>>>>>>>>>>>>> Subnet: %s", str(subnet))
            self.neutron.create_subnet({"subnet": subnet})

            if net_type == "data" and self.config.get("multisegment_support"):
                if self.config.get("l2gw_support"):
                    # NOTE(review): vlanID is only bound above when no explicit
                    # segmentation-id was given AND "multisegment_vlan_range" is
                    # configured; in any other combination this reference raises
                    # NameError — confirm the supported configurations.
                    l2gw_list = self.neutron.list_l2_gateways().get("l2_gateways", ())
                    for l2gw in l2gw_list:
                        l2gw_conn = {
                            "l2_gateway_id": l2gw["id"],
                            "network_id": new_net["network"]["id"],
                            "segmentation_id": str(vlanID),
                        }
                        new_l2gw_conn = self.neutron.create_l2_gateway_connection(
                            {"l2_gateway_connection": l2gw_conn}
                        )
                        created_items[
                            "l2gwconn:"
                            + str(new_l2gw_conn["l2_gateway_connection"]["id"])
                        ] = True

            return new_net["network"]["id"], created_items
        except Exception as e:
            # delete l2gw connections (if any) before deleting the network
            for k, v in created_items.items():
                if not v:  # skip already deleted
                    continue

                try:
                    k_item, _, k_id = k.partition(":")

                    if k_item == "l2gwconn":
                        self.neutron.delete_l2_gateway_connection(k_id)
                except Exception as e2:
                    self.logger.error(
                        "Error deleting l2 gateway connection: {}: {}".format(
                            type(e2).__name__, e2
                        )
                    )

            if new_net:
                self.neutron.delete_network(new_net["network"]["id"])

            self._format_exception(e)
971
972 def get_network_list(self, filter_dict={}):
973 """Obtain tenant networks of VIM
974 Filter_dict can be:
975 name: network name
976 id: network uuid
977 shared: boolean
978 tenant_id: tenant
979 admin_state_up: boolean
980 status: 'ACTIVE'
981 Returns the network list of dictionaries
982 """
983 self.logger.debug("Getting network from VIM filter: '%s'", str(filter_dict))
984
985 try:
986 self._reload_connection()
987 filter_dict_os = filter_dict.copy()
988
989 if self.api_version3 and "tenant_id" in filter_dict_os:
990 # TODO check
991 filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")
992
993 net_dict = self.neutron.list_networks(**filter_dict_os)
994 net_list = net_dict["networks"]
995 self.__net_os2mano(net_list)
996
997 return net_list
998 except (
999 neExceptions.ConnectionFailed,
1000 ksExceptions.ClientException,
1001 neExceptions.NeutronException,
1002 ConnectionError,
1003 ) as e:
1004 self._format_exception(e)
1005
1006 def get_network(self, net_id):
1007 """Obtain details of network from VIM
1008 Returns the network information from a network id"""
1009 self.logger.debug(" Getting tenant network %s from VIM", net_id)
1010 filter_dict = {"id": net_id}
1011 net_list = self.get_network_list(filter_dict)
1012
1013 if len(net_list) == 0:
1014 raise vimconn.VimConnNotFoundException(
1015 "Network '{}' not found".format(net_id)
1016 )
1017 elif len(net_list) > 1:
1018 raise vimconn.VimConnConflictException(
1019 "Found more than one network with this criteria"
1020 )
1021
1022 net = net_list[0]
1023 subnets = []
1024 for subnet_id in net.get("subnets", ()):
1025 try:
1026 subnet = self.neutron.show_subnet(subnet_id)
1027 except Exception as e:
1028 self.logger.error(
1029 "osconnector.get_network(): Error getting subnet %s %s"
1030 % (net_id, str(e))
1031 )
1032 subnet = {"id": subnet_id, "fault": str(e)}
1033
1034 subnets.append(subnet)
1035
1036 net["subnets"] = subnets
1037 net["encapsulation"] = net.get("provider:network_type")
1038 net["encapsulation_type"] = net.get("provider:network_type")
1039 net["segmentation_id"] = net.get("provider:segmentation_id")
1040 net["encapsulation_id"] = net.get("provider:segmentation_id")
1041
1042 return net
1043
1044 def delete_network(self, net_id, created_items=None):
1045 """
1046 Removes a tenant network from VIM and its associated elements
1047 :param net_id: VIM identifier of the network, provided by method new_network
1048 :param created_items: dictionary with extra items to be deleted. provided by method new_network
1049 Returns the network identifier or raises an exception upon error or when network is not found
1050 """
1051 self.logger.debug("Deleting network '%s' from VIM", net_id)
1052
1053 if created_items is None:
1054 created_items = {}
1055
1056 try:
1057 self._reload_connection()
1058 # delete l2gw connections (if any) before deleting the network
1059 for k, v in created_items.items():
1060 if not v: # skip already deleted
1061 continue
1062
1063 try:
1064 k_item, _, k_id = k.partition(":")
1065 if k_item == "l2gwconn":
1066 self.neutron.delete_l2_gateway_connection(k_id)
1067 except Exception as e:
1068 self.logger.error(
1069 "Error deleting l2 gateway connection: {}: {}".format(
1070 type(e).__name__, e
1071 )
1072 )
1073
1074 # delete VM ports attached to this networks before the network
1075 ports = self.neutron.list_ports(network_id=net_id)
1076 for p in ports["ports"]:
1077 try:
1078 self.neutron.delete_port(p["id"])
1079 except Exception as e:
1080 self.logger.error("Error deleting port %s: %s", p["id"], str(e))
1081
1082 self.neutron.delete_network(net_id)
1083
1084 return net_id
1085 except (
1086 neExceptions.ConnectionFailed,
1087 neExceptions.NetworkNotFoundClient,
1088 neExceptions.NeutronException,
1089 ksExceptions.ClientException,
1090 neExceptions.NeutronException,
1091 ConnectionError,
1092 ) as e:
1093 self._format_exception(e)
1094
1095 def refresh_nets_status(self, net_list):
1096 """Get the status of the networks
1097 Params: the list of network identifiers
1098 Returns a dictionary with:
1099 net_id: #VIM id of this network
1100 status: #Mandatory. Text with one of:
1101 # DELETED (not found at vim)
1102 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
1103 # OTHER (Vim reported other status not understood)
1104 # ERROR (VIM indicates an ERROR status)
1105 # ACTIVE, INACTIVE, DOWN (admin down),
1106 # BUILD (on building process)
1107 #
1108 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
1109 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
1110 """
1111 net_dict = {}
1112
1113 for net_id in net_list:
1114 net = {}
1115
1116 try:
1117 net_vim = self.get_network(net_id)
1118
1119 if net_vim["status"] in netStatus2manoFormat:
1120 net["status"] = netStatus2manoFormat[net_vim["status"]]
1121 else:
1122 net["status"] = "OTHER"
1123 net["error_msg"] = "VIM status reported " + net_vim["status"]
1124
1125 if net["status"] == "ACTIVE" and not net_vim["admin_state_up"]:
1126 net["status"] = "DOWN"
1127
1128 net["vim_info"] = self.serialize(net_vim)
1129
1130 if net_vim.get("fault"): # TODO
1131 net["error_msg"] = str(net_vim["fault"])
1132 except vimconn.VimConnNotFoundException as e:
1133 self.logger.error("Exception getting net status: %s", str(e))
1134 net["status"] = "DELETED"
1135 net["error_msg"] = str(e)
1136 except vimconn.VimConnException as e:
1137 self.logger.error("Exception getting net status: %s", str(e))
1138 net["status"] = "VIM_ERROR"
1139 net["error_msg"] = str(e)
1140 net_dict[net_id] = net
1141 return net_dict
1142
1143 def get_flavor(self, flavor_id):
1144 """Obtain flavor details from the VIM. Returns the flavor dict details"""
1145 self.logger.debug("Getting flavor '%s'", flavor_id)
1146
1147 try:
1148 self._reload_connection()
1149 flavor = self.nova.flavors.find(id=flavor_id)
1150 # TODO parse input and translate to VIM format (openmano_schemas.new_vminstance_response_schema)
1151
1152 return flavor.to_dict()
1153 except (
1154 nvExceptions.NotFound,
1155 nvExceptions.ClientException,
1156 ksExceptions.ClientException,
1157 ConnectionError,
1158 ) as e:
1159 self._format_exception(e)
1160
    def get_flavor_id_from_data(self, flavor_dict):
        """Obtain flavor id that match the flavor description
        Returns the flavor_id or raises a vimconnNotFoundException
        flavor_dict: contains the required ram, vcpus, disk
        If 'use_existing_flavors' is set to True at config, the closer flavor that provides same or more ram, vcpus
        and disk is returned. Otherwise a flavor with exactly same ram, vcpus and disk is returned or a
        vimconnNotFoundException is raised
        """
        # exact_match=True unless the operator opted into reusing close flavors
        exact_match = False if self.config.get("use_existing_flavors") else True

        try:
            self._reload_connection()
            flavor_candidate_id = None
            # sentinel "worst candidate"; any real flavor that compares lower wins
            flavor_candidate_data = (10000, 10000, 10000)
            # target in (ram, vcpus, disk, ephemeral, swap) order
            flavor_target = (
                flavor_dict["ram"],
                flavor_dict["vcpus"],
                flavor_dict["disk"],
                flavor_dict.get("ephemeral", 0),
                flavor_dict.get("swap", 0),
            )
            # numa=None
            extended = flavor_dict.get("extended", {})
            if extended:
                # TODO
                raise vimconn.VimConnNotFoundException(
                    "Flavor with EPA still not implemented"
                )
            # if len(numas) > 1:
            #     raise vimconn.VimConnNotFoundException("Cannot find any flavor with more than one numa")
            # numa=numas[0]
            # numas = extended.get("numas")
            for flavor in self.nova.flavors.list():
                epa = flavor.get_keys()

                # flavors carrying extra_specs (EPA) cannot be matched against a
                # plain (ram, vcpus, disk) request, so they are skipped
                if epa:
                    continue
                # TODO

                flavor_data = (
                    flavor.ram,
                    flavor.vcpus,
                    flavor.disk,
                    flavor.ephemeral,
                    # some deployments report swap as "" instead of 0
                    flavor.swap if isinstance(flavor.swap, int) else 0,
                )
                if flavor_data == flavor_target:
                    return flavor.id
                elif (
                    not exact_match
                    and flavor_target < flavor_data < flavor_candidate_data
                ):
                    # NOTE(review): tuple comparison is lexicographic, not
                    # element-wise, so "closest" is dominated by ram first; a
                    # flavor could pass this check while offering fewer vcpus
                    # than requested — confirm this is the intended semantics
                    flavor_candidate_id = flavor.id
                    flavor_candidate_data = flavor_data

            if not exact_match and flavor_candidate_id:
                return flavor_candidate_id

            raise vimconn.VimConnNotFoundException(
                "Cannot find any flavor matching '{}'".format(flavor_dict)
            )
        except (
            nvExceptions.NotFound,
            nvExceptions.ClientException,
            ksExceptions.ClientException,
            ConnectionError,
        ) as e:
            self._format_exception(e)
1229
1230 @staticmethod
1231 def process_resource_quota(quota: dict, prefix: str, extra_specs: dict) -> None:
1232 """Process resource quota and fill up extra_specs.
1233 Args:
1234 quota (dict): Keeping the quota of resurces
1235 prefix (str) Prefix
1236 extra_specs (dict) Dict to be filled to be used during flavor creation
1237
1238 """
1239 if "limit" in quota:
1240 extra_specs["quota:" + prefix + "_limit"] = quota["limit"]
1241
1242 if "reserve" in quota:
1243 extra_specs["quota:" + prefix + "_reservation"] = quota["reserve"]
1244
1245 if "shares" in quota:
1246 extra_specs["quota:" + prefix + "_shares_level"] = "custom"
1247 extra_specs["quota:" + prefix + "_shares_share"] = quota["shares"]
1248
1249 @staticmethod
1250 def process_numa_memory(
1251 numa: dict, node_id: Optional[int], extra_specs: dict
1252 ) -> None:
1253 """Set the memory in extra_specs.
1254 Args:
1255 numa (dict): A dictionary which includes numa information
1256 node_id (int): ID of numa node
1257 extra_specs (dict): To be filled.
1258
1259 """
1260 if not numa.get("memory"):
1261 return
1262 memory_mb = numa["memory"] * 1024
1263 memory = "hw:numa_mem.{}".format(node_id)
1264 extra_specs[memory] = int(memory_mb)
1265
1266 @staticmethod
1267 def process_numa_vcpu(numa: dict, node_id: int, extra_specs: dict) -> None:
1268 """Set the cpu in extra_specs.
1269 Args:
1270 numa (dict): A dictionary which includes numa information
1271 node_id (int): ID of numa node
1272 extra_specs (dict): To be filled.
1273
1274 """
1275 if not numa.get("vcpu"):
1276 return
1277 vcpu = numa["vcpu"]
1278 cpu = "hw:numa_cpus.{}".format(node_id)
1279 vcpu = ",".join(map(str, vcpu))
1280 extra_specs[cpu] = vcpu
1281
1282 @staticmethod
1283 def process_numa_paired_threads(numa: dict, extra_specs: dict) -> Optional[int]:
1284 """Fill up extra_specs if numa has paired-threads.
1285 Args:
1286 numa (dict): A dictionary which includes numa information
1287 extra_specs (dict): To be filled.
1288
1289 Returns:
1290 threads (int) Number of virtual cpus
1291
1292 """
1293 if not numa.get("paired-threads"):
1294 return
1295
1296 # cpu_thread_policy "require" implies that compute node must have an STM architecture
1297 threads = numa["paired-threads"] * 2
1298 extra_specs["hw:cpu_thread_policy"] = "require"
1299 extra_specs["hw:cpu_policy"] = "dedicated"
1300 return threads
1301
1302 @staticmethod
1303 def process_numa_cores(numa: dict, extra_specs: dict) -> Optional[int]:
1304 """Fill up extra_specs if numa has cores.
1305 Args:
1306 numa (dict): A dictionary which includes numa information
1307 extra_specs (dict): To be filled.
1308
1309 Returns:
1310 cores (int) Number of virtual cpus
1311
1312 """
1313 # cpu_thread_policy "isolate" implies that the host must not have an SMT
1314 # architecture, or a non-SMT architecture will be emulated
1315 if not numa.get("cores"):
1316 return
1317 cores = numa["cores"]
1318 extra_specs["hw:cpu_thread_policy"] = "isolate"
1319 extra_specs["hw:cpu_policy"] = "dedicated"
1320 return cores
1321
1322 @staticmethod
1323 def process_numa_threads(numa: dict, extra_specs: dict) -> Optional[int]:
1324 """Fill up extra_specs if numa has threads.
1325 Args:
1326 numa (dict): A dictionary which includes numa information
1327 extra_specs (dict): To be filled.
1328
1329 Returns:
1330 threads (int) Number of virtual cpus
1331
1332 """
1333 # cpu_thread_policy "prefer" implies that the host may or may not have an SMT architecture
1334 if not numa.get("threads"):
1335 return
1336 threads = numa["threads"]
1337 extra_specs["hw:cpu_thread_policy"] = "prefer"
1338 extra_specs["hw:cpu_policy"] = "dedicated"
1339 return threads
1340
1341 def _process_numa_parameters_of_flavor(
1342 self, numas: List, extra_specs: Dict
1343 ) -> None:
1344 """Process numa parameters and fill up extra_specs.
1345
1346 Args:
1347 numas (list): List of dictionary which includes numa information
1348 extra_specs (dict): To be filled.
1349
1350 """
1351 numa_nodes = len(numas)
1352 extra_specs["hw:numa_nodes"] = str(numa_nodes)
1353 cpu_cores, cpu_threads = 0, 0
1354
1355 if self.vim_type == "VIO":
1356 self.process_vio_numa_nodes(numa_nodes, extra_specs)
1357
1358 for numa in numas:
1359 if "id" in numa:
1360 node_id = numa["id"]
1361 # overwrite ram and vcpus
1362 # check if key "memory" is present in numa else use ram value at flavor
1363 self.process_numa_memory(numa, node_id, extra_specs)
1364 self.process_numa_vcpu(numa, node_id, extra_specs)
1365
1366 # See for reference: https://specs.openstack.org/openstack/nova-specs/specs/mitaka/implemented/virt-driver-cpu-thread-pinning.html
1367 extra_specs["hw:cpu_sockets"] = str(numa_nodes)
1368
1369 if "paired-threads" in numa:
1370 threads = self.process_numa_paired_threads(numa, extra_specs)
1371 cpu_threads += threads
1372
1373 elif "cores" in numa:
1374 cores = self.process_numa_cores(numa, extra_specs)
1375 cpu_cores += cores
1376
1377 elif "threads" in numa:
1378 threads = self.process_numa_threads(numa, extra_specs)
1379 cpu_threads += threads
1380
1381 if cpu_cores:
1382 extra_specs["hw:cpu_cores"] = str(cpu_cores)
1383 if cpu_threads:
1384 extra_specs["hw:cpu_threads"] = str(cpu_threads)
1385
    @staticmethod
    def process_vio_numa_nodes(numa_nodes: int, extra_specs: Dict) -> None:
        """Update extra_specs for a VIO (VMware Integrated OpenStack) flavor.

        Args:

            numa_nodes (int): Number of numa nodes of the flavor. Currently
                unused by the body; kept for interface compatibility after the
                hardcoded numa affinity setting was removed (bug 2216).
            extra_specs (dict): Extra specs dict to be updated

        """
        # If there are several numas, we do not define specific affinity.
        extra_specs["vmware:latency_sensitivity_level"] = "high"
1398
1399 def _change_flavor_name(
1400 self, name: str, name_suffix: int, flavor_data: dict
1401 ) -> str:
1402 """Change the flavor name if the name already exists.
1403
1404 Args:
1405 name (str): Flavor name to be checked
1406 name_suffix (int): Suffix to be appended to name
1407 flavor_data (dict): Flavor dict
1408
1409 Returns:
1410 name (str): New flavor name to be used
1411
1412 """
1413 # Get used names
1414 fl = self.nova.flavors.list()
1415 fl_names = [f.name for f in fl]
1416
1417 while name in fl_names:
1418 name_suffix += 1
1419 name = flavor_data["name"] + "-" + str(name_suffix)
1420
1421 return name
1422
1423 def _process_extended_config_of_flavor(
1424 self, extended: dict, extra_specs: dict
1425 ) -> None:
1426 """Process the extended dict to fill up extra_specs.
1427 Args:
1428
1429 extended (dict): Keeping the extra specification of flavor
1430 extra_specs (dict) Dict to be filled to be used during flavor creation
1431
1432 """
1433 quotas = {
1434 "cpu-quota": "cpu",
1435 "mem-quota": "memory",
1436 "vif-quota": "vif",
1437 "disk-io-quota": "disk_io",
1438 }
1439
1440 page_sizes = {
1441 "LARGE": "large",
1442 "SMALL": "small",
1443 "SIZE_2MB": "2MB",
1444 "SIZE_1GB": "1GB",
1445 "PREFER_LARGE": "any",
1446 }
1447
1448 policies = {
1449 "cpu-pinning-policy": "hw:cpu_policy",
1450 "cpu-thread-pinning-policy": "hw:cpu_thread_policy",
1451 "mem-policy": "hw:numa_mempolicy",
1452 }
1453
1454 numas = extended.get("numas")
1455 if numas:
1456 self._process_numa_parameters_of_flavor(numas, extra_specs)
1457
1458 for quota, item in quotas.items():
1459 if quota in extended.keys():
1460 self.process_resource_quota(extended.get(quota), item, extra_specs)
1461
1462 # Set the mempage size as specified in the descriptor
1463 if extended.get("mempage-size"):
1464 if extended["mempage-size"] in page_sizes.keys():
1465 extra_specs["hw:mem_page_size"] = page_sizes[extended["mempage-size"]]
1466 else:
1467 # Normally, validations in NBI should not allow to this condition.
1468 self.logger.debug(
1469 "Invalid mempage-size %s. Will be ignored",
1470 extended.get("mempage-size"),
1471 )
1472
1473 for policy, hw_policy in policies.items():
1474 if extended.get(policy):
1475 extra_specs[hw_policy] = extended[policy].lower()
1476
1477 @staticmethod
1478 def _get_flavor_details(flavor_data: dict) -> Tuple:
1479 """Returns the details of flavor
1480 Args:
1481 flavor_data (dict): Dictionary that includes required flavor details
1482
1483 Returns:
1484 ram, vcpus, extra_specs, extended (tuple): Main items of required flavor
1485
1486 """
1487 return (
1488 flavor_data.get("ram", 64),
1489 flavor_data.get("vcpus", 1),
1490 {},
1491 flavor_data.get("extended"),
1492 )
1493
    def new_flavor(self, flavor_data: dict, change_name_if_used: bool = True) -> str:
        """Adds a tenant flavor to openstack VIM.
        if change_name_if_used is True, it will change name in case of conflict,
        because it is not supported name repetition.

        Args:
            flavor_data (dict): Flavor details to be processed
            change_name_if_used (bool): Change name in case of conflict

        Returns:
            flavor_id (str): flavor identifier

        """
        self.logger.debug("Adding flavor '%s'", str(flavor_data))
        retry = 0
        max_retries = 3
        name_suffix = 0

        try:
            name = flavor_data["name"]
            while retry < max_retries:
                retry += 1
                try:
                    self._reload_connection()

                    if change_name_if_used:
                        # pick a free name ("<name>-<n>") if the requested one is taken
                        name = self._change_flavor_name(name, name_suffix, flavor_data)

                    ram, vcpus, extra_specs, extended = self._get_flavor_details(
                        flavor_data
                    )
                    if extended:
                        # EPA/quota/numa settings are expressed as extra_specs
                        self._process_extended_config_of_flavor(extended, extra_specs)

                    # Create flavor

                    new_flavor = self.nova.flavors.create(
                        name=name,
                        ram=ram,
                        vcpus=vcpus,
                        disk=flavor_data.get("disk", 0),
                        ephemeral=flavor_data.get("ephemeral", 0),
                        swap=flavor_data.get("swap", 0),
                        is_public=flavor_data.get("is_public", True),
                    )

                    # Add metadata
                    if extra_specs:
                        new_flavor.set_keys(extra_specs)

                    return new_flavor.id

                except nvExceptions.Conflict as e:
                    # name clash appeared in the meantime: retry (a new name is
                    # chosen on the next iteration) unless attempts are exhausted
                    if change_name_if_used and retry < max_retries:
                        continue

                    self._format_exception(e)

        except (
            ksExceptions.ClientException,
            nvExceptions.ClientException,
            ConnectionError,
            KeyError,
        ) as e:
            self._format_exception(e)
1559
1560 def delete_flavor(self, flavor_id):
1561 """Deletes a tenant flavor from openstack VIM. Returns the old flavor_id"""
1562 try:
1563 self._reload_connection()
1564 self.nova.flavors.delete(flavor_id)
1565
1566 return flavor_id
1567 # except nvExceptions.BadRequest as e:
1568 except (
1569 nvExceptions.NotFound,
1570 ksExceptions.ClientException,
1571 nvExceptions.ClientException,
1572 ConnectionError,
1573 ) as e:
1574 self._format_exception(e)
1575
1576 def new_image(self, image_dict):
1577 """
1578 Adds a tenant image to VIM. imge_dict is a dictionary with:
1579 name: name
1580 disk_format: qcow2, vhd, vmdk, raw (by default), ...
1581 location: path or URI
1582 public: "yes" or "no"
1583 metadata: metadata of the image
1584 Returns the image_id
1585 """
1586 retry = 0
1587 max_retries = 3
1588
1589 while retry < max_retries:
1590 retry += 1
1591 try:
1592 self._reload_connection()
1593
1594 # determine format http://docs.openstack.org/developer/glance/formats.html
1595 if "disk_format" in image_dict:
1596 disk_format = image_dict["disk_format"]
1597 else: # autodiscover based on extension
1598 if image_dict["location"].endswith(".qcow2"):
1599 disk_format = "qcow2"
1600 elif image_dict["location"].endswith(".vhd"):
1601 disk_format = "vhd"
1602 elif image_dict["location"].endswith(".vmdk"):
1603 disk_format = "vmdk"
1604 elif image_dict["location"].endswith(".vdi"):
1605 disk_format = "vdi"
1606 elif image_dict["location"].endswith(".iso"):
1607 disk_format = "iso"
1608 elif image_dict["location"].endswith(".aki"):
1609 disk_format = "aki"
1610 elif image_dict["location"].endswith(".ari"):
1611 disk_format = "ari"
1612 elif image_dict["location"].endswith(".ami"):
1613 disk_format = "ami"
1614 else:
1615 disk_format = "raw"
1616
1617 self.logger.debug(
1618 "new_image: '%s' loading from '%s'",
1619 image_dict["name"],
1620 image_dict["location"],
1621 )
1622 if self.vim_type == "VIO":
1623 container_format = "bare"
1624 if "container_format" in image_dict:
1625 container_format = image_dict["container_format"]
1626
1627 new_image = self.glance.images.create(
1628 name=image_dict["name"],
1629 container_format=container_format,
1630 disk_format=disk_format,
1631 )
1632 else:
1633 new_image = self.glance.images.create(name=image_dict["name"])
1634
1635 if image_dict["location"].startswith("http"):
1636 # TODO there is not a method to direct download. It must be downloaded locally with requests
1637 raise vimconn.VimConnNotImplemented("Cannot create image from URL")
1638 else: # local path
1639 with open(image_dict["location"]) as fimage:
1640 self.glance.images.upload(new_image.id, fimage)
1641 # new_image = self.glancev1.images.create(name=image_dict["name"], is_public=
1642 # image_dict.get("public","yes")=="yes",
1643 # container_format="bare", data=fimage, disk_format=disk_format)
1644
1645 metadata_to_load = image_dict.get("metadata")
1646
1647 # TODO location is a reserved word for current openstack versions. fixed for VIO please check
1648 # for openstack
1649 if self.vim_type == "VIO":
1650 metadata_to_load["upload_location"] = image_dict["location"]
1651 else:
1652 metadata_to_load["location"] = image_dict["location"]
1653
1654 self.glance.images.update(new_image.id, **metadata_to_load)
1655
1656 return new_image.id
1657 except (
1658 nvExceptions.Conflict,
1659 ksExceptions.ClientException,
1660 nvExceptions.ClientException,
1661 ) as e:
1662 self._format_exception(e)
1663 except (
1664 HTTPException,
1665 gl1Exceptions.HTTPException,
1666 gl1Exceptions.CommunicationError,
1667 ConnectionError,
1668 ) as e:
1669 if retry == max_retries:
1670 continue
1671
1672 self._format_exception(e)
1673 except IOError as e: # can not open the file
1674 raise vimconn.VimConnConnectionException(
1675 "{}: {} for {}".format(type(e).__name__, e, image_dict["location"]),
1676 http_code=vimconn.HTTP_Bad_Request,
1677 )
1678
1679 def delete_image(self, image_id):
1680 """Deletes a tenant image from openstack VIM. Returns the old id"""
1681 try:
1682 self._reload_connection()
1683 self.glance.images.delete(image_id)
1684
1685 return image_id
1686 except (
1687 nvExceptions.NotFound,
1688 ksExceptions.ClientException,
1689 nvExceptions.ClientException,
1690 gl1Exceptions.CommunicationError,
1691 gl1Exceptions.HTTPNotFound,
1692 ConnectionError,
1693 ) as e: # TODO remove
1694 self._format_exception(e)
1695
1696 def get_image_id_from_path(self, path):
1697 """Get the image id from image path in the VIM database. Returns the image_id"""
1698 try:
1699 self._reload_connection()
1700 images = self.glance.images.list()
1701
1702 for image in images:
1703 if image.metadata.get("location") == path:
1704 return image.id
1705
1706 raise vimconn.VimConnNotFoundException(
1707 "image with location '{}' not found".format(path)
1708 )
1709 except (
1710 ksExceptions.ClientException,
1711 nvExceptions.ClientException,
1712 gl1Exceptions.CommunicationError,
1713 ConnectionError,
1714 ) as e:
1715 self._format_exception(e)
1716
1717 def get_image_list(self, filter_dict={}):
1718 """Obtain tenant images from VIM
1719 Filter_dict can be:
1720 id: image id
1721 name: image name
1722 checksum: image checksum
1723 Returns the image list of dictionaries:
1724 [{<the fields at Filter_dict plus some VIM specific>}, ...]
1725 List can be empty
1726 """
1727 self.logger.debug("Getting image list from VIM filter: '%s'", str(filter_dict))
1728
1729 try:
1730 self._reload_connection()
1731 # filter_dict_os = filter_dict.copy()
1732 # First we filter by the available filter fields: name, id. The others are removed.
1733 image_list = self.glance.images.list()
1734 filtered_list = []
1735
1736 for image in image_list:
1737 try:
1738 if filter_dict.get("name") and image["name"] != filter_dict["name"]:
1739 continue
1740
1741 if filter_dict.get("id") and image["id"] != filter_dict["id"]:
1742 continue
1743
1744 if (
1745 filter_dict.get("checksum")
1746 and image["checksum"] != filter_dict["checksum"]
1747 ):
1748 continue
1749
1750 filtered_list.append(image.copy())
1751 except gl1Exceptions.HTTPNotFound:
1752 pass
1753
1754 return filtered_list
1755 except (
1756 ksExceptions.ClientException,
1757 nvExceptions.ClientException,
1758 gl1Exceptions.CommunicationError,
1759 ConnectionError,
1760 ) as e:
1761 self._format_exception(e)
1762
1763 def __wait_for_vm(self, vm_id, status):
1764 """wait until vm is in the desired status and return True.
1765 If the VM gets in ERROR status, return false.
1766 If the timeout is reached generate an exception"""
1767 elapsed_time = 0
1768 while elapsed_time < server_timeout:
1769 vm_status = self.nova.servers.get(vm_id).status
1770
1771 if vm_status == status:
1772 return True
1773
1774 if vm_status == "ERROR":
1775 return False
1776
1777 time.sleep(5)
1778 elapsed_time += 5
1779
1780 # if we exceeded the timeout rollback
1781 if elapsed_time >= server_timeout:
1782 raise vimconn.VimConnException(
1783 "Timeout waiting for instance " + vm_id + " to get " + status,
1784 http_code=vimconn.HTTP_Request_Timeout,
1785 )
1786
1787 def _get_openstack_availablity_zones(self):
1788 """
1789 Get from openstack availability zones available
1790 :return:
1791 """
1792 try:
1793 openstack_availability_zone = self.nova.availability_zones.list()
1794 openstack_availability_zone = [
1795 str(zone.zoneName)
1796 for zone in openstack_availability_zone
1797 if zone.zoneName != "internal"
1798 ]
1799
1800 return openstack_availability_zone
1801 except Exception:
1802 return None
1803
1804 def _set_availablity_zones(self):
1805 """
1806 Set vim availablity zone
1807 :return:
1808 """
1809 if "availability_zone" in self.config:
1810 vim_availability_zones = self.config.get("availability_zone")
1811
1812 if isinstance(vim_availability_zones, str):
1813 self.availability_zone = [vim_availability_zones]
1814 elif isinstance(vim_availability_zones, list):
1815 self.availability_zone = vim_availability_zones
1816 else:
1817 self.availability_zone = self._get_openstack_availablity_zones()
1818
1819 def _get_vm_availability_zone(
1820 self, availability_zone_index, availability_zone_list
1821 ):
1822 """
1823 Return thge availability zone to be used by the created VM.
1824 :return: The VIM availability zone to be used or None
1825 """
1826 if availability_zone_index is None:
1827 if not self.config.get("availability_zone"):
1828 return None
1829 elif isinstance(self.config.get("availability_zone"), str):
1830 return self.config["availability_zone"]
1831 else:
1832 # TODO consider using a different parameter at config for default AV and AV list match
1833 return self.config["availability_zone"][0]
1834
1835 vim_availability_zones = self.availability_zone
1836 # check if VIM offer enough availability zones describe in the VNFD
1837 if vim_availability_zones and len(availability_zone_list) <= len(
1838 vim_availability_zones
1839 ):
1840 # check if all the names of NFV AV match VIM AV names
1841 match_by_index = False
1842 for av in availability_zone_list:
1843 if av not in vim_availability_zones:
1844 match_by_index = True
1845 break
1846
1847 if match_by_index:
1848 return vim_availability_zones[availability_zone_index]
1849 else:
1850 return availability_zone_list[availability_zone_index]
1851 else:
1852 raise vimconn.VimConnConflictException(
1853 "No enough availability zones at VIM for this deployment"
1854 )
1855
1856 def _prepare_port_dict_security_groups(self, net: dict, port_dict: dict) -> None:
1857 """Fill up the security_groups in the port_dict.
1858
1859 Args:
1860 net (dict): Network details
1861 port_dict (dict): Port details
1862
1863 """
1864 if (
1865 self.config.get("security_groups")
1866 and net.get("port_security") is not False
1867 and not self.config.get("no_port_security_extension")
1868 ):
1869 if not self.security_groups_id:
1870 self._get_ids_from_name()
1871
1872 port_dict["security_groups"] = self.security_groups_id
1873
1874 def _prepare_port_dict_binding(self, net: dict, port_dict: dict) -> None:
1875 """Fill up the network binding depending on network type in the port_dict.
1876
1877 Args:
1878 net (dict): Network details
1879 port_dict (dict): Port details
1880
1881 """
1882 if not net.get("type"):
1883 raise vimconn.VimConnException("Type is missing in the network details.")
1884
1885 if net["type"] == "virtual":
1886 pass
1887
1888 # For VF
1889 elif net["type"] == "VF" or net["type"] == "SR-IOV":
1890 port_dict["binding:vnic_type"] = "direct"
1891
1892 # VIO specific Changes
1893 if self.vim_type == "VIO":
1894 # Need to create port with port_security_enabled = False and no-security-groups
1895 port_dict["port_security_enabled"] = False
1896 port_dict["provider_security_groups"] = []
1897 port_dict["security_groups"] = []
1898
1899 else:
1900 # For PT PCI-PASSTHROUGH
1901 port_dict["binding:vnic_type"] = "direct-physical"
1902
1903 @staticmethod
1904 def _set_fixed_ip(new_port: dict, net: dict) -> None:
1905 """Set the "ip" parameter in net dictionary.
1906
1907 Args:
1908 new_port (dict): New created port
1909 net (dict): Network details
1910
1911 """
1912 fixed_ips = new_port["port"].get("fixed_ips")
1913
1914 if fixed_ips:
1915 net["ip"] = fixed_ips[0].get("ip_address")
1916 else:
1917 net["ip"] = None
1918
1919 @staticmethod
1920 def _prepare_port_dict_mac_ip_addr(net: dict, port_dict: dict) -> None:
1921 """Fill up the mac_address and fixed_ips in port_dict.
1922
1923 Args:
1924 net (dict): Network details
1925 port_dict (dict): Port details
1926
1927 """
1928 if net.get("mac_address"):
1929 port_dict["mac_address"] = net["mac_address"]
1930
1931 if net.get("ip_address"):
1932 port_dict["fixed_ips"] = [{"ip_address": net["ip_address"]}]
1933 # TODO add "subnet_id": <subnet_id>
1934
1935 def _create_new_port(self, port_dict: dict, created_items: dict, net: dict) -> Dict:
1936 """Create new port using neutron.
1937
1938 Args:
1939 port_dict (dict): Port details
1940 created_items (dict): All created items
1941 net (dict): Network details
1942
1943 Returns:
1944 new_port (dict): New created port
1945
1946 """
1947 new_port = self.neutron.create_port({"port": port_dict})
1948 created_items["port:" + str(new_port["port"]["id"])] = True
1949 net["mac_adress"] = new_port["port"]["mac_address"]
1950 net["vim_id"] = new_port["port"]["id"]
1951
1952 return new_port
1953
1954 def _create_port(
1955 self, net: dict, name: str, created_items: dict
1956 ) -> Tuple[dict, dict]:
1957 """Create port using net details.
1958
1959 Args:
1960 net (dict): Network details
1961 name (str): Name to be used as network name if net dict does not include name
1962 created_items (dict): All created items
1963
1964 Returns:
1965 new_port, port New created port, port dictionary
1966
1967 """
1968
1969 port_dict = {
1970 "network_id": net["net_id"],
1971 "name": net.get("name"),
1972 "admin_state_up": True,
1973 }
1974
1975 if not port_dict["name"]:
1976 port_dict["name"] = name
1977
1978 self._prepare_port_dict_security_groups(net, port_dict)
1979
1980 self._prepare_port_dict_binding(net, port_dict)
1981
1982 vimconnector._prepare_port_dict_mac_ip_addr(net, port_dict)
1983
1984 new_port = self._create_new_port(port_dict, created_items, net)
1985
1986 vimconnector._set_fixed_ip(new_port, net)
1987
1988 port = {"port-id": new_port["port"]["id"]}
1989
1990 if float(self.nova.api_version.get_string()) >= 2.32:
1991 port["tag"] = new_port["port"]["name"]
1992
1993 return new_port, port
1994
1995 def _prepare_network_for_vminstance(
1996 self,
1997 name: str,
1998 net_list: list,
1999 created_items: dict,
2000 net_list_vim: list,
2001 external_network: list,
2002 no_secured_ports: list,
2003 ) -> None:
2004 """Create port and fill up net dictionary for new VM instance creation.
2005
2006 Args:
2007 name (str): Name of network
2008 net_list (list): List of networks
2009 created_items (dict): All created items belongs to a VM
2010 net_list_vim (list): List of ports
2011 external_network (list): List of external-networks
2012 no_secured_ports (list): Port security disabled ports
2013 """
2014
2015 self._reload_connection()
2016
2017 for net in net_list:
2018 # Skip non-connected iface
2019 if not net.get("net_id"):
2020 continue
2021
2022 new_port, port = self._create_port(net, name, created_items)
2023
2024 net_list_vim.append(port)
2025
2026 if net.get("floating_ip", False):
2027 net["exit_on_floating_ip_error"] = True
2028 external_network.append(net)
2029
2030 elif net["use"] == "mgmt" and self.config.get("use_floating_ip"):
2031 net["exit_on_floating_ip_error"] = False
2032 external_network.append(net)
2033 net["floating_ip"] = self.config.get("use_floating_ip")
2034
2035 # If port security is disabled when the port has not yet been attached to the VM, then all vm traffic
2036 # is dropped. As a workaround we wait until the VM is active and then disable the port-security
2037 if net.get("port_security") is False and not self.config.get(
2038 "no_port_security_extension"
2039 ):
2040 no_secured_ports.append(
2041 (
2042 new_port["port"]["id"],
2043 net.get("port_security_disable_strategy"),
2044 )
2045 )
2046
2047 def _prepare_persistent_root_volumes(
2048 self,
2049 name: str,
2050 vm_av_zone: list,
2051 disk: dict,
2052 base_disk_index: int,
2053 block_device_mapping: dict,
2054 existing_vim_volumes: list,
2055 created_items: dict,
2056 ) -> Optional[str]:
2057 """Prepare persistent root volumes for new VM instance.
2058
2059 Args:
2060 name (str): Name of VM instance
2061 vm_av_zone (list): List of availability zones
2062 disk (dict): Disk details
2063 base_disk_index (int): Disk index
2064 block_device_mapping (dict): Block device details
2065 existing_vim_volumes (list): Existing disk details
2066 created_items (dict): All created items belongs to VM
2067
2068 Returns:
2069 boot_volume_id (str): ID of boot volume
2070
2071 """
2072 # Disk may include only vim_volume_id or only vim_id."
2073 # Use existing persistent root volume finding with volume_id or vim_id
2074 key_id = "vim_volume_id" if "vim_volume_id" in disk.keys() else "vim_id"
2075
2076 if disk.get(key_id):
2077 block_device_mapping["vd" + chr(base_disk_index)] = disk[key_id]
2078 existing_vim_volumes.append({"id": disk[key_id]})
2079
2080 else:
2081 # Create persistent root volume
2082 volume = self.cinder.volumes.create(
2083 size=disk["size"],
2084 name=name + "vd" + chr(base_disk_index),
2085 imageRef=disk["image_id"],
2086 # Make sure volume is in the same AZ as the VM to be attached to
2087 availability_zone=vm_av_zone,
2088 )
2089 boot_volume_id = volume.id
2090 self.update_block_device_mapping(
2091 volume=volume,
2092 block_device_mapping=block_device_mapping,
2093 base_disk_index=base_disk_index,
2094 disk=disk,
2095 created_items=created_items,
2096 )
2097
2098 return boot_volume_id
2099
2100 @staticmethod
2101 def update_block_device_mapping(
2102 volume: object,
2103 block_device_mapping: dict,
2104 base_disk_index: int,
2105 disk: dict,
2106 created_items: dict,
2107 ) -> None:
2108 """Add volume information to block device mapping dict.
2109 Args:
2110 volume (object): Created volume object
2111 block_device_mapping (dict): Block device details
2112 base_disk_index (int): Disk index
2113 disk (dict): Disk details
2114 created_items (dict): All created items belongs to VM
2115 """
2116 if not volume:
2117 raise vimconn.VimConnException("Volume is empty.")
2118
2119 if not hasattr(volume, "id"):
2120 raise vimconn.VimConnException(
2121 "Created volume is not valid, does not have id attribute."
2122 )
2123
2124 volume_txt = "volume:" + str(volume.id)
2125 if disk.get("keep"):
2126 volume_txt += ":keep"
2127 created_items[volume_txt] = True
2128 block_device_mapping["vd" + chr(base_disk_index)] = volume.id
2129
2130 def _prepare_non_root_persistent_volumes(
2131 self,
2132 name: str,
2133 disk: dict,
2134 vm_av_zone: list,
2135 block_device_mapping: dict,
2136 base_disk_index: int,
2137 existing_vim_volumes: list,
2138 created_items: dict,
2139 ) -> None:
2140 """Prepare persistent volumes for new VM instance.
2141
2142 Args:
2143 name (str): Name of VM instance
2144 disk (dict): Disk details
2145 vm_av_zone (list): List of availability zones
2146 block_device_mapping (dict): Block device details
2147 base_disk_index (int): Disk index
2148 existing_vim_volumes (list): Existing disk details
2149 created_items (dict): All created items belongs to VM
2150 """
2151 # Non-root persistent volumes
2152 # Disk may include only vim_volume_id or only vim_id."
2153 key_id = "vim_volume_id" if "vim_volume_id" in disk.keys() else "vim_id"
2154
2155 if disk.get(key_id):
2156 # Use existing persistent volume
2157 block_device_mapping["vd" + chr(base_disk_index)] = disk[key_id]
2158 existing_vim_volumes.append({"id": disk[key_id]})
2159
2160 else:
2161 # Create persistent volume
2162 volume = self.cinder.volumes.create(
2163 size=disk["size"],
2164 name=name + "vd" + chr(base_disk_index),
2165 # Make sure volume is in the same AZ as the VM to be attached to
2166 availability_zone=vm_av_zone,
2167 )
2168 self.update_block_device_mapping(
2169 volume=volume,
2170 block_device_mapping=block_device_mapping,
2171 base_disk_index=base_disk_index,
2172 disk=disk,
2173 created_items=created_items,
2174 )
2175
2176 def _wait_for_created_volumes_availability(
2177 self, elapsed_time: int, created_items: dict
2178 ) -> Optional[int]:
2179 """Wait till created volumes become available.
2180
2181 Args:
2182 elapsed_time (int): Passed time while waiting
2183 created_items (dict): All created items belongs to VM
2184
2185 Returns:
2186 elapsed_time (int): Time spent while waiting
2187
2188 """
2189
2190 while elapsed_time < volume_timeout:
2191 for created_item in created_items:
2192 v, volume_id = (
2193 created_item.split(":")[0],
2194 created_item.split(":")[1],
2195 )
2196 if v == "volume":
2197 if self.cinder.volumes.get(volume_id).status != "available":
2198 break
2199 else:
2200 # All ready: break from while
2201 break
2202
2203 time.sleep(5)
2204 elapsed_time += 5
2205
2206 return elapsed_time
2207
2208 def _wait_for_existing_volumes_availability(
2209 self, elapsed_time: int, existing_vim_volumes: list
2210 ) -> Optional[int]:
2211 """Wait till existing volumes become available.
2212
2213 Args:
2214 elapsed_time (int): Passed time while waiting
2215 existing_vim_volumes (list): Existing volume details
2216
2217 Returns:
2218 elapsed_time (int): Time spent while waiting
2219
2220 """
2221
2222 while elapsed_time < volume_timeout:
2223 for volume in existing_vim_volumes:
2224 if self.cinder.volumes.get(volume["id"]).status != "available":
2225 break
2226 else: # all ready: break from while
2227 break
2228
2229 time.sleep(5)
2230 elapsed_time += 5
2231
2232 return elapsed_time
2233
2234 def _prepare_disk_for_vminstance(
2235 self,
2236 name: str,
2237 existing_vim_volumes: list,
2238 created_items: dict,
2239 vm_av_zone: list,
2240 block_device_mapping: dict,
2241 disk_list: list = None,
2242 ) -> None:
2243 """Prepare all volumes for new VM instance.
2244
2245 Args:
2246 name (str): Name of Instance
2247 existing_vim_volumes (list): List of existing volumes
2248 created_items (dict): All created items belongs to VM
2249 vm_av_zone (list): VM availability zone
2250 block_device_mapping (dict): Block devices to be attached to VM
2251 disk_list (list): List of disks
2252
2253 """
2254 # Create additional volumes in case these are present in disk_list
2255 base_disk_index = ord("b")
2256 boot_volume_id = None
2257 elapsed_time = 0
2258
2259 for disk in disk_list:
2260 if "image_id" in disk:
2261 # Root persistent volume
2262 base_disk_index = ord("a")
2263 boot_volume_id = self._prepare_persistent_root_volumes(
2264 name=name,
2265 vm_av_zone=vm_av_zone,
2266 disk=disk,
2267 base_disk_index=base_disk_index,
2268 block_device_mapping=block_device_mapping,
2269 existing_vim_volumes=existing_vim_volumes,
2270 created_items=created_items,
2271 )
2272 else:
2273 # Non-root persistent volume
2274 self._prepare_non_root_persistent_volumes(
2275 name=name,
2276 disk=disk,
2277 vm_av_zone=vm_av_zone,
2278 block_device_mapping=block_device_mapping,
2279 base_disk_index=base_disk_index,
2280 existing_vim_volumes=existing_vim_volumes,
2281 created_items=created_items,
2282 )
2283 base_disk_index += 1
2284
2285 # Wait until created volumes are with status available
2286 elapsed_time = self._wait_for_created_volumes_availability(
2287 elapsed_time, created_items
2288 )
2289 # Wait until existing volumes in vim are with status available
2290 elapsed_time = self._wait_for_existing_volumes_availability(
2291 elapsed_time, existing_vim_volumes
2292 )
2293 # If we exceeded the timeout rollback
2294 if elapsed_time >= volume_timeout:
2295 raise vimconn.VimConnException(
2296 "Timeout creating volumes for instance " + name,
2297 http_code=vimconn.HTTP_Request_Timeout,
2298 )
2299 if boot_volume_id:
2300 self.cinder.volumes.set_bootable(boot_volume_id, True)
2301
2302 def _find_the_external_network_for_floating_ip(self):
2303 """Get the external network ip in order to create floating IP.
2304
2305 Returns:
2306 pool_id (str): External network pool ID
2307
2308 """
2309
2310 # Find the external network
2311 external_nets = list()
2312
2313 for net in self.neutron.list_networks()["networks"]:
2314 if net["router:external"]:
2315 external_nets.append(net)
2316
2317 if len(external_nets) == 0:
2318 raise vimconn.VimConnException(
2319 "Cannot create floating_ip automatically since "
2320 "no external network is present",
2321 http_code=vimconn.HTTP_Conflict,
2322 )
2323
2324 if len(external_nets) > 1:
2325 raise vimconn.VimConnException(
2326 "Cannot create floating_ip automatically since "
2327 "multiple external networks are present",
2328 http_code=vimconn.HTTP_Conflict,
2329 )
2330
2331 # Pool ID
2332 return external_nets[0].get("id")
2333
2334 def _neutron_create_float_ip(self, param: dict, created_items: dict) -> None:
2335 """Trigger neutron to create a new floating IP using external network ID.
2336
2337 Args:
2338 param (dict): Input parameters to create a floating IP
2339 created_items (dict): All created items belongs to new VM instance
2340
2341 Raises:
2342
2343 VimConnException
2344 """
2345 try:
2346 self.logger.debug("Creating floating IP")
2347 new_floating_ip = self.neutron.create_floatingip(param)
2348 free_floating_ip = new_floating_ip["floatingip"]["id"]
2349 created_items["floating_ip:" + str(free_floating_ip)] = True
2350
2351 except Exception as e:
2352 raise vimconn.VimConnException(
2353 type(e).__name__ + ": Cannot create new floating_ip " + str(e),
2354 http_code=vimconn.HTTP_Conflict,
2355 )
2356
2357 def _create_floating_ip(
2358 self, floating_network: dict, server: object, created_items: dict
2359 ) -> None:
2360 """Get the available Pool ID and create a new floating IP.
2361
2362 Args:
2363 floating_network (dict): Dict including external network ID
2364 server (object): Server object
2365 created_items (dict): All created items belongs to new VM instance
2366
2367 """
2368
2369 # Pool_id is available
2370 if (
2371 isinstance(floating_network["floating_ip"], str)
2372 and floating_network["floating_ip"].lower() != "true"
2373 ):
2374 pool_id = floating_network["floating_ip"]
2375
2376 # Find the Pool_id
2377 else:
2378 pool_id = self._find_the_external_network_for_floating_ip()
2379
2380 param = {
2381 "floatingip": {
2382 "floating_network_id": pool_id,
2383 "tenant_id": server.tenant_id,
2384 }
2385 }
2386
2387 self._neutron_create_float_ip(param, created_items)
2388
2389 def _find_floating_ip(
2390 self,
2391 server: object,
2392 floating_ips: list,
2393 floating_network: dict,
2394 ) -> Optional[str]:
2395 """Find the available free floating IPs if there are.
2396
2397 Args:
2398 server (object): Server object
2399 floating_ips (list): List of floating IPs
2400 floating_network (dict): Details of floating network such as ID
2401
2402 Returns:
2403 free_floating_ip (str): Free floating ip address
2404
2405 """
2406 for fip in floating_ips:
2407 if fip.get("port_id") or fip.get("tenant_id") != server.tenant_id:
2408 continue
2409
2410 if isinstance(floating_network["floating_ip"], str):
2411 if fip.get("floating_network_id") != floating_network["floating_ip"]:
2412 continue
2413
2414 return fip["id"]
2415
2416 def _assign_floating_ip(
2417 self, free_floating_ip: str, floating_network: dict
2418 ) -> Dict:
2419 """Assign the free floating ip address to port.
2420
2421 Args:
2422 free_floating_ip (str): Floating IP to be assigned
2423 floating_network (dict): ID of floating network
2424
2425 Returns:
2426 fip (dict) (dict): Floating ip details
2427
2428 """
2429 # The vim_id key contains the neutron.port_id
2430 self.neutron.update_floatingip(
2431 free_floating_ip,
2432 {"floatingip": {"port_id": floating_network["vim_id"]}},
2433 )
2434 # For race condition ensure not re-assigned to other VM after 5 seconds
2435 time.sleep(5)
2436
2437 return self.neutron.show_floatingip(free_floating_ip)
2438
2439 def _get_free_floating_ip(
2440 self, server: object, floating_network: dict
2441 ) -> Optional[str]:
2442 """Get the free floating IP address.
2443
2444 Args:
2445 server (object): Server Object
2446 floating_network (dict): Floating network details
2447
2448 Returns:
2449 free_floating_ip (str): Free floating ip addr
2450
2451 """
2452
2453 floating_ips = self.neutron.list_floatingips().get("floatingips", ())
2454
2455 # Randomize
2456 random.shuffle(floating_ips)
2457
2458 return self._find_floating_ip(server, floating_ips, floating_network)
2459
    def _prepare_external_network_for_vminstance(
        self,
        external_network: list,
        server: object,
        created_items: dict,
        vm_start_time: float,
    ) -> None:
        """Assign floating IP address for VM instance.

        Args:
            external_network (list): ID of External network
            server (object): Server Object
            created_items (dict): All created items belongs to new VM instance
            vm_start_time (float): Time as a floating point number expressed in seconds since the epoch, in UTC

        Raises:
            VimConnException

        """
        for floating_network in external_network:
            try:
                assigned = False
                floating_ip_retries = 3
                # In case of RO in HA there can be conflicts, two RO trying to assign same floating IP, so retry
                # several times
                while not assigned:
                    free_floating_ip = self._get_free_floating_ip(
                        server, floating_network
                    )

                    # No free floating IP found: create one. NOTE(review): the
                    # new IP is not used directly; show_floatingip(None) below
                    # raises and the inner except retries, so the created IP is
                    # picked up by _get_free_floating_ip on the next iteration.
                    if not free_floating_ip:
                        self._create_floating_ip(
                            floating_network, server, created_items
                        )

                    try:
                        # For race condition ensure not already assigned
                        fip = self.neutron.show_floatingip(free_floating_ip)

                        if fip["floatingip"].get("port_id"):
                            continue

                        # Assign floating ip
                        fip = self._assign_floating_ip(
                            free_floating_ip, floating_network
                        )

                        # Re-check after assignment: another RO instance may have
                        # won the race and bound this IP to a different port.
                        if fip["floatingip"]["port_id"] != floating_network["vim_id"]:
                            self.logger.warning(
                                "floating_ip {} re-assigned to other port".format(
                                    free_floating_ip
                                )
                            )
                            continue

                        self.logger.debug(
                            "Assigned floating_ip {} to VM {}".format(
                                free_floating_ip, server.id
                            )
                        )

                        assigned = True

                    except Exception as e:
                        # Openstack need some time after VM creation to assign an IP. So retry if fails
                        vm_status = self.nova.servers.get(server.id).status

                        if vm_status not in ("ACTIVE", "ERROR"):
                            # VM still building: keep retrying until server_timeout.
                            if time.time() - vm_start_time < server_timeout:
                                time.sleep(5)
                                continue
                        elif floating_ip_retries > 0:
                            floating_ip_retries -= 1
                            continue

                        raise vimconn.VimConnException(
                            "Cannot create floating_ip: {} {}".format(
                                type(e).__name__, e
                            ),
                            http_code=vimconn.HTTP_Conflict,
                        )

            except Exception as e:
                # Best-effort floating IPs (mgmt nets) do not abort the VM creation.
                if not floating_network["exit_on_floating_ip_error"]:
                    self.logger.error("Cannot create floating_ip. %s", str(e))
                    continue

                raise
2548
2549 def _update_port_security_for_vminstance(
2550 self,
2551 no_secured_ports: list,
2552 server: object,
2553 ) -> None:
2554 """Updates the port security according to no_secured_ports list.
2555
2556 Args:
2557 no_secured_ports (list): List of ports that security will be disabled
2558 server (object): Server Object
2559
2560 Raises:
2561 VimConnException
2562
2563 """
2564 # Wait until the VM is active and then disable the port-security
2565 if no_secured_ports:
2566 self.__wait_for_vm(server.id, "ACTIVE")
2567
2568 for port in no_secured_ports:
2569 port_update = {
2570 "port": {"port_security_enabled": False, "security_groups": None}
2571 }
2572
2573 if port[1] == "allow-address-pairs":
2574 port_update = {
2575 "port": {"allowed_address_pairs": [{"ip_address": "0.0.0.0/0"}]}
2576 }
2577
2578 try:
2579 self.neutron.update_port(port[0], port_update)
2580
2581 except Exception:
2582 raise vimconn.VimConnException(
2583 "It was not possible to disable port security for port {}".format(
2584 port[0]
2585 )
2586 )
2587
    def new_vminstance(
        self,
        name: str,
        description: str,
        start: bool,
        image_id: str,
        flavor_id: str,
        affinity_group_list: list,
        net_list: list,
        cloud_config=None,
        disk_list=None,
        availability_zone_index=None,
        availability_zone_list=None,
    ) -> tuple:
        """Adds a VM instance to VIM.

        Args:
            name (str): name of VM
            description (str): description
            start (bool): indicates if VM must start or boot in pause mode. Ignored
            image_id (str) image uuid
            flavor_id (str) flavor uuid
            affinity_group_list (list): list of affinity groups, each one is a dictionary.Ignore if empty.
            net_list (list): list of interfaces, each one is a dictionary with:
                name: name of network
                net_id: network uuid to connect
                vpci: virtual vcpi to assign, ignored because openstack lack #TODO
                model: interface model, ignored #TODO
                mac_address: used for SR-IOV ifaces #TODO for other types
                use: 'data', 'bridge',  'mgmt'
                type: 'virtual', 'PCI-PASSTHROUGH'('PF'), 'SR-IOV'('VF'), 'VFnotShared'
                vim_id: filled/added by this function
                floating_ip: True/False (or it can be None)
                port_security: True/False
            cloud_config (dict): (optional) dictionary with:
                key-pairs: (optional) list of strings with the public key to be inserted to the default user
                users: (optional) list of users to be inserted, each item is a dict with:
                    name: (mandatory) user name,
                    key-pairs: (optional) list of strings with the public key to be inserted to the user
                user-data: (optional) string is a text script to be passed directly to cloud-init
                config-files: (optional). List of files to be transferred. Each item is a dict with:
                    dest: (mandatory) string with the destination absolute path
                    encoding: (optional, by default text). Can be one of:
                        'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
                    content : (mandatory) string with the content of the file
                    permissions: (optional) string with file permissions, typically octal notation '0644'
                    owner: (optional) file owner, string with the format 'owner:group'
                boot-data-drive: boolean to indicate if user-data must be passed using a boot drive (hard disk)
            disk_list: (optional) list with additional disks to the VM. Each item is a dict with:
                image_id: (optional). VIM id of an existing image. If not provided an empty disk must be mounted
                size: (mandatory) string with the size of the disk in GB
                vim_id: (optional) should use this existing volume id
            availability_zone_index: Index of availability_zone_list to use for this this VM. None if not AV required
            availability_zone_list: list of availability zones given by user in the VNFD descriptor.  Ignore if
                availability_zone_index is None
                #TODO ip, security groups

        Returns:
            A tuple with the instance identifier and created_items or raises an exception on error
            created_items can be None or a dictionary where this method can include key-values that will be passed to
            the method delete_vminstance and action_vminstance. Can be used to store created ports, volumes, etc.
            Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
            as not present.

        """
        self.logger.debug(
            "new_vminstance input: image='%s' flavor='%s' nics='%s'",
            image_id,
            flavor_id,
            str(net_list),
        )

        try:
            server = None
            created_items = {}
            net_list_vim = []
            # list of external networks to be connected to instance, later on used to create floating_ip
            external_network = []
            # List of ports with port-security disabled
            no_secured_ports = []
            block_device_mapping = {}
            existing_vim_volumes = []
            server_group_id = None
            scheduller_hints = {}

            # Check the Openstack Connection
            self._reload_connection()

            # Prepare network list: creates one neutron port per connected iface
            # and fills external_network / no_secured_ports for post-processing.
            self._prepare_network_for_vminstance(
                name=name,
                net_list=net_list,
                created_items=created_items,
                net_list_vim=net_list_vim,
                external_network=external_network,
                no_secured_ports=no_secured_ports,
            )

            # Cloud config
            config_drive, userdata = self._create_user_data(cloud_config)

            # Get availability Zone
            vm_av_zone = self._get_vm_availability_zone(
                availability_zone_index, availability_zone_list
            )

            if disk_list:
                # Prepare disks
                self._prepare_disk_for_vminstance(
                    name=name,
                    existing_vim_volumes=existing_vim_volumes,
                    created_items=created_items,
                    vm_av_zone=vm_av_zone,
                    block_device_mapping=block_device_mapping,
                    disk_list=disk_list,
                )

            if affinity_group_list:
                # Only first id on the list will be used. Openstack restriction
                server_group_id = affinity_group_list[0]["affinity_group_id"]
                scheduller_hints["group"] = server_group_id

            self.logger.debug(
                "nova.servers.create({}, {}, {}, nics={}, security_groups={}, "
                "availability_zone={}, key_name={}, userdata={}, config_drive={}, "
                "block_device_mapping={}, server_group={})".format(
                    name,
                    image_id,
                    flavor_id,
                    net_list_vim,
                    self.config.get("security_groups"),
                    vm_av_zone,
                    self.config.get("keypair"),
                    userdata,
                    config_drive,
                    block_device_mapping,
                    server_group_id,
                )
            )

            # Create VM
            server = self.nova.servers.create(
                name=name,
                image=image_id,
                flavor=flavor_id,
                nics=net_list_vim,
                security_groups=self.config.get("security_groups"),
                # TODO remove security_groups in future versions. Already at neutron port
                availability_zone=vm_av_zone,
                key_name=self.config.get("keypair"),
                userdata=userdata,
                config_drive=config_drive,
                block_device_mapping=block_device_mapping,
                scheduler_hints=scheduller_hints,
            )

            vm_start_time = time.time()

            # Disable port-security once the VM is ACTIVE (it cannot be done
            # earlier without dropping all traffic on the port).
            self._update_port_security_for_vminstance(no_secured_ports, server)

            # Assign floating IPs for the collected external networks.
            self._prepare_external_network_for_vminstance(
                external_network=external_network,
                server=server,
                created_items=created_items,
                vm_start_time=vm_start_time,
            )

            return server.id, created_items

        except Exception as e:
            server_id = None
            if server:
                server_id = server.id

            try:
                # Rollback: remove the ":keep" tags so even persistent volumes
                # created during this failed attempt get cleaned up.
                created_items = self.remove_keep_tag_from_persistent_volumes(
                    created_items
                )

                self.delete_vminstance(server_id, created_items)

            except Exception as e2:
                self.logger.error("new_vminstance rollback fail {}".format(e2))

            self._format_exception(e)
2773
2774 @staticmethod
2775 def remove_keep_tag_from_persistent_volumes(created_items: Dict) -> Dict:
2776 """Removes the keep flag from persistent volumes. So, those volumes could be removed.
2777
2778 Args:
2779 created_items (dict): All created items belongs to VM
2780
2781 Returns:
2782 updated_created_items (dict): Dict which does not include keep flag for volumes.
2783
2784 """
2785 return {
2786 key.replace(":keep", ""): value for (key, value) in created_items.items()
2787 }
2788
2789 def get_vminstance(self, vm_id):
2790 """Returns the VM instance information from VIM"""
2791 # self.logger.debug("Getting VM from VIM")
2792 try:
2793 self._reload_connection()
2794 server = self.nova.servers.find(id=vm_id)
2795 # TODO parse input and translate to VIM format (openmano_schemas.new_vminstance_response_schema)
2796
2797 return server.to_dict()
2798 except (
2799 ksExceptions.ClientException,
2800 nvExceptions.ClientException,
2801 nvExceptions.NotFound,
2802 ConnectionError,
2803 ) as e:
2804 self._format_exception(e)
2805
    def get_vminstance_console(self, vm_id, console_type="vnc"):
        """
        Get a console for the virtual machine
        Params:
            vm_id: uuid of the VM
            console_type, can be:
                "novnc" (by default), "xvpvnc" for VNC types,
                "rdp-html5" for RDP types, "spice-html5" for SPICE types
        Returns dict with the console parameters:
                protocol: ssh, ftp, http, https, ...
                server:   usually ip address
                port:     the http, ssh, ... port
                suffix:   extra text, e.g. the http path and query string
        """
        self.logger.debug("Getting VM CONSOLE from VIM")

        try:
            self._reload_connection()
            server = self.nova.servers.find(id=vm_id)

            # Dispatch to the right nova console getter per console type.
            if console_type is None or console_type == "novnc":
                console_dict = server.get_vnc_console("novnc")
            elif console_type == "xvpvnc":
                console_dict = server.get_vnc_console(console_type)
            elif console_type == "rdp-html5":
                console_dict = server.get_rdp_console(console_type)
            elif console_type == "spice-html5":
                console_dict = server.get_spice_console(console_type)
            else:
                raise vimconn.VimConnException(
                    "console type '{}' not allowed".format(console_type),
                    http_code=vimconn.HTTP_Bad_Request,
                )

            console_dict1 = console_dict.get("console")

            if console_dict1:
                console_url = console_dict1.get("url")

                if console_url:
                    # parse console_url into protocol://server:port/suffix by
                    # locating the "//", the first "/" after it, and the ":"
                    # between them.
                    protocol_index = console_url.find("//")
                    suffix_index = (
                        console_url[protocol_index + 2 :].find("/") + protocol_index + 2
                    )
                    port_index = (
                        console_url[protocol_index + 2 : suffix_index].find(":")
                        + protocol_index
                        + 2
                    )

                    # NOTE(review): legacy behavior — returns an (error_code,
                    # message) tuple instead of raising on a malformed URL.
                    if protocol_index < 0 or port_index < 0 or suffix_index < 0:
                        return (
                            -vimconn.HTTP_Internal_Server_Error,
                            "Unexpected response from VIM",
                        )

                    console_dict = {
                        "protocol": console_url[0:protocol_index],
                        "server": console_url[protocol_index + 2 : port_index],
                        "port": console_url[port_index:suffix_index],
                        "suffix": console_url[suffix_index + 1 :],
                    }
                    # NOTE(review): this increment has no effect; protocol_index
                    # is not read again before returning.
                    protocol_index += 2

                    return console_dict
            raise vimconn.VimConnUnexpectedResponse("Unexpected response from VIM")
        except (
            nvExceptions.NotFound,
            ksExceptions.ClientException,
            nvExceptions.ClientException,
            nvExceptions.BadRequest,
            ConnectionError,
        ) as e:
            self._format_exception(e)
2881
2882 def _delete_ports_by_id_wth_neutron(self, k_id: str) -> None:
2883 """Neutron delete ports by id.
2884 Args:
2885 k_id (str): Port id in the VIM
2886 """
2887 try:
2888 port_dict = self.neutron.list_ports()
2889 existing_ports = [port["id"] for port in port_dict["ports"] if port_dict]
2890
2891 if k_id in existing_ports:
2892 self.neutron.delete_port(k_id)
2893
2894 except Exception as e:
2895 self.logger.error("Error deleting port: {}: {}".format(type(e).__name__, e))
2896
2897 def _delete_volumes_by_id_wth_cinder(
2898 self, k: str, k_id: str, volumes_to_hold: list, created_items: dict
2899 ) -> bool:
2900 """Cinder delete volume by id.
2901 Args:
2902 k (str): Full item name in created_items
2903 k_id (str): ID of floating ip in VIM
2904 volumes_to_hold (list): Volumes not to delete
2905 created_items (dict): All created items belongs to VM
2906 """
2907 try:
2908 if k_id in volumes_to_hold:
2909 return
2910
2911 if self.cinder.volumes.get(k_id).status != "available":
2912 return True
2913
2914 else:
2915 self.cinder.volumes.delete(k_id)
2916 created_items[k] = None
2917
2918 except Exception as e:
2919 self.logger.error(
2920 "Error deleting volume: {}: {}".format(type(e).__name__, e)
2921 )
2922
2923 def _delete_floating_ip_by_id(self, k: str, k_id: str, created_items: dict) -> None:
2924 """Neutron delete floating ip by id.
2925 Args:
2926 k (str): Full item name in created_items
2927 k_id (str): ID of floating ip in VIM
2928 created_items (dict): All created items belongs to VM
2929 """
2930 try:
2931 self.neutron.delete_floatingip(k_id)
2932 created_items[k] = None
2933
2934 except Exception as e:
2935 self.logger.error(
2936 "Error deleting floating ip: {}: {}".format(type(e).__name__, e)
2937 )
2938
2939 @staticmethod
2940 def _get_item_name_id(k: str) -> Tuple[str, str]:
2941 k_item, _, k_id = k.partition(":")
2942 return k_item, k_id
2943
2944 def _delete_vm_ports_attached_to_network(self, created_items: dict) -> None:
2945 """Delete VM ports attached to the networks before deleting virtual machine.
2946 Args:
2947 created_items (dict): All created items belongs to VM
2948 """
2949
2950 for k, v in created_items.items():
2951 if not v: # skip already deleted
2952 continue
2953
2954 try:
2955 k_item, k_id = self._get_item_name_id(k)
2956 if k_item == "port":
2957 self._delete_ports_by_id_wth_neutron(k_id)
2958
2959 except Exception as e:
2960 self.logger.error(
2961 "Error deleting port: {}: {}".format(type(e).__name__, e)
2962 )
2963
2964 def _delete_created_items(
2965 self, created_items: dict, volumes_to_hold: list, keep_waiting: bool
2966 ) -> bool:
2967 """Delete Volumes and floating ip if they exist in created_items."""
2968 for k, v in created_items.items():
2969 if not v: # skip already deleted
2970 continue
2971
2972 try:
2973 k_item, k_id = self._get_item_name_id(k)
2974
2975 if k_item == "volume":
2976 unavailable_vol = self._delete_volumes_by_id_wth_cinder(
2977 k, k_id, volumes_to_hold, created_items
2978 )
2979
2980 if unavailable_vol:
2981 keep_waiting = True
2982
2983 elif k_item == "floating_ip":
2984 self._delete_floating_ip_by_id(k, k_id, created_items)
2985
2986 except Exception as e:
2987 self.logger.error("Error deleting {}: {}".format(k, e))
2988
2989 return keep_waiting
2990
2991 @staticmethod
2992 def _extract_items_wth_keep_flag_from_created_items(created_items: dict) -> dict:
2993 """Remove the volumes which has key flag from created_items
2994
2995 Args:
2996 created_items (dict): All created items belongs to VM
2997
2998 Returns:
2999 created_items (dict): Persistent volumes eliminated created_items
3000 """
3001 return {
3002 key: value
3003 for (key, value) in created_items.items()
3004 if len(key.split(":")) == 2
3005 }
3006
    def delete_vminstance(
        self, vm_id: str, created_items: dict = None, volumes_to_hold: list = None
    ) -> None:
        """Removes a VM instance from VIM. Returns the old identifier.

        Args:
            vm_id (str): Identifier of VM instance
            created_items (dict): All created items belongs to VM
            volumes_to_hold (list): Volumes_to_hold
        """
        if created_items is None:
            created_items = {}
        if volumes_to_hold is None:
            volumes_to_hold = []

        try:
            # Volumes tagged ":keep" must survive the VM: drop them from the
            # cleanup bookkeeping before anything gets deleted.
            created_items = self._extract_items_wth_keep_flag_from_created_items(
                created_items
            )

            self._reload_connection()

            # Delete VM ports attached to the networks before the virtual machine
            if created_items:
                self._delete_vm_ports_attached_to_network(created_items)

            if vm_id:
                self.nova.servers.delete(vm_id)

            # Although having detached, volumes should have in active status before deleting.
            # We ensure in this loop
            keep_waiting = True
            elapsed_time = 0

            while keep_waiting and elapsed_time < volume_timeout:
                keep_waiting = False

                # Delete volumes and floating IP.
                keep_waiting = self._delete_created_items(
                    created_items, volumes_to_hold, keep_waiting
                )

                # Some volume was still not deletable: poll again in 1 second.
                if keep_waiting:
                    time.sleep(1)
                    elapsed_time += 1

        except (
            nvExceptions.NotFound,
            ksExceptions.ClientException,
            nvExceptions.ClientException,
            ConnectionError,
        ) as e:
            self._format_exception(e)
3059
    def refresh_vms_status(self, vm_list):
        """Get the status of the virtual machines and their interfaces/ports.

        Params: the list of VM identifiers

        Returns a dictionary indexed by vm_id with:
            status: Mandatory. Text with one of:
                DELETED (not found at vim)
                VIM_ERROR (cannot connect to VIM, VIM response error, ...)
                OTHER (VIM reported other status not understood)
                ERROR (VIM indicates an ERROR status)
                ACTIVE, PAUSED, SUSPENDED, INACTIVE (not running),
                CREATING (on building process)
                ACTIVE:NoMgmtIP (active but no interface has an IP address)
            error_msg: text with VIM error message, if any, or the VIM connection ERROR
            vim_info: text with plain information obtained from vim (yaml.safe_dump)
            interfaces:
                - vim_info: text with plain information obtained from vim (yaml.safe_dump)
                  mac_address: text format XX:XX:XX:XX:XX:XX
                  vim_net_id: network id where this interface is connected
                  vim_interface_id: interface/port VIM id
                  ip_address: null, or text with IPv4, IPv6 address
                  compute_node: identification of compute node where PF,VF interface is allocated
                  pci: PCI address of the NIC that hosts the PF,VF
                  vlan: physical VLAN used for VF
        """
        vm_dict = {}
        self.logger.debug(
            "refresh_vms status: Getting tenant VM instance information from VIM"
        )

        for vm_id in vm_list:
            vm = {}

            try:
                vm_vim = self.get_vminstance(vm_id)

                # Map the VIM-native status to the MANO vocabulary; anything
                # unknown is reported as OTHER with an explanatory error_msg.
                if vm_vim["status"] in vmStatus2manoFormat:
                    vm["status"] = vmStatus2manoFormat[vm_vim["status"]]
                else:
                    vm["status"] = "OTHER"
                    vm["error_msg"] = "VIM status reported " + vm_vim["status"]

                # Strip user_data (cloud-init payload) before serializing: it
                # can be large and may contain sensitive content.
                vm_vim.pop("OS-EXT-SRV-ATTR:user_data", None)
                vm_vim.pop("user_data", None)
                vm["vim_info"] = self.serialize(vm_vim)

                vm["interfaces"] = []
                if vm_vim.get("fault"):
                    vm["error_msg"] = str(vm_vim["fault"])

                # get interfaces
                try:
                    self._reload_connection()
                    port_dict = self.neutron.list_ports(device_id=vm_id)

                    for port in port_dict["ports"]:
                        interface = {}
                        interface["vim_info"] = self.serialize(port)
                        interface["mac_address"] = port.get("mac_address")
                        interface["vim_net_id"] = port["network_id"]
                        interface["vim_interface_id"] = port["id"]
                        # check if OS-EXT-SRV-ATTR:host is there,
                        # in case of non-admin credentials, it will be missing

                        if vm_vim.get("OS-EXT-SRV-ATTR:host"):
                            interface["compute_node"] = vm_vim["OS-EXT-SRV-ATTR:host"]

                        interface["pci"] = None

                        # check if binding:profile is there,
                        # in case of non-admin credentials, it will be missing
                        if port.get("binding:profile"):
                            if port["binding:profile"].get("pci_slot"):
                                # TODO: At the moment sr-iov pci addresses are converted to PF pci addresses by setting
                                #  the slot to 0x00
                                # TODO: This is just a workaround valid for niantinc. Find a better way to do so
                                #   CHANGE DDDD:BB:SS.F to DDDD:BB:00.(F%2) assuming there are 2 ports per nic
                                pci = port["binding:profile"]["pci_slot"]
                                # interface["pci"] = pci[:-4] + "00." + str(int(pci[-1]) % 2)
                                interface["pci"] = pci

                        interface["vlan"] = None

                        if port.get("binding:vif_details"):
                            interface["vlan"] = port["binding:vif_details"].get("vlan")

                        # Get vlan from network in case not present in port for those old openstacks and cases where
                        # it is needed vlan at PT
                        if not interface["vlan"]:
                            # if network is of type vlan and port is of type direct (sr-iov) then set vlan id
                            network = self.neutron.show_network(port["network_id"])

                            if (
                                network["network"].get("provider:network_type")
                                == "vlan"
                            ):
                                # and port.get("binding:vnic_type") in ("direct", "direct-physical"):
                                interface["vlan"] = network["network"].get(
                                    "provider:segmentation_id"
                                )

                        ips = []
                        # look for floating ip address
                        try:
                            floating_ip_dict = self.neutron.list_floatingips(
                                port_id=port["id"]
                            )

                            if floating_ip_dict.get("floatingips"):
                                ips.append(
                                    floating_ip_dict["floatingips"][0].get(
                                        "floating_ip_address"
                                    )
                                )
                        except Exception:
                            # best-effort: missing floating IP info is not an error
                            pass

                        for subnet in port["fixed_ips"]:
                            ips.append(subnet["ip_address"])

                        # floating IP (if any) first, then fixed IPs, ";"-separated
                        interface["ip_address"] = ";".join(ips)
                        vm["interfaces"].append(interface)
                except Exception as e:
                    # interface details are optional: log and keep the VM entry
                    self.logger.error(
                        "Error getting vm interface information {}: {}".format(
                            type(e).__name__, e
                        ),
                        exc_info=True,
                    )
            except vimconn.VimConnNotFoundException as e:
                self.logger.error("Exception getting vm status: %s", str(e))
                vm["status"] = "DELETED"
                vm["error_msg"] = str(e)
            except vimconn.VimConnException as e:
                self.logger.error("Exception getting vm status: %s", str(e))
                vm["status"] = "VIM_ERROR"
                vm["error_msg"] = str(e)

            vm_dict[vm_id] = vm

        return vm_dict
3202
    def action_vminstance(self, vm_id, action_dict, created_items={}):
        """Send and action over a VM instance from VIM
        Returns None or the console dict if the action was successfully sent to the VIM

        created_items is accepted for interface compatibility; it is not used
        by this method.
        """
        self.logger.debug("Action over VM '%s': %s", vm_id, str(action_dict))

        try:
            self._reload_connection()
            # raises nova NotFound (mapped below) if the server does not exist
            server = self.nova.servers.find(id=vm_id)

            if "start" in action_dict:
                if action_dict["start"] == "rebuild":
                    server.rebuild()
                else:
                    # the nova call to "start" depends on the current VM state
                    if server.status == "PAUSED":
                        server.unpause()
                    elif server.status == "SUSPENDED":
                        server.resume()
                    elif server.status == "SHUTOFF":
                        server.start()
                    else:
                        self.logger.debug(
                            "ERROR : Instance is not in SHUTOFF/PAUSE/SUSPEND state"
                        )
                        raise vimconn.VimConnException(
                            "Cannot 'start' instance while it is in active state",
                            http_code=vimconn.HTTP_Bad_Request,
                        )

            elif "pause" in action_dict:
                server.pause()
            elif "resume" in action_dict:
                server.resume()
            elif "shutoff" in action_dict or "shutdown" in action_dict:
                self.logger.debug("server status %s", server.status)
                if server.status == "ACTIVE":
                    server.stop()
                else:
                    self.logger.debug("ERROR: VM is not in Active state")
                    raise vimconn.VimConnException(
                        "VM is not in active state, stop operation is not allowed",
                        http_code=vimconn.HTTP_Bad_Request,
                    )
            elif "forceOff" in action_dict:
                server.stop()  # TODO
            elif "terminate" in action_dict:
                server.delete()
            elif "createImage" in action_dict:
                server.create_image()
                # "path":path_schema,
                # "description":description_schema,
                # "name":name_schema,
                # "metadata":metadata_schema,
                # "imageRef": id_schema,
                # "disk": {"oneOf":[{"type": "null"}, {"type":"string"}] },
            elif "rebuild" in action_dict:
                server.rebuild(server.image["id"])
            elif "reboot" in action_dict:
                server.reboot()  # reboot_type="SOFT"
            elif "console" in action_dict:
                console_type = action_dict["console"]

                # each console type maps to a different nova console endpoint
                if console_type is None or console_type == "novnc":
                    console_dict = server.get_vnc_console("novnc")
                elif console_type == "xvpvnc":
                    console_dict = server.get_vnc_console(console_type)
                elif console_type == "rdp-html5":
                    console_dict = server.get_rdp_console(console_type)
                elif console_type == "spice-html5":
                    console_dict = server.get_spice_console(console_type)
                else:
                    raise vimconn.VimConnException(
                        "console type '{}' not allowed".format(console_type),
                        http_code=vimconn.HTTP_Bad_Request,
                    )

                try:
                    console_url = console_dict["console"]["url"]
                    # parse console_url into <protocol>://<server>:<port>/<suffix>
                    protocol_index = console_url.find("//")
                    suffix_index = (
                        console_url[protocol_index + 2 :].find("/") + protocol_index + 2
                    )
                    port_index = (
                        console_url[protocol_index + 2 : suffix_index].find(":")
                        + protocol_index
                        + 2
                    )

                    if protocol_index < 0 or port_index < 0 or suffix_index < 0:
                        raise vimconn.VimConnException(
                            "Unexpected response from VIM " + str(console_dict)
                        )

                    console_dict2 = {
                        "protocol": console_url[0:protocol_index],
                        "server": console_url[protocol_index + 2 : port_index],
                        "port": int(console_url[port_index + 1 : suffix_index]),
                        "suffix": console_url[suffix_index + 1 :],
                    }

                    return console_dict2
                except Exception:
                    # any parsing failure (bad indexes, non-int port, missing keys)
                    raise vimconn.VimConnException(
                        "Unexpected response from VIM " + str(console_dict)
                    )

            return None
        except (
            ksExceptions.ClientException,
            nvExceptions.ClientException,
            nvExceptions.NotFound,
            ConnectionError,
        ) as e:
            self._format_exception(e)
        # TODO insert exception vimconn.HTTP_Unauthorized
3319
3320 # ###### VIO Specific Changes #########
3321 def _generate_vlanID(self):
3322 """
3323 Method to get unused vlanID
3324 Args:
3325 None
3326 Returns:
3327 vlanID
3328 """
3329 # Get used VLAN IDs
3330 usedVlanIDs = []
3331 networks = self.get_network_list()
3332
3333 for net in networks:
3334 if net.get("provider:segmentation_id"):
3335 usedVlanIDs.append(net.get("provider:segmentation_id"))
3336
3337 used_vlanIDs = set(usedVlanIDs)
3338
3339 # find unused VLAN ID
3340 for vlanID_range in self.config.get("dataplane_net_vlan_range"):
3341 try:
3342 start_vlanid, end_vlanid = map(
3343 int, vlanID_range.replace(" ", "").split("-")
3344 )
3345
3346 for vlanID in range(start_vlanid, end_vlanid + 1):
3347 if vlanID not in used_vlanIDs:
3348 return vlanID
3349 except Exception as exp:
3350 raise vimconn.VimConnException(
3351 "Exception {} occurred while generating VLAN ID.".format(exp)
3352 )
3353 else:
3354 raise vimconn.VimConnConflictException(
3355 "Unable to create the SRIOV VLAN network. All given Vlan IDs {} are in use.".format(
3356 self.config.get("dataplane_net_vlan_range")
3357 )
3358 )
3359
3360 def _generate_multisegment_vlanID(self):
3361 """
3362 Method to get unused vlanID
3363 Args:
3364 None
3365 Returns:
3366 vlanID
3367 """
3368 # Get used VLAN IDs
3369 usedVlanIDs = []
3370 networks = self.get_network_list()
3371 for net in networks:
3372 if net.get("provider:network_type") == "vlan" and net.get(
3373 "provider:segmentation_id"
3374 ):
3375 usedVlanIDs.append(net.get("provider:segmentation_id"))
3376 elif net.get("segments"):
3377 for segment in net.get("segments"):
3378 if segment.get("provider:network_type") == "vlan" and segment.get(
3379 "provider:segmentation_id"
3380 ):
3381 usedVlanIDs.append(segment.get("provider:segmentation_id"))
3382
3383 used_vlanIDs = set(usedVlanIDs)
3384
3385 # find unused VLAN ID
3386 for vlanID_range in self.config.get("multisegment_vlan_range"):
3387 try:
3388 start_vlanid, end_vlanid = map(
3389 int, vlanID_range.replace(" ", "").split("-")
3390 )
3391
3392 for vlanID in range(start_vlanid, end_vlanid + 1):
3393 if vlanID not in used_vlanIDs:
3394 return vlanID
3395 except Exception as exp:
3396 raise vimconn.VimConnException(
3397 "Exception {} occurred while generating VLAN ID.".format(exp)
3398 )
3399 else:
3400 raise vimconn.VimConnConflictException(
3401 "Unable to create the VLAN segment. All VLAN IDs {} are in use.".format(
3402 self.config.get("multisegment_vlan_range")
3403 )
3404 )
3405
3406 def _validate_vlan_ranges(self, input_vlan_range, text_vlan_range):
3407 """
3408 Method to validate user given vlanID ranges
3409 Args: None
3410 Returns: None
3411 """
3412 for vlanID_range in input_vlan_range:
3413 vlan_range = vlanID_range.replace(" ", "")
3414 # validate format
3415 vlanID_pattern = r"(\d)*-(\d)*$"
3416 match_obj = re.match(vlanID_pattern, vlan_range)
3417 if not match_obj:
3418 raise vimconn.VimConnConflictException(
3419 "Invalid VLAN range for {}: {}.You must provide "
3420 "'{}' in format [start_ID - end_ID].".format(
3421 text_vlan_range, vlanID_range, text_vlan_range
3422 )
3423 )
3424
3425 start_vlanid, end_vlanid = map(int, vlan_range.split("-"))
3426 if start_vlanid <= 0:
3427 raise vimconn.VimConnConflictException(
3428 "Invalid VLAN range for {}: {}. Start ID can not be zero. For VLAN "
3429 "networks valid IDs are 1 to 4094 ".format(
3430 text_vlan_range, vlanID_range
3431 )
3432 )
3433
3434 if end_vlanid > 4094:
3435 raise vimconn.VimConnConflictException(
3436 "Invalid VLAN range for {}: {}. End VLAN ID can not be "
3437 "greater than 4094. For VLAN networks valid IDs are 1 to 4094 ".format(
3438 text_vlan_range, vlanID_range
3439 )
3440 )
3441
3442 if start_vlanid > end_vlanid:
3443 raise vimconn.VimConnConflictException(
3444 "Invalid VLAN range for {}: {}. You must provide '{}'"
3445 " in format start_ID - end_ID and start_ID < end_ID ".format(
3446 text_vlan_range, vlanID_range, text_vlan_range
3447 )
3448 )
3449
3450 # NOT USED FUNCTIONS
3451
3452 def new_external_port(self, port_data):
3453 """Adds a external port to VIM
3454 Returns the port identifier"""
3455 # TODO openstack if needed
3456 return (
3457 -vimconn.HTTP_Internal_Server_Error,
3458 "osconnector.new_external_port() not implemented",
3459 )
3460
3461 def connect_port_network(self, port_id, network_id, admin=False):
3462 """Connects a external port to a network
3463 Returns status code of the VIM response"""
3464 # TODO openstack if needed
3465 return (
3466 -vimconn.HTTP_Internal_Server_Error,
3467 "osconnector.connect_port_network() not implemented",
3468 )
3469
3470 def new_user(self, user_name, user_passwd, tenant_id=None):
3471 """Adds a new user to openstack VIM
3472 Returns the user identifier"""
3473 self.logger.debug("osconnector: Adding a new user to VIM")
3474
3475 try:
3476 self._reload_connection()
3477 user = self.keystone.users.create(
3478 user_name, password=user_passwd, default_project=tenant_id
3479 )
3480 # self.keystone.tenants.add_user(self.k_creds["username"], #role)
3481
3482 return user.id
3483 except ksExceptions.ConnectionError as e:
3484 error_value = -vimconn.HTTP_Bad_Request
3485 error_text = (
3486 type(e).__name__
3487 + ": "
3488 + (str(e) if len(e.args) == 0 else str(e.args[0]))
3489 )
3490 except ksExceptions.ClientException as e: # TODO remove
3491 error_value = -vimconn.HTTP_Bad_Request
3492 error_text = (
3493 type(e).__name__
3494 + ": "
3495 + (str(e) if len(e.args) == 0 else str(e.args[0]))
3496 )
3497
3498 # TODO insert exception vimconn.HTTP_Unauthorized
3499 # if reaching here is because an exception
3500 self.logger.debug("new_user " + error_text)
3501
3502 return error_value, error_text
3503
3504 def delete_user(self, user_id):
3505 """Delete a user from openstack VIM
3506 Returns the user identifier"""
3507 if self.debug:
3508 print("osconnector: Deleting a user from VIM")
3509
3510 try:
3511 self._reload_connection()
3512 self.keystone.users.delete(user_id)
3513
3514 return 1, user_id
3515 except ksExceptions.ConnectionError as e:
3516 error_value = -vimconn.HTTP_Bad_Request
3517 error_text = (
3518 type(e).__name__
3519 + ": "
3520 + (str(e) if len(e.args) == 0 else str(e.args[0]))
3521 )
3522 except ksExceptions.NotFound as e:
3523 error_value = -vimconn.HTTP_Not_Found
3524 error_text = (
3525 type(e).__name__
3526 + ": "
3527 + (str(e) if len(e.args) == 0 else str(e.args[0]))
3528 )
3529 except ksExceptions.ClientException as e: # TODO remove
3530 error_value = -vimconn.HTTP_Bad_Request
3531 error_text = (
3532 type(e).__name__
3533 + ": "
3534 + (str(e) if len(e.args) == 0 else str(e.args[0]))
3535 )
3536
3537 # TODO insert exception vimconn.HTTP_Unauthorized
3538 # if reaching here is because an exception
3539 self.logger.debug("delete_tenant " + error_text)
3540
3541 return error_value, error_text
3542
3543 def get_hosts_info(self):
3544 """Get the information of deployed hosts
3545 Returns the hosts content"""
3546 if self.debug:
3547 print("osconnector: Getting Host info from VIM")
3548
3549 try:
3550 h_list = []
3551 self._reload_connection()
3552 hypervisors = self.nova.hypervisors.list()
3553
3554 for hype in hypervisors:
3555 h_list.append(hype.to_dict())
3556
3557 return 1, {"hosts": h_list}
3558 except nvExceptions.NotFound as e:
3559 error_value = -vimconn.HTTP_Not_Found
3560 error_text = str(e) if len(e.args) == 0 else str(e.args[0])
3561 except (ksExceptions.ClientException, nvExceptions.ClientException) as e:
3562 error_value = -vimconn.HTTP_Bad_Request
3563 error_text = (
3564 type(e).__name__
3565 + ": "
3566 + (str(e) if len(e.args) == 0 else str(e.args[0]))
3567 )
3568
3569 # TODO insert exception vimconn.HTTP_Unauthorized
3570 # if reaching here is because an exception
3571 self.logger.debug("get_hosts_info " + error_text)
3572
3573 return error_value, error_text
3574
3575 def get_hosts(self, vim_tenant):
3576 """Get the hosts and deployed instances
3577 Returns the hosts content"""
3578 r, hype_dict = self.get_hosts_info()
3579
3580 if r < 0:
3581 return r, hype_dict
3582
3583 hypervisors = hype_dict["hosts"]
3584
3585 try:
3586 servers = self.nova.servers.list()
3587 for hype in hypervisors:
3588 for server in servers:
3589 if (
3590 server.to_dict()["OS-EXT-SRV-ATTR:hypervisor_hostname"]
3591 == hype["hypervisor_hostname"]
3592 ):
3593 if "vm" in hype:
3594 hype["vm"].append(server.id)
3595 else:
3596 hype["vm"] = [server.id]
3597
3598 return 1, hype_dict
3599 except nvExceptions.NotFound as e:
3600 error_value = -vimconn.HTTP_Not_Found
3601 error_text = str(e) if len(e.args) == 0 else str(e.args[0])
3602 except (ksExceptions.ClientException, nvExceptions.ClientException) as e:
3603 error_value = -vimconn.HTTP_Bad_Request
3604 error_text = (
3605 type(e).__name__
3606 + ": "
3607 + (str(e) if len(e.args) == 0 else str(e.args[0]))
3608 )
3609
3610 # TODO insert exception vimconn.HTTP_Unauthorized
3611 # if reaching here is because an exception
3612 self.logger.debug("get_hosts " + error_text)
3613
3614 return error_value, error_text
3615
3616 def new_classification(self, name, ctype, definition):
3617 self.logger.debug(
3618 "Adding a new (Traffic) Classification to VIM, named %s", name
3619 )
3620
3621 try:
3622 new_class = None
3623 self._reload_connection()
3624
3625 if ctype not in supportedClassificationTypes:
3626 raise vimconn.VimConnNotSupportedException(
3627 "OpenStack VIM connector does not support provided "
3628 "Classification Type {}, supported ones are: {}".format(
3629 ctype, supportedClassificationTypes
3630 )
3631 )
3632
3633 if not self._validate_classification(ctype, definition):
3634 raise vimconn.VimConnException(
3635 "Incorrect Classification definition for the type specified."
3636 )
3637
3638 classification_dict = definition
3639 classification_dict["name"] = name
3640 new_class = self.neutron.create_sfc_flow_classifier(
3641 {"flow_classifier": classification_dict}
3642 )
3643
3644 return new_class["flow_classifier"]["id"]
3645 except (
3646 neExceptions.ConnectionFailed,
3647 ksExceptions.ClientException,
3648 neExceptions.NeutronException,
3649 ConnectionError,
3650 ) as e:
3651 self.logger.error("Creation of Classification failed.")
3652 self._format_exception(e)
3653
3654 def get_classification(self, class_id):
3655 self.logger.debug(" Getting Classification %s from VIM", class_id)
3656 filter_dict = {"id": class_id}
3657 class_list = self.get_classification_list(filter_dict)
3658
3659 if len(class_list) == 0:
3660 raise vimconn.VimConnNotFoundException(
3661 "Classification '{}' not found".format(class_id)
3662 )
3663 elif len(class_list) > 1:
3664 raise vimconn.VimConnConflictException(
3665 "Found more than one Classification with this criteria"
3666 )
3667
3668 classification = class_list[0]
3669
3670 return classification
3671
3672 def get_classification_list(self, filter_dict={}):
3673 self.logger.debug(
3674 "Getting Classifications from VIM filter: '%s'", str(filter_dict)
3675 )
3676
3677 try:
3678 filter_dict_os = filter_dict.copy()
3679 self._reload_connection()
3680
3681 if self.api_version3 and "tenant_id" in filter_dict_os:
3682 filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")
3683
3684 classification_dict = self.neutron.list_sfc_flow_classifiers(
3685 **filter_dict_os
3686 )
3687 classification_list = classification_dict["flow_classifiers"]
3688 self.__classification_os2mano(classification_list)
3689
3690 return classification_list
3691 except (
3692 neExceptions.ConnectionFailed,
3693 ksExceptions.ClientException,
3694 neExceptions.NeutronException,
3695 ConnectionError,
3696 ) as e:
3697 self._format_exception(e)
3698
3699 def delete_classification(self, class_id):
3700 self.logger.debug("Deleting Classification '%s' from VIM", class_id)
3701
3702 try:
3703 self._reload_connection()
3704 self.neutron.delete_sfc_flow_classifier(class_id)
3705
3706 return class_id
3707 except (
3708 neExceptions.ConnectionFailed,
3709 neExceptions.NeutronException,
3710 ksExceptions.ClientException,
3711 neExceptions.NeutronException,
3712 ConnectionError,
3713 ) as e:
3714 self._format_exception(e)
3715
3716 def new_sfi(self, name, ingress_ports, egress_ports, sfc_encap=True):
3717 self.logger.debug(
3718 "Adding a new Service Function Instance to VIM, named '%s'", name
3719 )
3720
3721 try:
3722 new_sfi = None
3723 self._reload_connection()
3724 correlation = None
3725
3726 if sfc_encap:
3727 correlation = "nsh"
3728
3729 if len(ingress_ports) != 1:
3730 raise vimconn.VimConnNotSupportedException(
3731 "OpenStack VIM connector can only have 1 ingress port per SFI"
3732 )
3733
3734 if len(egress_ports) != 1:
3735 raise vimconn.VimConnNotSupportedException(
3736 "OpenStack VIM connector can only have 1 egress port per SFI"
3737 )
3738
3739 sfi_dict = {
3740 "name": name,
3741 "ingress": ingress_ports[0],
3742 "egress": egress_ports[0],
3743 "service_function_parameters": {"correlation": correlation},
3744 }
3745 new_sfi = self.neutron.create_sfc_port_pair({"port_pair": sfi_dict})
3746
3747 return new_sfi["port_pair"]["id"]
3748 except (
3749 neExceptions.ConnectionFailed,
3750 ksExceptions.ClientException,
3751 neExceptions.NeutronException,
3752 ConnectionError,
3753 ) as e:
3754 if new_sfi:
3755 try:
3756 self.neutron.delete_sfc_port_pair(new_sfi["port_pair"]["id"])
3757 except Exception:
3758 self.logger.error(
3759 "Creation of Service Function Instance failed, with "
3760 "subsequent deletion failure as well."
3761 )
3762
3763 self._format_exception(e)
3764
3765 def get_sfi(self, sfi_id):
3766 self.logger.debug("Getting Service Function Instance %s from VIM", sfi_id)
3767 filter_dict = {"id": sfi_id}
3768 sfi_list = self.get_sfi_list(filter_dict)
3769
3770 if len(sfi_list) == 0:
3771 raise vimconn.VimConnNotFoundException(
3772 "Service Function Instance '{}' not found".format(sfi_id)
3773 )
3774 elif len(sfi_list) > 1:
3775 raise vimconn.VimConnConflictException(
3776 "Found more than one Service Function Instance with this criteria"
3777 )
3778
3779 sfi = sfi_list[0]
3780
3781 return sfi
3782
3783 def get_sfi_list(self, filter_dict={}):
3784 self.logger.debug(
3785 "Getting Service Function Instances from VIM filter: '%s'", str(filter_dict)
3786 )
3787
3788 try:
3789 self._reload_connection()
3790 filter_dict_os = filter_dict.copy()
3791
3792 if self.api_version3 and "tenant_id" in filter_dict_os:
3793 filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")
3794
3795 sfi_dict = self.neutron.list_sfc_port_pairs(**filter_dict_os)
3796 sfi_list = sfi_dict["port_pairs"]
3797 self.__sfi_os2mano(sfi_list)
3798
3799 return sfi_list
3800 except (
3801 neExceptions.ConnectionFailed,
3802 ksExceptions.ClientException,
3803 neExceptions.NeutronException,
3804 ConnectionError,
3805 ) as e:
3806 self._format_exception(e)
3807
3808 def delete_sfi(self, sfi_id):
3809 self.logger.debug("Deleting Service Function Instance '%s' from VIM", sfi_id)
3810
3811 try:
3812 self._reload_connection()
3813 self.neutron.delete_sfc_port_pair(sfi_id)
3814
3815 return sfi_id
3816 except (
3817 neExceptions.ConnectionFailed,
3818 neExceptions.NeutronException,
3819 ksExceptions.ClientException,
3820 neExceptions.NeutronException,
3821 ConnectionError,
3822 ) as e:
3823 self._format_exception(e)
3824
3825 def new_sf(self, name, sfis, sfc_encap=True):
3826 self.logger.debug("Adding a new Service Function to VIM, named '%s'", name)
3827
3828 try:
3829 new_sf = None
3830 self._reload_connection()
3831 # correlation = None
3832 # if sfc_encap:
3833 # correlation = "nsh"
3834
3835 for instance in sfis:
3836 sfi = self.get_sfi(instance)
3837
3838 if sfi.get("sfc_encap") != sfc_encap:
3839 raise vimconn.VimConnNotSupportedException(
3840 "OpenStack VIM connector requires all SFIs of the "
3841 "same SF to share the same SFC Encapsulation"
3842 )
3843
3844 sf_dict = {"name": name, "port_pairs": sfis}
3845 new_sf = self.neutron.create_sfc_port_pair_group(
3846 {"port_pair_group": sf_dict}
3847 )
3848
3849 return new_sf["port_pair_group"]["id"]
3850 except (
3851 neExceptions.ConnectionFailed,
3852 ksExceptions.ClientException,
3853 neExceptions.NeutronException,
3854 ConnectionError,
3855 ) as e:
3856 if new_sf:
3857 try:
3858 self.neutron.delete_sfc_port_pair_group(
3859 new_sf["port_pair_group"]["id"]
3860 )
3861 except Exception:
3862 self.logger.error(
3863 "Creation of Service Function failed, with "
3864 "subsequent deletion failure as well."
3865 )
3866
3867 self._format_exception(e)
3868
3869 def get_sf(self, sf_id):
3870 self.logger.debug("Getting Service Function %s from VIM", sf_id)
3871 filter_dict = {"id": sf_id}
3872 sf_list = self.get_sf_list(filter_dict)
3873
3874 if len(sf_list) == 0:
3875 raise vimconn.VimConnNotFoundException(
3876 "Service Function '{}' not found".format(sf_id)
3877 )
3878 elif len(sf_list) > 1:
3879 raise vimconn.VimConnConflictException(
3880 "Found more than one Service Function with this criteria"
3881 )
3882
3883 sf = sf_list[0]
3884
3885 return sf
3886
3887 def get_sf_list(self, filter_dict={}):
3888 self.logger.debug(
3889 "Getting Service Function from VIM filter: '%s'", str(filter_dict)
3890 )
3891
3892 try:
3893 self._reload_connection()
3894 filter_dict_os = filter_dict.copy()
3895
3896 if self.api_version3 and "tenant_id" in filter_dict_os:
3897 filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")
3898
3899 sf_dict = self.neutron.list_sfc_port_pair_groups(**filter_dict_os)
3900 sf_list = sf_dict["port_pair_groups"]
3901 self.__sf_os2mano(sf_list)
3902
3903 return sf_list
3904 except (
3905 neExceptions.ConnectionFailed,
3906 ksExceptions.ClientException,
3907 neExceptions.NeutronException,
3908 ConnectionError,
3909 ) as e:
3910 self._format_exception(e)
3911
3912 def delete_sf(self, sf_id):
3913 self.logger.debug("Deleting Service Function '%s' from VIM", sf_id)
3914
3915 try:
3916 self._reload_connection()
3917 self.neutron.delete_sfc_port_pair_group(sf_id)
3918
3919 return sf_id
3920 except (
3921 neExceptions.ConnectionFailed,
3922 neExceptions.NeutronException,
3923 ksExceptions.ClientException,
3924 neExceptions.NeutronException,
3925 ConnectionError,
3926 ) as e:
3927 self._format_exception(e)
3928
3929 def new_sfp(self, name, classifications, sfs, sfc_encap=True, spi=None):
3930 self.logger.debug("Adding a new Service Function Path to VIM, named '%s'", name)
3931
3932 try:
3933 new_sfp = None
3934 self._reload_connection()
3935 # In networking-sfc the MPLS encapsulation is legacy
3936 # should be used when no full SFC Encapsulation is intended
3937 correlation = "mpls"
3938
3939 if sfc_encap:
3940 correlation = "nsh"
3941
3942 sfp_dict = {
3943 "name": name,
3944 "flow_classifiers": classifications,
3945 "port_pair_groups": sfs,
3946 "chain_parameters": {"correlation": correlation},
3947 }
3948
3949 if spi:
3950 sfp_dict["chain_id"] = spi
3951
3952 new_sfp = self.neutron.create_sfc_port_chain({"port_chain": sfp_dict})
3953
3954 return new_sfp["port_chain"]["id"]
3955 except (
3956 neExceptions.ConnectionFailed,
3957 ksExceptions.ClientException,
3958 neExceptions.NeutronException,
3959 ConnectionError,
3960 ) as e:
3961 if new_sfp:
3962 try:
3963 self.neutron.delete_sfc_port_chain(new_sfp["port_chain"]["id"])
3964 except Exception:
3965 self.logger.error(
3966 "Creation of Service Function Path failed, with "
3967 "subsequent deletion failure as well."
3968 )
3969
3970 self._format_exception(e)
3971
3972 def get_sfp(self, sfp_id):
3973 self.logger.debug(" Getting Service Function Path %s from VIM", sfp_id)
3974
3975 filter_dict = {"id": sfp_id}
3976 sfp_list = self.get_sfp_list(filter_dict)
3977
3978 if len(sfp_list) == 0:
3979 raise vimconn.VimConnNotFoundException(
3980 "Service Function Path '{}' not found".format(sfp_id)
3981 )
3982 elif len(sfp_list) > 1:
3983 raise vimconn.VimConnConflictException(
3984 "Found more than one Service Function Path with this criteria"
3985 )
3986
3987 sfp = sfp_list[0]
3988
3989 return sfp
3990
3991 def get_sfp_list(self, filter_dict={}):
3992 self.logger.debug(
3993 "Getting Service Function Paths from VIM filter: '%s'", str(filter_dict)
3994 )
3995
3996 try:
3997 self._reload_connection()
3998 filter_dict_os = filter_dict.copy()
3999
4000 if self.api_version3 and "tenant_id" in filter_dict_os:
4001 filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")
4002
4003 sfp_dict = self.neutron.list_sfc_port_chains(**filter_dict_os)
4004 sfp_list = sfp_dict["port_chains"]
4005 self.__sfp_os2mano(sfp_list)
4006
4007 return sfp_list
4008 except (
4009 neExceptions.ConnectionFailed,
4010 ksExceptions.ClientException,
4011 neExceptions.NeutronException,
4012 ConnectionError,
4013 ) as e:
4014 self._format_exception(e)
4015
4016 def delete_sfp(self, sfp_id):
4017 self.logger.debug("Deleting Service Function Path '%s' from VIM", sfp_id)
4018
4019 try:
4020 self._reload_connection()
4021 self.neutron.delete_sfc_port_chain(sfp_id)
4022
4023 return sfp_id
4024 except (
4025 neExceptions.ConnectionFailed,
4026 neExceptions.NeutronException,
4027 ksExceptions.ClientException,
4028 neExceptions.NeutronException,
4029 ConnectionError,
4030 ) as e:
4031 self._format_exception(e)
4032
4033 def refresh_sfps_status(self, sfp_list):
4034 """Get the status of the service function path
4035 Params: the list of sfp identifiers
4036 Returns a dictionary with:
4037 vm_id: #VIM id of this service function path
4038 status: #Mandatory. Text with one of:
4039 # DELETED (not found at vim)
4040 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
4041 # OTHER (Vim reported other status not understood)
4042 # ERROR (VIM indicates an ERROR status)
4043 # ACTIVE,
4044 # CREATING (on building process)
4045 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
4046 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)F
4047 """
4048 sfp_dict = {}
4049 self.logger.debug(
4050 "refresh_sfps status: Getting tenant SFP information from VIM"
4051 )
4052
4053 for sfp_id in sfp_list:
4054 sfp = {}
4055
4056 try:
4057 sfp_vim = self.get_sfp(sfp_id)
4058
4059 if sfp_vim["spi"]:
4060 sfp["status"] = vmStatus2manoFormat["ACTIVE"]
4061 else:
4062 sfp["status"] = "OTHER"
4063 sfp["error_msg"] = "VIM status reported " + sfp["status"]
4064
4065 sfp["vim_info"] = self.serialize(sfp_vim)
4066
4067 if sfp_vim.get("fault"):
4068 sfp["error_msg"] = str(sfp_vim["fault"])
4069 except vimconn.VimConnNotFoundException as e:
4070 self.logger.error("Exception getting sfp status: %s", str(e))
4071 sfp["status"] = "DELETED"
4072 sfp["error_msg"] = str(e)
4073 except vimconn.VimConnException as e:
4074 self.logger.error("Exception getting sfp status: %s", str(e))
4075 sfp["status"] = "VIM_ERROR"
4076 sfp["error_msg"] = str(e)
4077
4078 sfp_dict[sfp_id] = sfp
4079
4080 return sfp_dict
4081
4082 def refresh_sfis_status(self, sfi_list):
4083 """Get the status of the service function instances
4084 Params: the list of sfi identifiers
4085 Returns a dictionary with:
4086 vm_id: #VIM id of this service function instance
4087 status: #Mandatory. Text with one of:
4088 # DELETED (not found at vim)
4089 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
4090 # OTHER (Vim reported other status not understood)
4091 # ERROR (VIM indicates an ERROR status)
4092 # ACTIVE,
4093 # CREATING (on building process)
4094 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
4095 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
4096 """
4097 sfi_dict = {}
4098 self.logger.debug(
4099 "refresh_sfis status: Getting tenant sfi information from VIM"
4100 )
4101
4102 for sfi_id in sfi_list:
4103 sfi = {}
4104
4105 try:
4106 sfi_vim = self.get_sfi(sfi_id)
4107
4108 if sfi_vim:
4109 sfi["status"] = vmStatus2manoFormat["ACTIVE"]
4110 else:
4111 sfi["status"] = "OTHER"
4112 sfi["error_msg"] = "VIM status reported " + sfi["status"]
4113
4114 sfi["vim_info"] = self.serialize(sfi_vim)
4115
4116 if sfi_vim.get("fault"):
4117 sfi["error_msg"] = str(sfi_vim["fault"])
4118 except vimconn.VimConnNotFoundException as e:
4119 self.logger.error("Exception getting sfi status: %s", str(e))
4120 sfi["status"] = "DELETED"
4121 sfi["error_msg"] = str(e)
4122 except vimconn.VimConnException as e:
4123 self.logger.error("Exception getting sfi status: %s", str(e))
4124 sfi["status"] = "VIM_ERROR"
4125 sfi["error_msg"] = str(e)
4126
4127 sfi_dict[sfi_id] = sfi
4128
4129 return sfi_dict
4130
4131 def refresh_sfs_status(self, sf_list):
4132 """Get the status of the service functions
4133 Params: the list of sf identifiers
4134 Returns a dictionary with:
4135 vm_id: #VIM id of this service function
4136 status: #Mandatory. Text with one of:
4137 # DELETED (not found at vim)
4138 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
4139 # OTHER (Vim reported other status not understood)
4140 # ERROR (VIM indicates an ERROR status)
4141 # ACTIVE,
4142 # CREATING (on building process)
4143 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
4144 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
4145 """
4146 sf_dict = {}
4147 self.logger.debug("refresh_sfs status: Getting tenant sf information from VIM")
4148
4149 for sf_id in sf_list:
4150 sf = {}
4151
4152 try:
4153 sf_vim = self.get_sf(sf_id)
4154
4155 if sf_vim:
4156 sf["status"] = vmStatus2manoFormat["ACTIVE"]
4157 else:
4158 sf["status"] = "OTHER"
4159 sf["error_msg"] = "VIM status reported " + sf_vim["status"]
4160
4161 sf["vim_info"] = self.serialize(sf_vim)
4162
4163 if sf_vim.get("fault"):
4164 sf["error_msg"] = str(sf_vim["fault"])
4165 except vimconn.VimConnNotFoundException as e:
4166 self.logger.error("Exception getting sf status: %s", str(e))
4167 sf["status"] = "DELETED"
4168 sf["error_msg"] = str(e)
4169 except vimconn.VimConnException as e:
4170 self.logger.error("Exception getting sf status: %s", str(e))
4171 sf["status"] = "VIM_ERROR"
4172 sf["error_msg"] = str(e)
4173
4174 sf_dict[sf_id] = sf
4175
4176 return sf_dict
4177
4178 def refresh_classifications_status(self, classification_list):
4179 """Get the status of the classifications
4180 Params: the list of classification identifiers
4181 Returns a dictionary with:
4182 vm_id: #VIM id of this classifier
4183 status: #Mandatory. Text with one of:
4184 # DELETED (not found at vim)
4185 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
4186 # OTHER (Vim reported other status not understood)
4187 # ERROR (VIM indicates an ERROR status)
4188 # ACTIVE,
4189 # CREATING (on building process)
4190 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
4191 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
4192 """
4193 classification_dict = {}
4194 self.logger.debug(
4195 "refresh_classifications status: Getting tenant classification information from VIM"
4196 )
4197
4198 for classification_id in classification_list:
4199 classification = {}
4200
4201 try:
4202 classification_vim = self.get_classification(classification_id)
4203
4204 if classification_vim:
4205 classification["status"] = vmStatus2manoFormat["ACTIVE"]
4206 else:
4207 classification["status"] = "OTHER"
4208 classification["error_msg"] = (
4209 "VIM status reported " + classification["status"]
4210 )
4211
4212 classification["vim_info"] = self.serialize(classification_vim)
4213
4214 if classification_vim.get("fault"):
4215 classification["error_msg"] = str(classification_vim["fault"])
4216 except vimconn.VimConnNotFoundException as e:
4217 self.logger.error("Exception getting classification status: %s", str(e))
4218 classification["status"] = "DELETED"
4219 classification["error_msg"] = str(e)
4220 except vimconn.VimConnException as e:
4221 self.logger.error("Exception getting classification status: %s", str(e))
4222 classification["status"] = "VIM_ERROR"
4223 classification["error_msg"] = str(e)
4224
4225 classification_dict[classification_id] = classification
4226
4227 return classification_dict
4228
4229 def new_affinity_group(self, affinity_group_data):
4230 """Adds a server group to VIM
4231 affinity_group_data contains a dictionary with information, keys:
4232 name: name in VIM for the server group
4233 type: affinity or anti-affinity
4234 scope: Only nfvi-node allowed
4235 Returns the server group identifier"""
4236 self.logger.debug("Adding Server Group '%s'", str(affinity_group_data))
4237
4238 try:
4239 name = affinity_group_data["name"]
4240 policy = affinity_group_data["type"]
4241
4242 self._reload_connection()
4243 new_server_group = self.nova.server_groups.create(name, policy)
4244
4245 return new_server_group.id
4246 except (
4247 ksExceptions.ClientException,
4248 nvExceptions.ClientException,
4249 ConnectionError,
4250 KeyError,
4251 ) as e:
4252 self._format_exception(e)
4253
4254 def get_affinity_group(self, affinity_group_id):
4255 """Obtain server group details from the VIM. Returns the server group detais as a dict"""
4256 self.logger.debug("Getting flavor '%s'", affinity_group_id)
4257 try:
4258 self._reload_connection()
4259 server_group = self.nova.server_groups.find(id=affinity_group_id)
4260
4261 return server_group.to_dict()
4262 except (
4263 nvExceptions.NotFound,
4264 nvExceptions.ClientException,
4265 ksExceptions.ClientException,
4266 ConnectionError,
4267 ) as e:
4268 self._format_exception(e)
4269
4270 def delete_affinity_group(self, affinity_group_id):
4271 """Deletes a server group from the VIM. Returns the old affinity_group_id"""
4272 self.logger.debug("Getting server group '%s'", affinity_group_id)
4273 try:
4274 self._reload_connection()
4275 self.nova.server_groups.delete(affinity_group_id)
4276
4277 return affinity_group_id
4278 except (
4279 nvExceptions.NotFound,
4280 ksExceptions.ClientException,
4281 nvExceptions.ClientException,
4282 ConnectionError,
4283 ) as e:
4284 self._format_exception(e)
4285
4286 def get_vdu_state(self, vm_id):
4287 """
4288 Getting the state of a vdu
4289 param:
4290 vm_id: ID of an instance
4291 """
4292 self.logger.debug("Getting the status of VM")
4293 self.logger.debug("VIM VM ID %s", vm_id)
4294 self._reload_connection()
4295 server = self.nova.servers.find(id=vm_id)
4296 server_dict = server.to_dict()
4297 vdu_data = [
4298 server_dict["status"],
4299 server_dict["flavor"]["id"],
4300 server_dict["OS-EXT-SRV-ATTR:host"],
4301 server_dict["OS-EXT-AZ:availability_zone"],
4302 ]
4303 self.logger.debug("vdu_data %s", vdu_data)
4304 return vdu_data
4305
4306 def check_compute_availability(self, host, server_flavor_details):
4307 self._reload_connection()
4308 hypervisor_search = self.nova.hypervisors.search(
4309 hypervisor_match=host, servers=True
4310 )
4311 for hypervisor in hypervisor_search:
4312 hypervisor_id = hypervisor.to_dict()["id"]
4313 hypervisor_details = self.nova.hypervisors.get(hypervisor=hypervisor_id)
4314 hypervisor_dict = hypervisor_details.to_dict()
4315 hypervisor_temp = json.dumps(hypervisor_dict)
4316 hypervisor_json = json.loads(hypervisor_temp)
4317 resources_available = [
4318 hypervisor_json["free_ram_mb"],
4319 hypervisor_json["disk_available_least"],
4320 hypervisor_json["vcpus"] - hypervisor_json["vcpus_used"],
4321 ]
4322 compute_available = all(
4323 x > y for x, y in zip(resources_available, server_flavor_details)
4324 )
4325 if compute_available:
4326 return host
4327
    def check_availability_zone(
        self, old_az, server_flavor_details, old_host, host=None
    ):
        """Check that a migration target is available in the original availability zone.

        :param old_az: availability zone the instance currently runs in
        :param server_flavor_details: list [ram, disk, vcpus] required by the flavor
        :param old_host: hypervisor the instance currently runs on
        :param host: optional explicit target host; when None, any host of the
            zone other than old_host is considered
        :returns: dict with keys:
            zone_check: True when the availability-zone constraint is satisfied
            compute_availability: a target host with enough resources, or None
        """
        self._reload_connection()
        az_check = {"zone_check": False, "compute_availability": None}
        aggregates_list = self.nova.aggregates.list()
        for aggregate in aggregates_list:
            aggregate_details = aggregate.to_dict()
            aggregate_temp = json.dumps(aggregate_details)
            aggregate_json = json.loads(aggregate_temp)
            if aggregate_json["availability_zone"] == old_az:
                hosts_list = aggregate_json["hosts"]
                if host is not None:
                    # An explicit target host was requested: it must belong to
                    # the same availability zone and have enough free resources.
                    if host in hosts_list:
                        az_check["zone_check"] = True
                        available_compute_id = self.check_compute_availability(
                            host, server_flavor_details
                        )
                        if available_compute_id is not None:
                            az_check["compute_availability"] = available_compute_id
                else:
                    # No target host given: pick the first host of the zone,
                    # other than the current one, with enough free resources.
                    for check_host in hosts_list:
                        if check_host != old_host:
                            available_compute_id = self.check_compute_availability(
                                check_host, server_flavor_details
                            )
                            if available_compute_id is not None:
                                az_check["zone_check"] = True
                                az_check["compute_availability"] = available_compute_id
                                break
                    else:
                        # for-else: loop ended without break — the zone matches
                        # even though no host with enough capacity was found.
                        az_check["zone_check"] = True
        return az_check
4361
    def migrate_instance(self, vm_id, compute_host=None):
        """
        Migrate a vdu
        param:
            vm_id: ID of an instance
            compute_host: Host to migrate the vdu to

        Returns a tuple (state, target_host) on success; raises
        vimconn.VimConnException when the migration is not possible or fails.
        """
        self._reload_connection()
        # Tracks whether the instance reached ACTIVE again after the migration
        vm_state = False
        # get_vdu_state returns [status, flavor_id, host, availability_zone]
        instance_state = self.get_vdu_state(vm_id)
        server_flavor_id = instance_state[1]
        server_hypervisor_name = instance_state[2]
        server_availability_zone = instance_state[3]
        try:
            # Resources required by the instance's flavor: [ram, disk, vcpus]
            server_flavor = self.nova.flavors.find(id=server_flavor_id).to_dict()
            server_flavor_details = [
                server_flavor["ram"],
                server_flavor["disk"],
                server_flavor["vcpus"],
            ]
            # Migrating onto the host the instance already runs on is an error
            if compute_host == server_hypervisor_name:
                raise vimconn.VimConnException(
                    "Unable to migrate instance '{}' to the same host '{}'".format(
                        vm_id, compute_host
                    ),
                    http_code=vimconn.HTTP_Bad_Request,
                )
            # The target (explicit host or any candidate) must stay in the same
            # availability zone and have enough free resources
            az_status = self.check_availability_zone(
                server_availability_zone,
                server_flavor_details,
                server_hypervisor_name,
                compute_host,
            )
            availability_zone_check = az_status["zone_check"]
            available_compute_id = az_status.get("compute_availability")

            if availability_zone_check is False:
                raise vimconn.VimConnException(
                    "Unable to migrate instance '{}' to a different availability zone".format(
                        vm_id
                    ),
                    http_code=vimconn.HTTP_Bad_Request,
                )
            if available_compute_id is not None:
                # Block live migration, no disk over-commit on the target
                self.nova.servers.live_migrate(
                    server=vm_id,
                    host=available_compute_id,
                    block_migration=True,
                    disk_over_commit=False,
                )
                state = "MIGRATING"
                changed_compute_host = ""
                if state == "MIGRATING":
                    # Wait until the instance is ACTIVE, then re-read its host
                    vm_state = self.__wait_for_vm(vm_id, "ACTIVE")
                    changed_compute_host = self.get_vdu_state(vm_id)[2]
                if vm_state and changed_compute_host == available_compute_id:
                    self.logger.debug(
                        "Instance '{}' migrated to the new compute host '{}'".format(
                            vm_id, changed_compute_host
                        )
                    )
                    return state, available_compute_id
                else:
                    raise vimconn.VimConnException(
                        "Migration Failed. Instance '{}' not moved to the new host {}".format(
                            vm_id, available_compute_id
                        ),
                        http_code=vimconn.HTTP_Bad_Request,
                    )
            else:
                raise vimconn.VimConnException(
                    "Compute '{}' not available or does not have enough resources to migrate the instance".format(
                        available_compute_id
                    ),
                    http_code=vimconn.HTTP_Bad_Request,
                )
        except (
            nvExceptions.BadRequest,
            nvExceptions.ClientException,
            nvExceptions.NotFound,
        ) as e:
            self._format_exception(e)
4444
    def resize_instance(self, vm_id, new_flavor_id):
        """
        For resizing the vm based on the given
        flavor details
        param:
            vm_id : ID of an instance
            new_flavor_id : Flavor id to be resized
        Return the status of a resized instance
        """
        self._reload_connection()
        self.logger.debug("resize the flavor of an instance")
        instance_status, old_flavor_id, compute_host, az = self.get_vdu_state(vm_id)
        # A resize must not shrink the disk: compare old and new disk sizes
        old_flavor_disk = self.nova.flavors.find(id=old_flavor_id).to_dict()["disk"]
        new_flavor_disk = self.nova.flavors.find(id=new_flavor_id).to_dict()["disk"]
        try:
            # Nova only allows resizing instances that are ACTIVE or SHUTOFF
            if instance_status == "ACTIVE" or instance_status == "SHUTOFF":
                if old_flavor_disk > new_flavor_disk:
                    raise nvExceptions.BadRequest(
                        400,
                        message="Server disk resize failed. Resize to lower disk flavor is not allowed",
                    )
                else:
                    self.nova.servers.resize(server=vm_id, flavor=new_flavor_id)
                    # The instance goes to VERIFY_RESIZE; confirm it to finish
                    vm_state = self.__wait_for_vm(vm_id, "VERIFY_RESIZE")
                    if vm_state:
                        instance_resized_status = self.confirm_resize(vm_id)
                        return instance_resized_status
                    else:
                        raise nvExceptions.BadRequest(
                            409,
                            message="Cannot 'resize' vm_state is in ERROR",
                        )

            else:
                self.logger.debug("ERROR : Instance is not in ACTIVE or SHUTOFF state")
                raise nvExceptions.BadRequest(
                    409,
                    message="Cannot 'resize' instance while it is in vm_state resized",
                )
        except (
            nvExceptions.BadRequest,
            nvExceptions.ClientException,
            nvExceptions.NotFound,
        ) as e:
            self._format_exception(e)
4490
4491 def confirm_resize(self, vm_id):
4492 """
4493 Confirm the resize of an instance
4494 param:
4495 vm_id: ID of an instance
4496 """
4497 self._reload_connection()
4498 self.nova.servers.confirm_resize(server=vm_id)
4499 if self.get_vdu_state(vm_id)[0] == "VERIFY_RESIZE":
4500 self.__wait_for_vm(vm_id, "ACTIVE")
4501 instance_status = self.get_vdu_state(vm_id)[0]
4502 return instance_status