# -*- coding: utf-8 -*-

##
# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
# This file is part of openmano
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
##

21 """
22 osconnector implements all the methods to interact with openstack using the python-neutronclient.
23
24 For the VNF forwarding graph, The OpenStack VIM connector calls the
25 networking-sfc Neutron extension methods, whose resources are mapped
26 to the VIM connector's SFC resources as follows:
27 - Classification (OSM) -> Flow Classifier (Neutron)
28 - Service Function Instance (OSM) -> Port Pair (Neutron)
29 - Service Function (OSM) -> Port Pair Group (Neutron)
30 - Service Function Path (OSM) -> Port Chain (Neutron)
31 """

import copy
from http.client import HTTPException
import json
import logging
from pprint import pformat
import random
import re
import time
from typing import Dict, List, Optional, Tuple

from cinderclient import client as cClient
from glanceclient import client as glClient
import glanceclient.exc as gl1Exceptions
from keystoneauth1 import session
from keystoneauth1.identity import v2, v3
import keystoneclient.exceptions as ksExceptions
import keystoneclient.v2_0.client as ksClient_v2
import keystoneclient.v3.client as ksClient_v3
import netaddr
from neutronclient.common import exceptions as neExceptions
from neutronclient.neutron import client as neClient
from novaclient import client as nClient, exceptions as nvExceptions
from osm_ro_plugin import vimconn
from requests.exceptions import ConnectionError
import yaml

__author__ = "Alfonso Tierno, Gerardo Garcia, Pablo Montes, xFlow Research, Igor D.C., Eduardo Sousa"
__date__ = "$22-sep-2017 23:59:59$"

"""maps the openstack virtual machine status to the openmano status"""
vmStatus2manoFormat = {
    "ACTIVE": "ACTIVE",
    "PAUSED": "PAUSED",
    "SUSPENDED": "SUSPENDED",
    "SHUTOFF": "INACTIVE",
    "BUILD": "BUILD",
    "ERROR": "ERROR",
    "DELETED": "DELETED",
}
netStatus2manoFormat = {
    "ACTIVE": "ACTIVE",
    "PAUSED": "PAUSED",
    "INACTIVE": "INACTIVE",
    "BUILD": "BUILD",
    "ERROR": "ERROR",
    "DELETED": "DELETED",
}

supportedClassificationTypes = ["legacy_flow_classifier"]

# global timeouts (in seconds) used when creating/deleting volumes and servers
volume_timeout = 1800
server_timeout = 1800


class SafeDumper(yaml.SafeDumper):
    def represent_data(self, data):
        # Openstack APIs use custom subclasses of dict and YAML safe dumper
        # is designed to not handle that (reference issue 142 of pyyaml)
        if isinstance(data, dict) and data.__class__ != dict:
            # A simple solution is to convert those items back to dicts
            data = dict(data.items())

        return super(SafeDumper, self).represent_data(data)

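# Minimal sketch of why SafeDumper converts dict subclasses (assumption: any
# mapping subclass, such as those returned by the OpenStack clients, would
# otherwise be rejected by yaml.SafeDumper with a RepresenterError):
#
#   class FakeApiDict(dict):  # stands in for an OpenStack client response type
#       pass
#
#   yaml.dump(FakeApiDict(a=1), Dumper=SafeDumper)       # -> "a: 1\n"
#   yaml.dump(FakeApiDict(a=1), Dumper=yaml.SafeDumper)  # raises RepresenterError
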
class vimconnector(vimconn.VimConnector):
    def __init__(
        self,
        uuid,
        name,
        tenant_id,
        tenant_name,
        url,
        url_admin=None,
        user=None,
        passwd=None,
        log_level=None,
        config={},
        persistent_info={},
    ):
        """using common constructor parameters. In this case
        'url' is the keystone authorization url,
        'url_admin' is not used
        """
        api_version = config.get("APIversion")

        if api_version and api_version not in ("v3.3", "v2.0", "2", "3"):
            raise vimconn.VimConnException(
                "Invalid value '{}' for config:APIversion. "
                "Allowed values are 'v3.3', 'v2.0', '2' or '3'".format(api_version)
            )

        vim_type = config.get("vim_type")

        if vim_type and vim_type not in ("vio", "VIO"):
            raise vimconn.VimConnException(
                "Invalid value '{}' for config:vim_type. "
                "Allowed values are 'vio' or 'VIO'".format(vim_type)
            )

        if config.get("dataplane_net_vlan_range") is not None:
            # validate vlan ranges provided by user
            self._validate_vlan_ranges(
                config.get("dataplane_net_vlan_range"), "dataplane_net_vlan_range"
            )

        if config.get("multisegment_vlan_range") is not None:
            # validate vlan ranges provided by user
            self._validate_vlan_ranges(
                config.get("multisegment_vlan_range"), "multisegment_vlan_range"
            )

        vimconn.VimConnector.__init__(
            self,
            uuid,
            name,
            tenant_id,
            tenant_name,
            url,
            url_admin,
            user,
            passwd,
            log_level,
            config,
        )

        if self.config.get("insecure") and self.config.get("ca_cert"):
            raise vimconn.VimConnException(
                "options insecure and ca_cert are mutually exclusive"
            )

        self.verify = True

        if self.config.get("insecure"):
            self.verify = False

        if self.config.get("ca_cert"):
            self.verify = self.config.get("ca_cert")

        if not url:
            raise TypeError("url param can not be NoneType")

        self.persistent_info = persistent_info
        self.availability_zone = persistent_info.get("availability_zone", None)
        self.session = persistent_info.get("session", {"reload_client": True})
        self.my_tenant_id = self.session.get("my_tenant_id")
        self.nova = self.session.get("nova")
        self.neutron = self.session.get("neutron")
        self.cinder = self.session.get("cinder")
        self.glance = self.session.get("glance")
        # self.glancev1 = self.session.get("glancev1")
        self.keystone = self.session.get("keystone")
        self.api_version3 = self.session.get("api_version3")
        self.vim_type = self.config.get("vim_type")

        if self.vim_type:
            self.vim_type = self.vim_type.upper()

        if self.config.get("use_internal_endpoint"):
            self.endpoint_type = "internalURL"
        else:
            self.endpoint_type = None

        logging.getLogger("urllib3").setLevel(logging.WARNING)
        logging.getLogger("keystoneauth").setLevel(logging.WARNING)
        logging.getLogger("novaclient").setLevel(logging.WARNING)
        self.logger = logging.getLogger("ro.vim.openstack")

        # allow security_groups to be a list or a single string
        if isinstance(self.config.get("security_groups"), str):
            self.config["security_groups"] = [self.config["security_groups"]]

        self.security_groups_id = None

        # ###### VIO Specific Changes #########
        if self.vim_type == "VIO":
            self.logger = logging.getLogger("ro.vim.vio")

        if log_level:
            self.logger.setLevel(getattr(logging, log_level))

    def __getitem__(self, index):
        """Get individual parameters.
        Throws KeyError"""
        if index == "project_domain_id":
            return self.config.get("project_domain_id")
        elif index == "user_domain_id":
            return self.config.get("user_domain_id")
        else:
            return vimconn.VimConnector.__getitem__(self, index)

    def __setitem__(self, index, value):
        """Set individual parameters and mark the session as dirty to force a connection reload.
        Throws KeyError"""
        if index == "project_domain_id":
            self.config["project_domain_id"] = value
        elif index == "user_domain_id":
            self.config["user_domain_id"] = value
        else:
            vimconn.VimConnector.__setitem__(self, index, value)

        self.session["reload_client"] = True

    def serialize(self, value):
        """Serialization of python basic types.

        In the case value is not serializable a message will be logged and a
        simple representation of the data that cannot be converted back to
        python is returned.
        """
        if isinstance(value, str):
            return value

        try:
            return yaml.dump(
                value, Dumper=SafeDumper, default_flow_style=True, width=256
            )
        except yaml.representer.RepresenterError:
            self.logger.debug(
                "The following entity cannot be serialized in YAML:\n\n%s\n\n",
                pformat(value),
                exc_info=True,
            )

            return str(value)

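    # Illustrative sketch (not executed by the driver): serialize() falls back
    # to str() only for objects that SafeDumper cannot represent, e.g.:
    #
    #   conn.serialize({"id": "abc", "ports": [1, 2]})
    #   # -> '{id: abc, ports: [1, 2]}\n'   (single-line YAML, flow style)
    #   conn.serialize("already-a-string")
    #   # -> 'already-a-string'             (returned unchanged, no YAML quoting)
    #
    # The "conn" name above is hypothetical; any vimconnector instance behaves
    # the same way.
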
    def _reload_connection(self):
        """Called before any operation, it checks whether credentials have changed
        Throws keystoneclient.apiclient.exceptions.AuthorizationFailure
        """
        # TODO control the timing and possible token timeout, but it seems that the python client does this task for us :-)
        if self.session["reload_client"]:
            if self.config.get("APIversion"):
                self.api_version3 = (
                    self.config["APIversion"] == "v3.3"
                    or self.config["APIversion"] == "3"
                )
            else:  # get from the auth_url ending, which ends with v3 or with v2.0
                self.api_version3 = self.url.endswith("/v3") or self.url.endswith(
                    "/v3/"
                )

            self.session["api_version3"] = self.api_version3

            if self.api_version3:
                if self.config.get("project_domain_id") or self.config.get(
                    "project_domain_name"
                ):
                    project_domain_id_default = None
                else:
                    project_domain_id_default = "default"

                if self.config.get("user_domain_id") or self.config.get(
                    "user_domain_name"
                ):
                    user_domain_id_default = None
                else:
                    user_domain_id_default = "default"

                auth = v3.Password(
                    auth_url=self.url,
                    username=self.user,
                    password=self.passwd,
                    project_name=self.tenant_name,
                    project_id=self.tenant_id,
                    project_domain_id=self.config.get(
                        "project_domain_id", project_domain_id_default
                    ),
                    user_domain_id=self.config.get(
                        "user_domain_id", user_domain_id_default
                    ),
                    project_domain_name=self.config.get("project_domain_name"),
                    user_domain_name=self.config.get("user_domain_name"),
                )
            else:
                auth = v2.Password(
                    auth_url=self.url,
                    username=self.user,
                    password=self.passwd,
                    tenant_name=self.tenant_name,
                    tenant_id=self.tenant_id,
                )

            sess = session.Session(auth=auth, verify=self.verify)
            # added region_name to keystone, nova, neutron and cinder to support distributed cloud for Wind River
            # Titanium cloud and StarlingX
            region_name = self.config.get("region_name")

            if self.api_version3:
                self.keystone = ksClient_v3.Client(
                    session=sess,
                    endpoint_type=self.endpoint_type,
                    region_name=region_name,
                )
            else:
                self.keystone = ksClient_v2.Client(
                    session=sess, endpoint_type=self.endpoint_type
                )

            self.session["keystone"] = self.keystone
            # In order to enable microversion functionality an explicit microversion must be specified in "config".
            # This implementation approach is due to the warning message in
            # https://developer.openstack.org/api-guide/compute/microversions.html
            # where it is stated that microversion backwards compatibility is not guaranteed and clients should
            # always require a specific microversion.
            # To be able to use "device role tagging" functionality, define "microversion: 2.32" in the datacenter
            # config
            version = self.config.get("microversion")

            if not version:
                version = "2.1"

            # added region_name to keystone, nova, neutron and cinder to support distributed cloud for Wind River
            # Titanium cloud and StarlingX
            self.nova = self.session["nova"] = nClient.Client(
                str(version),
                session=sess,
                endpoint_type=self.endpoint_type,
                region_name=region_name,
            )
            self.neutron = self.session["neutron"] = neClient.Client(
                "2.0",
                session=sess,
                endpoint_type=self.endpoint_type,
                region_name=region_name,
            )

            if sess.get_all_version_data(service_type="volumev2"):
                self.cinder = self.session["cinder"] = cClient.Client(
                    2,
                    session=sess,
                    endpoint_type=self.endpoint_type,
                    region_name=region_name,
                )
            else:
                self.cinder = self.session["cinder"] = cClient.Client(
                    3,
                    session=sess,
                    endpoint_type=self.endpoint_type,
                    region_name=region_name,
                )

            try:
                self.my_tenant_id = self.session["my_tenant_id"] = sess.get_project_id()
            except Exception:
                self.logger.error("Cannot get project_id from session", exc_info=True)

            if self.endpoint_type == "internalURL":
                glance_service_id = self.keystone.services.list(name="glance")[0].id
                glance_endpoint = self.keystone.endpoints.list(
                    glance_service_id, interface="internal"
                )[0].url
            else:
                glance_endpoint = None

            self.glance = self.session["glance"] = glClient.Client(
                2, session=sess, endpoint=glance_endpoint
            )
            # using version 1 of glance client in new_image()
            # self.glancev1 = self.session["glancev1"] = glClient.Client("1", session=sess,
            #                                                            endpoint=glance_endpoint)
            self.session["reload_client"] = False
            self.persistent_info["session"] = self.session
            # add availability zone info inside self.persistent_info
            self._set_availablity_zones()
            self.persistent_info["availability_zone"] = self.availability_zone
            # force to get again security_groups_ids next time they are needed
            self.security_groups_id = None

    def __net_os2mano(self, net_list_dict):
        """Transform the net openstack format to mano format
        net_list_dict can be a list of dict or a single dict"""
        if type(net_list_dict) is dict:
            net_list_ = (net_list_dict,)
        elif type(net_list_dict) is list:
            net_list_ = net_list_dict
        else:
            raise TypeError("param net_list_dict must be a list or a dictionary")

        for net in net_list_:
            if net.get("provider:network_type") == "vlan":
                net["type"] = "data"
            else:
                net["type"] = "bridge"

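    # Illustrative sketch (hypothetical data, not executed by the driver):
    #
    #   net = {"id": "1", "provider:network_type": "vlan"}
    #   self.__net_os2mano(net)  # mutates in place
    #   # net["type"] is now "data"; any other network_type maps to "bridge"
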
    def __classification_os2mano(self, class_list_dict):
        """Transform the openstack format (Flow Classifier) to mano format
        (Classification) class_list_dict can be a list of dict or a single dict
        """
        if isinstance(class_list_dict, dict):
            class_list_ = [class_list_dict]
        elif isinstance(class_list_dict, list):
            class_list_ = class_list_dict
        else:
            raise TypeError("param class_list_dict must be a list or a dictionary")

        for classification in class_list_:
            id = classification.pop("id")
            name = classification.pop("name")
            description = classification.pop("description")
            project_id = classification.pop("project_id")
            tenant_id = classification.pop("tenant_id")
            original_classification = copy.deepcopy(classification)
            classification.clear()
            classification["ctype"] = "legacy_flow_classifier"
            classification["definition"] = original_classification
            classification["id"] = id
            classification["name"] = name
            classification["description"] = description
            classification["project_id"] = project_id
            classification["tenant_id"] = tenant_id

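    # Illustrative sketch (hypothetical Neutron flow classifier, not executed by
    # the driver): every field other than id/name/description/project_id/tenant_id
    # is moved under "definition":
    #
    #   fc = {"id": "fc1", "name": "c1", "description": "", "project_id": "p",
    #         "tenant_id": "p", "protocol": "tcp", "destination_port_range_min": 80}
    #   self.__classification_os2mano(fc)
    #   # fc == {"ctype": "legacy_flow_classifier",
    #   #        "definition": {"protocol": "tcp", "destination_port_range_min": 80},
    #   #        "id": "fc1", "name": "c1", "description": "",
    #   #        "project_id": "p", "tenant_id": "p"}
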
    def __sfi_os2mano(self, sfi_list_dict):
        """Transform the openstack format (Port Pair) to mano format (SFI)
        sfi_list_dict can be a list of dict or a single dict
        """
        if isinstance(sfi_list_dict, dict):
            sfi_list_ = [sfi_list_dict]
        elif isinstance(sfi_list_dict, list):
            sfi_list_ = sfi_list_dict
        else:
            raise TypeError("param sfi_list_dict must be a list or a dictionary")

        for sfi in sfi_list_:
            sfi["ingress_ports"] = []
            sfi["egress_ports"] = []

            if sfi.get("ingress"):
                sfi["ingress_ports"].append(sfi["ingress"])

            if sfi.get("egress"):
                sfi["egress_ports"].append(sfi["egress"])

            del sfi["ingress"]
            del sfi["egress"]

            params = sfi.get("service_function_parameters")
            sfc_encap = False

            if params:
                correlation = params.get("correlation")

                if correlation:
                    sfc_encap = True

            sfi["sfc_encap"] = sfc_encap
            del sfi["service_function_parameters"]

    def __sf_os2mano(self, sf_list_dict):
        """Transform the openstack format (Port Pair Group) to mano format (SF)
        sf_list_dict can be a list of dict or a single dict
        """
        if isinstance(sf_list_dict, dict):
            sf_list_ = [sf_list_dict]
        elif isinstance(sf_list_dict, list):
            sf_list_ = sf_list_dict
        else:
            raise TypeError("param sf_list_dict must be a list or a dictionary")

        for sf in sf_list_:
            del sf["port_pair_group_parameters"]
            sf["sfis"] = sf["port_pairs"]
            del sf["port_pairs"]

    def __sfp_os2mano(self, sfp_list_dict):
        """Transform the openstack format (Port Chain) to mano format (SFP)
        sfp_list_dict can be a list of dict or a single dict
        """
        if isinstance(sfp_list_dict, dict):
            sfp_list_ = [sfp_list_dict]
        elif isinstance(sfp_list_dict, list):
            sfp_list_ = sfp_list_dict
        else:
            raise TypeError("param sfp_list_dict must be a list or a dictionary")

        for sfp in sfp_list_:
            params = sfp.pop("chain_parameters")
            sfc_encap = False

            if params:
                correlation = params.get("correlation")

                if correlation:
                    sfc_encap = True

            sfp["sfc_encap"] = sfc_encap
            sfp["spi"] = sfp.pop("chain_id")
            sfp["classifications"] = sfp.pop("flow_classifiers")
            sfp["service_functions"] = sfp.pop("port_pair_groups")

    # placeholder for now; read TODO note below
    def _validate_classification(self, type, definition):
        # only legacy_flow_classifier Type is supported at this point
        return True
        # TODO(igordcard): this method should be an abstract method of an
        # abstract Classification class to be implemented by the specific
        # Types. Also, abstract vimconnector should call the validation
        # method before the implemented VIM connectors are called.

    def _format_exception(self, exception):
        """Transform a keystone, nova, neutron exception into a vimconn exception discovering the cause"""
        message_error = str(exception)
        tip = ""

        if isinstance(
            exception,
            (
                neExceptions.NetworkNotFoundClient,
                nvExceptions.NotFound,
                ksExceptions.NotFound,
                gl1Exceptions.HTTPNotFound,
            ),
        ):
            raise vimconn.VimConnNotFoundException(
                type(exception).__name__ + ": " + message_error
            )
        elif isinstance(
            exception,
            (
                HTTPException,
                gl1Exceptions.HTTPException,
                gl1Exceptions.CommunicationError,
                ConnectionError,
                ksExceptions.ConnectionError,
                neExceptions.ConnectionFailed,
            ),
        ):
            if type(exception).__name__ == "SSLError":
                tip = " (maybe option 'insecure' must be added to the VIM)"

            raise vimconn.VimConnConnectionException(
                "Invalid URL or credentials{}: {}".format(tip, message_error)
            )
        elif isinstance(
            exception,
            (
                KeyError,
                nvExceptions.BadRequest,
                ksExceptions.BadRequest,
            ),
        ):
            raise vimconn.VimConnException(
                type(exception).__name__ + ": " + message_error
            )
        elif isinstance(
            exception,
            (
                nvExceptions.ClientException,
                ksExceptions.ClientException,
                neExceptions.NeutronException,
            ),
        ):
            raise vimconn.VimConnUnexpectedResponse(
                type(exception).__name__ + ": " + message_error
            )
        elif isinstance(exception, nvExceptions.Conflict):
            raise vimconn.VimConnConflictException(
                type(exception).__name__ + ": " + message_error
            )
        elif isinstance(exception, vimconn.VimConnException):
            raise exception
        else:  # any other exception type not matched above
            self.logger.error("General Exception " + message_error, exc_info=True)

            raise vimconn.VimConnConnectionException(
                type(exception).__name__ + ": " + message_error
            )

    def _get_ids_from_name(self):
        """
        Obtain ids from the names of tenant and security_groups. Store at self.security_groups_id
        :return: None
        """
        # get tenant_id if only tenant_name is supplied
        self._reload_connection()

        if not self.my_tenant_id:
            raise vimconn.VimConnConnectionException(
                "Error getting tenant information from name={} id={}".format(
                    self.tenant_name, self.tenant_id
                )
            )

        if self.config.get("security_groups") and not self.security_groups_id:
            # convert from name to id
            neutron_sg_list = self.neutron.list_security_groups(
                tenant_id=self.my_tenant_id
            )["security_groups"]

            self.security_groups_id = []
            for sg in self.config.get("security_groups"):
                for neutron_sg in neutron_sg_list:
                    if sg in (neutron_sg["id"], neutron_sg["name"]):
                        self.security_groups_id.append(neutron_sg["id"])
                        break
                else:
                    self.security_groups_id = None

                    raise vimconn.VimConnConnectionException(
                        "Not found security group {} for this tenant".format(sg)
                    )

    def check_vim_connectivity(self):
        # just get network list to check connectivity and credentials
        self.get_network_list(filter_dict={})

    def get_tenant_list(self, filter_dict={}):
        """Obtain tenants of VIM
        filter_dict can contain the following keys:
            name: filter by tenant name
            id: filter by tenant uuid/id
            <other VIM specific>
        Returns the tenant list of dictionaries: [{'name': '<name>', 'id': '<id>', ...}, ...]
        """
        self.logger.debug("Getting tenants from VIM filter: '%s'", str(filter_dict))

        try:
            self._reload_connection()

            if self.api_version3:
                project_class_list = self.keystone.projects.list(
                    name=filter_dict.get("name")
                )
            else:
                project_class_list = self.keystone.tenants.findall(**filter_dict)

            project_list = []

            for project in project_class_list:
                if filter_dict.get("id") and filter_dict["id"] != project.id:
                    continue

                project_list.append(project.to_dict())

            return project_list
        except (
            ksExceptions.ConnectionError,
            ksExceptions.ClientException,
            ConnectionError,
        ) as e:
            self._format_exception(e)

    def new_tenant(self, tenant_name, tenant_description):
        """Adds a new tenant to openstack VIM. Returns the tenant identifier"""
        self.logger.debug("Adding a new tenant name: %s", tenant_name)

        try:
            self._reload_connection()

            if self.api_version3:
                project = self.keystone.projects.create(
                    tenant_name,
                    self.config.get("project_domain_id", "default"),
                    description=tenant_description,
                    is_domain=False,
                )
            else:
                project = self.keystone.tenants.create(tenant_name, tenant_description)

            return project.id
        except (
            ksExceptions.ConnectionError,
            ksExceptions.ClientException,
            ksExceptions.BadRequest,
            ConnectionError,
        ) as e:
            self._format_exception(e)

    def delete_tenant(self, tenant_id):
        """Delete a tenant from openstack VIM. Returns the old tenant identifier"""
        self.logger.debug("Deleting tenant %s from VIM", tenant_id)

        try:
            self._reload_connection()

            if self.api_version3:
                self.keystone.projects.delete(tenant_id)
            else:
                self.keystone.tenants.delete(tenant_id)

            return tenant_id
        except (
            ksExceptions.ConnectionError,
            ksExceptions.ClientException,
            ksExceptions.NotFound,
            ConnectionError,
        ) as e:
            self._format_exception(e)

    def new_network(
        self,
        net_name,
        net_type,
        ip_profile=None,
        shared=False,
        provider_network_profile=None,
    ):
        """Adds a tenant network to VIM
        Params:
            'net_name': name of the network
            'net_type': one of:
                'bridge': overlay isolated network
                'data': underlay E-LAN network for Passthrough and SRIOV interfaces
                'ptp': underlay E-LINE network for Passthrough and SRIOV interfaces.
            'ip_profile': is a dict containing the IP parameters of the network
                'ip_version': can be "IPv4" or "IPv6" (Currently only IPv4 is implemented)
                'subnet_address': ip_prefix_schema, that is X.X.X.X/Y
                'gateway_address': (Optional) ip_schema, that is X.X.X.X
                'dns_address': (Optional) ';'-separated list of ip_schema, e.g. X.X.X.X[;X.X.X.X]
                'dhcp_enabled': True or False
                'dhcp_start_address': ip_schema, first IP to grant
                'dhcp_count': number of IPs to grant.
            'shared': if this network can be seen/used by other tenants/organizations
            'provider_network_profile': (optional) contains {segmentation-id: vlan, network-type: vlan|vxlan,
                physical-network: physnet-label}
        Returns a tuple with the network identifier and created_items, or raises an exception on error
            created_items can be None or a dictionary where this method can include key-values that will be passed to
            the method delete_network. Can be used to store created segments, created l2gw connections, etc.
            Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
            as not present.
        """
        self.logger.debug(
            "Adding a new network to VIM name '%s', type '%s'", net_name, net_type
        )
        # self.logger.debug(">>>>>>>>>>>>>>>>>> IP profile %s", str(ip_profile))

        # initialized before the try block so that the exception handler can
        # always reference them
        new_net = None
        created_items = {}

        try:
            vlan = None

            if provider_network_profile:
                vlan = provider_network_profile.get("segmentation-id")

            self._reload_connection()
            network_dict = {"name": net_name, "admin_state_up": True}

            if net_type in ("data", "ptp") or provider_network_profile:
                provider_physical_network = None

                if provider_network_profile and provider_network_profile.get(
                    "physical-network"
                ):
                    provider_physical_network = provider_network_profile.get(
                        "physical-network"
                    )

                # provider-network must be one of the dataplane_physical_net values if this is a list.
                # If it is a string or not declared, just skip the check
                if (
                    isinstance(
                        self.config.get("dataplane_physical_net"), (tuple, list)
                    )
                    and provider_physical_network
                    not in self.config["dataplane_physical_net"]
                ):
                    raise vimconn.VimConnConflictException(
                        "Invalid parameter 'provider-network:physical-network' "
                        "for network creation. '{}' is not one of the declared "
                        "list at VIM_config:dataplane_physical_net".format(
                            provider_physical_network
                        )
                    )

                # use the default dataplane_physical_net
                if not provider_physical_network:
                    provider_physical_network = self.config.get(
                        "dataplane_physical_net"
                    )

                    # if it is a non-empty list, use the first value. If it is a string use the value directly
                    if (
                        isinstance(provider_physical_network, (tuple, list))
                        and provider_physical_network
                    ):
                        provider_physical_network = provider_physical_network[0]

                if not provider_physical_network:
                    raise vimconn.VimConnConflictException(
                        "missing information needed for underlay networks. Provide "
                        "'dataplane_physical_net' configuration at VIM or use the NS "
                        "instantiation parameter 'provider-network.physical-network'"
                        " for the VLD"
                    )

                if not self.config.get("multisegment_support"):
                    network_dict[
                        "provider:physical_network"
                    ] = provider_physical_network

                    if (
                        provider_network_profile
                        and "network-type" in provider_network_profile
                    ):
                        network_dict[
                            "provider:network_type"
                        ] = provider_network_profile["network-type"]
                    else:
                        network_dict["provider:network_type"] = self.config.get(
                            "dataplane_network_type", "vlan"
                        )

                    if vlan:
                        network_dict["provider:segmentation_id"] = vlan
                else:
                    # Multi-segment case
                    segment_list = []
                    segment1_dict = {
                        "provider:physical_network": "",
                        "provider:network_type": "vxlan",
                    }
                    segment_list.append(segment1_dict)
                    segment2_dict = {
                        "provider:physical_network": provider_physical_network,
                        "provider:network_type": "vlan",
                    }

                    # keep the chosen VLAN id in vlanID so that the l2gw block
                    # below can always reference it
                    vlanID = vlan

                    if vlanID:
                        segment2_dict["provider:segmentation_id"] = vlanID
                    elif self.config.get("multisegment_vlan_range"):
                        vlanID = self._generate_multisegment_vlanID()
                        segment2_dict["provider:segmentation_id"] = vlanID

                    # else
                    #     raise vimconn.VimConnConflictException(
                    #         "You must provide 'multisegment_vlan_range' at config dict before creating a multisegment
                    #         network")
                    segment_list.append(segment2_dict)
                    network_dict["segments"] = segment_list

                # VIO Specific Changes. It needs a concrete VLAN
                if self.vim_type == "VIO" and vlan is None:
                    if self.config.get("dataplane_net_vlan_range") is None:
                        raise vimconn.VimConnConflictException(
                            "You must provide 'dataplane_net_vlan_range' in format "
                            "[start_ID - end_ID] at VIM_config for creating underlay "
                            "networks"
                        )

                    network_dict["provider:segmentation_id"] = self._generate_vlanID()

            network_dict["shared"] = shared

            if self.config.get("disable_network_port_security"):
                network_dict["port_security_enabled"] = False

            if self.config.get("neutron_availability_zone_hints"):
                hints = self.config.get("neutron_availability_zone_hints")

                if isinstance(hints, str):
                    hints = [hints]

                network_dict["availability_zone_hints"] = hints

            new_net = self.neutron.create_network({"network": network_dict})
            # print new_net
            # create subnetwork, even if there is no profile

            if not ip_profile:
                ip_profile = {}

            if not ip_profile.get("subnet_address"):
                # Fake subnet is required
                subnet_rand = random.randint(0, 255)
                ip_profile["subnet_address"] = "192.168.{}.0/24".format(subnet_rand)

            if "ip_version" not in ip_profile:
                ip_profile["ip_version"] = "IPv4"

            subnet = {
                "name": net_name + "-subnet",
                "network_id": new_net["network"]["id"],
                "ip_version": 4 if ip_profile["ip_version"] == "IPv4" else 6,
                "cidr": ip_profile["subnet_address"],
            }

            # Gateway should be set to None if not needed. Otherwise openstack assigns one by default
            if ip_profile.get("gateway_address"):
                subnet["gateway_ip"] = ip_profile["gateway_address"]
            else:
                subnet["gateway_ip"] = None

            if ip_profile.get("dns_address"):
                subnet["dns_nameservers"] = ip_profile["dns_address"].split(";")

            if "dhcp_enabled" in ip_profile:
                subnet["enable_dhcp"] = (
                    False
                    if ip_profile["dhcp_enabled"] == "false"
                    or ip_profile["dhcp_enabled"] is False
                    else True
                )

            if ip_profile.get("dhcp_start_address"):
                subnet["allocation_pools"] = []
                subnet["allocation_pools"].append(dict())
                subnet["allocation_pools"][0]["start"] = ip_profile[
                    "dhcp_start_address"
                ]

            if ip_profile.get("dhcp_count"):
                # parts = ip_profile["dhcp_start_address"].split(".")
                # ip_int = (int(parts[0]) << 24) + (int(parts[1]) << 16) + (int(parts[2]) << 8) + int(parts[3])
                ip_int = int(netaddr.IPAddress(ip_profile["dhcp_start_address"]))
                ip_int += ip_profile["dhcp_count"] - 1
                ip_str = str(netaddr.IPAddress(ip_int))
                subnet["allocation_pools"][0]["end"] = ip_str

            # self.logger.debug(">>>>>>>>>>>>>>>>>> Subnet: %s", str(subnet))
            self.neutron.create_subnet({"subnet": subnet})

            if net_type == "data" and self.config.get("multisegment_support"):
                if self.config.get("l2gw_support"):
                    l2gw_list = self.neutron.list_l2_gateways().get("l2_gateways", ())

                    for l2gw in l2gw_list:
                        l2gw_conn = {
                            "l2_gateway_id": l2gw["id"],
                            "network_id": new_net["network"]["id"],
                            "segmentation_id": str(vlanID),
                        }
                        new_l2gw_conn = self.neutron.create_l2_gateway_connection(
                            {"l2_gateway_connection": l2gw_conn}
                        )
                        created_items[
                            "l2gwconn:"
                            + str(new_l2gw_conn["l2_gateway_connection"]["id"])
                        ] = True

            return new_net["network"]["id"], created_items
        except Exception as e:
            # delete l2gw connections (if any) before deleting the network
            for k, v in created_items.items():
                if not v:  # skip already deleted
                    continue

                try:
                    k_item, _, k_id = k.partition(":")

                    if k_item == "l2gwconn":
                        self.neutron.delete_l2_gateway_connection(k_id)
                except Exception as e2:
                    self.logger.error(
                        "Error deleting l2 gateway connection: {}: {}".format(
                            type(e2).__name__, e2
                        )
                    )

            if new_net:
                self.neutron.delete_network(new_net["network"]["id"])

            self._format_exception(e)

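    # Illustrative sketch of a caller's view (hypothetical values, not executed
    # by the driver):
    #
    #   net_id, created_items = conn.new_network(
    #       "mgmt-net",
    #       "bridge",
    #       ip_profile={
    #           "ip_version": "IPv4",
    #           "subnet_address": "10.0.0.0/24",
    #           "gateway_address": "10.0.0.1",
    #           "dhcp_enabled": True,
    #           "dhcp_start_address": "10.0.0.10",
    #           "dhcp_count": 100,  # allocation pool becomes 10.0.0.10 - 10.0.0.109
    #       },
    #   )
    #   # created_items may hold keys such as "l2gwconn:<uuid>" that
    #   # delete_network() uses to clean up.
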
    def get_network_list(self, filter_dict={}):
        """Obtain tenant networks of VIM
        Filter_dict can be:
            name: network name
            id: network uuid
            shared: boolean
            tenant_id: tenant
            admin_state_up: boolean
            status: 'ACTIVE'
        Returns the network list of dictionaries
        """
        self.logger.debug("Getting network from VIM filter: '%s'", str(filter_dict))

        try:
            self._reload_connection()
            filter_dict_os = filter_dict.copy()

            if self.api_version3 and "tenant_id" in filter_dict_os:
                # TODO check
                filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")

            net_dict = self.neutron.list_networks(**filter_dict_os)
            net_list = net_dict["networks"]
            self.__net_os2mano(net_list)

            return net_list
        except (
            neExceptions.ConnectionFailed,
            ksExceptions.ClientException,
            neExceptions.NeutronException,
            ConnectionError,
        ) as e:
            self._format_exception(e)

    def get_network(self, net_id):
        """Obtain details of network from VIM
        Returns the network information from a network id"""
        self.logger.debug("Getting tenant network %s from VIM", net_id)
        filter_dict = {"id": net_id}
        net_list = self.get_network_list(filter_dict)

        if len(net_list) == 0:
            raise vimconn.VimConnNotFoundException(
                "Network '{}' not found".format(net_id)
            )
        elif len(net_list) > 1:
            raise vimconn.VimConnConflictException(
                "Found more than one network with this criteria"
            )

        net = net_list[0]
        subnets = []

        for subnet_id in net.get("subnets", ()):
            try:
                subnet = self.neutron.show_subnet(subnet_id)
            except Exception as e:
                self.logger.error(
                    "osconnector.get_network(): Error getting subnet %s %s"
                    % (net_id, str(e))
                )
                subnet = {"id": subnet_id, "fault": str(e)}

            subnets.append(subnet)

        net["subnets"] = subnets
        net["encapsulation"] = net.get("provider:network_type")
        net["encapsulation_type"] = net.get("provider:network_type")
        net["segmentation_id"] = net.get("provider:segmentation_id")
        net["encapsulation_id"] = net.get("provider:segmentation_id")

        return net

    def delete_network(self, net_id, created_items=None):
        """
        Removes a tenant network from VIM and its associated elements
        :param net_id: VIM identifier of the network, provided by method new_network
        :param created_items: dictionary with extra items to be deleted. provided by method new_network
        Returns the network identifier or raises an exception upon error or when network is not found
        """
        self.logger.debug("Deleting network '%s' from VIM", net_id)

        if created_items is None:
            created_items = {}

        try:
            self._reload_connection()

            # delete l2gw connections (if any) before deleting the network
            for k, v in created_items.items():
                if not v:  # skip already deleted
                    continue

                try:
                    k_item, _, k_id = k.partition(":")

                    if k_item == "l2gwconn":
                        self.neutron.delete_l2_gateway_connection(k_id)
                except Exception as e:
                    self.logger.error(
                        "Error deleting l2 gateway connection: {}: {}".format(
                            type(e).__name__, e
                        )
                    )

            # delete VM ports attached to this network before the network
            ports = self.neutron.list_ports(network_id=net_id)

            for p in ports["ports"]:
                try:
                    self.neutron.delete_port(p["id"])
                except Exception as e:
                    self.logger.error("Error deleting port %s: %s", p["id"], str(e))

            self.neutron.delete_network(net_id)

            return net_id
        except (
            neExceptions.ConnectionFailed,
            neExceptions.NetworkNotFoundClient,
            neExceptions.NeutronException,
            ksExceptions.ClientException,
            ConnectionError,
        ) as e:
            self._format_exception(e)

    def refresh_nets_status(self, net_list):
        """Get the status of the networks
        Params: the list of network identifiers
        Returns a dictionary with:
            net_id:        #VIM id of this network
              status:      #Mandatory. Text with one of:
                           #  DELETED (not found at vim)
                           #  VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
                           #  OTHER (Vim reported other status not understood)
                           #  ERROR (VIM indicates an ERROR status)
                           #  ACTIVE, INACTIVE, DOWN (admin down),
                           #  BUILD (on building process)
              error_msg:   #Text with VIM error message, if any. Or the VIM connection ERROR
              vim_info:    #Text with plain information obtained from vim (yaml.safe_dump)
        """
        net_dict = {}

        for net_id in net_list:
            net = {}

            try:
                net_vim = self.get_network(net_id)

                if net_vim["status"] in netStatus2manoFormat:
                    net["status"] = netStatus2manoFormat[net_vim["status"]]
                else:
                    net["status"] = "OTHER"
                    net["error_msg"] = "VIM status reported " + net_vim["status"]

                if net["status"] == "ACTIVE" and not net_vim["admin_state_up"]:
                    net["status"] = "DOWN"

                net["vim_info"] = self.serialize(net_vim)

                if net_vim.get("fault"):  # TODO
                    net["error_msg"] = str(net_vim["fault"])
            except vimconn.VimConnNotFoundException as e:
                self.logger.error("Exception getting net status: %s", str(e))
                net["status"] = "DELETED"
                net["error_msg"] = str(e)
            except vimconn.VimConnException as e:
                self.logger.error("Exception getting net status: %s", str(e))
                net["status"] = "VIM_ERROR"
                net["error_msg"] = str(e)

            net_dict[net_id] = net

        return net_dict

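    # Illustrative sketch of the returned structure (hypothetical values, not
    # executed by the driver):
    #
    #   conn.refresh_nets_status(["11-22", "33-44"])
    #   # -> {"11-22": {"status": "ACTIVE", "vim_info": "..."},
    #   #     "33-44": {"status": "DELETED",
    #   #               "error_msg": "Network '33-44' not found"}}
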
    def get_flavor(self, flavor_id):
        """Obtain flavor details from the VIM. Returns the flavor dict details"""
        self.logger.debug("Getting flavor '%s'", flavor_id)

        try:
            self._reload_connection()
            flavor = self.nova.flavors.find(id=flavor_id)
            # TODO parse input and translate to VIM format (openmano_schemas.new_vminstance_response_schema)

            return flavor.to_dict()
        except (
            nvExceptions.NotFound,
            nvExceptions.ClientException,
            ksExceptions.ClientException,
            ConnectionError,
        ) as e:
            self._format_exception(e)

    def get_flavor_id_from_data(self, flavor_dict):
        """Obtain flavor id that matches the flavor description
        Returns the flavor_id or raises a VimConnNotFoundException
        flavor_dict: contains the required ram, vcpus, disk
        If 'use_existing_flavors' is set to True at config, the closest flavor that provides same or more ram, vcpus
        and disk is returned. Otherwise a flavor with exactly the same ram, vcpus and disk is returned or a
        VimConnNotFoundException is raised
        """
        exact_match = False if self.config.get("use_existing_flavors") else True

        try:
            self._reload_connection()
            flavor_candidate_id = None
            flavor_candidate_data = (10000, 10000, 10000)
            flavor_target = (
                flavor_dict["ram"],
                flavor_dict["vcpus"],
                flavor_dict["disk"],
                flavor_dict.get("ephemeral", 0),
                flavor_dict.get("swap", 0),
            )
            # numa=None
            extended = flavor_dict.get("extended", {})

            if extended:
                # TODO
                raise vimconn.VimConnNotFoundException(
                    "Flavor with EPA still not implemented"
                )
                # if len(numas) > 1:
                #     raise vimconn.VimConnNotFoundException("Cannot find any flavor with more than one numa")
                # numa=numas[0]
                # numas = extended.get("numas")

            for flavor in self.nova.flavors.list():
                epa = flavor.get_keys()

                if epa:
                    continue
                    # TODO

                flavor_data = (
                    flavor.ram,
                    flavor.vcpus,
                    flavor.disk,
                    flavor.ephemeral,
                    flavor.swap if isinstance(flavor.swap, int) else 0,
                )

                if flavor_data == flavor_target:
                    return flavor.id
                elif (
                    not exact_match
                    and flavor_target < flavor_data < flavor_candidate_data
                ):
                    flavor_candidate_id = flavor.id
                    flavor_candidate_data = flavor_data

            if not exact_match and flavor_candidate_id:
                return flavor_candidate_id

            raise vimconn.VimConnNotFoundException(
                "Cannot find any flavor matching '{}'".format(flavor_dict)
            )
        except (
            nvExceptions.NotFound,
            nvExceptions.ClientException,
            ksExceptions.ClientException,
            ConnectionError,
        ) as e:
            self._format_exception(e)

    @staticmethod
    def process_resource_quota(quota: dict, prefix: str, extra_specs: dict) -> None:
        """Process resource quota and fill up extra_specs.
        Args:
            quota       (dict):     Keeps the quota of resources
            prefix      (str):      Prefix
            extra_specs (dict):     Dict to be filled to be used during flavor creation

        """
        if "limit" in quota:
            extra_specs["quota:" + prefix + "_limit"] = quota["limit"]

        if "reserve" in quota:
            extra_specs["quota:" + prefix + "_reservation"] = quota["reserve"]

        if "shares" in quota:
            extra_specs["quota:" + prefix + "_shares_level"] = "custom"
            extra_specs["quota:" + prefix + "_shares_share"] = quota["shares"]

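    # Illustrative sketch (hypothetical quota, not executed by the driver):
    #
    #   extra_specs = {}
    #   vimconnector.process_resource_quota(
    #       {"limit": 10000, "reserve": 1000, "shares": 2000}, "cpu", extra_specs
    #   )
    #   # extra_specs == {"quota:cpu_limit": 10000,
    #   #                 "quota:cpu_reservation": 1000,
    #   #                 "quota:cpu_shares_level": "custom",
    #   #                 "quota:cpu_shares_share": 2000}
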
    @staticmethod
    def process_numa_memory(
        numa: dict, node_id: Optional[int], extra_specs: dict
    ) -> None:
        """Set the memory in extra_specs.
        Args:
            numa        (dict):     A dictionary which includes numa information
            node_id     (int):      ID of numa node
            extra_specs (dict):     To be filled.

        """
        if not numa.get("memory"):
            return

        memory_mb = numa["memory"] * 1024
        memory = "hw:numa_mem.{}".format(node_id)
        extra_specs[memory] = int(memory_mb)

    @staticmethod
    def process_numa_vcpu(numa: dict, node_id: int, extra_specs: dict) -> None:
        """Set the cpu in extra_specs.
        Args:
            numa        (dict):     A dictionary which includes numa information
            node_id     (int):      ID of numa node
            extra_specs (dict):     To be filled.

        """
        if not numa.get("vcpu"):
            return

        vcpu = numa["vcpu"]
        cpu = "hw:numa_cpus.{}".format(node_id)
        vcpu = ",".join(map(str, vcpu))
        extra_specs[cpu] = vcpu

    @staticmethod
    def process_numa_paired_threads(numa: dict, extra_specs: dict) -> Optional[int]:
        """Fill up extra_specs if numa has paired-threads.
        Args:
            numa        (dict):     A dictionary which includes numa information
            extra_specs (dict):     To be filled.

        Returns:
            threads     (int):      Number of virtual cpus

        """
        if not numa.get("paired-threads"):
            return

        # cpu_thread_policy "require" implies that the compute node must have an SMT architecture
        threads = numa["paired-threads"] * 2
        extra_specs["hw:cpu_thread_policy"] = "require"
        extra_specs["hw:cpu_policy"] = "dedicated"

        return threads

    @staticmethod
    def process_numa_cores(numa: dict, extra_specs: dict) -> Optional[int]:
        """Fill up extra_specs if numa has cores.
        Args:
            numa        (dict):     A dictionary which includes numa information
            extra_specs (dict):     To be filled.

        Returns:
            cores       (int):      Number of virtual cpus

        """
        # cpu_thread_policy "isolate" implies that the host must not have an SMT
        # architecture, or a non-SMT architecture will be emulated
        if not numa.get("cores"):
            return

        cores = numa["cores"]
        extra_specs["hw:cpu_thread_policy"] = "isolate"
        extra_specs["hw:cpu_policy"] = "dedicated"

        return cores

    @staticmethod
    def process_numa_threads(numa: dict, extra_specs: dict) -> Optional[int]:
        """Fill up extra_specs if numa has threads.
        Args:
            numa        (dict):     A dictionary which includes numa information
            extra_specs (dict):     To be filled.

        Returns:
            threads     (int):      Number of virtual cpus

        """
        # cpu_thread_policy "prefer" implies that the host may or may not have an SMT architecture
        if not numa.get("threads"):
            return

        threads = numa["threads"]
        extra_specs["hw:cpu_thread_policy"] = "prefer"
        extra_specs["hw:cpu_policy"] = "dedicated"

        return threads

    def _process_numa_parameters_of_flavor(
        self, numas: List, extra_specs: Dict
    ) -> None:
        """Process numa parameters and fill up extra_specs.

        Args:
            numas       (list):     List of dictionaries which include numa information
            extra_specs (dict):     To be filled.

        """
        numa_nodes = len(numas)
        extra_specs["hw:numa_nodes"] = str(numa_nodes)
        cpu_cores, cpu_threads = 0, 0

        if self.vim_type == "VIO":
            self.process_vio_numa_nodes(numa_nodes, extra_specs)

        for numa in numas:
            if "id" in numa:
                node_id = numa["id"]
                # overwrite ram and vcpus
                # check if key "memory" is present in numa else use ram value at flavor
                self.process_numa_memory(numa, node_id, extra_specs)
                self.process_numa_vcpu(numa, node_id, extra_specs)

            # See for reference: https://specs.openstack.org/openstack/nova-specs/specs/mitaka/implemented/virt-driver-cpu-thread-pinning.html
            extra_specs["hw:cpu_sockets"] = str(numa_nodes)

            if "paired-threads" in numa:
                threads = self.process_numa_paired_threads(numa, extra_specs)
                cpu_threads += threads

            elif "cores" in numa:
                cores = self.process_numa_cores(numa, extra_specs)
                cpu_cores += cores

            elif "threads" in numa:
                threads = self.process_numa_threads(numa, extra_specs)
                cpu_threads += threads

        if cpu_cores:
            extra_specs["hw:cpu_cores"] = str(cpu_cores)

        if cpu_threads:
            extra_specs["hw:cpu_threads"] = str(cpu_threads)

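    # Illustrative sketch (hypothetical VNFD-derived numa list, not executed by
    # the driver):
    #
    #   extra_specs = {}
    #   self._process_numa_parameters_of_flavor(
    #       [{"id": 0, "memory": 2, "vcpu": [0, 1], "paired-threads": 2}], extra_specs
    #   )
    #   # extra_specs == {"hw:numa_nodes": "1", "hw:numa_mem.0": 2048,
    #   #                 "hw:numa_cpus.0": "0,1", "hw:cpu_sockets": "1",
    #   #                 "hw:cpu_thread_policy": "require",
    #   #                 "hw:cpu_policy": "dedicated", "hw:cpu_threads": "4"}
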
    @staticmethod
    def process_vio_numa_nodes(numa_nodes: int, extra_specs: Dict) -> None:
        """According to the number of numa nodes, update the extra_specs for VIO.

        Args:
            numa_nodes  (int):      Number of numa nodes
            extra_specs (dict):     Extra specs dict to be updated

        """
        # If there are several numas, we do not define specific affinity.
        extra_specs["vmware:latency_sensitivity_level"] = "high"

    def _change_flavor_name(
        self, name: str, name_suffix: int, flavor_data: dict
    ) -> str:
        """Change the flavor name if the name already exists.

        Args:
            name        (str):      Flavor name to be checked
            name_suffix (int):      Suffix to be appended to name
            flavor_data (dict):     Flavor dict

        Returns:
            name        (str):      New flavor name to be used

        """
        # Get used names
        fl = self.nova.flavors.list()
        fl_names = [f.name for f in fl]

        while name in fl_names:
            name_suffix += 1
            name = flavor_data["name"] + "-" + str(name_suffix)

        return name

    def _process_extended_config_of_flavor(
        self, extended: dict, extra_specs: dict
    ) -> None:
        """Process the extended dict to fill up extra_specs.
        Args:
            extended    (dict):     Extended configuration of the flavor
            extra_specs (dict):     Dict to be filled to be used during flavor creation

        """
        quotas = {
            "cpu-quota": "cpu",
            "mem-quota": "memory",
            "vif-quota": "vif",
            "disk-io-quota": "disk_io",
        }

        page_sizes = {
            "LARGE": "large",
            "SMALL": "small",
            "SIZE_2MB": "2MB",
            "SIZE_1GB": "1GB",
            "PREFER_LARGE": "any",
        }

        policies = {
            "cpu-pinning-policy": "hw:cpu_policy",
            "cpu-thread-pinning-policy": "hw:cpu_thread_policy",
            "mem-policy": "hw:numa_mempolicy",
        }

        numas = extended.get("numas")

        if numas:
            self._process_numa_parameters_of_flavor(numas, extra_specs)

        for quota, item in quotas.items():
            if quota in extended.keys():
                self.process_resource_quota(extended.get(quota), item, extra_specs)

        # Set the mempage size as specified in the descriptor
        if extended.get("mempage-size"):
            if extended["mempage-size"] in page_sizes.keys():
                extra_specs["hw:mem_page_size"] = page_sizes[extended["mempage-size"]]
            else:
                # Normally, validations in NBI should not allow this condition.
                self.logger.debug(
                    "Invalid mempage-size %s. Will be ignored",
                    extended.get("mempage-size"),
                )

        for policy, hw_policy in policies.items():
            if extended.get(policy):
                extra_specs[hw_policy] = extended[policy].lower()

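    # Illustrative sketch (hypothetical extended config, not executed by the
    # driver):
    #
    #   extra_specs = {}
    #   self._process_extended_config_of_flavor(
    #       {"mempage-size": "LARGE", "cpu-pinning-policy": "DEDICATED"}, extra_specs
    #   )
    #   # extra_specs == {"hw:mem_page_size": "large", "hw:cpu_policy": "dedicated"}
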
    @staticmethod
    def _get_flavor_details(flavor_data: dict) -> Tuple:
        """Returns the details of flavor
        Args:
            flavor_data (dict):     Dictionary that includes required flavor details

        Returns:
            ram, vcpus, extra_specs, extended   (tuple):    Main items of required flavor

        """
        return (
            flavor_data.get("ram", 64),
            flavor_data.get("vcpus", 1),
            {},
            flavor_data.get("extended"),
        )

    def new_flavor(self, flavor_data: dict, change_name_if_used: bool = True) -> str:
        """Adds a tenant flavor to openstack VIM.
        If change_name_if_used is True, it will change the name in case of conflict,
        because name repetition is not supported.

        Args:
            flavor_data         (dict):     Flavor details to be processed
            change_name_if_used (bool):     Change name in case of conflict

        Returns:
            flavor_id           (str):      flavor identifier

        """
        self.logger.debug("Adding flavor '%s'", str(flavor_data))
        retry = 0
        max_retries = 3
        name_suffix = 0

        try:
            name = flavor_data["name"]

            while retry < max_retries:
                retry += 1

                try:
                    self._reload_connection()

                    if change_name_if_used:
                        name = self._change_flavor_name(name, name_suffix, flavor_data)

                    ram, vcpus, extra_specs, extended = self._get_flavor_details(
                        flavor_data
                    )

                    if extended:
                        self._process_extended_config_of_flavor(extended, extra_specs)

                    # Create flavor
                    new_flavor = self.nova.flavors.create(
                        name=name,
                        ram=ram,
                        vcpus=vcpus,
                        disk=flavor_data.get("disk", 0),
                        ephemeral=flavor_data.get("ephemeral", 0),
                        swap=flavor_data.get("swap", 0),
                        is_public=flavor_data.get("is_public", True),
                    )

                    # Add metadata
                    if extra_specs:
                        new_flavor.set_keys(extra_specs)

                    return new_flavor.id

                except nvExceptions.Conflict as e:
                    if change_name_if_used and retry < max_retries:
                        continue

                    self._format_exception(e)

        except (
            ksExceptions.ClientException,
            nvExceptions.ClientException,
            ConnectionError,
            KeyError,
        ) as e:
            self._format_exception(e)

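    # Illustrative sketch (hypothetical flavor_data, not executed by the driver):
    #
    #   flavor_id = conn.new_flavor(
    #       {"name": "small", "ram": 2048, "vcpus": 2, "disk": 10,
    #        "extended": {"mempage-size": "LARGE"}}
    #   )
    #   # If a flavor named "small" already exists, "small-1", "small-2", ...
    #   # is tried before giving up after max_retries attempts.
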
    def delete_flavor(self, flavor_id):
        """Deletes a tenant flavor from openstack VIM. Returns the old flavor_id"""
        try:
            self._reload_connection()
            self.nova.flavors.delete(flavor_id)

            return flavor_id
        # except nvExceptions.BadRequest as e:
        except (
            nvExceptions.NotFound,
            ksExceptions.ClientException,
            nvExceptions.ClientException,
            ConnectionError,
        ) as e:
            self._format_exception(e)

    def new_image(self, image_dict):
        """
        Adds a tenant image to VIM. image_dict is a dictionary with:
            name: name
            disk_format: qcow2, vhd, vmdk, raw (by default), ...
            location: path or URI
            public: "yes" or "no"
            metadata: metadata of the image
        Returns the image_id
        """
        retry = 0
        max_retries = 3

        while retry < max_retries:
            retry += 1

            try:
                self._reload_connection()

                # determine format, see http://docs.openstack.org/developer/glance/formats.html
                if "disk_format" in image_dict:
                    disk_format = image_dict["disk_format"]
                else:  # autodiscover based on extension
                    if image_dict["location"].endswith(".qcow2"):
                        disk_format = "qcow2"
                    elif image_dict["location"].endswith(".vhd"):
                        disk_format = "vhd"
                    elif image_dict["location"].endswith(".vmdk"):
                        disk_format = "vmdk"
                    elif image_dict["location"].endswith(".vdi"):
                        disk_format = "vdi"
                    elif image_dict["location"].endswith(".iso"):
                        disk_format = "iso"
                    elif image_dict["location"].endswith(".aki"):
                        disk_format = "aki"
                    elif image_dict["location"].endswith(".ari"):
                        disk_format = "ari"
                    elif image_dict["location"].endswith(".ami"):
                        disk_format = "ami"
                    else:
                        disk_format = "raw"

                self.logger.debug(
                    "new_image: '%s' loading from '%s'",
                    image_dict["name"],
                    image_dict["location"],
                )

                if self.vim_type == "VIO":
                    container_format = "bare"

                    if "container_format" in image_dict:
                        container_format = image_dict["container_format"]

                    new_image = self.glance.images.create(
                        name=image_dict["name"],
                        container_format=container_format,
                        disk_format=disk_format,
                    )
                else:
                    new_image = self.glance.images.create(name=image_dict["name"])

                if image_dict["location"].startswith("http"):
                    # TODO there is not a method to direct download. It must be downloaded locally with requests
                    raise vimconn.VimConnNotImplemented("Cannot create image from URL")
                else:  # local path; open in binary mode, as image files are binary data
                    with open(image_dict["location"], "rb") as fimage:
                        self.glance.images.upload(new_image.id, fimage)
                        # new_image = self.glancev1.images.create(name=image_dict["name"], is_public=
                        #    image_dict.get("public","yes")=="yes",
                        #    container_format="bare", data=fimage, disk_format=disk_format)

                # guard against a missing "metadata" key
                metadata_to_load = image_dict.get("metadata") or {}

                # TODO location is a reserved word for current openstack versions. fixed for VIO please check
                # for openstack
                if self.vim_type == "VIO":
                    metadata_to_load["upload_location"] = image_dict["location"]
                else:
                    metadata_to_load["location"] = image_dict["location"]

                self.glance.images.update(new_image.id, **metadata_to_load)

                return new_image.id
            except (
                nvExceptions.Conflict,
                ksExceptions.ClientException,
                nvExceptions.ClientException,
            ) as e:
                self._format_exception(e)
            except (
                HTTPException,
                gl1Exceptions.HTTPException,
                gl1Exceptions.CommunicationError,
                ConnectionError,
            ) as e:
                if retry < max_retries:
                    # transient connection problem: retry
                    continue

                self._format_exception(e)
            except IOError as e:  # can not open the file
                raise vimconn.VimConnConnectionException(
                    "{}: {} for {}".format(type(e).__name__, e, image_dict["location"]),
                    http_code=vimconn.HTTP_Bad_Request,
                )

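    # Illustrative sketch (hypothetical image_dict, not executed by the driver):
    #
    #   image_id = conn.new_image(
    #       {"name": "ubuntu20.04", "location": "/tmp/ubuntu20.04.qcow2",
    #        "metadata": {"use_incremental": "no"}}
    #   )
    #   # disk_format is autodiscovered as "qcow2" from the file extension
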
    def delete_image(self, image_id):
        """Deletes a tenant image from openstack VIM. Returns the old id"""
        try:
            self._reload_connection()
            self.glance.images.delete(image_id)

            return image_id
        except (
            nvExceptions.NotFound,
            ksExceptions.ClientException,
            nvExceptions.ClientException,
            gl1Exceptions.CommunicationError,
            gl1Exceptions.HTTPNotFound,
            ConnectionError,
        ) as e:  # TODO remove
            self._format_exception(e)

    def get_image_id_from_path(self, path):
        """Get the image id from image path in the VIM database. Returns the image_id"""
        try:
            self._reload_connection()
            images = self.glance.images.list()

            for image in images:
                if image.metadata.get("location") == path:
                    return image.id

            raise vimconn.VimConnNotFoundException(
                "image with location '{}' not found".format(path)
            )
        except (
            ksExceptions.ClientException,
            nvExceptions.ClientException,
            gl1Exceptions.CommunicationError,
            ConnectionError,
        ) as e:
            self._format_exception(e)

    def get_image_list(self, filter_dict={}):
        """Obtain tenant images from VIM
        Filter_dict can be:
            id: image id
            name: image name
            checksum: image checksum
        Returns the image list of dictionaries:
            [{<the fields at Filter_dict plus some VIM specific>}, ...]
            List can be empty
        """
        self.logger.debug("Getting image list from VIM filter: '%s'", str(filter_dict))

        try:
            self._reload_connection()
            # filter_dict_os = filter_dict.copy()
            # First we filter by the available filter fields: name, id. The others are removed.
            image_list = self.glance.images.list()
            filtered_list = []

            for image in image_list:
                try:
                    if filter_dict.get("name") and image["name"] != filter_dict["name"]:
                        continue

                    if filter_dict.get("id") and image["id"] != filter_dict["id"]:
                        continue

                    if (
                        filter_dict.get("checksum")
                        and image["checksum"] != filter_dict["checksum"]
                    ):
                        continue

                    filtered_list.append(image.copy())
                except gl1Exceptions.HTTPNotFound:
                    pass

            return filtered_list
        except (
            ksExceptions.ClientException,
            nvExceptions.ClientException,
            gl1Exceptions.CommunicationError,
            ConnectionError,
        ) as e:
            self._format_exception(e)

    def __wait_for_vm(self, vm_id, status):
        """Wait until the VM is in the desired status and return True.
        If the VM gets into ERROR status, return False.
        If the timeout is reached, raise an exception"""
        elapsed_time = 0

        while elapsed_time < server_timeout:
            vm_status = self.nova.servers.get(vm_id).status

            if vm_status == status:
                return True

            if vm_status == "ERROR":
                return False

            time.sleep(5)
            elapsed_time += 5

        # if we exceeded the timeout rollback
        if elapsed_time >= server_timeout:
            raise vimconn.VimConnException(
                "Timeout waiting for instance " + vm_id + " to get " + status,
                http_code=vimconn.HTTP_Request_Timeout,
            )

1796 def _get_openstack_availablity_zones(self):
1797 """
1798 Get the availability zones available in OpenStack, excluding the 'internal' zone
1799 :return: list of zone names, or None on error
1800 """
1801 try:
1802 openstack_availability_zone = self.nova.availability_zones.list()
1803 openstack_availability_zone = [
1804 str(zone.zoneName)
1805 for zone in openstack_availability_zone
1806 if zone.zoneName != "internal"
1807 ]
1808
1809 return openstack_availability_zone
1810 except Exception:
1811 return None
1812
1813 def _set_availablity_zones(self):
1814 """
1815 Set the VIM availability zones: from config when given, otherwise from OpenStack
1816 :return:
1817 """
1818 if "availability_zone" in self.config:
1819 vim_availability_zones = self.config.get("availability_zone")
1820
1821 if isinstance(vim_availability_zones, str):
1822 self.availability_zone = [vim_availability_zones]
1823 elif isinstance(vim_availability_zones, list):
1824 self.availability_zone = vim_availability_zones
1825 else:
1826 self.availability_zone = self._get_openstack_availablity_zones()
1827
1828 def _get_vm_availability_zone(
1829 self, availability_zone_index, availability_zone_list
1830 ):
1831 """
1832 Return the availability zone to be used by the created VM.
1833 :return: The VIM availability zone to be used or None
1834 """
1835 if availability_zone_index is None:
1836 if not self.config.get("availability_zone"):
1837 return None
1838 elif isinstance(self.config.get("availability_zone"), str):
1839 return self.config["availability_zone"]
1840 else:
1841 # TODO consider using a different parameter at config for default AV and AV list match
1842 return self.config["availability_zone"][0]
1843
1844 vim_availability_zones = self.availability_zone
1845 # Check whether the VIM offers as many availability zones as the VNFD describes
1846 if vim_availability_zones and len(availability_zone_list) <= len(
1847 vim_availability_zones
1848 ):
1849 # Match by name when every NFV AZ name exists at the VIM; otherwise match by index
1850 match_by_index = False
1851 for av in availability_zone_list:
1852 if av not in vim_availability_zones:
1853 match_by_index = True
1854 break
1855
1856 if match_by_index:
1857 return vim_availability_zones[availability_zone_index]
1858 else:
1859 return availability_zone_list[availability_zone_index]
1860 else:
1861 raise vimconn.VimConnConflictException(
1862 "Not enough availability zones at VIM for this deployment"
1863 )
1864
1865 def _prepare_port_dict_security_groups(self, net: dict, port_dict: dict) -> None:
1866 """Fill up the security_groups in the port_dict.
1867
1868 Args:
1869 net (dict): Network details
1870 port_dict (dict): Port details
1871
1872 """
1873 if (
1874 self.config.get("security_groups")
1875 and net.get("port_security") is not False
1876 and not self.config.get("no_port_security_extension")
1877 ):
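# Security groups are applied only when they are configured at the VIM
# level, the network does not explicitly disable port security, and the
# no_port_security_extension flag is not set in the config.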
1878 if not self.security_groups_id:
1879 self._get_ids_from_name()
1880
1881 port_dict["security_groups"] = self.security_groups_id
1882
1883 def _prepare_port_dict_binding(self, net: dict, port_dict: dict) -> None:
1884 """Fill up the network binding depending on network type in the port_dict.
1885
1886 Args:
1887 net (dict): Network details
1888 port_dict (dict): Port details
1889
1890 """
1891 if not net.get("type"):
1892 raise vimconn.VimConnException("Type is missing in the network details.")
1893
1894 if net["type"] == "virtual":
1895 pass
1896
1897 # For VF
1898 elif net["type"] == "VF" or net["type"] == "SR-IOV":
1899 port_dict["binding:vnic_type"] = "direct"
1900
1901 # VIO specific Changes
1902 if self.vim_type == "VIO":
1903 # Need to create port with port_security_enabled = False and no-security-groups
1904 port_dict["port_security_enabled"] = False
1905 port_dict["provider_security_groups"] = []
1906 port_dict["security_groups"] = []
1907
1908 else:
1909 # For PT PCI-PASSTHROUGH
1910 port_dict["binding:vnic_type"] = "direct-physical"
1911
1912 @staticmethod
1913 def _set_fixed_ip(new_port: dict, net: dict) -> None:
1914 """Set the "ip" parameter in net dictionary.
1915
1916 Args:
1917 new_port (dict): New created port
1918 net (dict): Network details
1919
1920 """
1921 fixed_ips = new_port["port"].get("fixed_ips")
1922
1923 if fixed_ips:
1924 net["ip"] = fixed_ips[0].get("ip_address")
1925 else:
1926 net["ip"] = None
1927
1928 @staticmethod
1929 def _prepare_port_dict_mac_ip_addr(net: dict, port_dict: dict) -> None:
1930 """Fill up the mac_address and fixed_ips in port_dict.
1931
1932 Args:
1933 net (dict): Network details
1934 port_dict (dict): Port details
1935
1936 """
1937 if net.get("mac_address"):
1938 port_dict["mac_address"] = net["mac_address"]
1939
1940 if net.get("ip_address"):
1941 port_dict["fixed_ips"] = [{"ip_address": net["ip_address"]}]
1942 # TODO add "subnet_id": <subnet_id>
1943
1944 def _create_new_port(self, port_dict: dict, created_items: dict, net: dict) -> Dict:
1945 """Create new port using neutron.
1946
1947 Args:
1948 port_dict (dict): Port details
1949 created_items (dict): All created items
1950 net (dict): Network details
1951
1952 Returns:
1953 new_port (dict): New created port
1954
1955 """
1956 new_port = self.neutron.create_port({"port": port_dict})
1957 created_items["port:" + str(new_port["port"]["id"])] = True
1958 net["mac_address"] = new_port["port"]["mac_address"]
1959 net["vim_id"] = new_port["port"]["id"]
1960
1961 return new_port
1962
1963 def _create_port(
1964 self, net: dict, name: str, created_items: dict
1965 ) -> Tuple[dict, dict]:
1966 """Create port using net details.
1967
1968 Args:
1969 net (dict): Network details
1970 name (str): Name used for the port when the net dict does not include one
1971 created_items (dict): All created items
1972
1973 Returns:
1974 new_port (dict), port (dict): the newly created port and the NIC entry passed to Nova
1975
1976 """
1977
1978 port_dict = {
1979 "network_id": net["net_id"],
1980 "name": net.get("name"),
1981 "admin_state_up": True,
1982 }
1983
1984 if not port_dict["name"]:
1985 port_dict["name"] = name
1986
1987 self._prepare_port_dict_security_groups(net, port_dict)
1988
1989 self._prepare_port_dict_binding(net, port_dict)
1990
1991 vimconnector._prepare_port_dict_mac_ip_addr(net, port_dict)
1992
1993 new_port = self._create_new_port(port_dict, created_items, net)
1994
1995 vimconnector._set_fixed_ip(new_port, net)
1996
1997 port = {"port-id": new_port["port"]["id"]}
1998
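# Port tags require compute API microversion 2.32 or later. Compare as an
# integer tuple rather than a float, so microversions such as 2.100 order
# correctly.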
1999 if tuple(map(int, self.nova.api_version.get_string().split("."))) >= (2, 32):
2000 port["tag"] = new_port["port"]["name"]
2001
2002 return new_port, port
2003
2004 def _prepare_network_for_vminstance(
2005 self,
2006 name: str,
2007 net_list: list,
2008 created_items: dict,
2009 net_list_vim: list,
2010 external_network: list,
2011 no_secured_ports: list,
2012 ) -> None:
2013 """Create port and fill up net dictionary for new VM instance creation.
2014
2015 Args:
2016 name (str): VM name, used to derive port names
2017 net_list (list): List of networks
2018 created_items (dict): All created items belongs to a VM
2019 net_list_vim (list): List of ports
2020 external_network (list): List of external-networks
2021 no_secured_ports (list): Port security disabled ports
2022 """
2023
2024 self._reload_connection()
2025
2026 for net in net_list:
2027 # Skip non-connected iface
2028 if not net.get("net_id"):
2029 continue
2030
2031 new_port, port = self._create_port(net, name, created_items)
2032
2033 net_list_vim.append(port)
2034
2035 if net.get("floating_ip", False):
2036 net["exit_on_floating_ip_error"] = True
2037 external_network.append(net)
2038
2039 elif net["use"] == "mgmt" and self.config.get("use_floating_ip"):
2040 net["exit_on_floating_ip_error"] = False
2041 external_network.append(net)
2042 net["floating_ip"] = self.config.get("use_floating_ip")
2043
2044 # If port security is disabled when the port has not yet been attached to the VM, then all vm traffic
2045 # is dropped. As a workaround we wait until the VM is active and then disable the port-security
2046 if net.get("port_security") is False and not self.config.get(
2047 "no_port_security_extension"
2048 ):
2049 no_secured_ports.append(
2050 (
2051 new_port["port"]["id"],
2052 net.get("port_security_disable_strategy"),
2053 )
2054 )
2055
2056 def _prepare_persistent_root_volumes(
2057 self,
2058 name: str,
2059 vm_av_zone: list,
2060 disk: dict,
2061 base_disk_index: int,
2062 block_device_mapping: dict,
2063 existing_vim_volumes: list,
2064 created_items: dict,
2065 ) -> Optional[str]:
2066 """Prepare persistent root volumes for new VM instance.
2067
2068 Args:
2069 name (str): Name of VM instance
2070 vm_av_zone (str): Availability zone of the VM
2071 disk (dict): Disk details
2072 base_disk_index (int): Disk index
2073 block_device_mapping (dict): Block device details
2074 existing_vim_volumes (list): Existing disk details
2075 created_items (dict): All created items belongs to VM
2076
2077 Returns:
2078 boot_volume_id (str): ID of the created boot volume, or None when an existing volume is reused
2079
2080 """
2081 # Disk may include only vim_volume_id or only vim_id.
2082 # Use an existing persistent root volume, found via vim_volume_id or vim_id
2083 key_id = "vim_volume_id" if "vim_volume_id" in disk.keys() else "vim_id"
2084
2085 if disk.get(key_id):
2086 block_device_mapping["vd" + chr(base_disk_index)] = disk[key_id]
2087 existing_vim_volumes.append({"id": disk[key_id]})
2088
2089 else:
2090 # Create persistent root volume
2091 volume = self.cinder.volumes.create(
2092 size=disk["size"],
2093 name=name + "vd" + chr(base_disk_index),
2094 imageRef=disk["image_id"],
2095 # Make sure volume is in the same AZ as the VM to be attached to
2096 availability_zone=vm_av_zone,
2097 )
2098 boot_volume_id = volume.id
2099 self.update_block_device_mapping(
2100 volume=volume,
2101 block_device_mapping=block_device_mapping,
2102 base_disk_index=base_disk_index,
2103 disk=disk,
2104 created_items=created_items,
2105 )
2106
2107 return boot_volume_id
2108
2109 @staticmethod
2110 def update_block_device_mapping(
2111 volume: object,
2112 block_device_mapping: dict,
2113 base_disk_index: int,
2114 disk: dict,
2115 created_items: dict,
2116 ) -> None:
2117 """Add volume information to block device mapping dict.
2118 Args:
2119 volume (object): Created volume object
2120 block_device_mapping (dict): Block device details
2121 base_disk_index (int): Disk index
2122 disk (dict): Disk details
2123 created_items (dict): All created items belongs to VM
2124 """
2125 if not volume:
2126 raise vimconn.VimConnException("Volume is empty.")
2127
2128 if not hasattr(volume, "id"):
2129 raise vimconn.VimConnException(
2130 "Created volume is not valid, does not have id attribute."
2131 )
2132
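# created_items keys use the "volume:<id>[:keep]" convention; the optional
# ':keep' suffix marks volumes that must survive VM deletion.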
2133 volume_txt = "volume:" + str(volume.id)
2134 if disk.get("keep"):
2135 volume_txt += ":keep"
2136 created_items[volume_txt] = True
2137 block_device_mapping["vd" + chr(base_disk_index)] = volume.id
2138
2139 def _prepare_non_root_persistent_volumes(
2140 self,
2141 name: str,
2142 disk: dict,
2143 vm_av_zone: list,
2144 block_device_mapping: dict,
2145 base_disk_index: int,
2146 existing_vim_volumes: list,
2147 created_items: dict,
2148 ) -> None:
2149 """Prepare persistent volumes for new VM instance.
2150
2151 Args:
2152 name (str): Name of VM instance
2153 disk (dict): Disk details
2154 vm_av_zone (str): Availability zone of the VM
2155 block_device_mapping (dict): Block device details
2156 base_disk_index (int): Disk index
2157 existing_vim_volumes (list): Existing disk details
2158 created_items (dict): All created items belongs to VM
2159 """
2160 # Non-root persistent volumes
2161 # Disk may include only vim_volume_id or only vim_id.
2162 key_id = "vim_volume_id" if "vim_volume_id" in disk.keys() else "vim_id"
2163
2164 if disk.get(key_id):
2165 # Use existing persistent volume
2166 block_device_mapping["vd" + chr(base_disk_index)] = disk[key_id]
2167 existing_vim_volumes.append({"id": disk[key_id]})
2168
2169 else:
2170 # Create persistent volume
2171 volume = self.cinder.volumes.create(
2172 size=disk["size"],
2173 name=name + "vd" + chr(base_disk_index),
2174 # Make sure volume is in the same AZ as the VM to be attached to
2175 availability_zone=vm_av_zone,
2176 )
2177 self.update_block_device_mapping(
2178 volume=volume,
2179 block_device_mapping=block_device_mapping,
2180 base_disk_index=base_disk_index,
2181 disk=disk,
2182 created_items=created_items,
2183 )
2184
2185 def _wait_for_created_volumes_availability(
2186 self, elapsed_time: int, created_items: dict
2187 ) -> Optional[int]:
2188 """Wait till created volumes become available.
2189
2190 Args:
2191 elapsed_time (int): Passed time while waiting
2192 created_items (dict): All created items belongs to VM
2193
2194 Returns:
2195 elapsed_time (int): Time spent while waiting
2196
2197 """
2198
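# Poll every 5 seconds until the timeout. Keys in created_items look like
# "volume:<id>[:keep]", so split(":")[1] is the volume id; the for/else
# breaks out of the while only once every volume is 'available'.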
2199 while elapsed_time < volume_timeout:
2200 for created_item in created_items:
2201 v, volume_id = (
2202 created_item.split(":")[0],
2203 created_item.split(":")[1],
2204 )
2205 if v == "volume":
2206 if self.cinder.volumes.get(volume_id).status != "available":
2207 break
2208 else:
2209 # All ready: break from while
2210 break
2211
2212 time.sleep(5)
2213 elapsed_time += 5
2214
2215 return elapsed_time
2216
2217 def _wait_for_existing_volumes_availability(
2218 self, elapsed_time: int, existing_vim_volumes: list
2219 ) -> Optional[int]:
2220 """Wait till existing volumes become available.
2221
2222 Args:
2223 elapsed_time (int): Passed time while waiting
2224 existing_vim_volumes (list): Existing volume details
2225
2226 Returns:
2227 elapsed_time (int): Time spent while waiting
2228
2229 """
2230
2231 while elapsed_time < volume_timeout:
2232 for volume in existing_vim_volumes:
2233 if self.cinder.volumes.get(volume["id"]).status != "available":
2234 break
2235 else: # all ready: break from while
2236 break
2237
2238 time.sleep(5)
2239 elapsed_time += 5
2240
2241 return elapsed_time
2242
2243 def _prepare_disk_for_vminstance(
2244 self,
2245 name: str,
2246 existing_vim_volumes: list,
2247 created_items: dict,
2248 vm_av_zone: list,
2249 block_device_mapping: dict,
2250 disk_list: list = None,
2251 ) -> None:
2252 """Prepare all volumes for new VM instance.
2253
2254 Args:
2255 name (str): Name of Instance
2256 existing_vim_volumes (list): List of existing volumes
2257 created_items (dict): All created items belongs to VM
2258 vm_av_zone (str): VM availability zone
2259 block_device_mapping (dict): Block devices to be attached to VM
2260 disk_list (list): List of disks
2261
2262 """
2263 # Create additional volumes in case these are present in disk_list
2264 base_disk_index = ord("b")
2265 boot_volume_id = None
2266 elapsed_time = 0
2267
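# Device names are derived from an ordinal index: the root disk (image_id)
# maps to 'vda', while additional disks start at 'vdb'.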
2268 for disk in disk_list:
2269 if "image_id" in disk:
2270 # Root persistent volume
2271 base_disk_index = ord("a")
2272 boot_volume_id = self._prepare_persistent_root_volumes(
2273 name=name,
2274 vm_av_zone=vm_av_zone,
2275 disk=disk,
2276 base_disk_index=base_disk_index,
2277 block_device_mapping=block_device_mapping,
2278 existing_vim_volumes=existing_vim_volumes,
2279 created_items=created_items,
2280 )
2281 else:
2282 # Non-root persistent volume
2283 self._prepare_non_root_persistent_volumes(
2284 name=name,
2285 disk=disk,
2286 vm_av_zone=vm_av_zone,
2287 block_device_mapping=block_device_mapping,
2288 base_disk_index=base_disk_index,
2289 existing_vim_volumes=existing_vim_volumes,
2290 created_items=created_items,
2291 )
2292 base_disk_index += 1
2293
2294 # Wait until created volumes reach 'available' status
2295 elapsed_time = self._wait_for_created_volumes_availability(
2296 elapsed_time, created_items
2297 )
2298 # Wait until existing VIM volumes reach 'available' status
2299 elapsed_time = self._wait_for_existing_volumes_availability(
2300 elapsed_time, existing_vim_volumes
2301 )
2302 # If we exceeded the timeout rollback
2303 if elapsed_time >= volume_timeout:
2304 raise vimconn.VimConnException(
2305 "Timeout creating volumes for instance " + name,
2306 http_code=vimconn.HTTP_Request_Timeout,
2307 )
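# Only a newly created root volume needs the bootable flag; volumes reused
# via vim_volume_id / vim_id are assumed to be bootable already.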
2308 if boot_volume_id:
2309 self.cinder.volumes.set_bootable(boot_volume_id, True)
2310
2311 def _find_the_external_network_for_floating_ip(self):
2312 """Get the external network ID in order to create a floating IP.
2313
2314 Returns:
2315 pool_id (str): External network pool ID
2316
2317 """
2318
2319 # Find the external network
2320 external_nets = list()
2321
2322 for net in self.neutron.list_networks()["networks"]:
2323 if net["router:external"]:
2324 external_nets.append(net)
2325
2326 if len(external_nets) == 0:
2327 raise vimconn.VimConnException(
2328 "Cannot create floating_ip automatically since "
2329 "no external network is present",
2330 http_code=vimconn.HTTP_Conflict,
2331 )
2332
2333 if len(external_nets) > 1:
2334 raise vimconn.VimConnException(
2335 "Cannot create floating_ip automatically since "
2336 "multiple external networks are present",
2337 http_code=vimconn.HTTP_Conflict,
2338 )
2339
2340 # Pool ID
2341 return external_nets[0].get("id")
2342
2343 def _neutron_create_float_ip(self, param: dict, created_items: dict) -> None:
2344 """Trigger neutron to create a new floating IP using external network ID.
2345
2346 Args:
2347 param (dict): Input parameters to create a floating IP
2348 created_items (dict): All created items belongs to new VM instance
2349
2350 Raises:
2351
2352 VimConnException
2353 """
2354 try:
2355 self.logger.debug("Creating floating IP")
2356 new_floating_ip = self.neutron.create_floatingip(param)
2357 free_floating_ip = new_floating_ip["floatingip"]["id"]
2358 created_items["floating_ip:" + str(free_floating_ip)] = True
2359
2360 except Exception as e:
2361 raise vimconn.VimConnException(
2362 type(e).__name__ + ": Cannot create new floating_ip " + str(e),
2363 http_code=vimconn.HTTP_Conflict,
2364 )
2365
2366 def _create_floating_ip(
2367 self, floating_network: dict, server: object, created_items: dict
2368 ) -> None:
2369 """Get the available Pool ID and create a new floating IP.
2370
2371 Args:
2372 floating_network (dict): Dict including external network ID
2373 server (object): Server object
2374 created_items (dict): All created items belongs to new VM instance
2375
2376 """
2377
2378 # Pool_id is available
2379 if (
2380 isinstance(floating_network["floating_ip"], str)
2381 and floating_network["floating_ip"].lower() != "true"
2382 ):
2383 pool_id = floating_network["floating_ip"]
2384
2385 # Find the Pool_id
2386 else:
2387 pool_id = self._find_the_external_network_for_floating_ip()
2388
2389 param = {
2390 "floatingip": {
2391 "floating_network_id": pool_id,
2392 "tenant_id": server.tenant_id,
2393 }
2394 }
2395
2396 self._neutron_create_float_ip(param, created_items)
2397
2398 def _find_floating_ip(
2399 self,
2400 server: object,
2401 floating_ips: list,
2402 floating_network: dict,
2403 ) -> Optional[str]:
2404 """Find a free floating IP among the given ones, if any.
2405
2406 Args:
2407 server (object): Server object
2408 floating_ips (list): List of floating IPs
2409 floating_network (dict): Details of floating network such as ID
2410
2411 Returns:
2412 free_floating_ip (str): ID of a free floating IP, or None if there is none
2413
2414 """
2415 for fip in floating_ips:
2416 if fip.get("port_id") or fip.get("tenant_id") != server.tenant_id:
2417 continue
2418
2419 if isinstance(floating_network["floating_ip"], str):
2420 if fip.get("floating_network_id") != floating_network["floating_ip"]:
2421 continue
2422
2423 return fip["id"]
2424
2425 def _assign_floating_ip(
2426 self, free_floating_ip: str, floating_network: dict
2427 ) -> Dict:
2428 """Assign the free floating ip address to port.
2429
2430 Args:
2431 free_floating_ip (str): Floating IP to be assigned
2432 floating_network (dict): ID of floating network
2433
2434 Returns:
2435 fip (dict): Floating IP details
2436
2437 """
2438 # The vim_id key contains the neutron.port_id
2439 self.neutron.update_floatingip(
2440 free_floating_ip,
2441 {"floatingip": {"port_id": floating_network["vim_id"]}},
2442 )
2443 # Guard the race condition: wait 5 seconds, then re-read to confirm the IP was not re-assigned to another VM
2444 time.sleep(5)
2445
2446 return self.neutron.show_floatingip(free_floating_ip)
2447
2448 def _get_free_floating_ip(
2449 self, server: object, floating_network: dict
2450 ) -> Optional[str]:
2451 """Get the free floating IP address.
2452
2453 Args:
2454 server (object): Server Object
2455 floating_network (dict): Floating network details
2456
2457 Returns:
2458 free_floating_ip (str): ID of a free floating IP
2459
2460 """
2461
2462 floating_ips = self.neutron.list_floatingips().get("floatingips", [])
2463
2464 # Randomize so that concurrent ROs are less likely to pick the same IP
2465 random.shuffle(floating_ips)
2466
2467 return self._find_floating_ip(server, floating_ips, floating_network)
2468
2469 def _prepare_external_network_for_vminstance(
2470 self,
2471 external_network: list,
2472 server: object,
2473 created_items: dict,
2474 vm_start_time: float,
2475 ) -> None:
2476 """Assign floating IP address for VM instance.
2477
2478 Args:
2479 external_network (list): External networks that need a floating IP
2480 server (object): Server Object
2481 created_items (dict): All created items belongs to new VM instance
2482 vm_start_time (float): Time as a floating point number expressed in seconds since the epoch, in UTC
2483
2484 Raises:
2485 VimConnException
2486
2487 """
2488 for floating_network in external_network:
2489 try:
2490 assigned = False
2491 floating_ip_retries = 3
2492 # In case of RO in HA there can be conflicts, two RO trying to assign same floating IP, so retry
2493 # several times
2494 while not assigned:
2495 free_floating_ip = self._get_free_floating_ip(
2496 server, floating_network
2497 )
2498
2499 if not free_floating_ip:
2500 self._create_floating_ip(
2501 floating_network, server, created_items
2502 )
# Re-enter the loop so the newly created IP is fetched as a free one,
# instead of calling show_floatingip with None below.
continue
2503
2504 try:
2505 # For race condition ensure not already assigned
2506 fip = self.neutron.show_floatingip(free_floating_ip)
2507
2508 if fip["floatingip"].get("port_id"):
2509 continue
2510
2511 # Assign floating ip
2512 fip = self._assign_floating_ip(
2513 free_floating_ip, floating_network
2514 )
2515
2516 if fip["floatingip"]["port_id"] != floating_network["vim_id"]:
2517 self.logger.warning(
2518 "floating_ip {} re-assigned to other port".format(
2519 free_floating_ip
2520 )
2521 )
2522 continue
2523
2524 self.logger.debug(
2525 "Assigned floating_ip {} to VM {}".format(
2526 free_floating_ip, server.id
2527 )
2528 )
2529
2530 assigned = True
2531
2532 except Exception as e:
2533 # OpenStack needs some time after VM creation before an IP can be assigned, so retry on failure
2534 vm_status = self.nova.servers.get(server.id).status
2535
2536 if vm_status not in ("ACTIVE", "ERROR"):
2537 if time.time() - vm_start_time < server_timeout:
2538 time.sleep(5)
2539 continue
2540 elif floating_ip_retries > 0:
2541 floating_ip_retries -= 1
2542 continue
2543
2544 raise vimconn.VimConnException(
2545 "Cannot create floating_ip: {} {}".format(
2546 type(e).__name__, e
2547 ),
2548 http_code=vimconn.HTTP_Conflict,
2549 )
2550
2551 except Exception as e:
2552 if not floating_network["exit_on_floating_ip_error"]:
2553 self.logger.error("Cannot create floating_ip. %s", str(e))
2554 continue
2555
2556 raise
2557
2558 def _update_port_security_for_vminstance(
2559 self,
2560 no_secured_ports: list,
2561 server: object,
2562 ) -> None:
2563 """Updates the port security according to no_secured_ports list.
2564
2565 Args:
2566 no_secured_ports (list): Ports whose port security will be disabled
2567 server (object): Server Object
2568
2569 Raises:
2570 VimConnException
2571
2572 """
2573 # Wait until the VM is active and then disable the port-security
2574 if no_secured_ports:
2575 self.__wait_for_vm(server.id, "ACTIVE")
2576
2577 for port in no_secured_ports:
2578 port_update = {
2579 "port": {"port_security_enabled": False, "security_groups": None}
2580 }
2581
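# The 'allow-address-pairs' strategy keeps port security enabled but allows
# traffic from any source address, as an alternative to disabling port
# security outright.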
2582 if port[1] == "allow-address-pairs":
2583 port_update = {
2584 "port": {"allowed_address_pairs": [{"ip_address": "0.0.0.0/0"}]}
2585 }
2586
2587 try:
2588 self.neutron.update_port(port[0], port_update)
2589
2590 except Exception:
2591 raise vimconn.VimConnException(
2592 "It was not possible to disable port security for port {}".format(
2593 port[0]
2594 )
2595 )
2596
2597 def new_vminstance(
2598 self,
2599 name: str,
2600 description: str,
2601 start: bool,
2602 image_id: str,
2603 flavor_id: str,
2604 affinity_group_list: list,
2605 net_list: list,
2606 cloud_config=None,
2607 disk_list=None,
2608 availability_zone_index=None,
2609 availability_zone_list=None,
2610 ) -> tuple:
2611 """Adds a VM instance to VIM.
2612
2613 Args:
2614 name (str): name of VM
2615 description (str): description
2616 start (bool): indicates if VM must start or boot in pause mode. Ignored
2617 image_id (str): image uuid
2618 flavor_id (str): flavor uuid
2619 affinity_group_list (list): list of affinity groups, each one is a dictionary. Ignored if empty.
2620 net_list (list): list of interfaces, each one is a dictionary with:
2621 name: name of network
2622 net_id: network uuid to connect
2623 vpci: virtual PCI address to assign; ignored because OpenStack lacks support #TODO
2624 model: interface model, ignored #TODO
2625 mac_address: used for SR-IOV ifaces #TODO for other types
2626 use: 'data', 'bridge', 'mgmt'
2627 type: 'virtual', 'PCI-PASSTHROUGH'('PF'), 'SR-IOV'('VF'), 'VFnotShared'
2628 vim_id: filled/added by this function
2629 floating_ip: True/False (or it can be None)
2630 port_security: True/False
2631 cloud_config (dict): (optional) dictionary with:
2632 key-pairs: (optional) list of strings with the public key to be inserted to the default user
2633 users: (optional) list of users to be inserted, each item is a dict with:
2634 name: (mandatory) user name,
2635 key-pairs: (optional) list of strings with the public key to be inserted to the user
2636 user-data: (optional) string is a text script to be passed directly to cloud-init
2637 config-files: (optional). List of files to be transferred. Each item is a dict with:
2638 dest: (mandatory) string with the destination absolute path
2639 encoding: (optional, by default text). Can be one of:
2640 'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
2641 content : (mandatory) string with the content of the file
2642 permissions: (optional) string with file permissions, typically octal notation '0644'
2643 owner: (optional) file owner, string with the format 'owner:group'
2644 boot-data-drive: boolean to indicate if user-data must be passed using a boot drive (hard disk)
2645 disk_list: (optional) list with additional disks to the VM. Each item is a dict with:
2646 image_id: (optional). VIM id of an existing image. If not provided an empty disk must be mounted
2647 size: (mandatory) string with the size of the disk in GB
2648 vim_id: (optional) should use this existing volume id
2649 availability_zone_index: Index of availability_zone_list to use for this VM. None if no AZ is required
2650 availability_zone_list: list of availability zones given by user in the VNFD descriptor. Ignored if
2651 availability_zone_index is None
2652 #TODO ip, security groups
2653
2654 Returns:
2655 A tuple with the instance identifier and created_items or raises an exception on error
2656 created_items can be None or a dictionary where this method can include key-values that will be passed to
2657 the method delete_vminstance and action_vminstance. Can be used to store created ports, volumes, etc.
2658 Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
2659 as not present.
2660
2661 """
2662 self.logger.debug(
2663 "new_vminstance input: image='%s' flavor='%s' nics='%s'",
2664 image_id,
2665 flavor_id,
2666 str(net_list),
2667 )
2668
2669 try:
2670 server = None
2671 created_items = {}
2672 net_list_vim = []
2673 # list of external networks to be connected to instance, later on used to create floating_ip
2674 external_network = []
2675 # List of ports with port-security disabled
2676 no_secured_ports = []
2677 block_device_mapping = {}
2678 existing_vim_volumes = []
2679 server_group_id = None
2680 scheduler_hints = {}
2681
2682 # Check the Openstack Connection
2683 self._reload_connection()
2684
2685 # Prepare network list
2686 self._prepare_network_for_vminstance(
2687 name=name,
2688 net_list=net_list,
2689 created_items=created_items,
2690 net_list_vim=net_list_vim,
2691 external_network=external_network,
2692 no_secured_ports=no_secured_ports,
2693 )
2694
2695 # Cloud config
2696 config_drive, userdata = self._create_user_data(cloud_config)
2697
2698 # Get availability Zone
2699 vm_av_zone = self._get_vm_availability_zone(
2700 availability_zone_index, availability_zone_list
2701 )
2702
2703 if disk_list:
2704 # Prepare disks
2705 self._prepare_disk_for_vminstance(
2706 name=name,
2707 existing_vim_volumes=existing_vim_volumes,
2708 created_items=created_items,
2709 vm_av_zone=vm_av_zone,
2710 block_device_mapping=block_device_mapping,
2711 disk_list=disk_list,
2712 )
2713
2714 if affinity_group_list:
2715 # Only first id on the list will be used. Openstack restriction
2716 server_group_id = affinity_group_list[0]["affinity_group_id"]
2717 scheduler_hints["group"] = server_group_id
2718
2719 self.logger.debug(
2720 "nova.servers.create({}, {}, {}, nics={}, security_groups={}, "
2721 "availability_zone={}, key_name={}, userdata={}, config_drive={}, "
2722 "block_device_mapping={}, server_group={})".format(
2723 name,
2724 image_id,
2725 flavor_id,
2726 net_list_vim,
2727 self.config.get("security_groups"),
2728 vm_av_zone,
2729 self.config.get("keypair"),
2730 userdata,
2731 config_drive,
2732 block_device_mapping,
2733 server_group_id,
2734 )
2735 )
2736
2737 # Create VM
2738 server = self.nova.servers.create(
2739 name=name,
2740 image=image_id,
2741 flavor=flavor_id,
2742 nics=net_list_vim,
2743 security_groups=self.config.get("security_groups"),
2744 # TODO remove security_groups in future versions. Already at neutron port
2745 availability_zone=vm_av_zone,
2746 key_name=self.config.get("keypair"),
2747 userdata=userdata,
2748 config_drive=config_drive,
2749 block_device_mapping=block_device_mapping,
2750 scheduler_hints=scheduler_hints,
2751 )
2752
2753 vm_start_time = time.time()
2754
2755 self._update_port_security_for_vminstance(no_secured_ports, server)
2756
2757 self._prepare_external_network_for_vminstance(
2758 external_network=external_network,
2759 server=server,
2760 created_items=created_items,
2761 vm_start_time=vm_start_time,
2762 )
2763
2764 return server.id, created_items
2765
2766 except Exception as e:
2767 server_id = None
2768 if server:
2769 server_id = server.id
2770
2771 try:
2772 created_items = self.remove_keep_tag_from_persistent_volumes(
2773 created_items
2774 )
2775
2776 self.delete_vminstance(server_id, created_items)
2777
2778 except Exception as e2:
2779 self.logger.error("new_vminstance rollback fail {}".format(e2))
2780
2781 self._format_exception(e)
2782
2783 @staticmethod
2784 def remove_keep_tag_from_persistent_volumes(created_items: Dict) -> Dict:
2785 """Removes the keep flag from persistent volumes so that they can be removed.
2786
2787 Args:
2788 created_items (dict): All created items belongs to VM
2789
2790 Returns:
2791 updated_created_items (dict): Dict which does not include keep flag for volumes.
2792
2793 """
2794 return {
2795 key.replace(":keep", ""): value for (key, value) in created_items.items()
2796 }
2797
2798 def get_vminstance(self, vm_id):
2799 """Returns the VM instance information from VIM"""
2800 # self.logger.debug("Getting VM from VIM")
2801 try:
2802 self._reload_connection()
2803 server = self.nova.servers.find(id=vm_id)
2804 # TODO parse input and translate to VIM format (openmano_schemas.new_vminstance_response_schema)
2805
2806 return server.to_dict()
2807 except (
2808 ksExceptions.ClientException,
2809 nvExceptions.ClientException,
2810 nvExceptions.NotFound,
2811 ConnectionError,
2812 ) as e:
2813 self._format_exception(e)
2814
2815 def get_vminstance_console(self, vm_id, console_type="vnc"):
2816 """
2817 Get a console for the virtual machine
2818 Params:
2819 vm_id: uuid of the VM
2820 console_type, can be:
2821 "novnc" (by default), "xvpvnc" for VNC types,
2822 "rdp-html5" for RDP types, "spice-html5" for SPICE types
2823 Returns dict with the console parameters:
2824 protocol: ssh, ftp, http, https, ...
2825 server: usually ip address
2826 port: the http, ssh, ... port
2827 suffix: extra text, e.g. the http path and query string
2828 """
2829 self.logger.debug("Getting VM CONSOLE from VIM")
2830
2831 try:
2832 self._reload_connection()
2833 server = self.nova.servers.find(id=vm_id)
2834
2835 if console_type is None or console_type == "novnc":
2836 console_dict = server.get_vnc_console("novnc")
2837 elif console_type == "xvpvnc":
2838 console_dict = server.get_vnc_console(console_type)
2839 elif console_type == "rdp-html5":
2840 console_dict = server.get_rdp_console(console_type)
2841 elif console_type == "spice-html5":
2842 console_dict = server.get_spice_console(console_type)
2843 else:
2844 raise vimconn.VimConnException(
2845 "console type '{}' not allowed".format(console_type),
2846 http_code=vimconn.HTTP_Bad_Request,
2847 )
2848
2849 console_dict1 = console_dict.get("console")
2850
2851 if console_dict1:
2852 console_url = console_dict1.get("url")
2853
2854 if console_url:
2855 # parse console_url
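# e.g. "http://host:6080/vnc_auto.html?token=abc" is split into server
# "host", port "6080" and suffix "vnc_auto.html?token=abc"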
2856 protocol_index = console_url.find("//")
2857 suffix_index = (
2858 console_url[protocol_index + 2 :].find("/") + protocol_index + 2
2859 )
2860 port_index = (
2861 console_url[protocol_index + 2 : suffix_index].find(":")
2862 + protocol_index
2863 + 2
2864 )
2865
2866 if protocol_index < 0 or port_index < 0 or suffix_index < 0:
2867 return (
2868 -vimconn.HTTP_Internal_Server_Error,
2869 "Unexpected response from VIM",
2870 )
2871
2872 console_dict = {
2873 "protocol": console_url[0:protocol_index],
2874 "server": console_url[protocol_index + 2 : port_index],
2875 "port": console_url[port_index + 1 : suffix_index],
2876 "suffix": console_url[suffix_index + 1 :],
2877 }
2879
2880 return console_dict
2881 raise vimconn.VimConnUnexpectedResponse("Unexpected response from VIM")
2882 except (
2883 nvExceptions.NotFound,
2884 ksExceptions.ClientException,
2885 nvExceptions.ClientException,
2886 nvExceptions.BadRequest,
2887 ConnectionError,
2888 ) as e:
2889 self._format_exception(e)
2890
2891 def _delete_ports_by_id_wth_neutron(self, k_id: str) -> None:
2892 """Neutron delete ports by id.
2893 Args:
2894 k_id (str): Port id in the VIM
2895 """
2896 try:
2897 port_dict = self.neutron.list_ports()
2898 existing_ports = [port["id"] for port in port_dict.get("ports", [])]
2899
2900 if k_id in existing_ports:
2901 self.neutron.delete_port(k_id)
2902
2903 except Exception as e:
2904 self.logger.error("Error deleting port: {}: {}".format(type(e).__name__, e))
2905
2906 def _delete_volumes_by_id_wth_cinder(
2907 self, k: str, k_id: str, volumes_to_hold: list, created_items: dict
2908 ) -> bool:
2909 """Cinder delete volume by id.
2910 Args:
2911 k (str): Full item name in created_items
2912 k_id (str): ID of the volume in the VIM
2913 volumes_to_hold (list): Volumes not to delete
2914 created_items (dict): All created items belonging to the VM
Returns:
True when the volume is not yet 'available', so the caller must keep waiting
2915 """
2916 try:
2917 if k_id in volumes_to_hold:
2918 return
2919
2920 if self.cinder.volumes.get(k_id).status != "available":
2921 return True
2922
2923 else:
2924 self.cinder.volumes.delete(k_id)
2925 created_items[k] = None
2926
2927 except Exception as e:
2928 self.logger.error(
2929 "Error deleting volume: {}: {}".format(type(e).__name__, e)
2930 )
2931
2932 def _delete_floating_ip_by_id(self, k: str, k_id: str, created_items: dict) -> None:
2933 """Neutron delete floating ip by id.
2934 Args:
2935 k (str): Full item name in created_items
2936 k_id (str): ID of floating ip in VIM
2937 created_items (dict): All created items belongs to VM
2938 """
2939 try:
2940 self.neutron.delete_floatingip(k_id)
2941 created_items[k] = None
2942
2943 except Exception as e:
2944 self.logger.error(
2945 "Error deleting floating ip: {}: {}".format(type(e).__name__, e)
2946 )
2947
2948 @staticmethod
2949 def _get_item_name_id(k: str) -> Tuple[str, str]:
2950 k_item, _, k_id = k.partition(":")
2951 return k_item, k_id
2952
2953 def _delete_vm_ports_attached_to_network(self, created_items: dict) -> None:
2954 """Delete VM ports attached to the networks before deleting virtual machine.
2955 Args:
2956 created_items (dict): All created items belongs to VM
2957 """
2958
2959 for k, v in created_items.items():
2960 if not v: # skip already deleted
2961 continue
2962
2963 try:
2964 k_item, k_id = self._get_item_name_id(k)
2965 if k_item == "port":
2966 self._delete_ports_by_id_wth_neutron(k_id)
2967
2968 except Exception as e:
2969 self.logger.error(
2970 "Error deleting port: {}: {}".format(type(e).__name__, e)
2971 )
2972
2973 def _delete_created_items(
2974 self, created_items: dict, volumes_to_hold: list, keep_waiting: bool
2975 ) -> bool:
2976 """Delete Volumes and floating ip if they exist in created_items."""
2977 for k, v in created_items.items():
2978 if not v: # skip already deleted
2979 continue
2980
2981 try:
2982 k_item, k_id = self._get_item_name_id(k)
2983
2984 if k_item == "volume":
2985 unavailable_vol = self._delete_volumes_by_id_wth_cinder(
2986 k, k_id, volumes_to_hold, created_items
2987 )
2988
2989 if unavailable_vol:
2990 keep_waiting = True
2991
2992 elif k_item == "floating_ip":
2993 self._delete_floating_ip_by_id(k, k_id, created_items)
2994
2995 except Exception as e:
2996 self.logger.error("Error deleting {}: {}".format(k, e))
2997
2998 return keep_waiting
2999
3000 @staticmethod
3001 def _extract_items_wth_keep_flag_from_created_items(created_items: dict) -> dict:
3002 """Remove the volumes that carry the keep flag from created_items
3003
3004 Args:
3005 created_items (dict): All created items belongs to VM
3006
3007 Returns:
3008 created_items (dict): created_items without the volumes flagged to keep
3009 """
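# Keys are "<item>:<id>" or "volume:<id>:keep"; keeping only two-part keys
# filters out every volume flagged with ':keep'.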
3010 return {
3011 key: value
3012 for (key, value) in created_items.items()
3013 if len(key.split(":")) == 2
3014 }
3015
3016 def delete_vminstance(
3017 self, vm_id: str, created_items: dict = None, volumes_to_hold: list = None
3018 ) -> None:
3019 """Removes a VM instance from VIM. Returns the old identifier.
3020 Args:
3021 vm_id (str): Identifier of VM instance
3022 created_items (dict): All created items belongs to VM
3023 volumes_to_hold (list): Volumes_to_hold
3024 """
3025 if created_items is None:
3026 created_items = {}
3027 if volumes_to_hold is None:
3028 volumes_to_hold = []
3029
3030 try:
3031 created_items = self._extract_items_wth_keep_flag_from_created_items(
3032 created_items
3033 )
3034
3035 self._reload_connection()
3036
3037 # Delete VM ports attached to the networks before the virtual machine
3038 if created_items:
3039 self._delete_vm_ports_attached_to_network(created_items)
3040
3041 if vm_id:
3042 self.nova.servers.delete(vm_id)
3043
3044 # Although detached, volumes must be in 'available' status before they can be
3045 # deleted; the loop below waits for that.
3046 keep_waiting = True
3047 elapsed_time = 0
3048
3049 while keep_waiting and elapsed_time < volume_timeout:
3050 keep_waiting = False
3051
3052 # Delete volumes and floating IP.
3053 keep_waiting = self._delete_created_items(
3054 created_items, volumes_to_hold, keep_waiting
3055 )
3056
3057 if keep_waiting:
3058 time.sleep(1)
3059 elapsed_time += 1
3060
3061 except (
3062 nvExceptions.NotFound,
3063 ksExceptions.ClientException,
3064 nvExceptions.ClientException,
3065 ConnectionError,
3066 ) as e:
3067 self._format_exception(e)
3068
3069 def refresh_vms_status(self, vm_list):
3070 """Get the status of the virtual machines and their interfaces/ports
3071 Params: the list of VM identifiers
3072 Returns a dictionary with:
3073 vm_id: #VIM id of this Virtual Machine
3074 status: #Mandatory. Text with one of:
3075 # DELETED (not found at vim)
3076 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
3077 # OTHER (Vim reported other status not understood)
3078 # ERROR (VIM indicates an ERROR status)
3079 # ACTIVE, PAUSED, SUSPENDED, INACTIVE (not running),
3080 # CREATING (on building process), ERROR
3081 # ACTIVE:NoMgmtIP (active but none of its interfaces has an IP address)
3082 #
3083 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
3084 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
3085 interfaces:
3086 - vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
3087 mac_address: #Text format XX:XX:XX:XX:XX:XX
3088 vim_net_id: #network id where this interface is connected
3089 vim_interface_id: #interface/port VIM id
3090 ip_address: #null, or text with IPv4, IPv6 address
3091 compute_node: #identification of compute node where PF,VF interface is allocated
3092 pci: #PCI address of the NIC that hosts the PF,VF
3093 vlan: #physical VLAN used for VF
3094 """
3095 vm_dict = {}
3096 self.logger.debug(
3097 "refresh_vms status: Getting tenant VM instance information from VIM"
3098 )
3099
3100 for vm_id in vm_list:
3101 vm = {}
3102
3103 try:
3104 vm_vim = self.get_vminstance(vm_id)
3105
3106 if vm_vim["status"] in vmStatus2manoFormat:
3107 vm["status"] = vmStatus2manoFormat[vm_vim["status"]]
3108 else:
3109 vm["status"] = "OTHER"
3110 vm["error_msg"] = "VIM status reported " + vm_vim["status"]
3111
3112 vm_vim.pop("OS-EXT-SRV-ATTR:user_data", None)
3113 vm_vim.pop("user_data", None)
3114 vm["vim_info"] = self.serialize(vm_vim)
3115
3116 vm["interfaces"] = []
3117 if vm_vim.get("fault"):
3118 vm["error_msg"] = str(vm_vim["fault"])
3119
3120 # get interfaces
3121 try:
3122 self._reload_connection()
3123 port_dict = self.neutron.list_ports(device_id=vm_id)
3124
3125 for port in port_dict["ports"]:
3126 interface = {}
3127 interface["vim_info"] = self.serialize(port)
3128 interface["mac_address"] = port.get("mac_address")
3129 interface["vim_net_id"] = port["network_id"]
3130 interface["vim_interface_id"] = port["id"]
3131 # check if OS-EXT-SRV-ATTR:host is there,
3132 # in case of non-admin credentials, it will be missing
3133
3134 if vm_vim.get("OS-EXT-SRV-ATTR:host"):
3135 interface["compute_node"] = vm_vim["OS-EXT-SRV-ATTR:host"]
3136
3137 interface["pci"] = None
3138
3139 # check if binding:profile is there,
3140 # in case of non-admin credentials, it will be missing
3141 if port.get("binding:profile"):
3142 if port["binding:profile"].get("pci_slot"):
3143 # TODO: At the moment sr-iov pci addresses are converted to PF pci addresses by setting
3144 # the slot to 0x00
3145 # TODO: This is just a workaround valid for niantinc. Find a better way to do so
3146 # CHANGE DDDD:BB:SS.F to DDDD:BB:00.(F%2) assuming there are 2 ports per nic
3147 pci = port["binding:profile"]["pci_slot"]
3148 # interface["pci"] = pci[:-4] + "00." + str(int(pci[-1]) % 2)
3149 interface["pci"] = pci
3150
3151 interface["vlan"] = None
3152
3153 if port.get("binding:vif_details"):
3154 interface["vlan"] = port["binding:vif_details"].get("vlan")
3155
3156 # Get the VLAN from the network when it is missing from the port, for old
3157 # OpenStack versions and for passthrough (PT) cases that need the VLAN
3158 if not interface["vlan"]:
3159 # if network is of type vlan and port is of type direct (sr-iov) then set vlan id
3160 network = self.neutron.show_network(port["network_id"])
3161
3162 if (
3163 network["network"].get("provider:network_type")
3164 == "vlan"
3165 ):
3166 # and port.get("binding:vnic_type") in ("direct", "direct-physical"):
3167 interface["vlan"] = network["network"].get(
3168 "provider:segmentation_id"
3169 )
3170
3171 ips = []
3172 # look for floating ip address
3173 try:
3174 floating_ip_dict = self.neutron.list_floatingips(
3175 port_id=port["id"]
3176 )
3177
3178 if floating_ip_dict.get("floatingips"):
3179 ips.append(
3180 floating_ip_dict["floatingips"][0].get(
3181 "floating_ip_address"
3182 )
3183 )
3184 except Exception:
3185 pass
3186
3187 for subnet in port["fixed_ips"]:
3188 ips.append(subnet["ip_address"])
3189
3190 interface["ip_address"] = ";".join(ips)
3191 vm["interfaces"].append(interface)
3192 except Exception as e:
3193 self.logger.error(
3194 "Error getting vm interface information {}: {}".format(
3195 type(e).__name__, e
3196 ),
3197 exc_info=True,
3198 )
3199 except vimconn.VimConnNotFoundException as e:
3200 self.logger.error("Exception getting vm status: %s", str(e))
3201 vm["status"] = "DELETED"
3202 vm["error_msg"] = str(e)
3203 except vimconn.VimConnException as e:
3204 self.logger.error("Exception getting vm status: %s", str(e))
3205 vm["status"] = "VIM_ERROR"
3206 vm["error_msg"] = str(e)
3207
3208 vm_dict[vm_id] = vm
3209
3210 return vm_dict
3211
3212 def action_vminstance(self, vm_id, action_dict, created_items={}):
3213 """Send an action to a VM instance in the VIM
3214 Returns None or the console dict if the action was successfully sent to the VIM
3215 """
3216 self.logger.debug("Action over VM '%s': %s", vm_id, str(action_dict))
3217
3218 try:
3219 self._reload_connection()
3220 server = self.nova.servers.find(id=vm_id)
3221
3222 if "start" in action_dict:
3223 if action_dict["start"] == "rebuild":
3224 server.rebuild()
3225 else:
3226 if server.status == "PAUSED":
3227 server.unpause()
3228 elif server.status == "SUSPENDED":
3229 server.resume()
3230 elif server.status == "SHUTOFF":
3231 server.start()
3232 else:
3233 self.logger.debug(
3234 "ERROR : Instance is not in SHUTOFF/PAUSE/SUSPEND state"
3235 )
3236 raise vimconn.VimConnException(
3237 "Cannot 'start' instance while it is in active state",
3238 http_code=vimconn.HTTP_Bad_Request,
3239 )
3240
3241 elif "pause" in action_dict:
3242 server.pause()
3243 elif "resume" in action_dict:
3244 server.resume()
3245 elif "shutoff" in action_dict or "shutdown" in action_dict:
3246 self.logger.debug("server status %s", server.status)
3247 if server.status == "ACTIVE":
3248 server.stop()
3249 else:
3250 self.logger.debug("ERROR: VM is not in Active state")
3251 raise vimconn.VimConnException(
3252 "VM is not in active state, stop operation is not allowed",
3253 http_code=vimconn.HTTP_Bad_Request,
3254 )
3255 elif "forceOff" in action_dict:
3256 server.stop() # TODO
3257 elif "terminate" in action_dict:
3258 server.delete()
3259 elif "createImage" in action_dict:
3260 server.create_image()
3261 # "path":path_schema,
3262 # "description":description_schema,
3263 # "name":name_schema,
3264 # "metadata":metadata_schema,
3265 # "imageRef": id_schema,
3266 # "disk": {"oneOf":[{"type": "null"}, {"type":"string"}] },
3267 elif "rebuild" in action_dict:
3268 server.rebuild(server.image["id"])
3269 elif "reboot" in action_dict:
3270 server.reboot() # reboot_type="SOFT"
3271 elif "console" in action_dict:
3272 console_type = action_dict["console"]
3273
3274 if console_type is None or console_type == "novnc":
3275 console_dict = server.get_vnc_console("novnc")
3276 elif console_type == "xvpvnc":
3277 console_dict = server.get_vnc_console(console_type)
3278 elif console_type == "rdp-html5":
3279 console_dict = server.get_rdp_console(console_type)
3280 elif console_type == "spice-html5":
3281 console_dict = server.get_spice_console(console_type)
3282 else:
3283 raise vimconn.VimConnException(
3284 "console type '{}' not allowed".format(console_type),
3285 http_code=vimconn.HTTP_Bad_Request,
3286 )
3287
3288 try:
3289 console_url = console_dict["console"]["url"]
3290 # parse console_url
3291 protocol_index = console_url.find("//")
3292 suffix_index = (
3293 console_url[protocol_index + 2 :].find("/") + protocol_index + 2
3294 )
3295 port_index = (
3296 console_url[protocol_index + 2 : suffix_index].find(":")
3297 + protocol_index
3298 + 2
3299 )
3300
3301 if protocol_index < 0 or port_index < 0 or suffix_index < 0:
3302 raise vimconn.VimConnException(
3303 "Unexpected response from VIM " + str(console_dict)
3304 )
3305
3306 console_dict2 = {
3307 "protocol": console_url[0:protocol_index],
3308 "server": console_url[protocol_index + 2 : port_index],
3309 "port": int(console_url[port_index + 1 : suffix_index]),
3310 "suffix": console_url[suffix_index + 1 :],
3311 }
3312
3313 return console_dict2
3314 except Exception:
3315 raise vimconn.VimConnException(
3316 "Unexpected response from VIM " + str(console_dict)
3317 )
3318
3319 return None
3320 except (
3321 ksExceptions.ClientException,
3322 nvExceptions.ClientException,
3323 nvExceptions.NotFound,
3324 ConnectionError,
3325 ) as e:
3326 self._format_exception(e)
3327 # TODO insert exception vimconn.HTTP_Unauthorized
3328
3329 # ###### VIO Specific Changes #########
3330 def _generate_vlanID(self):
3331 """
3332 Method to get unused vlanID
3333 Args:
3334 None
3335 Returns:
3336 vlanID
3337 """
3338 # Get used VLAN IDs
3339 usedVlanIDs = []
3340 networks = self.get_network_list()
3341
3342 for net in networks:
3343 if net.get("provider:segmentation_id"):
3344 usedVlanIDs.append(net.get("provider:segmentation_id"))
3345
3346 used_vlanIDs = set(usedVlanIDs)
3347
3348 # find unused VLAN ID
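# for/else: the else clause below raises only when all configured ranges
# were scanned without finding (and returning) a free VLAN ID.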
3349 for vlanID_range in self.config.get("dataplane_net_vlan_range"):
3350 try:
3351 start_vlanid, end_vlanid = map(
3352 int, vlanID_range.replace(" ", "").split("-")
3353 )
3354
3355 for vlanID in range(start_vlanid, end_vlanid + 1):
3356 if vlanID not in used_vlanIDs:
3357 return vlanID
3358 except Exception as exp:
3359 raise vimconn.VimConnException(
3360 "Exception {} occurred while generating VLAN ID.".format(exp)
3361 )
3362 else:
3363 raise vimconn.VimConnConflictException(
3364 "Unable to create the SRIOV VLAN network. All given Vlan IDs {} are in use.".format(
3365 self.config.get("dataplane_net_vlan_range")
3366 )
3367 )
3368
3369 def _generate_multisegment_vlanID(self):
3370 """
3371 Method to get unused vlanID
3372 Args:
3373 None
3374 Returns:
3375 vlanID
3376 """
3377 # Get used VLAN IDs
3378 usedVlanIDs = []
3379 networks = self.get_network_list()
3380 for net in networks:
3381 if net.get("provider:network_type") == "vlan" and net.get(
3382 "provider:segmentation_id"
3383 ):
3384 usedVlanIDs.append(net.get("provider:segmentation_id"))
3385 elif net.get("segments"):
3386 for segment in net.get("segments"):
3387 if segment.get("provider:network_type") == "vlan" and segment.get(
3388 "provider:segmentation_id"
3389 ):
3390 usedVlanIDs.append(segment.get("provider:segmentation_id"))
3391
3392 used_vlanIDs = set(usedVlanIDs)
3393
3394 # find unused VLAN ID
3395 for vlanID_range in self.config.get("multisegment_vlan_range"):
3396 try:
3397 start_vlanid, end_vlanid = map(
3398 int, vlanID_range.replace(" ", "").split("-")
3399 )
3400
3401 for vlanID in range(start_vlanid, end_vlanid + 1):
3402 if vlanID not in used_vlanIDs:
3403 return vlanID
3404 except Exception as exp:
3405 raise vimconn.VimConnException(
3406 "Exception {} occurred while generating VLAN ID.".format(exp)
3407 )
3408 else:
3409 raise vimconn.VimConnConflictException(
3410 "Unable to create the VLAN segment. All VLAN IDs {} are in use.".format(
3411 self.config.get("multisegment_vlan_range")
3412 )
3413 )
3414
3415 def _validate_vlan_ranges(self, input_vlan_range, text_vlan_range):
3416 """
3417 Method to validate user given vlanID ranges
3418 Args: None
3419 Returns: None
3420 """
3421 for vlanID_range in input_vlan_range:
3422 vlan_range = vlanID_range.replace(" ", "")
3423 # validate format
3424 vlanID_pattern = r"(\d+)-(\d+)$"
3425 match_obj = re.match(vlanID_pattern, vlan_range)
3426 if not match_obj:
3427 raise vimconn.VimConnConflictException(
3428 "Invalid VLAN range for {}: {}. You must provide "
3429 "'{}' in format [start_ID - end_ID].".format(
3430 text_vlan_range, vlanID_range, text_vlan_range
3431 )
3432 )
3433
3434 start_vlanid, end_vlanid = map(int, vlan_range.split("-"))
3435 if start_vlanid <= 0:
3436 raise vimconn.VimConnConflictException(
3437 "Invalid VLAN range for {}: {}. Start ID can not be zero. For VLAN "
3438 "networks valid IDs are 1 to 4094 ".format(
3439 text_vlan_range, vlanID_range
3440 )
3441 )
3442
3443 if end_vlanid > 4094:
3444 raise vimconn.VimConnConflictException(
3445 "Invalid VLAN range for {}: {}. End VLAN ID can not be "
3446 "greater than 4094. For VLAN networks valid IDs are 1 to 4094 ".format(
3447 text_vlan_range, vlanID_range
3448 )
3449 )
3450
3451 if start_vlanid > end_vlanid:
3452 raise vimconn.VimConnConflictException(
3453 "Invalid VLAN range for {}: {}. You must provide '{}'"
3454 " in format start_ID - end_ID and start_ID < end_ID ".format(
3455 text_vlan_range, vlanID_range, text_vlan_range
3456 )
3457 )
3458
3459 def get_hosts_info(self):
3460 """Get the information of deployed hosts
3461 Returns the hosts content"""
3462 if self.debug:
3463 print("osconnector: Getting Host info from VIM")
3464
3465 try:
3466 h_list = []
3467 self._reload_connection()
3468 hypervisors = self.nova.hypervisors.list()
3469
3470 for hype in hypervisors:
3471 h_list.append(hype.to_dict())
3472
3473 return 1, {"hosts": h_list}
3474 except nvExceptions.NotFound as e:
3475 error_value = -vimconn.HTTP_Not_Found
3476 error_text = str(e) if len(e.args) == 0 else str(e.args[0])
3477 except (ksExceptions.ClientException, nvExceptions.ClientException) as e:
3478 error_value = -vimconn.HTTP_Bad_Request
3479 error_text = (
3480 type(e).__name__
3481 + ": "
3482 + (str(e) if len(e.args) == 0 else str(e.args[0]))
3483 )
3484
3485 # TODO insert exception vimconn.HTTP_Unauthorized
3486 # if reaching here is because an exception
3487 self.logger.debug("get_hosts_info " + error_text)
3488
3489 return error_value, error_text
3490
3491 def get_hosts(self, vim_tenant):
3492 """Get the hosts and deployed instances
3493 Returns the hosts content"""
3494 r, hype_dict = self.get_hosts_info()
3495
3496 if r < 0:
3497 return r, hype_dict
3498
3499 hypervisors = hype_dict["hosts"]
3500
3501 try:
3502 servers = self.nova.servers.list()
3503 for hype in hypervisors:
3504 for server in servers:
3505 if (
3506 server.to_dict()["OS-EXT-SRV-ATTR:hypervisor_hostname"]
3507 == hype["hypervisor_hostname"]
3508 ):
3509 if "vm" in hype:
3510 hype["vm"].append(server.id)
3511 else:
3512 hype["vm"] = [server.id]
3513
3514 return 1, hype_dict
3515 except nvExceptions.NotFound as e:
3516 error_value = -vimconn.HTTP_Not_Found
3517 error_text = str(e) if len(e.args) == 0 else str(e.args[0])
3518 except (ksExceptions.ClientException, nvExceptions.ClientException) as e:
3519 error_value = -vimconn.HTTP_Bad_Request
3520 error_text = (
3521 type(e).__name__
3522 + ": "
3523 + (str(e) if len(e.args) == 0 else str(e.args[0]))
3524 )
3525
3526 # TODO insert exception vimconn.HTTP_Unauthorized
3527 # if reaching here is because an exception
3528 self.logger.debug("get_hosts " + error_text)
3529
3530 return error_value, error_text
3531
3532 def new_affinity_group(self, affinity_group_data):
3533 """Adds a server group to VIM
3534 affinity_group_data contains a dictionary with information, keys:
3535 name: name in VIM for the server group
3536 type: affinity or anti-affinity
3537 scope: Only nfvi-node allowed
3538 Returns the server group identifier"""
3539 self.logger.debug("Adding Server Group '%s'", str(affinity_group_data))
3540
3541 try:
3542 name = affinity_group_data["name"]
3543 policy = affinity_group_data["type"]
3544
3545 self._reload_connection()
3546 new_server_group = self.nova.server_groups.create(name, policy)
3547
3548 return new_server_group.id
3549 except (
3550 ksExceptions.ClientException,
3551 nvExceptions.ClientException,
3552 ConnectionError,
3553 KeyError,
3554 ) as e:
3555 self._format_exception(e)
3556
3557 def get_affinity_group(self, affinity_group_id):
3558 """Obtain server group details from the VIM. Returns the server group details as a dict"""
3559 self.logger.debug("Getting server group '%s'", affinity_group_id)
3560 try:
3561 self._reload_connection()
3562 server_group = self.nova.server_groups.find(id=affinity_group_id)
3563
3564 return server_group.to_dict()
3565 except (
3566 nvExceptions.NotFound,
3567 nvExceptions.ClientException,
3568 ksExceptions.ClientException,
3569 ConnectionError,
3570 ) as e:
3571 self._format_exception(e)
3572
3573 def delete_affinity_group(self, affinity_group_id):
3574 """Deletes a server group from the VIM. Returns the old affinity_group_id"""
3575 self.logger.debug("Deleting server group '%s'", affinity_group_id)
3576 try:
3577 self._reload_connection()
3578 self.nova.server_groups.delete(affinity_group_id)
3579
3580 return affinity_group_id
3581 except (
3582 nvExceptions.NotFound,
3583 ksExceptions.ClientException,
3584 nvExceptions.ClientException,
3585 ConnectionError,
3586 ) as e:
3587 self._format_exception(e)
3588
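    # Illustrative lifecycle sketch (comment only; "sg_id" as returned by
    # new_affinity_group above; the exact keys of the dict are whatever
    # novaclient's ServerGroup.to_dict() provides):
    #
    #     details = conn.get_affinity_group(sg_id)  # e.g. {"id": ..., "name": ..., ...}
    #     conn.delete_affinity_group(sg_id)         # returns sg_id on success
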
    def get_vdu_state(self, vm_id):
        """
        Get the state of a VDU.
        param:
            vm_id: ID of an instance
        Returns the list [status, flavor_id, host, availability_zone]
        """
        self.logger.debug("Getting the status of VM")
        self.logger.debug("VIM VM ID %s", vm_id)
        self._reload_connection()
        server = self.nova.servers.find(id=vm_id)
        server_dict = server.to_dict()
        vdu_data = [
            server_dict["status"],
            server_dict["flavor"]["id"],
            server_dict["OS-EXT-SRV-ATTR:host"],
            server_dict["OS-EXT-AZ:availability_zone"],
        ]
        self.logger.debug("vdu_data %s", vdu_data)
        return vdu_data

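    # The returned list can be unpacked positionally (comment sketch; the ID is
    # made up):
    #
    #     status, flavor_id, host, az = conn.get_vdu_state("0a1b2c3d-...")
    #     # e.g. status == "ACTIVE", host == "compute-0", az == "nova"
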
    def check_compute_availability(self, host, server_flavor_details):
        """Check whether compute host "host" has enough free RAM, disk and vCPUs
        to hold a server with the given flavor details.
        Returns the host if it fits; otherwise returns None implicitly."""
        self._reload_connection()
        hypervisor_search = self.nova.hypervisors.search(
            hypervisor_match=host, servers=True
        )
        for hypervisor in hypervisor_search:
            hypervisor_id = hypervisor.to_dict()["id"]
            hypervisor_details = self.nova.hypervisors.get(hypervisor=hypervisor_id)
            hypervisor_json = hypervisor_details.to_dict()
            resources_available = [
                hypervisor_json["free_ram_mb"],
                hypervisor_json["disk_available_least"],
                hypervisor_json["vcpus"] - hypervisor_json["vcpus_used"],
            ]
            compute_available = all(
                x > y for x, y in zip(resources_available, server_flavor_details)
            )
            if compute_available:
                return host

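    # The capacity check pairs hypervisor headroom with the flavor's demands,
    # element by element (comment sketch with made-up numbers):
    #
    #     resources_available = [8192, 40, 6]    # free RAM (MB), free disk (GB), free vCPUs
    #     server_flavor_details = [4096, 10, 2]  # flavor ram, disk, vcpus
    #     all(x > y for x, y in zip(resources_available, server_flavor_details))  # True
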
    def check_availability_zone(
        self, old_az, server_flavor_details, old_host, host=None
    ):
        """Check whether availability zone "old_az" can host the server: if
        "host" is given, verify that it belongs to the zone and has capacity;
        otherwise look for any host in the zone other than "old_host" with
        enough free resources.
        Returns a dict with keys "zone_check" (bool) and "compute_availability"."""
        self._reload_connection()
        az_check = {"zone_check": False, "compute_availability": None}
        aggregates_list = self.nova.aggregates.list()
        for aggregate in aggregates_list:
            aggregate_json = aggregate.to_dict()
            if aggregate_json["availability_zone"] == old_az:
                hosts_list = aggregate_json["hosts"]
                if host is not None:
                    if host in hosts_list:
                        az_check["zone_check"] = True
                        available_compute_id = self.check_compute_availability(
                            host, server_flavor_details
                        )
                        if available_compute_id is not None:
                            az_check["compute_availability"] = available_compute_id
                else:
                    for check_host in hosts_list:
                        if check_host != old_host:
                            available_compute_id = self.check_compute_availability(
                                check_host, server_flavor_details
                            )
                            if available_compute_id is not None:
                                az_check["zone_check"] = True
                                az_check["compute_availability"] = available_compute_id
                                break
                    # for/else: runs only when the loop finished without break,
                    # i.e. no host in the zone had enough free capacity
                    else:
                        az_check["zone_check"] = True
        return az_check

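    # Shape of the returned dict (comment sketch; the host name is made up):
    #
    #     {"zone_check": True, "compute_availability": "compute-3"}  # a suitable host was found
    #     {"zone_check": False, "compute_availability": None}        # zone/host mismatch
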
    def migrate_instance(self, vm_id, compute_host=None):
        """
        Migrate a VDU to another compute host.
        param:
            vm_id: ID of an instance
            compute_host: host to migrate the VDU to
        """
        self._reload_connection()
        vm_state = False
        instance_state = self.get_vdu_state(vm_id)
        server_flavor_id = instance_state[1]
        server_hypervisor_name = instance_state[2]
        server_availability_zone = instance_state[3]
        try:
            server_flavor = self.nova.flavors.find(id=server_flavor_id).to_dict()
            server_flavor_details = [
                server_flavor["ram"],
                server_flavor["disk"],
                server_flavor["vcpus"],
            ]
            if compute_host == server_hypervisor_name:
                raise vimconn.VimConnException(
                    "Unable to migrate instance '{}' to the same host '{}'".format(
                        vm_id, compute_host
                    ),
                    http_code=vimconn.HTTP_Bad_Request,
                )
            az_status = self.check_availability_zone(
                server_availability_zone,
                server_flavor_details,
                server_hypervisor_name,
                compute_host,
            )
            availability_zone_check = az_status["zone_check"]
            available_compute_id = az_status.get("compute_availability")

            if availability_zone_check is False:
                raise vimconn.VimConnException(
                    "Unable to migrate instance '{}' to a different availability zone".format(
                        vm_id
                    ),
                    http_code=vimconn.HTTP_Bad_Request,
                )
            if available_compute_id is not None:
                self.nova.servers.live_migrate(
                    server=vm_id,
                    host=available_compute_id,
                    block_migration=True,
                    disk_over_commit=False,
                )
                state = "MIGRATING"
                vm_state = self.__wait_for_vm(vm_id, "ACTIVE")
                changed_compute_host = self.get_vdu_state(vm_id)[2]
                if vm_state and changed_compute_host == available_compute_id:
                    self.logger.debug(
                        "Instance '{}' migrated to the new compute host '{}'".format(
                            vm_id, changed_compute_host
                        )
                    )
                    return state, available_compute_id
                else:
                    raise vimconn.VimConnException(
                        "Migration Failed. Instance '{}' not moved to the new host {}".format(
                            vm_id, available_compute_id
                        ),
                        http_code=vimconn.HTTP_Bad_Request,
                    )
            else:
                raise vimconn.VimConnException(
                    "Compute '{}' not available or does not have enough resources to migrate the instance".format(
                        compute_host
                    ),
                    http_code=vimconn.HTTP_Bad_Request,
                )
        except (
            nvExceptions.BadRequest,
            nvExceptions.ClientException,
            nvExceptions.NotFound,
        ) as e:
            self._format_exception(e)

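    # Comment sketch of a targeted live migration (ID and host name are made up):
    #
    #     state, new_host = conn.migrate_instance("0a1b2c3d-...", compute_host="compute-3")
    #     # state == "MIGRATING"; the method returns once the VM is ACTIVE on the new host
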
    def resize_instance(self, vm_id, new_flavor_id):
        """
        Resize an instance to the given flavor.
        param:
            vm_id: ID of an instance
            new_flavor_id: ID of the flavor to resize the instance to
        Returns the status of the resized instance
        """
        self._reload_connection()
        self.logger.debug("resize the flavor of an instance")
        instance_status, old_flavor_id, compute_host, az = self.get_vdu_state(vm_id)
        old_flavor_disk = self.nova.flavors.find(id=old_flavor_id).to_dict()["disk"]
        new_flavor_disk = self.nova.flavors.find(id=new_flavor_id).to_dict()["disk"]
        try:
            if instance_status == "ACTIVE" or instance_status == "SHUTOFF":
                if old_flavor_disk > new_flavor_disk:
                    raise nvExceptions.BadRequest(
                        400,
                        message="Server disk resize failed. Resize to lower disk flavor is not allowed",
                    )
                else:
                    self.nova.servers.resize(server=vm_id, flavor=new_flavor_id)
                    vm_state = self.__wait_for_vm(vm_id, "VERIFY_RESIZE")
                    if vm_state:
                        instance_resized_status = self.confirm_resize(vm_id)
                        return instance_resized_status
                    else:
                        raise nvExceptions.BadRequest(
                            409,
                            message="Cannot 'resize': the VM did not reach the VERIFY_RESIZE state",
                        )

            else:
                self.logger.debug("ERROR : Instance is not in ACTIVE or SHUTOFF state")
                raise nvExceptions.BadRequest(
                    409,
                    message="Cannot 'resize' an instance that is not in ACTIVE or SHUTOFF state",
                )
        except (
            nvExceptions.BadRequest,
            nvExceptions.ClientException,
            nvExceptions.NotFound,
        ) as e:
            self._format_exception(e)

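    # Comment sketch of a resize (IDs are made up; the new flavor's disk must not
    # be smaller than the old one):
    #
    #     status = conn.resize_instance("0a1b2c3d-...", "new-flavor-id")
    #     # status is the instance state once the resize is confirmed, e.g. "ACTIVE"
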
    def confirm_resize(self, vm_id):
        """
        Confirm the resize of an instance.
        param:
            vm_id: ID of an instance
        Returns the instance status after the resize is confirmed
        """
        self._reload_connection()
        self.nova.servers.confirm_resize(server=vm_id)
        if self.get_vdu_state(vm_id)[0] == "VERIFY_RESIZE":
            self.__wait_for_vm(vm_id, "ACTIVE")
        instance_status = self.get_vdu_state(vm_id)[0]
        return instance_status

    def get_monitoring_data(self):
        """Get all servers and all ports from the VIM for monitoring purposes.
        Returns the tuple (servers, ports)."""
        try:
            self.logger.debug("Getting servers and ports data from OpenStack VIM.")
            self._reload_connection()
            all_servers = self.nova.servers.list(detailed=True)
            all_ports = self.neutron.list_ports()
            return all_servers, all_ports
        except (
            vimconn.VimConnException,
            vimconn.VimConnNotFoundException,
            vimconn.VimConnConnectionException,
        ) as e:
            raise vimconn.VimConnException(
                f"Exception in monitoring while getting VMs and ports status: {str(e)}"
            )
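
    # Comment sketch of consuming the monitoring snapshot:
    #
    #     servers, ports = conn.get_monitoring_data()
    #     # servers: list of novaclient Server objects (detailed view)
    #     # ports: dict with a "ports" key, as returned by Neutron's list_ports()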