Correct the invalid vCPU calculation and the vCPU pinning policy evaluation.
[osm/RO.git] / RO-VIM-openstack / osm_rovim_openstack / vimconn_openstack.py
1 # -*- coding: utf-8 -*-
2
3 ##
4 # Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
5 # This file is part of openmano
6 # All Rights Reserved.
7 #
8 # Licensed under the Apache License, Version 2.0 (the "License"); you may
9 # not use this file except in compliance with the License. You may obtain
10 # a copy of the License at
11 #
12 # http://www.apache.org/licenses/LICENSE-2.0
13 #
14 # Unless required by applicable law or agreed to in writing, software
15 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
16 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
17 # License for the specific language governing permissions and limitations
18 # under the License.
19 ##
20
21 """
22 osconnector implements all the methods to interact with openstack using the python-neutronclient.
23
24 For the VNF forwarding graph, The OpenStack VIM connector calls the
25 networking-sfc Neutron extension methods, whose resources are mapped
26 to the VIM connector's SFC resources as follows:
27 - Classification (OSM) -> Flow Classifier (Neutron)
28 - Service Function Instance (OSM) -> Port Pair (Neutron)
29 - Service Function (OSM) -> Port Pair Group (Neutron)
30 - Service Function Path (OSM) -> Port Chain (Neutron)
31 """
32
33 import copy
34 from http.client import HTTPException
35 import json
36 import logging
37 from pprint import pformat
38 import random
39 import re
40 import time
41 from typing import Dict, List, Optional, Tuple
42
43 from cinderclient import client as cClient
44 from glanceclient import client as glClient
45 import glanceclient.exc as gl1Exceptions
46 from keystoneauth1 import session
47 from keystoneauth1.identity import v2, v3
48 import keystoneclient.exceptions as ksExceptions
49 import keystoneclient.v2_0.client as ksClient_v2
50 import keystoneclient.v3.client as ksClient_v3
51 import netaddr
52 from neutronclient.common import exceptions as neExceptions
53 from neutronclient.neutron import client as neClient
54 from novaclient import client as nClient, exceptions as nvExceptions
55 from osm_ro_plugin import vimconn
56 from requests.exceptions import ConnectionError
57 import yaml
58
__author__ = "Alfonso Tierno, Gerardo Garcia, Pablo Montes, xFlow Research, Igor D.C., Eduardo Sousa"
__date__ = "$22-sep-2017 23:59:59$"

"""contain the openstack virtual machine status to openmano status"""
# Mapping of OpenStack server (VM) status values to the OSM/openmano
# status vocabulary used by the rest of RO.
vmStatus2manoFormat = {
    "ACTIVE": "ACTIVE",
    "PAUSED": "PAUSED",
    "SUSPENDED": "SUSPENDED",
    "SHUTOFF": "INACTIVE",
    "BUILD": "BUILD",
    "ERROR": "ERROR",
    "DELETED": "DELETED",
}
# Mapping of OpenStack network status values to the OSM/openmano
# status vocabulary.
netStatus2manoFormat = {
    "ACTIVE": "ACTIVE",
    "PAUSED": "PAUSED",
    "INACTIVE": "INACTIVE",
    "BUILD": "BUILD",
    "ERROR": "ERROR",
    "DELETED": "DELETED",
}

# SFC classification types this connector understands (see the
# _validate_classification placeholder below)
supportedClassificationTypes = ["legacy_flow_classifier"]

# global var to have a timeout creating and deleting volumes
volume_timeout = 1800
# global var to have a timeout creating and deleting servers
server_timeout = 1800
87
class SafeDumper(yaml.SafeDumper):
    """YAML safe dumper that tolerates the custom dict subclasses
    returned by the OpenStack client libraries.

    The stock safe dumper deliberately refuses to represent dict
    subclasses (see pyyaml issue 142), so they are downcast to plain
    dicts before representation.
    """

    def represent_data(self, data):
        # Downcast any dict subclass to a plain dict; plain dicts and
        # non-dict values pass through untouched.
        if isinstance(data, dict) and type(data) is not dict:
            data = dict(data.items())

        return super().represent_data(data)
97
98
99 class vimconnector(vimconn.VimConnector):
    def __init__(
        self,
        uuid,
        name,
        tenant_id,
        tenant_name,
        url,
        url_admin=None,
        user=None,
        passwd=None,
        log_level=None,
        config={},
        persistent_info={},
    ):
        """Constructor using the common VIM connector parameters.

        'url' is the keystone authorization url,
        'url_admin' is not use
        'config' may carry, among others: APIversion, vim_type,
        dataplane_net_vlan_range, multisegment_vlan_range, insecure,
        ca_cert, use_internal_endpoint, security_groups, microversion.
        'persistent_info' caches the authenticated session and clients
        between connector instantiations.
        Raises VimConnException on invalid config values and TypeError
        when url is missing.
        """
        # validate config values before touching the base class
        api_version = config.get("APIversion")

        if api_version and api_version not in ("v3.3", "v2.0", "2", "3"):
            raise vimconn.VimConnException(
                "Invalid value '{}' for config:APIversion. "
                "Allowed values are 'v3.3', 'v2.0', '2' or '3'".format(api_version)
            )

        vim_type = config.get("vim_type")

        if vim_type and vim_type not in ("vio", "VIO"):
            raise vimconn.VimConnException(
                "Invalid value '{}' for config:vim_type."
                "Allowed values are 'vio' or 'VIO'".format(vim_type)
            )

        if config.get("dataplane_net_vlan_range") is not None:
            # validate vlan ranges provided by user
            self._validate_vlan_ranges(
                config.get("dataplane_net_vlan_range"), "dataplane_net_vlan_range"
            )

        if config.get("multisegment_vlan_range") is not None:
            # validate vlan ranges provided by user
            self._validate_vlan_ranges(
                config.get("multisegment_vlan_range"), "multisegment_vlan_range"
            )

        vimconn.VimConnector.__init__(
            self,
            uuid,
            name,
            tenant_id,
            tenant_name,
            url,
            url_admin,
            user,
            passwd,
            log_level,
            config,
        )

        # TLS verification: 'insecure' disables it, 'ca_cert' supplies a
        # CA bundle path; requesting both is contradictory
        if self.config.get("insecure") and self.config.get("ca_cert"):
            raise vimconn.VimConnException(
                "options insecure and ca_cert are mutually exclusive"
            )

        self.verify = True

        if self.config.get("insecure"):
            self.verify = False

        if self.config.get("ca_cert"):
            self.verify = self.config.get("ca_cert")

        if not url:
            raise TypeError("url param can not be NoneType")

        # cached clients/session survive across instances via
        # persistent_info; "reload_client" forces re-authentication on
        # the next _reload_connection() call
        self.persistent_info = persistent_info
        self.availability_zone = persistent_info.get("availability_zone", None)
        self.session = persistent_info.get("session", {"reload_client": True})
        self.my_tenant_id = self.session.get("my_tenant_id")
        self.nova = self.session.get("nova")
        self.neutron = self.session.get("neutron")
        self.cinder = self.session.get("cinder")
        self.glance = self.session.get("glance")
        # self.glancev1 = self.session.get("glancev1")
        self.keystone = self.session.get("keystone")
        self.api_version3 = self.session.get("api_version3")
        self.vim_type = self.config.get("vim_type")

        if self.vim_type:
            self.vim_type = self.vim_type.upper()

        if self.config.get("use_internal_endpoint"):
            self.endpoint_type = "internalURL"
        else:
            self.endpoint_type = None

        # silence chatty third-party loggers
        logging.getLogger("urllib3").setLevel(logging.WARNING)
        logging.getLogger("keystoneauth").setLevel(logging.WARNING)
        logging.getLogger("novaclient").setLevel(logging.WARNING)
        self.logger = logging.getLogger("ro.vim.openstack")

        # allow security_groups to be a list or a single string
        if isinstance(self.config.get("security_groups"), str):
            self.config["security_groups"] = [self.config["security_groups"]]

        self.security_groups_id = None

        # ###### VIO Specific Changes #########
        if self.vim_type == "VIO":
            self.logger = logging.getLogger("ro.vim.vio")

        if log_level:
            self.logger.setLevel(getattr(logging, log_level))
214
215 def __getitem__(self, index):
216 """Get individuals parameters.
217 Throw KeyError"""
218 if index == "project_domain_id":
219 return self.config.get("project_domain_id")
220 elif index == "user_domain_id":
221 return self.config.get("user_domain_id")
222 else:
223 return vimconn.VimConnector.__getitem__(self, index)
224
225 def __setitem__(self, index, value):
226 """Set individuals parameters and it is marked as dirty so to force connection reload.
227 Throw KeyError"""
228 if index == "project_domain_id":
229 self.config["project_domain_id"] = value
230 elif index == "user_domain_id":
231 self.config["user_domain_id"] = value
232 else:
233 vimconn.VimConnector.__setitem__(self, index, value)
234
235 self.session["reload_client"] = True
236
237 def serialize(self, value):
238 """Serialization of python basic types.
239
240 In the case value is not serializable a message will be logged and a
241 simple representation of the data that cannot be converted back to
242 python is returned.
243 """
244 if isinstance(value, str):
245 return value
246
247 try:
248 return yaml.dump(
249 value, Dumper=SafeDumper, default_flow_style=True, width=256
250 )
251 except yaml.representer.RepresenterError:
252 self.logger.debug(
253 "The following entity cannot be serialized in YAML:\n\n%s\n\n",
254 pformat(value),
255 exc_info=True,
256 )
257
258 return str(value)
259
    def _reload_connection(self):
        """Called before any operation; re-creates every OpenStack client
        when the session has been marked dirty (credentials/config change).
        Throw keystoneclient.apiclient.exceptions.AuthorizationFailure
        """
        # TODO control the timing and possible token timeout, but it seems that python client does this task for us :-)
        if self.session["reload_client"]:
            # decide the keystone API version: explicit config wins,
            # otherwise guess from the auth url suffix
            if self.config.get("APIversion"):
                self.api_version3 = (
                    self.config["APIversion"] == "v3.3"
                    or self.config["APIversion"] == "3"
                )
            else:  # get from ending auth_url that end with v3 or with v2.0
                self.api_version3 = self.url.endswith("/v3") or self.url.endswith(
                    "/v3/"
                )

            self.session["api_version3"] = self.api_version3

            if self.api_version3:
                # the "default" domain is only assumed when the user did
                # not configure any project/user domain explicitly
                if self.config.get("project_domain_id") or self.config.get(
                    "project_domain_name"
                ):
                    project_domain_id_default = None
                else:
                    project_domain_id_default = "default"

                if self.config.get("user_domain_id") or self.config.get(
                    "user_domain_name"
                ):
                    user_domain_id_default = None
                else:
                    user_domain_id_default = "default"
                auth = v3.Password(
                    auth_url=self.url,
                    username=self.user,
                    password=self.passwd,
                    project_name=self.tenant_name,
                    project_id=self.tenant_id,
                    project_domain_id=self.config.get(
                        "project_domain_id", project_domain_id_default
                    ),
                    user_domain_id=self.config.get(
                        "user_domain_id", user_domain_id_default
                    ),
                    project_domain_name=self.config.get("project_domain_name"),
                    user_domain_name=self.config.get("user_domain_name"),
                )
            else:
                auth = v2.Password(
                    auth_url=self.url,
                    username=self.user,
                    password=self.passwd,
                    tenant_name=self.tenant_name,
                    tenant_id=self.tenant_id,
                )

            sess = session.Session(auth=auth, verify=self.verify)
            # added region_name to keystone, nova, neutron and cinder to support distributed cloud for Wind River
            # Titanium cloud and StarlingX
            region_name = self.config.get("region_name")

            if self.api_version3:
                self.keystone = ksClient_v3.Client(
                    session=sess,
                    endpoint_type=self.endpoint_type,
                    region_name=region_name,
                )
            else:
                self.keystone = ksClient_v2.Client(
                    session=sess, endpoint_type=self.endpoint_type
                )

            self.session["keystone"] = self.keystone
            # In order to enable microversion functionality an explicit microversion must be specified in "config".
            # This implementation approach is due to the warning message in
            # https://developer.openstack.org/api-guide/compute/microversions.html
            # where it is stated that microversion backwards compatibility is not guaranteed and clients should
            # always require an specific microversion.
            # To be able to use "device role tagging" functionality define "microversion: 2.32" in datacenter config
            version = self.config.get("microversion")

            if not version:
                version = "2.1"

            # added region_name to keystone, nova, neutron and cinder to support distributed cloud for Wind River
            # Titanium cloud and StarlingX
            self.nova = self.session["nova"] = nClient.Client(
                str(version),
                session=sess,
                endpoint_type=self.endpoint_type,
                region_name=region_name,
            )
            self.neutron = self.session["neutron"] = neClient.Client(
                "2.0",
                session=sess,
                endpoint_type=self.endpoint_type,
                region_name=region_name,
            )
            self.cinder = self.session["cinder"] = cClient.Client(
                2,
                session=sess,
                endpoint_type=self.endpoint_type,
                region_name=region_name,
            )

            try:
                self.my_tenant_id = self.session["my_tenant_id"] = sess.get_project_id()
            except Exception:
                # best effort: keep going, methods needing the tenant id
                # will fail later with a clearer error
                self.logger.error("Cannot get project_id from session", exc_info=True)

            # when internal endpoints are requested, resolve the glance
            # internal URL explicitly from the keystone catalog
            if self.endpoint_type == "internalURL":
                glance_service_id = self.keystone.services.list(name="glance")[0].id
                glance_endpoint = self.keystone.endpoints.list(
                    glance_service_id, interface="internal"
                )[0].url
            else:
                glance_endpoint = None

            self.glance = self.session["glance"] = glClient.Client(
                2, session=sess, endpoint=glance_endpoint
            )
            # using version 1 of glance client in new_image()
            # self.glancev1 = self.session["glancev1"] = glClient.Client("1", session=sess,
            #     endpoint=glance_endpoint)
            self.session["reload_client"] = False
            self.persistent_info["session"] = self.session
            # add availability zone info inside self.persistent_info
            self._set_availablity_zones()
            self.persistent_info["availability_zone"] = self.availability_zone
            # force to get again security_groups_ids next time they are needed
            self.security_groups_id = None
391
392 def __net_os2mano(self, net_list_dict):
393 """Transform the net openstack format to mano format
394 net_list_dict can be a list of dict or a single dict"""
395 if type(net_list_dict) is dict:
396 net_list_ = (net_list_dict,)
397 elif type(net_list_dict) is list:
398 net_list_ = net_list_dict
399 else:
400 raise TypeError("param net_list_dict must be a list or a dictionary")
401 for net in net_list_:
402 if net.get("provider:network_type") == "vlan":
403 net["type"] = "data"
404 else:
405 net["type"] = "bridge"
406
407 def __classification_os2mano(self, class_list_dict):
408 """Transform the openstack format (Flow Classifier) to mano format
409 (Classification) class_list_dict can be a list of dict or a single dict
410 """
411 if isinstance(class_list_dict, dict):
412 class_list_ = [class_list_dict]
413 elif isinstance(class_list_dict, list):
414 class_list_ = class_list_dict
415 else:
416 raise TypeError("param class_list_dict must be a list or a dictionary")
417 for classification in class_list_:
418 id = classification.pop("id")
419 name = classification.pop("name")
420 description = classification.pop("description")
421 project_id = classification.pop("project_id")
422 tenant_id = classification.pop("tenant_id")
423 original_classification = copy.deepcopy(classification)
424 classification.clear()
425 classification["ctype"] = "legacy_flow_classifier"
426 classification["definition"] = original_classification
427 classification["id"] = id
428 classification["name"] = name
429 classification["description"] = description
430 classification["project_id"] = project_id
431 classification["tenant_id"] = tenant_id
432
433 def __sfi_os2mano(self, sfi_list_dict):
434 """Transform the openstack format (Port Pair) to mano format (SFI)
435 sfi_list_dict can be a list of dict or a single dict
436 """
437 if isinstance(sfi_list_dict, dict):
438 sfi_list_ = [sfi_list_dict]
439 elif isinstance(sfi_list_dict, list):
440 sfi_list_ = sfi_list_dict
441 else:
442 raise TypeError("param sfi_list_dict must be a list or a dictionary")
443
444 for sfi in sfi_list_:
445 sfi["ingress_ports"] = []
446 sfi["egress_ports"] = []
447
448 if sfi.get("ingress"):
449 sfi["ingress_ports"].append(sfi["ingress"])
450
451 if sfi.get("egress"):
452 sfi["egress_ports"].append(sfi["egress"])
453
454 del sfi["ingress"]
455 del sfi["egress"]
456 params = sfi.get("service_function_parameters")
457 sfc_encap = False
458
459 if params:
460 correlation = params.get("correlation")
461
462 if correlation:
463 sfc_encap = True
464
465 sfi["sfc_encap"] = sfc_encap
466 del sfi["service_function_parameters"]
467
468 def __sf_os2mano(self, sf_list_dict):
469 """Transform the openstack format (Port Pair Group) to mano format (SF)
470 sf_list_dict can be a list of dict or a single dict
471 """
472 if isinstance(sf_list_dict, dict):
473 sf_list_ = [sf_list_dict]
474 elif isinstance(sf_list_dict, list):
475 sf_list_ = sf_list_dict
476 else:
477 raise TypeError("param sf_list_dict must be a list or a dictionary")
478
479 for sf in sf_list_:
480 del sf["port_pair_group_parameters"]
481 sf["sfis"] = sf["port_pairs"]
482 del sf["port_pairs"]
483
484 def __sfp_os2mano(self, sfp_list_dict):
485 """Transform the openstack format (Port Chain) to mano format (SFP)
486 sfp_list_dict can be a list of dict or a single dict
487 """
488 if isinstance(sfp_list_dict, dict):
489 sfp_list_ = [sfp_list_dict]
490 elif isinstance(sfp_list_dict, list):
491 sfp_list_ = sfp_list_dict
492 else:
493 raise TypeError("param sfp_list_dict must be a list or a dictionary")
494
495 for sfp in sfp_list_:
496 params = sfp.pop("chain_parameters")
497 sfc_encap = False
498
499 if params:
500 correlation = params.get("correlation")
501
502 if correlation:
503 sfc_encap = True
504
505 sfp["sfc_encap"] = sfc_encap
506 sfp["spi"] = sfp.pop("chain_id")
507 sfp["classifications"] = sfp.pop("flow_classifiers")
508 sfp["service_functions"] = sfp.pop("port_pair_groups")
509
    # placeholder for now; read TODO note below
    def _validate_classification(self, type, definition):
        """Check that a classification is supported.

        Only the legacy_flow_classifier type exists at this point and its
        definition is not inspected, so this currently always returns True.
        """
        # only legacy_flow_classifier Type is supported at this point
        return True
        # TODO(igordcard): this method should be an abstract method of an
        # abstract Classification class to be implemented by the specific
        # Types. Also, abstract vimconnector should call the validation
        # method before the implemented VIM connectors are called.
518
519 def _format_exception(self, exception):
520 """Transform a keystone, nova, neutron exception into a vimconn exception discovering the cause"""
521 message_error = str(exception)
522 tip = ""
523
524 if isinstance(
525 exception,
526 (
527 neExceptions.NetworkNotFoundClient,
528 nvExceptions.NotFound,
529 ksExceptions.NotFound,
530 gl1Exceptions.HTTPNotFound,
531 ),
532 ):
533 raise vimconn.VimConnNotFoundException(
534 type(exception).__name__ + ": " + message_error
535 )
536 elif isinstance(
537 exception,
538 (
539 HTTPException,
540 gl1Exceptions.HTTPException,
541 gl1Exceptions.CommunicationError,
542 ConnectionError,
543 ksExceptions.ConnectionError,
544 neExceptions.ConnectionFailed,
545 ),
546 ):
547 if type(exception).__name__ == "SSLError":
548 tip = " (maybe option 'insecure' must be added to the VIM)"
549
550 raise vimconn.VimConnConnectionException(
551 "Invalid URL or credentials{}: {}".format(tip, message_error)
552 )
553 elif isinstance(
554 exception,
555 (
556 KeyError,
557 nvExceptions.BadRequest,
558 ksExceptions.BadRequest,
559 ),
560 ):
561 raise vimconn.VimConnException(
562 type(exception).__name__ + ": " + message_error
563 )
564 elif isinstance(
565 exception,
566 (
567 nvExceptions.ClientException,
568 ksExceptions.ClientException,
569 neExceptions.NeutronException,
570 ),
571 ):
572 raise vimconn.VimConnUnexpectedResponse(
573 type(exception).__name__ + ": " + message_error
574 )
575 elif isinstance(exception, nvExceptions.Conflict):
576 raise vimconn.VimConnConflictException(
577 type(exception).__name__ + ": " + message_error
578 )
579 elif isinstance(exception, vimconn.VimConnException):
580 raise exception
581 else: # ()
582 self.logger.error("General Exception " + message_error, exc_info=True)
583
584 raise vimconn.VimConnConnectionException(
585 type(exception).__name__ + ": " + message_error
586 )
587
588 def _get_ids_from_name(self):
589 """
590 Obtain ids from name of tenant and security_groups. Store at self .security_groups_id"
591 :return: None
592 """
593 # get tenant_id if only tenant_name is supplied
594 self._reload_connection()
595
596 if not self.my_tenant_id:
597 raise vimconn.VimConnConnectionException(
598 "Error getting tenant information from name={} id={}".format(
599 self.tenant_name, self.tenant_id
600 )
601 )
602
603 if self.config.get("security_groups") and not self.security_groups_id:
604 # convert from name to id
605 neutron_sg_list = self.neutron.list_security_groups(
606 tenant_id=self.my_tenant_id
607 )["security_groups"]
608
609 self.security_groups_id = []
610 for sg in self.config.get("security_groups"):
611 for neutron_sg in neutron_sg_list:
612 if sg in (neutron_sg["id"], neutron_sg["name"]):
613 self.security_groups_id.append(neutron_sg["id"])
614 break
615 else:
616 self.security_groups_id = None
617
618 raise vimconn.VimConnConnectionException(
619 "Not found security group {} for this tenant".format(sg)
620 )
621
622 def check_vim_connectivity(self):
623 # just get network list to check connectivity and credentials
624 self.get_network_list(filter_dict={})
625
626 def get_tenant_list(self, filter_dict={}):
627 """Obtain tenants of VIM
628 filter_dict can contain the following keys:
629 name: filter by tenant name
630 id: filter by tenant uuid/id
631 <other VIM specific>
632 Returns the tenant list of dictionaries: [{'name':'<name>, 'id':'<id>, ...}, ...]
633 """
634 self.logger.debug("Getting tenants from VIM filter: '%s'", str(filter_dict))
635
636 try:
637 self._reload_connection()
638
639 if self.api_version3:
640 project_class_list = self.keystone.projects.list(
641 name=filter_dict.get("name")
642 )
643 else:
644 project_class_list = self.keystone.tenants.findall(**filter_dict)
645
646 project_list = []
647
648 for project in project_class_list:
649 if filter_dict.get("id") and filter_dict["id"] != project.id:
650 continue
651
652 project_list.append(project.to_dict())
653
654 return project_list
655 except (
656 ksExceptions.ConnectionError,
657 ksExceptions.ClientException,
658 ConnectionError,
659 ) as e:
660 self._format_exception(e)
661
662 def new_tenant(self, tenant_name, tenant_description):
663 """Adds a new tenant to openstack VIM. Returns the tenant identifier"""
664 self.logger.debug("Adding a new tenant name: %s", tenant_name)
665
666 try:
667 self._reload_connection()
668
669 if self.api_version3:
670 project = self.keystone.projects.create(
671 tenant_name,
672 self.config.get("project_domain_id", "default"),
673 description=tenant_description,
674 is_domain=False,
675 )
676 else:
677 project = self.keystone.tenants.create(tenant_name, tenant_description)
678
679 return project.id
680 except (
681 ksExceptions.ConnectionError,
682 ksExceptions.ClientException,
683 ksExceptions.BadRequest,
684 ConnectionError,
685 ) as e:
686 self._format_exception(e)
687
688 def delete_tenant(self, tenant_id):
689 """Delete a tenant from openstack VIM. Returns the old tenant identifier"""
690 self.logger.debug("Deleting tenant %s from VIM", tenant_id)
691
692 try:
693 self._reload_connection()
694
695 if self.api_version3:
696 self.keystone.projects.delete(tenant_id)
697 else:
698 self.keystone.tenants.delete(tenant_id)
699
700 return tenant_id
701 except (
702 ksExceptions.ConnectionError,
703 ksExceptions.ClientException,
704 ksExceptions.NotFound,
705 ConnectionError,
706 ) as e:
707 self._format_exception(e)
708
709 def new_network(
710 self,
711 net_name,
712 net_type,
713 ip_profile=None,
714 shared=False,
715 provider_network_profile=None,
716 ):
717 """Adds a tenant network to VIM
718 Params:
719 'net_name': name of the network
720 'net_type': one of:
721 'bridge': overlay isolated network
722 'data': underlay E-LAN network for Passthrough and SRIOV interfaces
723 'ptp': underlay E-LINE network for Passthrough and SRIOV interfaces.
724 'ip_profile': is a dict containing the IP parameters of the network
725 'ip_version': can be "IPv4" or "IPv6" (Currently only IPv4 is implemented)
726 'subnet_address': ip_prefix_schema, that is X.X.X.X/Y
727 'gateway_address': (Optional) ip_schema, that is X.X.X.X
728 'dns_address': (Optional) comma separated list of ip_schema, e.g. X.X.X.X[,X,X,X,X]
729 'dhcp_enabled': True or False
730 'dhcp_start_address': ip_schema, first IP to grant
731 'dhcp_count': number of IPs to grant.
732 'shared': if this network can be seen/use by other tenants/organization
733 'provider_network_profile': (optional) contains {segmentation-id: vlan, network-type: vlan|vxlan,
734 physical-network: physnet-label}
735 Returns a tuple with the network identifier and created_items, or raises an exception on error
736 created_items can be None or a dictionary where this method can include key-values that will be passed to
737 the method delete_network. Can be used to store created segments, created l2gw connections, etc.
738 Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
739 as not present.
740 """
741 self.logger.debug(
742 "Adding a new network to VIM name '%s', type '%s'", net_name, net_type
743 )
744 # self.logger.debug(">>>>>>>>>>>>>>>>>> IP profile %s", str(ip_profile))
745
746 try:
747 vlan = None
748
749 if provider_network_profile:
750 vlan = provider_network_profile.get("segmentation-id")
751
752 new_net = None
753 created_items = {}
754 self._reload_connection()
755 network_dict = {"name": net_name, "admin_state_up": True}
756
757 if net_type in ("data", "ptp") or provider_network_profile:
758 provider_physical_network = None
759
760 if provider_network_profile and provider_network_profile.get(
761 "physical-network"
762 ):
763 provider_physical_network = provider_network_profile.get(
764 "physical-network"
765 )
766
767 # provider-network must be one of the dataplane_physcial_netowrk if this is a list. If it is string
768 # or not declared, just ignore the checking
769 if (
770 isinstance(
771 self.config.get("dataplane_physical_net"), (tuple, list)
772 )
773 and provider_physical_network
774 not in self.config["dataplane_physical_net"]
775 ):
776 raise vimconn.VimConnConflictException(
777 "Invalid parameter 'provider-network:physical-network' "
778 "for network creation. '{}' is not one of the declared "
779 "list at VIM_config:dataplane_physical_net".format(
780 provider_physical_network
781 )
782 )
783
784 # use the default dataplane_physical_net
785 if not provider_physical_network:
786 provider_physical_network = self.config.get(
787 "dataplane_physical_net"
788 )
789
790 # if it is non empty list, use the first value. If it is a string use the value directly
791 if (
792 isinstance(provider_physical_network, (tuple, list))
793 and provider_physical_network
794 ):
795 provider_physical_network = provider_physical_network[0]
796
797 if not provider_physical_network:
798 raise vimconn.VimConnConflictException(
799 "missing information needed for underlay networks. Provide "
800 "'dataplane_physical_net' configuration at VIM or use the NS "
801 "instantiation parameter 'provider-network.physical-network'"
802 " for the VLD"
803 )
804
805 if not self.config.get("multisegment_support"):
806 network_dict[
807 "provider:physical_network"
808 ] = provider_physical_network
809
810 if (
811 provider_network_profile
812 and "network-type" in provider_network_profile
813 ):
814 network_dict[
815 "provider:network_type"
816 ] = provider_network_profile["network-type"]
817 else:
818 network_dict["provider:network_type"] = self.config.get(
819 "dataplane_network_type", "vlan"
820 )
821
822 if vlan:
823 network_dict["provider:segmentation_id"] = vlan
824 else:
825 # Multi-segment case
826 segment_list = []
827 segment1_dict = {
828 "provider:physical_network": "",
829 "provider:network_type": "vxlan",
830 }
831 segment_list.append(segment1_dict)
832 segment2_dict = {
833 "provider:physical_network": provider_physical_network,
834 "provider:network_type": "vlan",
835 }
836
837 if vlan:
838 segment2_dict["provider:segmentation_id"] = vlan
839 elif self.config.get("multisegment_vlan_range"):
840 vlanID = self._generate_multisegment_vlanID()
841 segment2_dict["provider:segmentation_id"] = vlanID
842
843 # else
844 # raise vimconn.VimConnConflictException(
845 # "You must provide "multisegment_vlan_range" at config dict before creating a multisegment
846 # network")
847 segment_list.append(segment2_dict)
848 network_dict["segments"] = segment_list
849
850 # VIO Specific Changes. It needs a concrete VLAN
851 if self.vim_type == "VIO" and vlan is None:
852 if self.config.get("dataplane_net_vlan_range") is None:
853 raise vimconn.VimConnConflictException(
854 "You must provide 'dataplane_net_vlan_range' in format "
855 "[start_ID - end_ID] at VIM_config for creating underlay "
856 "networks"
857 )
858
859 network_dict["provider:segmentation_id"] = self._generate_vlanID()
860
861 network_dict["shared"] = shared
862
863 if self.config.get("disable_network_port_security"):
864 network_dict["port_security_enabled"] = False
865
866 if self.config.get("neutron_availability_zone_hints"):
867 hints = self.config.get("neutron_availability_zone_hints")
868
869 if isinstance(hints, str):
870 hints = [hints]
871
872 network_dict["availability_zone_hints"] = hints
873
874 new_net = self.neutron.create_network({"network": network_dict})
875 # print new_net
876 # create subnetwork, even if there is no profile
877
878 if not ip_profile:
879 ip_profile = {}
880
881 if not ip_profile.get("subnet_address"):
882 # Fake subnet is required
883 subnet_rand = random.randint(0, 255)
884 ip_profile["subnet_address"] = "192.168.{}.0/24".format(subnet_rand)
885
886 if "ip_version" not in ip_profile:
887 ip_profile["ip_version"] = "IPv4"
888
889 subnet = {
890 "name": net_name + "-subnet",
891 "network_id": new_net["network"]["id"],
892 "ip_version": 4 if ip_profile["ip_version"] == "IPv4" else 6,
893 "cidr": ip_profile["subnet_address"],
894 }
895
896 # Gateway should be set to None if not needed. Otherwise openstack assigns one by default
897 if ip_profile.get("gateway_address"):
898 subnet["gateway_ip"] = ip_profile["gateway_address"]
899 else:
900 subnet["gateway_ip"] = None
901
902 if ip_profile.get("dns_address"):
903 subnet["dns_nameservers"] = ip_profile["dns_address"].split(";")
904
905 if "dhcp_enabled" in ip_profile:
906 subnet["enable_dhcp"] = (
907 False
908 if ip_profile["dhcp_enabled"] == "false"
909 or ip_profile["dhcp_enabled"] is False
910 else True
911 )
912
913 if ip_profile.get("dhcp_start_address"):
914 subnet["allocation_pools"] = []
915 subnet["allocation_pools"].append(dict())
916 subnet["allocation_pools"][0]["start"] = ip_profile[
917 "dhcp_start_address"
918 ]
919
920 if ip_profile.get("dhcp_count"):
921 # parts = ip_profile["dhcp_start_address"].split(".")
922 # ip_int = (int(parts[0]) << 24) + (int(parts[1]) << 16) + (int(parts[2]) << 8) + int(parts[3])
923 ip_int = int(netaddr.IPAddress(ip_profile["dhcp_start_address"]))
924 ip_int += ip_profile["dhcp_count"] - 1
925 ip_str = str(netaddr.IPAddress(ip_int))
926 subnet["allocation_pools"][0]["end"] = ip_str
927
928 # self.logger.debug(">>>>>>>>>>>>>>>>>> Subnet: %s", str(subnet))
929 self.neutron.create_subnet({"subnet": subnet})
930
931 if net_type == "data" and self.config.get("multisegment_support"):
932 if self.config.get("l2gw_support"):
933 l2gw_list = self.neutron.list_l2_gateways().get("l2_gateways", ())
934 for l2gw in l2gw_list:
935 l2gw_conn = {
936 "l2_gateway_id": l2gw["id"],
937 "network_id": new_net["network"]["id"],
938 "segmentation_id": str(vlanID),
939 }
940 new_l2gw_conn = self.neutron.create_l2_gateway_connection(
941 {"l2_gateway_connection": l2gw_conn}
942 )
943 created_items[
944 "l2gwconn:"
945 + str(new_l2gw_conn["l2_gateway_connection"]["id"])
946 ] = True
947
948 return new_net["network"]["id"], created_items
949 except Exception as e:
950 # delete l2gw connections (if any) before deleting the network
951 for k, v in created_items.items():
952 if not v: # skip already deleted
953 continue
954
955 try:
956 k_item, _, k_id = k.partition(":")
957
958 if k_item == "l2gwconn":
959 self.neutron.delete_l2_gateway_connection(k_id)
960 except Exception as e2:
961 self.logger.error(
962 "Error deleting l2 gateway connection: {}: {}".format(
963 type(e2).__name__, e2
964 )
965 )
966
967 if new_net:
968 self.neutron.delete_network(new_net["network"]["id"])
969
970 self._format_exception(e)
971
972 def get_network_list(self, filter_dict={}):
973 """Obtain tenant networks of VIM
974 Filter_dict can be:
975 name: network name
976 id: network uuid
977 shared: boolean
978 tenant_id: tenant
979 admin_state_up: boolean
980 status: 'ACTIVE'
981 Returns the network list of dictionaries
982 """
983 self.logger.debug("Getting network from VIM filter: '%s'", str(filter_dict))
984
985 try:
986 self._reload_connection()
987 filter_dict_os = filter_dict.copy()
988
989 if self.api_version3 and "tenant_id" in filter_dict_os:
990 # TODO check
991 filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")
992
993 net_dict = self.neutron.list_networks(**filter_dict_os)
994 net_list = net_dict["networks"]
995 self.__net_os2mano(net_list)
996
997 return net_list
998 except (
999 neExceptions.ConnectionFailed,
1000 ksExceptions.ClientException,
1001 neExceptions.NeutronException,
1002 ConnectionError,
1003 ) as e:
1004 self._format_exception(e)
1005
1006 def get_network(self, net_id):
1007 """Obtain details of network from VIM
1008 Returns the network information from a network id"""
1009 self.logger.debug(" Getting tenant network %s from VIM", net_id)
1010 filter_dict = {"id": net_id}
1011 net_list = self.get_network_list(filter_dict)
1012
1013 if len(net_list) == 0:
1014 raise vimconn.VimConnNotFoundException(
1015 "Network '{}' not found".format(net_id)
1016 )
1017 elif len(net_list) > 1:
1018 raise vimconn.VimConnConflictException(
1019 "Found more than one network with this criteria"
1020 )
1021
1022 net = net_list[0]
1023 subnets = []
1024 for subnet_id in net.get("subnets", ()):
1025 try:
1026 subnet = self.neutron.show_subnet(subnet_id)
1027 except Exception as e:
1028 self.logger.error(
1029 "osconnector.get_network(): Error getting subnet %s %s"
1030 % (net_id, str(e))
1031 )
1032 subnet = {"id": subnet_id, "fault": str(e)}
1033
1034 subnets.append(subnet)
1035
1036 net["subnets"] = subnets
1037 net["encapsulation"] = net.get("provider:network_type")
1038 net["encapsulation_type"] = net.get("provider:network_type")
1039 net["segmentation_id"] = net.get("provider:segmentation_id")
1040 net["encapsulation_id"] = net.get("provider:segmentation_id")
1041
1042 return net
1043
1044 def delete_network(self, net_id, created_items=None):
1045 """
1046 Removes a tenant network from VIM and its associated elements
1047 :param net_id: VIM identifier of the network, provided by method new_network
1048 :param created_items: dictionary with extra items to be deleted. provided by method new_network
1049 Returns the network identifier or raises an exception upon error or when network is not found
1050 """
1051 self.logger.debug("Deleting network '%s' from VIM", net_id)
1052
1053 if created_items is None:
1054 created_items = {}
1055
1056 try:
1057 self._reload_connection()
1058 # delete l2gw connections (if any) before deleting the network
1059 for k, v in created_items.items():
1060 if not v: # skip already deleted
1061 continue
1062
1063 try:
1064 k_item, _, k_id = k.partition(":")
1065 if k_item == "l2gwconn":
1066 self.neutron.delete_l2_gateway_connection(k_id)
1067 except Exception as e:
1068 self.logger.error(
1069 "Error deleting l2 gateway connection: {}: {}".format(
1070 type(e).__name__, e
1071 )
1072 )
1073
1074 # delete VM ports attached to this networks before the network
1075 ports = self.neutron.list_ports(network_id=net_id)
1076 for p in ports["ports"]:
1077 try:
1078 self.neutron.delete_port(p["id"])
1079 except Exception as e:
1080 self.logger.error("Error deleting port %s: %s", p["id"], str(e))
1081
1082 self.neutron.delete_network(net_id)
1083
1084 return net_id
1085 except (
1086 neExceptions.ConnectionFailed,
1087 neExceptions.NetworkNotFoundClient,
1088 neExceptions.NeutronException,
1089 ksExceptions.ClientException,
1090 neExceptions.NeutronException,
1091 ConnectionError,
1092 ) as e:
1093 self._format_exception(e)
1094
1095 def refresh_nets_status(self, net_list):
1096 """Get the status of the networks
1097 Params: the list of network identifiers
1098 Returns a dictionary with:
1099 net_id: #VIM id of this network
1100 status: #Mandatory. Text with one of:
1101 # DELETED (not found at vim)
1102 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
1103 # OTHER (Vim reported other status not understood)
1104 # ERROR (VIM indicates an ERROR status)
1105 # ACTIVE, INACTIVE, DOWN (admin down),
1106 # BUILD (on building process)
1107 #
1108 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
1109 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
1110 """
1111 net_dict = {}
1112
1113 for net_id in net_list:
1114 net = {}
1115
1116 try:
1117 net_vim = self.get_network(net_id)
1118
1119 if net_vim["status"] in netStatus2manoFormat:
1120 net["status"] = netStatus2manoFormat[net_vim["status"]]
1121 else:
1122 net["status"] = "OTHER"
1123 net["error_msg"] = "VIM status reported " + net_vim["status"]
1124
1125 if net["status"] == "ACTIVE" and not net_vim["admin_state_up"]:
1126 net["status"] = "DOWN"
1127
1128 net["vim_info"] = self.serialize(net_vim)
1129
1130 if net_vim.get("fault"): # TODO
1131 net["error_msg"] = str(net_vim["fault"])
1132 except vimconn.VimConnNotFoundException as e:
1133 self.logger.error("Exception getting net status: %s", str(e))
1134 net["status"] = "DELETED"
1135 net["error_msg"] = str(e)
1136 except vimconn.VimConnException as e:
1137 self.logger.error("Exception getting net status: %s", str(e))
1138 net["status"] = "VIM_ERROR"
1139 net["error_msg"] = str(e)
1140 net_dict[net_id] = net
1141 return net_dict
1142
1143 def get_flavor(self, flavor_id):
1144 """Obtain flavor details from the VIM. Returns the flavor dict details"""
1145 self.logger.debug("Getting flavor '%s'", flavor_id)
1146
1147 try:
1148 self._reload_connection()
1149 flavor = self.nova.flavors.find(id=flavor_id)
1150 # TODO parse input and translate to VIM format (openmano_schemas.new_vminstance_response_schema)
1151
1152 return flavor.to_dict()
1153 except (
1154 nvExceptions.NotFound,
1155 nvExceptions.ClientException,
1156 ksExceptions.ClientException,
1157 ConnectionError,
1158 ) as e:
1159 self._format_exception(e)
1160
1161 def get_flavor_id_from_data(self, flavor_dict):
1162 """Obtain flavor id that match the flavor description
1163 Returns the flavor_id or raises a vimconnNotFoundException
1164 flavor_dict: contains the required ram, vcpus, disk
1165 If 'use_existing_flavors' is set to True at config, the closer flavor that provides same or more ram, vcpus
1166 and disk is returned. Otherwise a flavor with exactly same ram, vcpus and disk is returned or a
1167 vimconnNotFoundException is raised
1168 """
1169 exact_match = False if self.config.get("use_existing_flavors") else True
1170
1171 try:
1172 self._reload_connection()
1173 flavor_candidate_id = None
1174 flavor_candidate_data = (10000, 10000, 10000)
1175 flavor_target = (
1176 flavor_dict["ram"],
1177 flavor_dict["vcpus"],
1178 flavor_dict["disk"],
1179 flavor_dict.get("ephemeral", 0),
1180 flavor_dict.get("swap", 0),
1181 )
1182 # numa=None
1183 extended = flavor_dict.get("extended", {})
1184 if extended:
1185 # TODO
1186 raise vimconn.VimConnNotFoundException(
1187 "Flavor with EPA still not implemented"
1188 )
1189 # if len(numas) > 1:
1190 # raise vimconn.VimConnNotFoundException("Cannot find any flavor with more than one numa")
1191 # numa=numas[0]
1192 # numas = extended.get("numas")
1193 for flavor in self.nova.flavors.list():
1194 epa = flavor.get_keys()
1195
1196 if epa:
1197 continue
1198 # TODO
1199
1200 flavor_data = (
1201 flavor.ram,
1202 flavor.vcpus,
1203 flavor.disk,
1204 flavor.ephemeral,
1205 flavor.swap if isinstance(flavor.swap, int) else 0,
1206 )
1207 if flavor_data == flavor_target:
1208 return flavor.id
1209 elif (
1210 not exact_match
1211 and flavor_target < flavor_data < flavor_candidate_data
1212 ):
1213 flavor_candidate_id = flavor.id
1214 flavor_candidate_data = flavor_data
1215
1216 if not exact_match and flavor_candidate_id:
1217 return flavor_candidate_id
1218
1219 raise vimconn.VimConnNotFoundException(
1220 "Cannot find any flavor matching '{}'".format(flavor_dict)
1221 )
1222 except (
1223 nvExceptions.NotFound,
1224 nvExceptions.ClientException,
1225 ksExceptions.ClientException,
1226 ConnectionError,
1227 ) as e:
1228 self._format_exception(e)
1229
1230 @staticmethod
1231 def process_resource_quota(quota: dict, prefix: str, extra_specs: dict) -> None:
1232 """Process resource quota and fill up extra_specs.
1233 Args:
1234 quota (dict): Keeping the quota of resurces
1235 prefix (str) Prefix
1236 extra_specs (dict) Dict to be filled to be used during flavor creation
1237
1238 """
1239 if "limit" in quota:
1240 extra_specs["quota:" + prefix + "_limit"] = quota["limit"]
1241
1242 if "reserve" in quota:
1243 extra_specs["quota:" + prefix + "_reservation"] = quota["reserve"]
1244
1245 if "shares" in quota:
1246 extra_specs["quota:" + prefix + "_shares_level"] = "custom"
1247 extra_specs["quota:" + prefix + "_shares_share"] = quota["shares"]
1248
1249 @staticmethod
1250 def process_numa_memory(
1251 numa: dict, node_id: Optional[int], extra_specs: dict
1252 ) -> None:
1253 """Set the memory in extra_specs.
1254 Args:
1255 numa (dict): A dictionary which includes numa information
1256 node_id (int): ID of numa node
1257 extra_specs (dict): To be filled.
1258
1259 """
1260 if not numa.get("memory"):
1261 return
1262 memory_mb = numa["memory"] * 1024
1263 memory = "hw:numa_mem.{}".format(node_id)
1264 extra_specs[memory] = int(memory_mb)
1265
1266 @staticmethod
1267 def process_numa_vcpu(numa: dict, node_id: int, extra_specs: dict) -> None:
1268 """Set the cpu in extra_specs.
1269 Args:
1270 numa (dict): A dictionary which includes numa information
1271 node_id (int): ID of numa node
1272 extra_specs (dict): To be filled.
1273
1274 """
1275 if not numa.get("vcpu"):
1276 return
1277 vcpu = numa["vcpu"]
1278 cpu = "hw:numa_cpus.{}".format(node_id)
1279 vcpu = ",".join(map(str, vcpu))
1280 extra_specs[cpu] = vcpu
1281
1282 @staticmethod
1283 def process_numa_paired_threads(numa: dict, extra_specs: dict) -> Optional[int]:
1284 """Fill up extra_specs if numa has paired-threads.
1285 Args:
1286 numa (dict): A dictionary which includes numa information
1287 extra_specs (dict): To be filled.
1288
1289 Returns:
1290 threads (int) Number of virtual cpus
1291
1292 """
1293 if not numa.get("paired-threads"):
1294 return
1295
1296 # cpu_thread_policy "require" implies that compute node must have an STM architecture
1297 threads = numa["paired-threads"] * 2
1298 extra_specs["hw:cpu_thread_policy"] = "require"
1299 extra_specs["hw:cpu_policy"] = "dedicated"
1300 return threads
1301
1302 @staticmethod
1303 def process_numa_cores(numa: dict, extra_specs: dict) -> Optional[int]:
1304 """Fill up extra_specs if numa has cores.
1305 Args:
1306 numa (dict): A dictionary which includes numa information
1307 extra_specs (dict): To be filled.
1308
1309 Returns:
1310 cores (int) Number of virtual cpus
1311
1312 """
1313 # cpu_thread_policy "isolate" implies that the host must not have an SMT
1314 # architecture, or a non-SMT architecture will be emulated
1315 if not numa.get("cores"):
1316 return
1317 cores = numa["cores"]
1318 extra_specs["hw:cpu_thread_policy"] = "isolate"
1319 extra_specs["hw:cpu_policy"] = "dedicated"
1320 return cores
1321
1322 @staticmethod
1323 def process_numa_threads(numa: dict, extra_specs: dict) -> Optional[int]:
1324 """Fill up extra_specs if numa has threads.
1325 Args:
1326 numa (dict): A dictionary which includes numa information
1327 extra_specs (dict): To be filled.
1328
1329 Returns:
1330 threads (int) Number of virtual cpus
1331
1332 """
1333 # cpu_thread_policy "prefer" implies that the host may or may not have an SMT architecture
1334 if not numa.get("threads"):
1335 return
1336 threads = numa["threads"]
1337 extra_specs["hw:cpu_thread_policy"] = "prefer"
1338 extra_specs["hw:cpu_policy"] = "dedicated"
1339 return threads
1340
1341 def _process_numa_parameters_of_flavor(
1342 self, numas: List, extra_specs: Dict
1343 ) -> None:
1344 """Process numa parameters and fill up extra_specs.
1345
1346 Args:
1347 numas (list): List of dictionary which includes numa information
1348 extra_specs (dict): To be filled.
1349
1350 """
1351 numa_nodes = len(numas)
1352 extra_specs["hw:numa_nodes"] = str(numa_nodes)
1353 cpu_cores, cpu_threads = 0, 0
1354
1355 if self.vim_type == "VIO":
1356 extra_specs["vmware:extra_config"] = '{"numa.nodeAffinity":"0"}'
1357 extra_specs["vmware:latency_sensitivity_level"] = "high"
1358
1359 for numa in numas:
1360 if "id" in numa:
1361 node_id = numa["id"]
1362 # overwrite ram and vcpus
1363 # check if key "memory" is present in numa else use ram value at flavor
1364 self.process_numa_memory(numa, node_id, extra_specs)
1365 self.process_numa_vcpu(numa, node_id, extra_specs)
1366
1367 # See for reference: https://specs.openstack.org/openstack/nova-specs/specs/mitaka/implemented/virt-driver-cpu-thread-pinning.html
1368 extra_specs["hw:cpu_sockets"] = str(numa_nodes)
1369
1370 if "paired-threads" in numa:
1371 threads = self.process_numa_paired_threads(numa, extra_specs)
1372 cpu_threads += threads
1373
1374 elif "cores" in numa:
1375 cores = self.process_numa_cores(numa, extra_specs)
1376 cpu_cores += cores
1377
1378 elif "threads" in numa:
1379 threads = self.process_numa_threads(numa, extra_specs)
1380 cpu_threads += threads
1381
1382 if cpu_cores:
1383 extra_specs["hw:cpu_cores"] = str(cpu_cores)
1384 if cpu_threads:
1385 extra_specs["hw:cpu_threads"] = str(cpu_threads)
1386
1387 def _change_flavor_name(
1388 self, name: str, name_suffix: int, flavor_data: dict
1389 ) -> str:
1390 """Change the flavor name if the name already exists.
1391
1392 Args:
1393 name (str): Flavor name to be checked
1394 name_suffix (int): Suffix to be appended to name
1395 flavor_data (dict): Flavor dict
1396
1397 Returns:
1398 name (str): New flavor name to be used
1399
1400 """
1401 # Get used names
1402 fl = self.nova.flavors.list()
1403 fl_names = [f.name for f in fl]
1404
1405 while name in fl_names:
1406 name_suffix += 1
1407 name = flavor_data["name"] + "-" + str(name_suffix)
1408
1409 return name
1410
1411 def _process_extended_config_of_flavor(
1412 self, extended: dict, extra_specs: dict
1413 ) -> None:
1414 """Process the extended dict to fill up extra_specs.
1415 Args:
1416
1417 extended (dict): Keeping the extra specification of flavor
1418 extra_specs (dict) Dict to be filled to be used during flavor creation
1419
1420 """
1421 quotas = {
1422 "cpu-quota": "cpu",
1423 "mem-quota": "memory",
1424 "vif-quota": "vif",
1425 "disk-io-quota": "disk_io",
1426 }
1427
1428 page_sizes = {
1429 "LARGE": "large",
1430 "SMALL": "small",
1431 "SIZE_2MB": "2MB",
1432 "SIZE_1GB": "1GB",
1433 "PREFER_LARGE": "any",
1434 }
1435
1436 policies = {
1437 "cpu-pinning-policy": "hw:cpu_policy",
1438 "cpu-thread-pinning-policy": "hw:cpu_thread_policy",
1439 "mem-policy": "hw:numa_mempolicy",
1440 }
1441
1442 numas = extended.get("numas")
1443 if numas:
1444 self._process_numa_parameters_of_flavor(numas, extra_specs)
1445
1446 for quota, item in quotas.items():
1447 if quota in extended.keys():
1448 self.process_resource_quota(extended.get(quota), item, extra_specs)
1449
1450 # Set the mempage size as specified in the descriptor
1451 if extended.get("mempage-size"):
1452 if extended["mempage-size"] in page_sizes.keys():
1453 extra_specs["hw:mem_page_size"] = page_sizes[extended["mempage-size"]]
1454 else:
1455 # Normally, validations in NBI should not allow to this condition.
1456 self.logger.debug(
1457 "Invalid mempage-size %s. Will be ignored",
1458 extended.get("mempage-size"),
1459 )
1460
1461 for policy, hw_policy in policies.items():
1462 if extended.get(policy):
1463 extra_specs[hw_policy] = extended[policy].lower()
1464
1465 @staticmethod
1466 def _get_flavor_details(flavor_data: dict) -> Tuple:
1467 """Returns the details of flavor
1468 Args:
1469 flavor_data (dict): Dictionary that includes required flavor details
1470
1471 Returns:
1472 ram, vcpus, extra_specs, extended (tuple): Main items of required flavor
1473
1474 """
1475 return (
1476 flavor_data.get("ram", 64),
1477 flavor_data.get("vcpus", 1),
1478 {},
1479 flavor_data.get("extended"),
1480 )
1481
1482 def new_flavor(self, flavor_data: dict, change_name_if_used: bool = True) -> str:
1483 """Adds a tenant flavor to openstack VIM.
1484 if change_name_if_used is True, it will change name in case of conflict,
1485 because it is not supported name repetition.
1486
1487 Args:
1488 flavor_data (dict): Flavor details to be processed
1489 change_name_if_used (bool): Change name in case of conflict
1490
1491 Returns:
1492 flavor_id (str): flavor identifier
1493
1494 """
1495 self.logger.debug("Adding flavor '%s'", str(flavor_data))
1496 retry = 0
1497 max_retries = 3
1498 name_suffix = 0
1499
1500 try:
1501 name = flavor_data["name"]
1502 while retry < max_retries:
1503 retry += 1
1504 try:
1505 self._reload_connection()
1506
1507 if change_name_if_used:
1508 name = self._change_flavor_name(name, name_suffix, flavor_data)
1509
1510 ram, vcpus, extra_specs, extended = self._get_flavor_details(
1511 flavor_data
1512 )
1513 if extended:
1514 self._process_extended_config_of_flavor(extended, extra_specs)
1515
1516 # Create flavor
1517
1518 new_flavor = self.nova.flavors.create(
1519 name=name,
1520 ram=ram,
1521 vcpus=vcpus,
1522 disk=flavor_data.get("disk", 0),
1523 ephemeral=flavor_data.get("ephemeral", 0),
1524 swap=flavor_data.get("swap", 0),
1525 is_public=flavor_data.get("is_public", True),
1526 )
1527
1528 # Add metadata
1529 if extra_specs:
1530 new_flavor.set_keys(extra_specs)
1531
1532 return new_flavor.id
1533
1534 except nvExceptions.Conflict as e:
1535 if change_name_if_used and retry < max_retries:
1536 continue
1537
1538 self._format_exception(e)
1539
1540 except (
1541 ksExceptions.ClientException,
1542 nvExceptions.ClientException,
1543 ConnectionError,
1544 KeyError,
1545 ) as e:
1546 self._format_exception(e)
1547
1548 def delete_flavor(self, flavor_id):
1549 """Deletes a tenant flavor from openstack VIM. Returns the old flavor_id"""
1550 try:
1551 self._reload_connection()
1552 self.nova.flavors.delete(flavor_id)
1553
1554 return flavor_id
1555 # except nvExceptions.BadRequest as e:
1556 except (
1557 nvExceptions.NotFound,
1558 ksExceptions.ClientException,
1559 nvExceptions.ClientException,
1560 ConnectionError,
1561 ) as e:
1562 self._format_exception(e)
1563
1564 def new_image(self, image_dict):
1565 """
1566 Adds a tenant image to VIM. imge_dict is a dictionary with:
1567 name: name
1568 disk_format: qcow2, vhd, vmdk, raw (by default), ...
1569 location: path or URI
1570 public: "yes" or "no"
1571 metadata: metadata of the image
1572 Returns the image_id
1573 """
1574 retry = 0
1575 max_retries = 3
1576
1577 while retry < max_retries:
1578 retry += 1
1579 try:
1580 self._reload_connection()
1581
1582 # determine format http://docs.openstack.org/developer/glance/formats.html
1583 if "disk_format" in image_dict:
1584 disk_format = image_dict["disk_format"]
1585 else: # autodiscover based on extension
1586 if image_dict["location"].endswith(".qcow2"):
1587 disk_format = "qcow2"
1588 elif image_dict["location"].endswith(".vhd"):
1589 disk_format = "vhd"
1590 elif image_dict["location"].endswith(".vmdk"):
1591 disk_format = "vmdk"
1592 elif image_dict["location"].endswith(".vdi"):
1593 disk_format = "vdi"
1594 elif image_dict["location"].endswith(".iso"):
1595 disk_format = "iso"
1596 elif image_dict["location"].endswith(".aki"):
1597 disk_format = "aki"
1598 elif image_dict["location"].endswith(".ari"):
1599 disk_format = "ari"
1600 elif image_dict["location"].endswith(".ami"):
1601 disk_format = "ami"
1602 else:
1603 disk_format = "raw"
1604
1605 self.logger.debug(
1606 "new_image: '%s' loading from '%s'",
1607 image_dict["name"],
1608 image_dict["location"],
1609 )
1610 if self.vim_type == "VIO":
1611 container_format = "bare"
1612 if "container_format" in image_dict:
1613 container_format = image_dict["container_format"]
1614
1615 new_image = self.glance.images.create(
1616 name=image_dict["name"],
1617 container_format=container_format,
1618 disk_format=disk_format,
1619 )
1620 else:
1621 new_image = self.glance.images.create(name=image_dict["name"])
1622
1623 if image_dict["location"].startswith("http"):
1624 # TODO there is not a method to direct download. It must be downloaded locally with requests
1625 raise vimconn.VimConnNotImplemented("Cannot create image from URL")
1626 else: # local path
1627 with open(image_dict["location"]) as fimage:
1628 self.glance.images.upload(new_image.id, fimage)
1629 # new_image = self.glancev1.images.create(name=image_dict["name"], is_public=
1630 # image_dict.get("public","yes")=="yes",
1631 # container_format="bare", data=fimage, disk_format=disk_format)
1632
1633 metadata_to_load = image_dict.get("metadata")
1634
1635 # TODO location is a reserved word for current openstack versions. fixed for VIO please check
1636 # for openstack
1637 if self.vim_type == "VIO":
1638 metadata_to_load["upload_location"] = image_dict["location"]
1639 else:
1640 metadata_to_load["location"] = image_dict["location"]
1641
1642 self.glance.images.update(new_image.id, **metadata_to_load)
1643
1644 return new_image.id
1645 except (
1646 nvExceptions.Conflict,
1647 ksExceptions.ClientException,
1648 nvExceptions.ClientException,
1649 ) as e:
1650 self._format_exception(e)
1651 except (
1652 HTTPException,
1653 gl1Exceptions.HTTPException,
1654 gl1Exceptions.CommunicationError,
1655 ConnectionError,
1656 ) as e:
1657 if retry == max_retries:
1658 continue
1659
1660 self._format_exception(e)
1661 except IOError as e: # can not open the file
1662 raise vimconn.VimConnConnectionException(
1663 "{}: {} for {}".format(type(e).__name__, e, image_dict["location"]),
1664 http_code=vimconn.HTTP_Bad_Request,
1665 )
1666
1667 def delete_image(self, image_id):
1668 """Deletes a tenant image from openstack VIM. Returns the old id"""
1669 try:
1670 self._reload_connection()
1671 self.glance.images.delete(image_id)
1672
1673 return image_id
1674 except (
1675 nvExceptions.NotFound,
1676 ksExceptions.ClientException,
1677 nvExceptions.ClientException,
1678 gl1Exceptions.CommunicationError,
1679 gl1Exceptions.HTTPNotFound,
1680 ConnectionError,
1681 ) as e: # TODO remove
1682 self._format_exception(e)
1683
1684 def get_image_id_from_path(self, path):
1685 """Get the image id from image path in the VIM database. Returns the image_id"""
1686 try:
1687 self._reload_connection()
1688 images = self.glance.images.list()
1689
1690 for image in images:
1691 if image.metadata.get("location") == path:
1692 return image.id
1693
1694 raise vimconn.VimConnNotFoundException(
1695 "image with location '{}' not found".format(path)
1696 )
1697 except (
1698 ksExceptions.ClientException,
1699 nvExceptions.ClientException,
1700 gl1Exceptions.CommunicationError,
1701 ConnectionError,
1702 ) as e:
1703 self._format_exception(e)
1704
1705 def get_image_list(self, filter_dict={}):
1706 """Obtain tenant images from VIM
1707 Filter_dict can be:
1708 id: image id
1709 name: image name
1710 checksum: image checksum
1711 Returns the image list of dictionaries:
1712 [{<the fields at Filter_dict plus some VIM specific>}, ...]
1713 List can be empty
1714 """
1715 self.logger.debug("Getting image list from VIM filter: '%s'", str(filter_dict))
1716
1717 try:
1718 self._reload_connection()
1719 # filter_dict_os = filter_dict.copy()
1720 # First we filter by the available filter fields: name, id. The others are removed.
1721 image_list = self.glance.images.list()
1722 filtered_list = []
1723
1724 for image in image_list:
1725 try:
1726 if filter_dict.get("name") and image["name"] != filter_dict["name"]:
1727 continue
1728
1729 if filter_dict.get("id") and image["id"] != filter_dict["id"]:
1730 continue
1731
1732 if (
1733 filter_dict.get("checksum")
1734 and image["checksum"] != filter_dict["checksum"]
1735 ):
1736 continue
1737
1738 filtered_list.append(image.copy())
1739 except gl1Exceptions.HTTPNotFound:
1740 pass
1741
1742 return filtered_list
1743 except (
1744 ksExceptions.ClientException,
1745 nvExceptions.ClientException,
1746 gl1Exceptions.CommunicationError,
1747 ConnectionError,
1748 ) as e:
1749 self._format_exception(e)
1750
1751 def __wait_for_vm(self, vm_id, status):
1752 """wait until vm is in the desired status and return True.
1753 If the VM gets in ERROR status, return false.
1754 If the timeout is reached generate an exception"""
1755 elapsed_time = 0
1756 while elapsed_time < server_timeout:
1757 vm_status = self.nova.servers.get(vm_id).status
1758
1759 if vm_status == status:
1760 return True
1761
1762 if vm_status == "ERROR":
1763 return False
1764
1765 time.sleep(5)
1766 elapsed_time += 5
1767
1768 # if we exceeded the timeout rollback
1769 if elapsed_time >= server_timeout:
1770 raise vimconn.VimConnException(
1771 "Timeout waiting for instance " + vm_id + " to get " + status,
1772 http_code=vimconn.HTTP_Request_Timeout,
1773 )
1774
1775 def _get_openstack_availablity_zones(self):
1776 """
1777 Get from openstack availability zones available
1778 :return:
1779 """
1780 try:
1781 openstack_availability_zone = self.nova.availability_zones.list()
1782 openstack_availability_zone = [
1783 str(zone.zoneName)
1784 for zone in openstack_availability_zone
1785 if zone.zoneName != "internal"
1786 ]
1787
1788 return openstack_availability_zone
1789 except Exception:
1790 return None
1791
1792 def _set_availablity_zones(self):
1793 """
1794 Set vim availablity zone
1795 :return:
1796 """
1797 if "availability_zone" in self.config:
1798 vim_availability_zones = self.config.get("availability_zone")
1799
1800 if isinstance(vim_availability_zones, str):
1801 self.availability_zone = [vim_availability_zones]
1802 elif isinstance(vim_availability_zones, list):
1803 self.availability_zone = vim_availability_zones
1804 else:
1805 self.availability_zone = self._get_openstack_availablity_zones()
1806
1807 def _get_vm_availability_zone(
1808 self, availability_zone_index, availability_zone_list
1809 ):
1810 """
1811 Return thge availability zone to be used by the created VM.
1812 :return: The VIM availability zone to be used or None
1813 """
1814 if availability_zone_index is None:
1815 if not self.config.get("availability_zone"):
1816 return None
1817 elif isinstance(self.config.get("availability_zone"), str):
1818 return self.config["availability_zone"]
1819 else:
1820 # TODO consider using a different parameter at config for default AV and AV list match
1821 return self.config["availability_zone"][0]
1822
1823 vim_availability_zones = self.availability_zone
1824 # check if VIM offer enough availability zones describe in the VNFD
1825 if vim_availability_zones and len(availability_zone_list) <= len(
1826 vim_availability_zones
1827 ):
1828 # check if all the names of NFV AV match VIM AV names
1829 match_by_index = False
1830 for av in availability_zone_list:
1831 if av not in vim_availability_zones:
1832 match_by_index = True
1833 break
1834
1835 if match_by_index:
1836 return vim_availability_zones[availability_zone_index]
1837 else:
1838 return availability_zone_list[availability_zone_index]
1839 else:
1840 raise vimconn.VimConnConflictException(
1841 "No enough availability zones at VIM for this deployment"
1842 )
1843
1844 def _prepare_port_dict_security_groups(self, net: dict, port_dict: dict) -> None:
1845 """Fill up the security_groups in the port_dict.
1846
1847 Args:
1848 net (dict): Network details
1849 port_dict (dict): Port details
1850
1851 """
1852 if (
1853 self.config.get("security_groups")
1854 and net.get("port_security") is not False
1855 and not self.config.get("no_port_security_extension")
1856 ):
1857 if not self.security_groups_id:
1858 self._get_ids_from_name()
1859
1860 port_dict["security_groups"] = self.security_groups_id
1861
1862 def _prepare_port_dict_binding(self, net: dict, port_dict: dict) -> None:
1863 """Fill up the network binding depending on network type in the port_dict.
1864
1865 Args:
1866 net (dict): Network details
1867 port_dict (dict): Port details
1868
1869 """
1870 if not net.get("type"):
1871 raise vimconn.VimConnException("Type is missing in the network details.")
1872
1873 if net["type"] == "virtual":
1874 pass
1875
1876 # For VF
1877 elif net["type"] == "VF" or net["type"] == "SR-IOV":
1878 port_dict["binding:vnic_type"] = "direct"
1879
1880 # VIO specific Changes
1881 if self.vim_type == "VIO":
1882 # Need to create port with port_security_enabled = False and no-security-groups
1883 port_dict["port_security_enabled"] = False
1884 port_dict["provider_security_groups"] = []
1885 port_dict["security_groups"] = []
1886
1887 else:
1888 # For PT PCI-PASSTHROUGH
1889 port_dict["binding:vnic_type"] = "direct-physical"
1890
1891 @staticmethod
1892 def _set_fixed_ip(new_port: dict, net: dict) -> None:
1893 """Set the "ip" parameter in net dictionary.
1894
1895 Args:
1896 new_port (dict): New created port
1897 net (dict): Network details
1898
1899 """
1900 fixed_ips = new_port["port"].get("fixed_ips")
1901
1902 if fixed_ips:
1903 net["ip"] = fixed_ips[0].get("ip_address")
1904 else:
1905 net["ip"] = None
1906
1907 @staticmethod
1908 def _prepare_port_dict_mac_ip_addr(net: dict, port_dict: dict) -> None:
1909 """Fill up the mac_address and fixed_ips in port_dict.
1910
1911 Args:
1912 net (dict): Network details
1913 port_dict (dict): Port details
1914
1915 """
1916 if net.get("mac_address"):
1917 port_dict["mac_address"] = net["mac_address"]
1918
1919 if net.get("ip_address"):
1920 port_dict["fixed_ips"] = [{"ip_address": net["ip_address"]}]
1921 # TODO add "subnet_id": <subnet_id>
1922
1923 def _create_new_port(self, port_dict: dict, created_items: dict, net: dict) -> Dict:
1924 """Create new port using neutron.
1925
1926 Args:
1927 port_dict (dict): Port details
1928 created_items (dict): All created items
1929 net (dict): Network details
1930
1931 Returns:
1932 new_port (dict): New created port
1933
1934 """
1935 new_port = self.neutron.create_port({"port": port_dict})
1936 created_items["port:" + str(new_port["port"]["id"])] = True
1937 net["mac_adress"] = new_port["port"]["mac_address"]
1938 net["vim_id"] = new_port["port"]["id"]
1939
1940 return new_port
1941
1942 def _create_port(
1943 self, net: dict, name: str, created_items: dict
1944 ) -> Tuple[dict, dict]:
1945 """Create port using net details.
1946
1947 Args:
1948 net (dict): Network details
1949 name (str): Name to be used as network name if net dict does not include name
1950 created_items (dict): All created items
1951
1952 Returns:
1953 new_port, port New created port, port dictionary
1954
1955 """
1956
1957 port_dict = {
1958 "network_id": net["net_id"],
1959 "name": net.get("name"),
1960 "admin_state_up": True,
1961 }
1962
1963 if not port_dict["name"]:
1964 port_dict["name"] = name
1965
1966 self._prepare_port_dict_security_groups(net, port_dict)
1967
1968 self._prepare_port_dict_binding(net, port_dict)
1969
1970 vimconnector._prepare_port_dict_mac_ip_addr(net, port_dict)
1971
1972 new_port = self._create_new_port(port_dict, created_items, net)
1973
1974 vimconnector._set_fixed_ip(new_port, net)
1975
1976 port = {"port-id": new_port["port"]["id"]}
1977
1978 if float(self.nova.api_version.get_string()) >= 2.32:
1979 port["tag"] = new_port["port"]["name"]
1980
1981 return new_port, port
1982
1983 def _prepare_network_for_vminstance(
1984 self,
1985 name: str,
1986 net_list: list,
1987 created_items: dict,
1988 net_list_vim: list,
1989 external_network: list,
1990 no_secured_ports: list,
1991 ) -> None:
1992 """Create port and fill up net dictionary for new VM instance creation.
1993
1994 Args:
1995 name (str): Name of network
1996 net_list (list): List of networks
1997 created_items (dict): All created items belongs to a VM
1998 net_list_vim (list): List of ports
1999 external_network (list): List of external-networks
2000 no_secured_ports (list): Port security disabled ports
2001 """
2002
2003 self._reload_connection()
2004
2005 for net in net_list:
2006 # Skip non-connected iface
2007 if not net.get("net_id"):
2008 continue
2009
2010 new_port, port = self._create_port(net, name, created_items)
2011
2012 net_list_vim.append(port)
2013
2014 if net.get("floating_ip", False):
2015 net["exit_on_floating_ip_error"] = True
2016 external_network.append(net)
2017
2018 elif net["use"] == "mgmt" and self.config.get("use_floating_ip"):
2019 net["exit_on_floating_ip_error"] = False
2020 external_network.append(net)
2021 net["floating_ip"] = self.config.get("use_floating_ip")
2022
2023 # If port security is disabled when the port has not yet been attached to the VM, then all vm traffic
2024 # is dropped. As a workaround we wait until the VM is active and then disable the port-security
2025 if net.get("port_security") is False and not self.config.get(
2026 "no_port_security_extension"
2027 ):
2028 no_secured_ports.append(
2029 (
2030 new_port["port"]["id"],
2031 net.get("port_security_disable_strategy"),
2032 )
2033 )
2034
2035 def _prepare_persistent_root_volumes(
2036 self,
2037 name: str,
2038 vm_av_zone: list,
2039 disk: dict,
2040 base_disk_index: int,
2041 block_device_mapping: dict,
2042 existing_vim_volumes: list,
2043 created_items: dict,
2044 ) -> Optional[str]:
2045 """Prepare persistent root volumes for new VM instance.
2046
2047 Args:
2048 name (str): Name of VM instance
2049 vm_av_zone (list): List of availability zones
2050 disk (dict): Disk details
2051 base_disk_index (int): Disk index
2052 block_device_mapping (dict): Block device details
2053 existing_vim_volumes (list): Existing disk details
2054 created_items (dict): All created items belongs to VM
2055
2056 Returns:
2057 boot_volume_id (str): ID of boot volume
2058
2059 """
2060 # Disk may include only vim_volume_id or only vim_id."
2061 # Use existing persistent root volume finding with volume_id or vim_id
2062 key_id = "vim_volume_id" if "vim_volume_id" in disk.keys() else "vim_id"
2063
2064 if disk.get(key_id):
2065 block_device_mapping["vd" + chr(base_disk_index)] = disk[key_id]
2066 existing_vim_volumes.append({"id": disk[key_id]})
2067
2068 else:
2069 # Create persistent root volume
2070 volume = self.cinder.volumes.create(
2071 size=disk["size"],
2072 name=name + "vd" + chr(base_disk_index),
2073 imageRef=disk["image_id"],
2074 # Make sure volume is in the same AZ as the VM to be attached to
2075 availability_zone=vm_av_zone,
2076 )
2077 boot_volume_id = volume.id
2078 self.update_block_device_mapping(
2079 volume=volume,
2080 block_device_mapping=block_device_mapping,
2081 base_disk_index=base_disk_index,
2082 disk=disk,
2083 created_items=created_items,
2084 )
2085
2086 return boot_volume_id
2087
2088 @staticmethod
2089 def update_block_device_mapping(
2090 volume: object,
2091 block_device_mapping: dict,
2092 base_disk_index: int,
2093 disk: dict,
2094 created_items: dict,
2095 ) -> None:
2096 """Add volume information to block device mapping dict.
2097 Args:
2098 volume (object): Created volume object
2099 block_device_mapping (dict): Block device details
2100 base_disk_index (int): Disk index
2101 disk (dict): Disk details
2102 created_items (dict): All created items belongs to VM
2103 """
2104 if not volume:
2105 raise vimconn.VimConnException("Volume is empty.")
2106
2107 if not hasattr(volume, "id"):
2108 raise vimconn.VimConnException(
2109 "Created volume is not valid, does not have id attribute."
2110 )
2111
2112 volume_txt = "volume:" + str(volume.id)
2113 if disk.get("keep"):
2114 volume_txt += ":keep"
2115 created_items[volume_txt] = True
2116 block_device_mapping["vd" + chr(base_disk_index)] = volume.id
2117
2118 def _prepare_non_root_persistent_volumes(
2119 self,
2120 name: str,
2121 disk: dict,
2122 vm_av_zone: list,
2123 block_device_mapping: dict,
2124 base_disk_index: int,
2125 existing_vim_volumes: list,
2126 created_items: dict,
2127 ) -> None:
2128 """Prepare persistent volumes for new VM instance.
2129
2130 Args:
2131 name (str): Name of VM instance
2132 disk (dict): Disk details
2133 vm_av_zone (list): List of availability zones
2134 block_device_mapping (dict): Block device details
2135 base_disk_index (int): Disk index
2136 existing_vim_volumes (list): Existing disk details
2137 created_items (dict): All created items belongs to VM
2138 """
2139 # Non-root persistent volumes
2140 # Disk may include only vim_volume_id or only vim_id."
2141 key_id = "vim_volume_id" if "vim_volume_id" in disk.keys() else "vim_id"
2142
2143 if disk.get(key_id):
2144 # Use existing persistent volume
2145 block_device_mapping["vd" + chr(base_disk_index)] = disk[key_id]
2146 existing_vim_volumes.append({"id": disk[key_id]})
2147
2148 else:
2149 # Create persistent volume
2150 volume = self.cinder.volumes.create(
2151 size=disk["size"],
2152 name=name + "vd" + chr(base_disk_index),
2153 # Make sure volume is in the same AZ as the VM to be attached to
2154 availability_zone=vm_av_zone,
2155 )
2156 self.update_block_device_mapping(
2157 volume=volume,
2158 block_device_mapping=block_device_mapping,
2159 base_disk_index=base_disk_index,
2160 disk=disk,
2161 created_items=created_items,
2162 )
2163
2164 def _wait_for_created_volumes_availability(
2165 self, elapsed_time: int, created_items: dict
2166 ) -> Optional[int]:
2167 """Wait till created volumes become available.
2168
2169 Args:
2170 elapsed_time (int): Passed time while waiting
2171 created_items (dict): All created items belongs to VM
2172
2173 Returns:
2174 elapsed_time (int): Time spent while waiting
2175
2176 """
2177
2178 while elapsed_time < volume_timeout:
2179 for created_item in created_items:
2180 v, volume_id = (
2181 created_item.split(":")[0],
2182 created_item.split(":")[1],
2183 )
2184 if v == "volume":
2185 if self.cinder.volumes.get(volume_id).status != "available":
2186 break
2187 else:
2188 # All ready: break from while
2189 break
2190
2191 time.sleep(5)
2192 elapsed_time += 5
2193
2194 return elapsed_time
2195
2196 def _wait_for_existing_volumes_availability(
2197 self, elapsed_time: int, existing_vim_volumes: list
2198 ) -> Optional[int]:
2199 """Wait till existing volumes become available.
2200
2201 Args:
2202 elapsed_time (int): Passed time while waiting
2203 existing_vim_volumes (list): Existing volume details
2204
2205 Returns:
2206 elapsed_time (int): Time spent while waiting
2207
2208 """
2209
2210 while elapsed_time < volume_timeout:
2211 for volume in existing_vim_volumes:
2212 if self.cinder.volumes.get(volume["id"]).status != "available":
2213 break
2214 else: # all ready: break from while
2215 break
2216
2217 time.sleep(5)
2218 elapsed_time += 5
2219
2220 return elapsed_time
2221
    def _prepare_disk_for_vminstance(
        self,
        name: str,
        existing_vim_volumes: list,
        created_items: dict,
        vm_av_zone: list,
        block_device_mapping: dict,
        disk_list: list = None,
    ) -> None:
        """Prepare all volumes for new VM instance.

        Args:
            name (str): Name of Instance
            existing_vim_volumes (list): List of existing volumes
            created_items (dict): All created items belongs to VM
            vm_av_zone (list): VM availability zone
            block_device_mapping (dict): Block devices to be attached to VM
            disk_list (list): List of disks

        Raises:
            VimConnException: If the volumes do not become available before
                volume_timeout expires.

        """
        # Create additional volumes in case these are present in disk_list
        # Device letters start at "vdb"; "vda" is reserved for the root disk
        base_disk_index = ord("b")
        boot_volume_id = None
        elapsed_time = 0

        for disk in disk_list:
            if "image_id" in disk:
                # Root persistent volume
                # NOTE(review): resetting the index to "vda" assumes the root
                # disk comes first in disk_list; a later root disk would restart
                # the lettering — confirm ordering is guaranteed by the caller
                base_disk_index = ord("a")
                boot_volume_id = self._prepare_persistent_root_volumes(
                    name=name,
                    vm_av_zone=vm_av_zone,
                    disk=disk,
                    base_disk_index=base_disk_index,
                    block_device_mapping=block_device_mapping,
                    existing_vim_volumes=existing_vim_volumes,
                    created_items=created_items,
                )
            else:
                # Non-root persistent volume
                self._prepare_non_root_persistent_volumes(
                    name=name,
                    disk=disk,
                    vm_av_zone=vm_av_zone,
                    block_device_mapping=block_device_mapping,
                    base_disk_index=base_disk_index,
                    existing_vim_volumes=existing_vim_volumes,
                    created_items=created_items,
                )
            base_disk_index += 1

        # Wait until created volumes are with status available
        elapsed_time = self._wait_for_created_volumes_availability(
            elapsed_time, created_items
        )
        # Wait until existing volumes in vim are with status available
        elapsed_time = self._wait_for_existing_volumes_availability(
            elapsed_time, existing_vim_volumes
        )
        # If we exceeded the timeout rollback
        if elapsed_time >= volume_timeout:
            raise vimconn.VimConnException(
                "Timeout creating volumes for instance " + name,
                http_code=vimconn.HTTP_Request_Timeout,
            )
        if boot_volume_id:
            # Only a newly created root volume needs to be flagged bootable
            self.cinder.volumes.set_bootable(boot_volume_id, True)
2289
2290 def _find_the_external_network_for_floating_ip(self):
2291 """Get the external network ip in order to create floating IP.
2292
2293 Returns:
2294 pool_id (str): External network pool ID
2295
2296 """
2297
2298 # Find the external network
2299 external_nets = list()
2300
2301 for net in self.neutron.list_networks()["networks"]:
2302 if net["router:external"]:
2303 external_nets.append(net)
2304
2305 if len(external_nets) == 0:
2306 raise vimconn.VimConnException(
2307 "Cannot create floating_ip automatically since "
2308 "no external network is present",
2309 http_code=vimconn.HTTP_Conflict,
2310 )
2311
2312 if len(external_nets) > 1:
2313 raise vimconn.VimConnException(
2314 "Cannot create floating_ip automatically since "
2315 "multiple external networks are present",
2316 http_code=vimconn.HTTP_Conflict,
2317 )
2318
2319 # Pool ID
2320 return external_nets[0].get("id")
2321
2322 def _neutron_create_float_ip(self, param: dict, created_items: dict) -> None:
2323 """Trigger neutron to create a new floating IP using external network ID.
2324
2325 Args:
2326 param (dict): Input parameters to create a floating IP
2327 created_items (dict): All created items belongs to new VM instance
2328
2329 Raises:
2330
2331 VimConnException
2332 """
2333 try:
2334 self.logger.debug("Creating floating IP")
2335 new_floating_ip = self.neutron.create_floatingip(param)
2336 free_floating_ip = new_floating_ip["floatingip"]["id"]
2337 created_items["floating_ip:" + str(free_floating_ip)] = True
2338
2339 except Exception as e:
2340 raise vimconn.VimConnException(
2341 type(e).__name__ + ": Cannot create new floating_ip " + str(e),
2342 http_code=vimconn.HTTP_Conflict,
2343 )
2344
2345 def _create_floating_ip(
2346 self, floating_network: dict, server: object, created_items: dict
2347 ) -> None:
2348 """Get the available Pool ID and create a new floating IP.
2349
2350 Args:
2351 floating_network (dict): Dict including external network ID
2352 server (object): Server object
2353 created_items (dict): All created items belongs to new VM instance
2354
2355 """
2356
2357 # Pool_id is available
2358 if (
2359 isinstance(floating_network["floating_ip"], str)
2360 and floating_network["floating_ip"].lower() != "true"
2361 ):
2362 pool_id = floating_network["floating_ip"]
2363
2364 # Find the Pool_id
2365 else:
2366 pool_id = self._find_the_external_network_for_floating_ip()
2367
2368 param = {
2369 "floatingip": {
2370 "floating_network_id": pool_id,
2371 "tenant_id": server.tenant_id,
2372 }
2373 }
2374
2375 self._neutron_create_float_ip(param, created_items)
2376
2377 def _find_floating_ip(
2378 self,
2379 server: object,
2380 floating_ips: list,
2381 floating_network: dict,
2382 ) -> Optional[str]:
2383 """Find the available free floating IPs if there are.
2384
2385 Args:
2386 server (object): Server object
2387 floating_ips (list): List of floating IPs
2388 floating_network (dict): Details of floating network such as ID
2389
2390 Returns:
2391 free_floating_ip (str): Free floating ip address
2392
2393 """
2394 for fip in floating_ips:
2395 if fip.get("port_id") or fip.get("tenant_id") != server.tenant_id:
2396 continue
2397
2398 if isinstance(floating_network["floating_ip"], str):
2399 if fip.get("floating_network_id") != floating_network["floating_ip"]:
2400 continue
2401
2402 return fip["id"]
2403
2404 def _assign_floating_ip(
2405 self, free_floating_ip: str, floating_network: dict
2406 ) -> Dict:
2407 """Assign the free floating ip address to port.
2408
2409 Args:
2410 free_floating_ip (str): Floating IP to be assigned
2411 floating_network (dict): ID of floating network
2412
2413 Returns:
2414 fip (dict) (dict): Floating ip details
2415
2416 """
2417 # The vim_id key contains the neutron.port_id
2418 self.neutron.update_floatingip(
2419 free_floating_ip,
2420 {"floatingip": {"port_id": floating_network["vim_id"]}},
2421 )
2422 # For race condition ensure not re-assigned to other VM after 5 seconds
2423 time.sleep(5)
2424
2425 return self.neutron.show_floatingip(free_floating_ip)
2426
2427 def _get_free_floating_ip(
2428 self, server: object, floating_network: dict
2429 ) -> Optional[str]:
2430 """Get the free floating IP address.
2431
2432 Args:
2433 server (object): Server Object
2434 floating_network (dict): Floating network details
2435
2436 Returns:
2437 free_floating_ip (str): Free floating ip addr
2438
2439 """
2440
2441 floating_ips = self.neutron.list_floatingips().get("floatingips", ())
2442
2443 # Randomize
2444 random.shuffle(floating_ips)
2445
2446 return self._find_floating_ip(server, floating_ips, floating_network)
2447
    def _prepare_external_network_for_vminstance(
        self,
        external_network: list,
        server: object,
        created_items: dict,
        vm_start_time: float,
    ) -> None:
        """Assign floating IP address for VM instance.

        Args:
            external_network (list): ID of External network
            server (object): Server Object
            created_items (dict): All created items belongs to new VM instance
            vm_start_time (float): Time as a floating point number expressed in seconds since the epoch, in UTC

        Raises:
            VimConnException

        """
        for floating_network in external_network:
            try:
                assigned = False
                floating_ip_retries = 3
                # In case of RO in HA there can be conflicts, two RO trying to assign same floating IP, so retry
                # several times
                while not assigned:
                    free_floating_ip = self._get_free_floating_ip(
                        server, floating_network
                    )

                    if not free_floating_ip:
                        # NOTE(review): free_floating_ip stays None here;
                        # show_floatingip(None) below raises and the except
                        # branch drives a retry that then finds the new IP
                        self._create_floating_ip(
                            floating_network, server, created_items
                        )

                    try:
                        # For race condition ensure not already assigned
                        fip = self.neutron.show_floatingip(free_floating_ip)

                        if fip["floatingip"].get("port_id"):
                            continue

                        # Assign floating ip
                        fip = self._assign_floating_ip(
                            free_floating_ip, floating_network
                        )

                        # Another RO may have stolen the IP between the check
                        # above and the assignment; if so, pick a new candidate
                        if fip["floatingip"]["port_id"] != floating_network["vim_id"]:
                            self.logger.warning(
                                "floating_ip {} re-assigned to other port".format(
                                    free_floating_ip
                                )
                            )
                            continue

                        self.logger.debug(
                            "Assigned floating_ip {} to VM {}".format(
                                free_floating_ip, server.id
                            )
                        )

                        assigned = True

                    except Exception as e:
                        # Openstack need some time after VM creation to assign an IP. So retry if fails
                        vm_status = self.nova.servers.get(server.id).status

                        if vm_status not in ("ACTIVE", "ERROR"):
                            if time.time() - vm_start_time < server_timeout:
                                time.sleep(5)
                                continue
                        elif floating_ip_retries > 0:
                            floating_ip_retries -= 1
                            continue

                        raise vimconn.VimConnException(
                            "Cannot create floating_ip: {} {}".format(
                                type(e).__name__, e
                            ),
                            http_code=vimconn.HTTP_Conflict,
                        )

            except Exception as e:
                # Best-effort networks only log the failure; mandatory ones
                # (exit_on_floating_ip_error=True) abort the whole instantiation
                if not floating_network["exit_on_floating_ip_error"]:
                    self.logger.error("Cannot create floating_ip. %s", str(e))
                    continue

                raise
2536
2537 def _update_port_security_for_vminstance(
2538 self,
2539 no_secured_ports: list,
2540 server: object,
2541 ) -> None:
2542 """Updates the port security according to no_secured_ports list.
2543
2544 Args:
2545 no_secured_ports (list): List of ports that security will be disabled
2546 server (object): Server Object
2547
2548 Raises:
2549 VimConnException
2550
2551 """
2552 # Wait until the VM is active and then disable the port-security
2553 if no_secured_ports:
2554 self.__wait_for_vm(server.id, "ACTIVE")
2555
2556 for port in no_secured_ports:
2557 port_update = {
2558 "port": {"port_security_enabled": False, "security_groups": None}
2559 }
2560
2561 if port[1] == "allow-address-pairs":
2562 port_update = {
2563 "port": {"allowed_address_pairs": [{"ip_address": "0.0.0.0/0"}]}
2564 }
2565
2566 try:
2567 self.neutron.update_port(port[0], port_update)
2568
2569 except Exception:
2570 raise vimconn.VimConnException(
2571 "It was not possible to disable port security for port {}".format(
2572 port[0]
2573 )
2574 )
2575
    def new_vminstance(
        self,
        name: str,
        description: str,
        start: bool,
        image_id: str,
        flavor_id: str,
        affinity_group_list: list,
        net_list: list,
        cloud_config=None,
        disk_list=None,
        availability_zone_index=None,
        availability_zone_list=None,
    ) -> tuple:
        """Adds a VM instance to VIM.

        Args:
            name (str): name of VM
            description (str): description
            start (bool): indicates if VM must start or boot in pause mode. Ignored
            image_id (str) image uuid
            flavor_id (str) flavor uuid
            affinity_group_list (list): list of affinity groups, each one is a dictionary.Ignore if empty.
            net_list (list): list of interfaces, each one is a dictionary with:
                name: name of network
                net_id: network uuid to connect
                vpci: virtual vcpi to assign, ignored because openstack lack #TODO
                model: interface model, ignored #TODO
                mac_address: used for SR-IOV ifaces #TODO for other types
                use: 'data', 'bridge',  'mgmt'
                type: 'virtual', 'PCI-PASSTHROUGH'('PF'), 'SR-IOV'('VF'), 'VFnotShared'
                vim_id: filled/added by this function
                floating_ip: True/False (or it can be None)
                port_security: True/False
            cloud_config (dict): (optional) dictionary with:
                key-pairs: (optional) list of strings with the public key to be inserted to the default user
                users: (optional) list of users to be inserted, each item is a dict with:
                    name: (mandatory) user name,
                    key-pairs: (optional) list of strings with the public key to be inserted to the user
                user-data: (optional) string is a text script to be passed directly to cloud-init
                config-files: (optional). List of files to be transferred. Each item is a dict with:
                    dest: (mandatory) string with the destination absolute path
                    encoding: (optional, by default text). Can be one of:
                        'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
                    content : (mandatory) string with the content of the file
                    permissions: (optional) string with file permissions, typically octal notation '0644'
                    owner: (optional) file owner, string with the format 'owner:group'
                boot-data-drive: boolean to indicate if user-data must be passed using a boot drive (hard disk)
            disk_list: (optional) list with additional disks to the VM. Each item is a dict with:
                image_id: (optional). VIM id of an existing image. If not provided an empty disk must be mounted
                size: (mandatory) string with the size of the disk in GB
                vim_id: (optional) should use this existing volume id
            availability_zone_index: Index of availability_zone_list to use for this this VM. None if not AV required
            availability_zone_list: list of availability zones given by user in the VNFD descriptor.  Ignore if
                availability_zone_index is None
                #TODO ip, security groups

        Returns:
            A tuple with the instance identifier and created_items or raises an exception on error
            created_items can be None or a dictionary where this method can include key-values that will be passed to
            the method delete_vminstance and action_vminstance. Can be used to store created ports, volumes, etc.
            Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
            as not present.

        """
        self.logger.debug(
            "new_vminstance input: image='%s' flavor='%s' nics='%s'",
            image_id,
            flavor_id,
            str(net_list),
        )

        try:
            # server stays None until nova creation succeeds; the rollback in
            # the except branch relies on that to decide what to clean up
            server = None
            created_items = {}
            net_list_vim = []
            # list of external networks to be connected to instance, later on used to create floating_ip
            external_network = []
            # List of ports with port-security disabled
            no_secured_ports = []
            block_device_mapping = {}
            existing_vim_volumes = []
            server_group_id = None
            # NOTE: "scheduller" spelling is historical; passed to nova as scheduler_hints
            scheduller_hints = {}

            # Check the Openstack Connection
            self._reload_connection()

            # Prepare network list
            self._prepare_network_for_vminstance(
                name=name,
                net_list=net_list,
                created_items=created_items,
                net_list_vim=net_list_vim,
                external_network=external_network,
                no_secured_ports=no_secured_ports,
            )

            # Cloud config
            config_drive, userdata = self._create_user_data(cloud_config)

            # Get availability Zone
            vm_av_zone = self._get_vm_availability_zone(
                availability_zone_index, availability_zone_list
            )

            if disk_list:
                # Prepare disks
                self._prepare_disk_for_vminstance(
                    name=name,
                    existing_vim_volumes=existing_vim_volumes,
                    created_items=created_items,
                    vm_av_zone=vm_av_zone,
                    block_device_mapping=block_device_mapping,
                    disk_list=disk_list,
                )

            if affinity_group_list:
                # Only first id on the list will be used. Openstack restriction
                server_group_id = affinity_group_list[0]["affinity_group_id"]
                scheduller_hints["group"] = server_group_id

            self.logger.debug(
                "nova.servers.create({}, {}, {}, nics={}, security_groups={}, "
                "availability_zone={}, key_name={}, userdata={}, config_drive={}, "
                "block_device_mapping={}, server_group={})".format(
                    name,
                    image_id,
                    flavor_id,
                    net_list_vim,
                    self.config.get("security_groups"),
                    vm_av_zone,
                    self.config.get("keypair"),
                    userdata,
                    config_drive,
                    block_device_mapping,
                    server_group_id,
                )
            )

            # Create VM
            server = self.nova.servers.create(
                name=name,
                image=image_id,
                flavor=flavor_id,
                nics=net_list_vim,
                security_groups=self.config.get("security_groups"),
                # TODO remove security_groups in future versions. Already at neutron port
                availability_zone=vm_av_zone,
                key_name=self.config.get("keypair"),
                userdata=userdata,
                config_drive=config_drive,
                block_device_mapping=block_device_mapping,
                scheduler_hints=scheduller_hints,
            )

            vm_start_time = time.time()

            # Ports flagged as unsecured can only be updated once the VM is up
            self._update_port_security_for_vminstance(no_secured_ports, server)

            self._prepare_external_network_for_vminstance(
                external_network=external_network,
                server=server,
                created_items=created_items,
                vm_start_time=vm_start_time,
            )

            return server.id, created_items

        except Exception as e:
            server_id = None
            if server:
                server_id = server.id

            try:
                # Rollback must also remove keep-flagged volumes: the instance
                # never came up, so nothing deserves to be preserved
                created_items = self.remove_keep_tag_from_persistent_volumes(
                    created_items
                )

                self.delete_vminstance(server_id, created_items)

            except Exception as e2:
                self.logger.error("new_vminstance rollback fail {}".format(e2))

            self._format_exception(e)
2761
2762 @staticmethod
2763 def remove_keep_tag_from_persistent_volumes(created_items: Dict) -> Dict:
2764 """Removes the keep flag from persistent volumes. So, those volumes could be removed.
2765
2766 Args:
2767 created_items (dict): All created items belongs to VM
2768
2769 Returns:
2770 updated_created_items (dict): Dict which does not include keep flag for volumes.
2771
2772 """
2773 return {
2774 key.replace(":keep", ""): value for (key, value) in created_items.items()
2775 }
2776
2777 def get_vminstance(self, vm_id):
2778 """Returns the VM instance information from VIM"""
2779 # self.logger.debug("Getting VM from VIM")
2780 try:
2781 self._reload_connection()
2782 server = self.nova.servers.find(id=vm_id)
2783 # TODO parse input and translate to VIM format (openmano_schemas.new_vminstance_response_schema)
2784
2785 return server.to_dict()
2786 except (
2787 ksExceptions.ClientException,
2788 nvExceptions.ClientException,
2789 nvExceptions.NotFound,
2790 ConnectionError,
2791 ) as e:
2792 self._format_exception(e)
2793
    def get_vminstance_console(self, vm_id, console_type="vnc"):
        """
        Get a console for the virtual machine
        Params:
            vm_id: uuid of the VM
            console_type, can be:
                "novnc" (by default), "xvpvnc" for VNC types,
                "rdp-html5" for RDP types, "spice-html5" for SPICE types
        Returns dict with the console parameters:
                protocol: ssh, ftp, http, https, ...
                server: usually ip address
                port: the http, ssh, ... port
                suffix: extra text, e.g. the http path and query string
        """
        self.logger.debug("Getting VM CONSOLE from VIM")

        try:
            self._reload_connection()
            server = self.nova.servers.find(id=vm_id)

            # NOTE(review): the default console_type "vnc" matches none of the
            # branches below and raises "not allowed"; callers apparently pass
            # None or "novnc" — confirm before relying on the default
            if console_type is None or console_type == "novnc":
                console_dict = server.get_vnc_console("novnc")
            elif console_type == "xvpvnc":
                console_dict = server.get_vnc_console(console_type)
            elif console_type == "rdp-html5":
                console_dict = server.get_rdp_console(console_type)
            elif console_type == "spice-html5":
                console_dict = server.get_spice_console(console_type)
            else:
                raise vimconn.VimConnException(
                    "console type '{}' not allowed".format(console_type),
                    http_code=vimconn.HTTP_Bad_Request,
                )

            console_dict1 = console_dict.get("console")

            if console_dict1:
                console_url = console_dict1.get("url")

                if console_url:
                    # parse console_url into protocol://server:port/suffix by
                    # locating the "//", the first "/" after it, and the ":"
                    # in between
                    protocol_index = console_url.find("//")
                    suffix_index = (
                        console_url[protocol_index + 2 :].find("/") + protocol_index + 2
                    )
                    port_index = (
                        console_url[protocol_index + 2 : suffix_index].find(":")
                        + protocol_index
                        + 2
                    )

                    if protocol_index < 0 or port_index < 0 or suffix_index < 0:
                        # Legacy error convention: negative HTTP code plus message
                        # instead of raising
                        return (
                            -vimconn.HTTP_Internal_Server_Error,
                            "Unexpected response from VIM",
                        )

                    console_dict = {
                        "protocol": console_url[0:protocol_index],
                        "server": console_url[protocol_index + 2 : port_index],
                        "port": console_url[port_index:suffix_index],
                        "suffix": console_url[suffix_index + 1 :],
                    }
                    # NOTE(review): dead statement — protocol_index is not used
                    # after this point
                    protocol_index += 2

                    return console_dict
            raise vimconn.VimConnUnexpectedResponse("Unexpected response from VIM")
        except (
            nvExceptions.NotFound,
            ksExceptions.ClientException,
            nvExceptions.ClientException,
            nvExceptions.BadRequest,
            ConnectionError,
        ) as e:
            self._format_exception(e)
2869
2870 def _delete_ports_by_id_wth_neutron(self, k_id: str) -> None:
2871 """Neutron delete ports by id.
2872 Args:
2873 k_id (str): Port id in the VIM
2874 """
2875 try:
2876 port_dict = self.neutron.list_ports()
2877 existing_ports = [port["id"] for port in port_dict["ports"] if port_dict]
2878
2879 if k_id in existing_ports:
2880 self.neutron.delete_port(k_id)
2881
2882 except Exception as e:
2883 self.logger.error("Error deleting port: {}: {}".format(type(e).__name__, e))
2884
2885 def _delete_volumes_by_id_wth_cinder(
2886 self, k: str, k_id: str, volumes_to_hold: list, created_items: dict
2887 ) -> bool:
2888 """Cinder delete volume by id.
2889 Args:
2890 k (str): Full item name in created_items
2891 k_id (str): ID of floating ip in VIM
2892 volumes_to_hold (list): Volumes not to delete
2893 created_items (dict): All created items belongs to VM
2894 """
2895 try:
2896 if k_id in volumes_to_hold:
2897 return
2898
2899 if self.cinder.volumes.get(k_id).status != "available":
2900 return True
2901
2902 else:
2903 self.cinder.volumes.delete(k_id)
2904 created_items[k] = None
2905
2906 except Exception as e:
2907 self.logger.error(
2908 "Error deleting volume: {}: {}".format(type(e).__name__, e)
2909 )
2910
2911 def _delete_floating_ip_by_id(self, k: str, k_id: str, created_items: dict) -> None:
2912 """Neutron delete floating ip by id.
2913 Args:
2914 k (str): Full item name in created_items
2915 k_id (str): ID of floating ip in VIM
2916 created_items (dict): All created items belongs to VM
2917 """
2918 try:
2919 self.neutron.delete_floatingip(k_id)
2920 created_items[k] = None
2921
2922 except Exception as e:
2923 self.logger.error(
2924 "Error deleting floating ip: {}: {}".format(type(e).__name__, e)
2925 )
2926
2927 @staticmethod
2928 def _get_item_name_id(k: str) -> Tuple[str, str]:
2929 k_item, _, k_id = k.partition(":")
2930 return k_item, k_id
2931
2932 def _delete_vm_ports_attached_to_network(self, created_items: dict) -> None:
2933 """Delete VM ports attached to the networks before deleting virtual machine.
2934 Args:
2935 created_items (dict): All created items belongs to VM
2936 """
2937
2938 for k, v in created_items.items():
2939 if not v: # skip already deleted
2940 continue
2941
2942 try:
2943 k_item, k_id = self._get_item_name_id(k)
2944 if k_item == "port":
2945 self._delete_ports_by_id_wth_neutron(k_id)
2946
2947 except Exception as e:
2948 self.logger.error(
2949 "Error deleting port: {}: {}".format(type(e).__name__, e)
2950 )
2951
2952 def _delete_created_items(
2953 self, created_items: dict, volumes_to_hold: list, keep_waiting: bool
2954 ) -> bool:
2955 """Delete Volumes and floating ip if they exist in created_items."""
2956 for k, v in created_items.items():
2957 if not v: # skip already deleted
2958 continue
2959
2960 try:
2961 k_item, k_id = self._get_item_name_id(k)
2962
2963 if k_item == "volume":
2964 unavailable_vol = self._delete_volumes_by_id_wth_cinder(
2965 k, k_id, volumes_to_hold, created_items
2966 )
2967
2968 if unavailable_vol:
2969 keep_waiting = True
2970
2971 elif k_item == "floating_ip":
2972 self._delete_floating_ip_by_id(k, k_id, created_items)
2973
2974 except Exception as e:
2975 self.logger.error("Error deleting {}: {}".format(k, e))
2976
2977 return keep_waiting
2978
2979 @staticmethod
2980 def _extract_items_wth_keep_flag_from_created_items(created_items: dict) -> dict:
2981 """Remove the volumes which has key flag from created_items
2982
2983 Args:
2984 created_items (dict): All created items belongs to VM
2985
2986 Returns:
2987 created_items (dict): Persistent volumes eliminated created_items
2988 """
2989 return {
2990 key: value
2991 for (key, value) in created_items.items()
2992 if len(key.split(":")) == 2
2993 }
2994
    def delete_vminstance(
        self, vm_id: str, created_items: dict = None, volumes_to_hold: list = None
    ) -> None:
        """Removes a VM instance from VIM. Returns the old identifier.
        Args:
            vm_id (str): Identifier of VM instance
            created_items (dict): All created items belongs to VM
            volumes_to_hold (list): Volumes_to_hold
        """
        if created_items is None:
            created_items = {}
        if volumes_to_hold is None:
            volumes_to_hold = []

        try:
            # Drop keep-flagged entries ("volume:<id>:keep") up front so those
            # persistent volumes survive the deletion below
            created_items = self._extract_items_wth_keep_flag_from_created_items(
                created_items
            )

            self._reload_connection()

            # Delete VM ports attached to the networks before the virtual machine
            if created_items:
                self._delete_vm_ports_attached_to_network(created_items)

            if vm_id:
                self.nova.servers.delete(vm_id)

            # Although having detached, volumes should have in active status before deleting.
            # We ensure in this loop
            keep_waiting = True
            elapsed_time = 0

            while keep_waiting and elapsed_time < volume_timeout:
                keep_waiting = False

                # Delete volumes and floating IP.
                keep_waiting = self._delete_created_items(
                    created_items, volumes_to_hold, keep_waiting
                )

                # Re-poll every second until all volumes could be deleted or
                # volume_timeout expires
                if keep_waiting:
                    time.sleep(1)
                    elapsed_time += 1

        except (
            nvExceptions.NotFound,
            ksExceptions.ClientException,
            nvExceptions.ClientException,
            ConnectionError,
        ) as e:
            self._format_exception(e)
3047
    def refresh_vms_status(self, vm_list):
        """Get the status of the virtual machines and their interfaces/ports
        Params: the list of VM identifiers
        Returns a dictionary with:
            vm_id:          #VIM id of this Virtual Machine
                status:     #Mandatory. Text with one of:
                            #  DELETED (not found at vim)
                            #  VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
                            #  OTHER (Vim reported other status not understood)
                            #  ERROR (VIM indicates an ERROR status)
                            #  ACTIVE, PAUSED, SUSPENDED, INACTIVE (not running),
                            #  CREATING (on building process), ERROR
                            #  ACTIVE:NoMgmtIP (Active but any of its interface has an IP address
                            #
                error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR
                vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)
                interfaces:
                 - vim_info:         #Text with plain information obtained from vim (yaml.safe_dump)
                    mac_address:      #Text format XX:XX:XX:XX:XX:XX
                    vim_net_id:       #network id where this interface is connected
                    vim_interface_id: #interface/port VIM id
                    ip_address:       #null, or text with IPv4, IPv6 address
                    compute_node:     #identification of compute node where PF,VF interface is allocated
                    pci:              #PCI address of the NIC that hosts the PF,VF
                    vlan:             #physical VLAN used for VF
        """
        vm_dict = {}
        self.logger.debug(
            "refresh_vms status: Getting tenant VM instance information from VIM"
        )

        for vm_id in vm_list:
            vm = {}

            try:
                vm_vim = self.get_vminstance(vm_id)

                # Map the VIM status to the mano status vocabulary
                if vm_vim["status"] in vmStatus2manoFormat:
                    vm["status"] = vmStatus2manoFormat[vm_vim["status"]]
                else:
                    vm["status"] = "OTHER"
                    vm["error_msg"] = "VIM status reported " + vm_vim["status"]

                # user_data may be large and sensitive; do not serialize it
                vm_vim.pop("OS-EXT-SRV-ATTR:user_data", None)
                vm_vim.pop("user_data", None)
                vm["vim_info"] = self.serialize(vm_vim)

                vm["interfaces"] = []
                if vm_vim.get("fault"):
                    vm["error_msg"] = str(vm_vim["fault"])

                # get interfaces
                try:
                    self._reload_connection()
                    port_dict = self.neutron.list_ports(device_id=vm_id)

                    for port in port_dict["ports"]:
                        interface = {}
                        interface["vim_info"] = self.serialize(port)
                        interface["mac_address"] = port.get("mac_address")
                        interface["vim_net_id"] = port["network_id"]
                        interface["vim_interface_id"] = port["id"]
                        # check if OS-EXT-SRV-ATTR:host is there,
                        # in case of non-admin credentials, it will be missing

                        if vm_vim.get("OS-EXT-SRV-ATTR:host"):
                            interface["compute_node"] = vm_vim["OS-EXT-SRV-ATTR:host"]

                        interface["pci"] = None

                        # check if binding:profile is there,
                        # in case of non-admin credentials, it will be missing
                        if port.get("binding:profile"):
                            if port["binding:profile"].get("pci_slot"):
                                # TODO: At the moment sr-iov pci addresses are converted to PF pci addresses by setting
                                # the slot to 0x00
                                # TODO: This is just a workaround valid for niantinc. Find a better way to do so
                                # CHANGE DDDD:BB:SS.F to DDDD:BB:00.(F%2) assuming there are 2 ports per nic
                                pci = port["binding:profile"]["pci_slot"]
                                # interface["pci"] = pci[:-4] + "00." + str(int(pci[-1]) % 2)
                                interface["pci"] = pci

                        interface["vlan"] = None

                        if port.get("binding:vif_details"):
                            interface["vlan"] = port["binding:vif_details"].get("vlan")

                        # Get vlan from network in case not present in port for those old openstacks and cases where
                        # it is needed vlan at PT
                        if not interface["vlan"]:
                            # if network is of type vlan and port is of type direct (sr-iov) then set vlan id
                            network = self.neutron.show_network(port["network_id"])

                            if (
                                network["network"].get("provider:network_type")
                                == "vlan"
                            ):
                                # and port.get("binding:vnic_type") in ("direct", "direct-physical"):
                                interface["vlan"] = network["network"].get(
                                    "provider:segmentation_id"
                                )

                        ips = []
                        # look for floating ip address
                        try:
                            floating_ip_dict = self.neutron.list_floatingips(
                                port_id=port["id"]
                            )

                            if floating_ip_dict.get("floatingips"):
                                ips.append(
                                    floating_ip_dict["floatingips"][0].get(
                                        "floating_ip_address"
                                    )
                                )
                        except Exception:
                            # floating IPs are optional; best-effort lookup
                            pass

                        for subnet in port["fixed_ips"]:
                            ips.append(subnet["ip_address"])

                        interface["ip_address"] = ";".join(ips)
                        vm["interfaces"].append(interface)
                except Exception as e:
                    # interface retrieval failure does not invalidate VM status
                    self.logger.error(
                        "Error getting vm interface information {}: {}".format(
                            type(e).__name__, e
                        ),
                        exc_info=True,
                    )
            except vimconn.VimConnNotFoundException as e:
                self.logger.error("Exception getting vm status: %s", str(e))
                vm["status"] = "DELETED"
                vm["error_msg"] = str(e)
            except vimconn.VimConnException as e:
                self.logger.error("Exception getting vm status: %s", str(e))
                vm["status"] = "VIM_ERROR"
                vm["error_msg"] = str(e)

            vm_dict[vm_id] = vm

        return vm_dict
3190
3191 def action_vminstance(self, vm_id, action_dict, created_items={}):
3192 """Send and action over a VM instance from VIM
3193 Returns None or the console dict if the action was successfully sent to the VIM
3194 """
3195 self.logger.debug("Action over VM '%s': %s", vm_id, str(action_dict))
3196
3197 try:
3198 self._reload_connection()
3199 server = self.nova.servers.find(id=vm_id)
3200
3201 if "start" in action_dict:
3202 if action_dict["start"] == "rebuild":
3203 server.rebuild()
3204 else:
3205 if server.status == "PAUSED":
3206 server.unpause()
3207 elif server.status == "SUSPENDED":
3208 server.resume()
3209 elif server.status == "SHUTOFF":
3210 server.start()
3211 else:
3212 self.logger.debug(
3213 "ERROR : Instance is not in SHUTOFF/PAUSE/SUSPEND state"
3214 )
3215 raise vimconn.VimConnException(
3216 "Cannot 'start' instance while it is in active state",
3217 http_code=vimconn.HTTP_Bad_Request,
3218 )
3219
3220 elif "pause" in action_dict:
3221 server.pause()
3222 elif "resume" in action_dict:
3223 server.resume()
3224 elif "shutoff" in action_dict or "shutdown" in action_dict:
3225 self.logger.debug("server status %s", server.status)
3226 if server.status == "ACTIVE":
3227 server.stop()
3228 else:
3229 self.logger.debug("ERROR: VM is not in Active state")
3230 raise vimconn.VimConnException(
3231 "VM is not in active state, stop operation is not allowed",
3232 http_code=vimconn.HTTP_Bad_Request,
3233 )
3234 elif "forceOff" in action_dict:
3235 server.stop() # TODO
3236 elif "terminate" in action_dict:
3237 server.delete()
3238 elif "createImage" in action_dict:
3239 server.create_image()
3240 # "path":path_schema,
3241 # "description":description_schema,
3242 # "name":name_schema,
3243 # "metadata":metadata_schema,
3244 # "imageRef": id_schema,
3245 # "disk": {"oneOf":[{"type": "null"}, {"type":"string"}] },
3246 elif "rebuild" in action_dict:
3247 server.rebuild(server.image["id"])
3248 elif "reboot" in action_dict:
3249 server.reboot() # reboot_type="SOFT"
3250 elif "console" in action_dict:
3251 console_type = action_dict["console"]
3252
3253 if console_type is None or console_type == "novnc":
3254 console_dict = server.get_vnc_console("novnc")
3255 elif console_type == "xvpvnc":
3256 console_dict = server.get_vnc_console(console_type)
3257 elif console_type == "rdp-html5":
3258 console_dict = server.get_rdp_console(console_type)
3259 elif console_type == "spice-html5":
3260 console_dict = server.get_spice_console(console_type)
3261 else:
3262 raise vimconn.VimConnException(
3263 "console type '{}' not allowed".format(console_type),
3264 http_code=vimconn.HTTP_Bad_Request,
3265 )
3266
3267 try:
3268 console_url = console_dict["console"]["url"]
3269 # parse console_url
3270 protocol_index = console_url.find("//")
3271 suffix_index = (
3272 console_url[protocol_index + 2 :].find("/") + protocol_index + 2
3273 )
3274 port_index = (
3275 console_url[protocol_index + 2 : suffix_index].find(":")
3276 + protocol_index
3277 + 2
3278 )
3279
3280 if protocol_index < 0 or port_index < 0 or suffix_index < 0:
3281 raise vimconn.VimConnException(
3282 "Unexpected response from VIM " + str(console_dict)
3283 )
3284
3285 console_dict2 = {
3286 "protocol": console_url[0:protocol_index],
3287 "server": console_url[protocol_index + 2 : port_index],
3288 "port": int(console_url[port_index + 1 : suffix_index]),
3289 "suffix": console_url[suffix_index + 1 :],
3290 }
3291
3292 return console_dict2
3293 except Exception:
3294 raise vimconn.VimConnException(
3295 "Unexpected response from VIM " + str(console_dict)
3296 )
3297
3298 return None
3299 except (
3300 ksExceptions.ClientException,
3301 nvExceptions.ClientException,
3302 nvExceptions.NotFound,
3303 ConnectionError,
3304 ) as e:
3305 self._format_exception(e)
3306 # TODO insert exception vimconn.HTTP_Unauthorized
3307
3308 # ###### VIO Specific Changes #########
3309 def _generate_vlanID(self):
3310 """
3311 Method to get unused vlanID
3312 Args:
3313 None
3314 Returns:
3315 vlanID
3316 """
3317 # Get used VLAN IDs
3318 usedVlanIDs = []
3319 networks = self.get_network_list()
3320
3321 for net in networks:
3322 if net.get("provider:segmentation_id"):
3323 usedVlanIDs.append(net.get("provider:segmentation_id"))
3324
3325 used_vlanIDs = set(usedVlanIDs)
3326
3327 # find unused VLAN ID
3328 for vlanID_range in self.config.get("dataplane_net_vlan_range"):
3329 try:
3330 start_vlanid, end_vlanid = map(
3331 int, vlanID_range.replace(" ", "").split("-")
3332 )
3333
3334 for vlanID in range(start_vlanid, end_vlanid + 1):
3335 if vlanID not in used_vlanIDs:
3336 return vlanID
3337 except Exception as exp:
3338 raise vimconn.VimConnException(
3339 "Exception {} occurred while generating VLAN ID.".format(exp)
3340 )
3341 else:
3342 raise vimconn.VimConnConflictException(
3343 "Unable to create the SRIOV VLAN network. All given Vlan IDs {} are in use.".format(
3344 self.config.get("dataplane_net_vlan_range")
3345 )
3346 )
3347
3348 def _generate_multisegment_vlanID(self):
3349 """
3350 Method to get unused vlanID
3351 Args:
3352 None
3353 Returns:
3354 vlanID
3355 """
3356 # Get used VLAN IDs
3357 usedVlanIDs = []
3358 networks = self.get_network_list()
3359 for net in networks:
3360 if net.get("provider:network_type") == "vlan" and net.get(
3361 "provider:segmentation_id"
3362 ):
3363 usedVlanIDs.append(net.get("provider:segmentation_id"))
3364 elif net.get("segments"):
3365 for segment in net.get("segments"):
3366 if segment.get("provider:network_type") == "vlan" and segment.get(
3367 "provider:segmentation_id"
3368 ):
3369 usedVlanIDs.append(segment.get("provider:segmentation_id"))
3370
3371 used_vlanIDs = set(usedVlanIDs)
3372
3373 # find unused VLAN ID
3374 for vlanID_range in self.config.get("multisegment_vlan_range"):
3375 try:
3376 start_vlanid, end_vlanid = map(
3377 int, vlanID_range.replace(" ", "").split("-")
3378 )
3379
3380 for vlanID in range(start_vlanid, end_vlanid + 1):
3381 if vlanID not in used_vlanIDs:
3382 return vlanID
3383 except Exception as exp:
3384 raise vimconn.VimConnException(
3385 "Exception {} occurred while generating VLAN ID.".format(exp)
3386 )
3387 else:
3388 raise vimconn.VimConnConflictException(
3389 "Unable to create the VLAN segment. All VLAN IDs {} are in use.".format(
3390 self.config.get("multisegment_vlan_range")
3391 )
3392 )
3393
3394 def _validate_vlan_ranges(self, input_vlan_range, text_vlan_range):
3395 """
3396 Method to validate user given vlanID ranges
3397 Args: None
3398 Returns: None
3399 """
3400 for vlanID_range in input_vlan_range:
3401 vlan_range = vlanID_range.replace(" ", "")
3402 # validate format
3403 vlanID_pattern = r"(\d)*-(\d)*$"
3404 match_obj = re.match(vlanID_pattern, vlan_range)
3405 if not match_obj:
3406 raise vimconn.VimConnConflictException(
3407 "Invalid VLAN range for {}: {}.You must provide "
3408 "'{}' in format [start_ID - end_ID].".format(
3409 text_vlan_range, vlanID_range, text_vlan_range
3410 )
3411 )
3412
3413 start_vlanid, end_vlanid = map(int, vlan_range.split("-"))
3414 if start_vlanid <= 0:
3415 raise vimconn.VimConnConflictException(
3416 "Invalid VLAN range for {}: {}. Start ID can not be zero. For VLAN "
3417 "networks valid IDs are 1 to 4094 ".format(
3418 text_vlan_range, vlanID_range
3419 )
3420 )
3421
3422 if end_vlanid > 4094:
3423 raise vimconn.VimConnConflictException(
3424 "Invalid VLAN range for {}: {}. End VLAN ID can not be "
3425 "greater than 4094. For VLAN networks valid IDs are 1 to 4094 ".format(
3426 text_vlan_range, vlanID_range
3427 )
3428 )
3429
3430 if start_vlanid > end_vlanid:
3431 raise vimconn.VimConnConflictException(
3432 "Invalid VLAN range for {}: {}. You must provide '{}'"
3433 " in format start_ID - end_ID and start_ID < end_ID ".format(
3434 text_vlan_range, vlanID_range, text_vlan_range
3435 )
3436 )
3437
3438 # NOT USED FUNCTIONS
3439
3440 def new_external_port(self, port_data):
3441 """Adds a external port to VIM
3442 Returns the port identifier"""
3443 # TODO openstack if needed
3444 return (
3445 -vimconn.HTTP_Internal_Server_Error,
3446 "osconnector.new_external_port() not implemented",
3447 )
3448
3449 def connect_port_network(self, port_id, network_id, admin=False):
3450 """Connects a external port to a network
3451 Returns status code of the VIM response"""
3452 # TODO openstack if needed
3453 return (
3454 -vimconn.HTTP_Internal_Server_Error,
3455 "osconnector.connect_port_network() not implemented",
3456 )
3457
3458 def new_user(self, user_name, user_passwd, tenant_id=None):
3459 """Adds a new user to openstack VIM
3460 Returns the user identifier"""
3461 self.logger.debug("osconnector: Adding a new user to VIM")
3462
3463 try:
3464 self._reload_connection()
3465 user = self.keystone.users.create(
3466 user_name, password=user_passwd, default_project=tenant_id
3467 )
3468 # self.keystone.tenants.add_user(self.k_creds["username"], #role)
3469
3470 return user.id
3471 except ksExceptions.ConnectionError as e:
3472 error_value = -vimconn.HTTP_Bad_Request
3473 error_text = (
3474 type(e).__name__
3475 + ": "
3476 + (str(e) if len(e.args) == 0 else str(e.args[0]))
3477 )
3478 except ksExceptions.ClientException as e: # TODO remove
3479 error_value = -vimconn.HTTP_Bad_Request
3480 error_text = (
3481 type(e).__name__
3482 + ": "
3483 + (str(e) if len(e.args) == 0 else str(e.args[0]))
3484 )
3485
3486 # TODO insert exception vimconn.HTTP_Unauthorized
3487 # if reaching here is because an exception
3488 self.logger.debug("new_user " + error_text)
3489
3490 return error_value, error_text
3491
3492 def delete_user(self, user_id):
3493 """Delete a user from openstack VIM
3494 Returns the user identifier"""
3495 if self.debug:
3496 print("osconnector: Deleting a user from VIM")
3497
3498 try:
3499 self._reload_connection()
3500 self.keystone.users.delete(user_id)
3501
3502 return 1, user_id
3503 except ksExceptions.ConnectionError as e:
3504 error_value = -vimconn.HTTP_Bad_Request
3505 error_text = (
3506 type(e).__name__
3507 + ": "
3508 + (str(e) if len(e.args) == 0 else str(e.args[0]))
3509 )
3510 except ksExceptions.NotFound as e:
3511 error_value = -vimconn.HTTP_Not_Found
3512 error_text = (
3513 type(e).__name__
3514 + ": "
3515 + (str(e) if len(e.args) == 0 else str(e.args[0]))
3516 )
3517 except ksExceptions.ClientException as e: # TODO remove
3518 error_value = -vimconn.HTTP_Bad_Request
3519 error_text = (
3520 type(e).__name__
3521 + ": "
3522 + (str(e) if len(e.args) == 0 else str(e.args[0]))
3523 )
3524
3525 # TODO insert exception vimconn.HTTP_Unauthorized
3526 # if reaching here is because an exception
3527 self.logger.debug("delete_tenant " + error_text)
3528
3529 return error_value, error_text
3530
3531 def get_hosts_info(self):
3532 """Get the information of deployed hosts
3533 Returns the hosts content"""
3534 if self.debug:
3535 print("osconnector: Getting Host info from VIM")
3536
3537 try:
3538 h_list = []
3539 self._reload_connection()
3540 hypervisors = self.nova.hypervisors.list()
3541
3542 for hype in hypervisors:
3543 h_list.append(hype.to_dict())
3544
3545 return 1, {"hosts": h_list}
3546 except nvExceptions.NotFound as e:
3547 error_value = -vimconn.HTTP_Not_Found
3548 error_text = str(e) if len(e.args) == 0 else str(e.args[0])
3549 except (ksExceptions.ClientException, nvExceptions.ClientException) as e:
3550 error_value = -vimconn.HTTP_Bad_Request
3551 error_text = (
3552 type(e).__name__
3553 + ": "
3554 + (str(e) if len(e.args) == 0 else str(e.args[0]))
3555 )
3556
3557 # TODO insert exception vimconn.HTTP_Unauthorized
3558 # if reaching here is because an exception
3559 self.logger.debug("get_hosts_info " + error_text)
3560
3561 return error_value, error_text
3562
3563 def get_hosts(self, vim_tenant):
3564 """Get the hosts and deployed instances
3565 Returns the hosts content"""
3566 r, hype_dict = self.get_hosts_info()
3567
3568 if r < 0:
3569 return r, hype_dict
3570
3571 hypervisors = hype_dict["hosts"]
3572
3573 try:
3574 servers = self.nova.servers.list()
3575 for hype in hypervisors:
3576 for server in servers:
3577 if (
3578 server.to_dict()["OS-EXT-SRV-ATTR:hypervisor_hostname"]
3579 == hype["hypervisor_hostname"]
3580 ):
3581 if "vm" in hype:
3582 hype["vm"].append(server.id)
3583 else:
3584 hype["vm"] = [server.id]
3585
3586 return 1, hype_dict
3587 except nvExceptions.NotFound as e:
3588 error_value = -vimconn.HTTP_Not_Found
3589 error_text = str(e) if len(e.args) == 0 else str(e.args[0])
3590 except (ksExceptions.ClientException, nvExceptions.ClientException) as e:
3591 error_value = -vimconn.HTTP_Bad_Request
3592 error_text = (
3593 type(e).__name__
3594 + ": "
3595 + (str(e) if len(e.args) == 0 else str(e.args[0]))
3596 )
3597
3598 # TODO insert exception vimconn.HTTP_Unauthorized
3599 # if reaching here is because an exception
3600 self.logger.debug("get_hosts " + error_text)
3601
3602 return error_value, error_text
3603
3604 def new_classification(self, name, ctype, definition):
3605 self.logger.debug(
3606 "Adding a new (Traffic) Classification to VIM, named %s", name
3607 )
3608
3609 try:
3610 new_class = None
3611 self._reload_connection()
3612
3613 if ctype not in supportedClassificationTypes:
3614 raise vimconn.VimConnNotSupportedException(
3615 "OpenStack VIM connector does not support provided "
3616 "Classification Type {}, supported ones are: {}".format(
3617 ctype, supportedClassificationTypes
3618 )
3619 )
3620
3621 if not self._validate_classification(ctype, definition):
3622 raise vimconn.VimConnException(
3623 "Incorrect Classification definition for the type specified."
3624 )
3625
3626 classification_dict = definition
3627 classification_dict["name"] = name
3628 new_class = self.neutron.create_sfc_flow_classifier(
3629 {"flow_classifier": classification_dict}
3630 )
3631
3632 return new_class["flow_classifier"]["id"]
3633 except (
3634 neExceptions.ConnectionFailed,
3635 ksExceptions.ClientException,
3636 neExceptions.NeutronException,
3637 ConnectionError,
3638 ) as e:
3639 self.logger.error("Creation of Classification failed.")
3640 self._format_exception(e)
3641
3642 def get_classification(self, class_id):
3643 self.logger.debug(" Getting Classification %s from VIM", class_id)
3644 filter_dict = {"id": class_id}
3645 class_list = self.get_classification_list(filter_dict)
3646
3647 if len(class_list) == 0:
3648 raise vimconn.VimConnNotFoundException(
3649 "Classification '{}' not found".format(class_id)
3650 )
3651 elif len(class_list) > 1:
3652 raise vimconn.VimConnConflictException(
3653 "Found more than one Classification with this criteria"
3654 )
3655
3656 classification = class_list[0]
3657
3658 return classification
3659
3660 def get_classification_list(self, filter_dict={}):
3661 self.logger.debug(
3662 "Getting Classifications from VIM filter: '%s'", str(filter_dict)
3663 )
3664
3665 try:
3666 filter_dict_os = filter_dict.copy()
3667 self._reload_connection()
3668
3669 if self.api_version3 and "tenant_id" in filter_dict_os:
3670 filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")
3671
3672 classification_dict = self.neutron.list_sfc_flow_classifiers(
3673 **filter_dict_os
3674 )
3675 classification_list = classification_dict["flow_classifiers"]
3676 self.__classification_os2mano(classification_list)
3677
3678 return classification_list
3679 except (
3680 neExceptions.ConnectionFailed,
3681 ksExceptions.ClientException,
3682 neExceptions.NeutronException,
3683 ConnectionError,
3684 ) as e:
3685 self._format_exception(e)
3686
3687 def delete_classification(self, class_id):
3688 self.logger.debug("Deleting Classification '%s' from VIM", class_id)
3689
3690 try:
3691 self._reload_connection()
3692 self.neutron.delete_sfc_flow_classifier(class_id)
3693
3694 return class_id
3695 except (
3696 neExceptions.ConnectionFailed,
3697 neExceptions.NeutronException,
3698 ksExceptions.ClientException,
3699 neExceptions.NeutronException,
3700 ConnectionError,
3701 ) as e:
3702 self._format_exception(e)
3703
3704 def new_sfi(self, name, ingress_ports, egress_ports, sfc_encap=True):
3705 self.logger.debug(
3706 "Adding a new Service Function Instance to VIM, named '%s'", name
3707 )
3708
3709 try:
3710 new_sfi = None
3711 self._reload_connection()
3712 correlation = None
3713
3714 if sfc_encap:
3715 correlation = "nsh"
3716
3717 if len(ingress_ports) != 1:
3718 raise vimconn.VimConnNotSupportedException(
3719 "OpenStack VIM connector can only have 1 ingress port per SFI"
3720 )
3721
3722 if len(egress_ports) != 1:
3723 raise vimconn.VimConnNotSupportedException(
3724 "OpenStack VIM connector can only have 1 egress port per SFI"
3725 )
3726
3727 sfi_dict = {
3728 "name": name,
3729 "ingress": ingress_ports[0],
3730 "egress": egress_ports[0],
3731 "service_function_parameters": {"correlation": correlation},
3732 }
3733 new_sfi = self.neutron.create_sfc_port_pair({"port_pair": sfi_dict})
3734
3735 return new_sfi["port_pair"]["id"]
3736 except (
3737 neExceptions.ConnectionFailed,
3738 ksExceptions.ClientException,
3739 neExceptions.NeutronException,
3740 ConnectionError,
3741 ) as e:
3742 if new_sfi:
3743 try:
3744 self.neutron.delete_sfc_port_pair(new_sfi["port_pair"]["id"])
3745 except Exception:
3746 self.logger.error(
3747 "Creation of Service Function Instance failed, with "
3748 "subsequent deletion failure as well."
3749 )
3750
3751 self._format_exception(e)
3752
3753 def get_sfi(self, sfi_id):
3754 self.logger.debug("Getting Service Function Instance %s from VIM", sfi_id)
3755 filter_dict = {"id": sfi_id}
3756 sfi_list = self.get_sfi_list(filter_dict)
3757
3758 if len(sfi_list) == 0:
3759 raise vimconn.VimConnNotFoundException(
3760 "Service Function Instance '{}' not found".format(sfi_id)
3761 )
3762 elif len(sfi_list) > 1:
3763 raise vimconn.VimConnConflictException(
3764 "Found more than one Service Function Instance with this criteria"
3765 )
3766
3767 sfi = sfi_list[0]
3768
3769 return sfi
3770
3771 def get_sfi_list(self, filter_dict={}):
3772 self.logger.debug(
3773 "Getting Service Function Instances from VIM filter: '%s'", str(filter_dict)
3774 )
3775
3776 try:
3777 self._reload_connection()
3778 filter_dict_os = filter_dict.copy()
3779
3780 if self.api_version3 and "tenant_id" in filter_dict_os:
3781 filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")
3782
3783 sfi_dict = self.neutron.list_sfc_port_pairs(**filter_dict_os)
3784 sfi_list = sfi_dict["port_pairs"]
3785 self.__sfi_os2mano(sfi_list)
3786
3787 return sfi_list
3788 except (
3789 neExceptions.ConnectionFailed,
3790 ksExceptions.ClientException,
3791 neExceptions.NeutronException,
3792 ConnectionError,
3793 ) as e:
3794 self._format_exception(e)
3795
3796 def delete_sfi(self, sfi_id):
3797 self.logger.debug("Deleting Service Function Instance '%s' from VIM", sfi_id)
3798
3799 try:
3800 self._reload_connection()
3801 self.neutron.delete_sfc_port_pair(sfi_id)
3802
3803 return sfi_id
3804 except (
3805 neExceptions.ConnectionFailed,
3806 neExceptions.NeutronException,
3807 ksExceptions.ClientException,
3808 neExceptions.NeutronException,
3809 ConnectionError,
3810 ) as e:
3811 self._format_exception(e)
3812
3813 def new_sf(self, name, sfis, sfc_encap=True):
3814 self.logger.debug("Adding a new Service Function to VIM, named '%s'", name)
3815
3816 try:
3817 new_sf = None
3818 self._reload_connection()
3819 # correlation = None
3820 # if sfc_encap:
3821 # correlation = "nsh"
3822
3823 for instance in sfis:
3824 sfi = self.get_sfi(instance)
3825
3826 if sfi.get("sfc_encap") != sfc_encap:
3827 raise vimconn.VimConnNotSupportedException(
3828 "OpenStack VIM connector requires all SFIs of the "
3829 "same SF to share the same SFC Encapsulation"
3830 )
3831
3832 sf_dict = {"name": name, "port_pairs": sfis}
3833 new_sf = self.neutron.create_sfc_port_pair_group(
3834 {"port_pair_group": sf_dict}
3835 )
3836
3837 return new_sf["port_pair_group"]["id"]
3838 except (
3839 neExceptions.ConnectionFailed,
3840 ksExceptions.ClientException,
3841 neExceptions.NeutronException,
3842 ConnectionError,
3843 ) as e:
3844 if new_sf:
3845 try:
3846 self.neutron.delete_sfc_port_pair_group(
3847 new_sf["port_pair_group"]["id"]
3848 )
3849 except Exception:
3850 self.logger.error(
3851 "Creation of Service Function failed, with "
3852 "subsequent deletion failure as well."
3853 )
3854
3855 self._format_exception(e)
3856
3857 def get_sf(self, sf_id):
3858 self.logger.debug("Getting Service Function %s from VIM", sf_id)
3859 filter_dict = {"id": sf_id}
3860 sf_list = self.get_sf_list(filter_dict)
3861
3862 if len(sf_list) == 0:
3863 raise vimconn.VimConnNotFoundException(
3864 "Service Function '{}' not found".format(sf_id)
3865 )
3866 elif len(sf_list) > 1:
3867 raise vimconn.VimConnConflictException(
3868 "Found more than one Service Function with this criteria"
3869 )
3870
3871 sf = sf_list[0]
3872
3873 return sf
3874
3875 def get_sf_list(self, filter_dict={}):
3876 self.logger.debug(
3877 "Getting Service Function from VIM filter: '%s'", str(filter_dict)
3878 )
3879
3880 try:
3881 self._reload_connection()
3882 filter_dict_os = filter_dict.copy()
3883
3884 if self.api_version3 and "tenant_id" in filter_dict_os:
3885 filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")
3886
3887 sf_dict = self.neutron.list_sfc_port_pair_groups(**filter_dict_os)
3888 sf_list = sf_dict["port_pair_groups"]
3889 self.__sf_os2mano(sf_list)
3890
3891 return sf_list
3892 except (
3893 neExceptions.ConnectionFailed,
3894 ksExceptions.ClientException,
3895 neExceptions.NeutronException,
3896 ConnectionError,
3897 ) as e:
3898 self._format_exception(e)
3899
3900 def delete_sf(self, sf_id):
3901 self.logger.debug("Deleting Service Function '%s' from VIM", sf_id)
3902
3903 try:
3904 self._reload_connection()
3905 self.neutron.delete_sfc_port_pair_group(sf_id)
3906
3907 return sf_id
3908 except (
3909 neExceptions.ConnectionFailed,
3910 neExceptions.NeutronException,
3911 ksExceptions.ClientException,
3912 neExceptions.NeutronException,
3913 ConnectionError,
3914 ) as e:
3915 self._format_exception(e)
3916
3917 def new_sfp(self, name, classifications, sfs, sfc_encap=True, spi=None):
3918 self.logger.debug("Adding a new Service Function Path to VIM, named '%s'", name)
3919
3920 try:
3921 new_sfp = None
3922 self._reload_connection()
3923 # In networking-sfc the MPLS encapsulation is legacy
3924 # should be used when no full SFC Encapsulation is intended
3925 correlation = "mpls"
3926
3927 if sfc_encap:
3928 correlation = "nsh"
3929
3930 sfp_dict = {
3931 "name": name,
3932 "flow_classifiers": classifications,
3933 "port_pair_groups": sfs,
3934 "chain_parameters": {"correlation": correlation},
3935 }
3936
3937 if spi:
3938 sfp_dict["chain_id"] = spi
3939
3940 new_sfp = self.neutron.create_sfc_port_chain({"port_chain": sfp_dict})
3941
3942 return new_sfp["port_chain"]["id"]
3943 except (
3944 neExceptions.ConnectionFailed,
3945 ksExceptions.ClientException,
3946 neExceptions.NeutronException,
3947 ConnectionError,
3948 ) as e:
3949 if new_sfp:
3950 try:
3951 self.neutron.delete_sfc_port_chain(new_sfp["port_chain"]["id"])
3952 except Exception:
3953 self.logger.error(
3954 "Creation of Service Function Path failed, with "
3955 "subsequent deletion failure as well."
3956 )
3957
3958 self._format_exception(e)
3959
3960 def get_sfp(self, sfp_id):
3961 self.logger.debug(" Getting Service Function Path %s from VIM", sfp_id)
3962
3963 filter_dict = {"id": sfp_id}
3964 sfp_list = self.get_sfp_list(filter_dict)
3965
3966 if len(sfp_list) == 0:
3967 raise vimconn.VimConnNotFoundException(
3968 "Service Function Path '{}' not found".format(sfp_id)
3969 )
3970 elif len(sfp_list) > 1:
3971 raise vimconn.VimConnConflictException(
3972 "Found more than one Service Function Path with this criteria"
3973 )
3974
3975 sfp = sfp_list[0]
3976
3977 return sfp
3978
3979 def get_sfp_list(self, filter_dict={}):
3980 self.logger.debug(
3981 "Getting Service Function Paths from VIM filter: '%s'", str(filter_dict)
3982 )
3983
3984 try:
3985 self._reload_connection()
3986 filter_dict_os = filter_dict.copy()
3987
3988 if self.api_version3 and "tenant_id" in filter_dict_os:
3989 filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")
3990
3991 sfp_dict = self.neutron.list_sfc_port_chains(**filter_dict_os)
3992 sfp_list = sfp_dict["port_chains"]
3993 self.__sfp_os2mano(sfp_list)
3994
3995 return sfp_list
3996 except (
3997 neExceptions.ConnectionFailed,
3998 ksExceptions.ClientException,
3999 neExceptions.NeutronException,
4000 ConnectionError,
4001 ) as e:
4002 self._format_exception(e)
4003
4004 def delete_sfp(self, sfp_id):
4005 self.logger.debug("Deleting Service Function Path '%s' from VIM", sfp_id)
4006
4007 try:
4008 self._reload_connection()
4009 self.neutron.delete_sfc_port_chain(sfp_id)
4010
4011 return sfp_id
4012 except (
4013 neExceptions.ConnectionFailed,
4014 neExceptions.NeutronException,
4015 ksExceptions.ClientException,
4016 neExceptions.NeutronException,
4017 ConnectionError,
4018 ) as e:
4019 self._format_exception(e)
4020
4021 def refresh_sfps_status(self, sfp_list):
4022 """Get the status of the service function path
4023 Params: the list of sfp identifiers
4024 Returns a dictionary with:
4025 vm_id: #VIM id of this service function path
4026 status: #Mandatory. Text with one of:
4027 # DELETED (not found at vim)
4028 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
4029 # OTHER (Vim reported other status not understood)
4030 # ERROR (VIM indicates an ERROR status)
4031 # ACTIVE,
4032 # CREATING (on building process)
4033 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
4034 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)F
4035 """
4036 sfp_dict = {}
4037 self.logger.debug(
4038 "refresh_sfps status: Getting tenant SFP information from VIM"
4039 )
4040
4041 for sfp_id in sfp_list:
4042 sfp = {}
4043
4044 try:
4045 sfp_vim = self.get_sfp(sfp_id)
4046
4047 if sfp_vim["spi"]:
4048 sfp["status"] = vmStatus2manoFormat["ACTIVE"]
4049 else:
4050 sfp["status"] = "OTHER"
4051 sfp["error_msg"] = "VIM status reported " + sfp["status"]
4052
4053 sfp["vim_info"] = self.serialize(sfp_vim)
4054
4055 if sfp_vim.get("fault"):
4056 sfp["error_msg"] = str(sfp_vim["fault"])
4057 except vimconn.VimConnNotFoundException as e:
4058 self.logger.error("Exception getting sfp status: %s", str(e))
4059 sfp["status"] = "DELETED"
4060 sfp["error_msg"] = str(e)
4061 except vimconn.VimConnException as e:
4062 self.logger.error("Exception getting sfp status: %s", str(e))
4063 sfp["status"] = "VIM_ERROR"
4064 sfp["error_msg"] = str(e)
4065
4066 sfp_dict[sfp_id] = sfp
4067
4068 return sfp_dict
4069
4070 def refresh_sfis_status(self, sfi_list):
4071 """Get the status of the service function instances
4072 Params: the list of sfi identifiers
4073 Returns a dictionary with:
4074 vm_id: #VIM id of this service function instance
4075 status: #Mandatory. Text with one of:
4076 # DELETED (not found at vim)
4077 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
4078 # OTHER (Vim reported other status not understood)
4079 # ERROR (VIM indicates an ERROR status)
4080 # ACTIVE,
4081 # CREATING (on building process)
4082 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
4083 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
4084 """
4085 sfi_dict = {}
4086 self.logger.debug(
4087 "refresh_sfis status: Getting tenant sfi information from VIM"
4088 )
4089
4090 for sfi_id in sfi_list:
4091 sfi = {}
4092
4093 try:
4094 sfi_vim = self.get_sfi(sfi_id)
4095
4096 if sfi_vim:
4097 sfi["status"] = vmStatus2manoFormat["ACTIVE"]
4098 else:
4099 sfi["status"] = "OTHER"
4100 sfi["error_msg"] = "VIM status reported " + sfi["status"]
4101
4102 sfi["vim_info"] = self.serialize(sfi_vim)
4103
4104 if sfi_vim.get("fault"):
4105 sfi["error_msg"] = str(sfi_vim["fault"])
4106 except vimconn.VimConnNotFoundException as e:
4107 self.logger.error("Exception getting sfi status: %s", str(e))
4108 sfi["status"] = "DELETED"
4109 sfi["error_msg"] = str(e)
4110 except vimconn.VimConnException as e:
4111 self.logger.error("Exception getting sfi status: %s", str(e))
4112 sfi["status"] = "VIM_ERROR"
4113 sfi["error_msg"] = str(e)
4114
4115 sfi_dict[sfi_id] = sfi
4116
4117 return sfi_dict
4118
4119 def refresh_sfs_status(self, sf_list):
4120 """Get the status of the service functions
4121 Params: the list of sf identifiers
4122 Returns a dictionary with:
4123 vm_id: #VIM id of this service function
4124 status: #Mandatory. Text with one of:
4125 # DELETED (not found at vim)
4126 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
4127 # OTHER (Vim reported other status not understood)
4128 # ERROR (VIM indicates an ERROR status)
4129 # ACTIVE,
4130 # CREATING (on building process)
4131 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
4132 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
4133 """
4134 sf_dict = {}
4135 self.logger.debug("refresh_sfs status: Getting tenant sf information from VIM")
4136
4137 for sf_id in sf_list:
4138 sf = {}
4139
4140 try:
4141 sf_vim = self.get_sf(sf_id)
4142
4143 if sf_vim:
4144 sf["status"] = vmStatus2manoFormat["ACTIVE"]
4145 else:
4146 sf["status"] = "OTHER"
4147 sf["error_msg"] = "VIM status reported " + sf_vim["status"]
4148
4149 sf["vim_info"] = self.serialize(sf_vim)
4150
4151 if sf_vim.get("fault"):
4152 sf["error_msg"] = str(sf_vim["fault"])
4153 except vimconn.VimConnNotFoundException as e:
4154 self.logger.error("Exception getting sf status: %s", str(e))
4155 sf["status"] = "DELETED"
4156 sf["error_msg"] = str(e)
4157 except vimconn.VimConnException as e:
4158 self.logger.error("Exception getting sf status: %s", str(e))
4159 sf["status"] = "VIM_ERROR"
4160 sf["error_msg"] = str(e)
4161
4162 sf_dict[sf_id] = sf
4163
4164 return sf_dict
4165
4166 def refresh_classifications_status(self, classification_list):
4167 """Get the status of the classifications
4168 Params: the list of classification identifiers
4169 Returns a dictionary with:
4170 vm_id: #VIM id of this classifier
4171 status: #Mandatory. Text with one of:
4172 # DELETED (not found at vim)
4173 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
4174 # OTHER (Vim reported other status not understood)
4175 # ERROR (VIM indicates an ERROR status)
4176 # ACTIVE,
4177 # CREATING (on building process)
4178 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
4179 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
4180 """
4181 classification_dict = {}
4182 self.logger.debug(
4183 "refresh_classifications status: Getting tenant classification information from VIM"
4184 )
4185
4186 for classification_id in classification_list:
4187 classification = {}
4188
4189 try:
4190 classification_vim = self.get_classification(classification_id)
4191
4192 if classification_vim:
4193 classification["status"] = vmStatus2manoFormat["ACTIVE"]
4194 else:
4195 classification["status"] = "OTHER"
4196 classification["error_msg"] = (
4197 "VIM status reported " + classification["status"]
4198 )
4199
4200 classification["vim_info"] = self.serialize(classification_vim)
4201
4202 if classification_vim.get("fault"):
4203 classification["error_msg"] = str(classification_vim["fault"])
4204 except vimconn.VimConnNotFoundException as e:
4205 self.logger.error("Exception getting classification status: %s", str(e))
4206 classification["status"] = "DELETED"
4207 classification["error_msg"] = str(e)
4208 except vimconn.VimConnException as e:
4209 self.logger.error("Exception getting classification status: %s", str(e))
4210 classification["status"] = "VIM_ERROR"
4211 classification["error_msg"] = str(e)
4212
4213 classification_dict[classification_id] = classification
4214
4215 return classification_dict
4216
4217 def new_affinity_group(self, affinity_group_data):
4218 """Adds a server group to VIM
4219 affinity_group_data contains a dictionary with information, keys:
4220 name: name in VIM for the server group
4221 type: affinity or anti-affinity
4222 scope: Only nfvi-node allowed
4223 Returns the server group identifier"""
4224 self.logger.debug("Adding Server Group '%s'", str(affinity_group_data))
4225
4226 try:
4227 name = affinity_group_data["name"]
4228 policy = affinity_group_data["type"]
4229
4230 self._reload_connection()
4231 new_server_group = self.nova.server_groups.create(name, policy)
4232
4233 return new_server_group.id
4234 except (
4235 ksExceptions.ClientException,
4236 nvExceptions.ClientException,
4237 ConnectionError,
4238 KeyError,
4239 ) as e:
4240 self._format_exception(e)
4241
4242 def get_affinity_group(self, affinity_group_id):
4243 """Obtain server group details from the VIM. Returns the server group detais as a dict"""
4244 self.logger.debug("Getting flavor '%s'", affinity_group_id)
4245 try:
4246 self._reload_connection()
4247 server_group = self.nova.server_groups.find(id=affinity_group_id)
4248
4249 return server_group.to_dict()
4250 except (
4251 nvExceptions.NotFound,
4252 nvExceptions.ClientException,
4253 ksExceptions.ClientException,
4254 ConnectionError,
4255 ) as e:
4256 self._format_exception(e)
4257
4258 def delete_affinity_group(self, affinity_group_id):
4259 """Deletes a server group from the VIM. Returns the old affinity_group_id"""
4260 self.logger.debug("Getting server group '%s'", affinity_group_id)
4261 try:
4262 self._reload_connection()
4263 self.nova.server_groups.delete(affinity_group_id)
4264
4265 return affinity_group_id
4266 except (
4267 nvExceptions.NotFound,
4268 ksExceptions.ClientException,
4269 nvExceptions.ClientException,
4270 ConnectionError,
4271 ) as e:
4272 self._format_exception(e)
4273
4274 def get_vdu_state(self, vm_id):
4275 """
4276 Getting the state of a vdu
4277 param:
4278 vm_id: ID of an instance
4279 """
4280 self.logger.debug("Getting the status of VM")
4281 self.logger.debug("VIM VM ID %s", vm_id)
4282 self._reload_connection()
4283 server = self.nova.servers.find(id=vm_id)
4284 server_dict = server.to_dict()
4285 vdu_data = [
4286 server_dict["status"],
4287 server_dict["flavor"]["id"],
4288 server_dict["OS-EXT-SRV-ATTR:host"],
4289 server_dict["OS-EXT-AZ:availability_zone"],
4290 ]
4291 self.logger.debug("vdu_data %s", vdu_data)
4292 return vdu_data
4293
4294 def check_compute_availability(self, host, server_flavor_details):
4295 self._reload_connection()
4296 hypervisor_search = self.nova.hypervisors.search(
4297 hypervisor_match=host, servers=True
4298 )
4299 for hypervisor in hypervisor_search:
4300 hypervisor_id = hypervisor.to_dict()["id"]
4301 hypervisor_details = self.nova.hypervisors.get(hypervisor=hypervisor_id)
4302 hypervisor_dict = hypervisor_details.to_dict()
4303 hypervisor_temp = json.dumps(hypervisor_dict)
4304 hypervisor_json = json.loads(hypervisor_temp)
4305 resources_available = [
4306 hypervisor_json["free_ram_mb"],
4307 hypervisor_json["disk_available_least"],
4308 hypervisor_json["vcpus"] - hypervisor_json["vcpus_used"],
4309 ]
4310 compute_available = all(
4311 x > y for x, y in zip(resources_available, server_flavor_details)
4312 )
4313 if compute_available:
4314 return host
4315
4316 def check_availability_zone(
4317 self, old_az, server_flavor_details, old_host, host=None
4318 ):
4319 self._reload_connection()
4320 az_check = {"zone_check": False, "compute_availability": None}
4321 aggregates_list = self.nova.aggregates.list()
4322 for aggregate in aggregates_list:
4323 aggregate_details = aggregate.to_dict()
4324 aggregate_temp = json.dumps(aggregate_details)
4325 aggregate_json = json.loads(aggregate_temp)
4326 if aggregate_json["availability_zone"] == old_az:
4327 hosts_list = aggregate_json["hosts"]
4328 if host is not None:
4329 if host in hosts_list:
4330 az_check["zone_check"] = True
4331 available_compute_id = self.check_compute_availability(
4332 host, server_flavor_details
4333 )
4334 if available_compute_id is not None:
4335 az_check["compute_availability"] = available_compute_id
4336 else:
4337 for check_host in hosts_list:
4338 if check_host != old_host:
4339 available_compute_id = self.check_compute_availability(
4340 check_host, server_flavor_details
4341 )
4342 if available_compute_id is not None:
4343 az_check["zone_check"] = True
4344 az_check["compute_availability"] = available_compute_id
4345 break
4346 else:
4347 az_check["zone_check"] = True
4348 return az_check
4349
    def migrate_instance(self, vm_id, compute_host=None):
        """
        Migrate a vdu

        Live-migrates the instance within its current availability zone,
        either to an explicit compute_host or to any host of the zone with
        enough free resources.

        param:
            vm_id: ID of an instance
            compute_host: Host to migrate the vdu to
        Returns ("MIGRATING", target host) on success.
        Raises vimconn.VimConnException when the target equals the current
        host, lies outside the availability zone, lacks resources, or the
        instance did not land on the expected host.
        """
        self._reload_connection()
        vm_state = False
        # get_vdu_state returns [status, flavor id, host, availability zone].
        instance_state = self.get_vdu_state(vm_id)
        server_flavor_id = instance_state[1]
        server_hypervisor_name = instance_state[2]
        server_availability_zone = instance_state[3]
        try:
            server_flavor = self.nova.flavors.find(id=server_flavor_id).to_dict()
            # Required resources of the current flavor, in the
            # [ram, disk, vcpus] order expected by check_availability_zone.
            server_flavor_details = [
                server_flavor["ram"],
                server_flavor["disk"],
                server_flavor["vcpus"],
            ]
            # Migrating onto the host the instance already runs on is an error.
            if compute_host == server_hypervisor_name:
                raise vimconn.VimConnException(
                    "Unable to migrate instance '{}' to the same host '{}'".format(
                        vm_id, compute_host
                    ),
                    http_code=vimconn.HTTP_Bad_Request,
                )
            # Validate the target (or find one) inside the same availability
            # zone and with enough free ram/disk/vcpus.
            az_status = self.check_availability_zone(
                server_availability_zone,
                server_flavor_details,
                server_hypervisor_name,
                compute_host,
            )
            availability_zone_check = az_status["zone_check"]
            available_compute_id = az_status.get("compute_availability")

            if availability_zone_check is False:
                raise vimconn.VimConnException(
                    "Unable to migrate instance '{}' to a different availability zone".format(
                        vm_id
                    ),
                    http_code=vimconn.HTTP_Bad_Request,
                )
            if available_compute_id is not None:
                # Trigger the live migration towards the validated host.
                self.nova.servers.live_migrate(
                    server=vm_id,
                    host=available_compute_id,
                    block_migration=True,
                    disk_over_commit=False,
                )
                state = "MIGRATING"
                changed_compute_host = ""
                # NOTE(review): state was set to "MIGRATING" just above, so
                # this condition is always true; kept for behavior parity.
                if state == "MIGRATING":
                    # Wait for the instance to become ACTIVE again, then read
                    # back the compute host it ended up on.
                    vm_state = self.__wait_for_vm(vm_id, "ACTIVE")
                    changed_compute_host = self.get_vdu_state(vm_id)[2]
                if vm_state and changed_compute_host == available_compute_id:
                    self.logger.debug(
                        "Instance '{}' migrated to the new compute host '{}'".format(
                            vm_id, changed_compute_host
                        )
                    )
                    return state, available_compute_id
                else:
                    raise vimconn.VimConnException(
                        "Migration Failed. Instance '{}' not moved to the new host {}".format(
                            vm_id, available_compute_id
                        ),
                        http_code=vimconn.HTTP_Bad_Request,
                    )
            else:
                # available_compute_id is None here, so the message prints
                # "None" for the compute name.
                raise vimconn.VimConnException(
                    "Compute '{}' not available or does not have enough resources to migrate the instance".format(
                        available_compute_id
                    ),
                    http_code=vimconn.HTTP_Bad_Request,
                )
        except (
            nvExceptions.BadRequest,
            nvExceptions.ClientException,
            nvExceptions.NotFound,
        ) as e:
            self._format_exception(e)
4432
    def resize_instance(self, vm_id, new_flavor_id):
        """
        For resizing the vm based on the given
        flavor details
        param:
            vm_id : ID of an instance
            new_flavor_id : Flavor id to be resized
        Return the status of a resized instance

        Raises (via _format_exception) when the instance is not in
        ACTIVE/SHUTOFF state, when the new flavor has a smaller disk, or when
        the resize does not reach VERIFY_RESIZE.
        """
        self._reload_connection()
        self.logger.debug("resize the flavor of an instance")
        # get_vdu_state returns [status, flavor id, host, availability zone].
        instance_status, old_flavor_id, compute_host, az = self.get_vdu_state(vm_id)
        old_flavor_disk = self.nova.flavors.find(id=old_flavor_id).to_dict()["disk"]
        new_flavor_disk = self.nova.flavors.find(id=new_flavor_id).to_dict()["disk"]
        try:
            # Nova only allows resize from ACTIVE or SHUTOFF.
            if instance_status == "ACTIVE" or instance_status == "SHUTOFF":
                # Shrinking the disk is rejected up front: OpenStack cannot
                # resize an instance to a flavor with a smaller disk.
                if old_flavor_disk > new_flavor_disk:
                    raise nvExceptions.BadRequest(
                        400,
                        message="Server disk resize failed. Resize to lower disk flavor is not allowed",
                    )
                else:
                    self.nova.servers.resize(server=vm_id, flavor=new_flavor_id)
                    # Wait until the instance reaches VERIFY_RESIZE, then
                    # confirm so it returns to ACTIVE.
                    vm_state = self.__wait_for_vm(vm_id, "VERIFY_RESIZE")
                    if vm_state:
                        instance_resized_status = self.confirm_resize(vm_id)
                        return instance_resized_status
                    else:
                        raise nvExceptions.BadRequest(
                            409,
                            message="Cannot 'resize' vm_state is in ERROR",
                        )

            else:
                self.logger.debug("ERROR : Instance is not in ACTIVE or SHUTOFF state")
                raise nvExceptions.BadRequest(
                    409,
                    message="Cannot 'resize' instance while it is in vm_state resized",
                )
        except (
            nvExceptions.BadRequest,
            nvExceptions.ClientException,
            nvExceptions.NotFound,
        ) as e:
            # Raised errors above are converted into vimconn exceptions here.
            self._format_exception(e)
4478
4479 def confirm_resize(self, vm_id):
4480 """
4481 Confirm the resize of an instance
4482 param:
4483 vm_id: ID of an instance
4484 """
4485 self._reload_connection()
4486 self.nova.servers.confirm_resize(server=vm_id)
4487 if self.get_vdu_state(vm_id)[0] == "VERIFY_RESIZE":
4488 self.__wait_for_vm(vm_id, "ACTIVE")
4489 instance_status = self.get_vdu_state(vm_id)[0]
4490 return instance_status