f509db6606dacd302d4e374d250467dfacb20610
[osm/RO.git] / RO-VIM-openstack / osm_rovim_openstack / vimconn_openstack.py
1 # -*- coding: utf-8 -*-
2
3 ##
4 # Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
5 # This file is part of openmano
6 # All Rights Reserved.
7 #
8 # Licensed under the Apache License, Version 2.0 (the "License"); you may
9 # not use this file except in compliance with the License. You may obtain
10 # a copy of the License at
11 #
12 # http://www.apache.org/licenses/LICENSE-2.0
13 #
14 # Unless required by applicable law or agreed to in writing, software
15 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
16 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
17 # License for the specific language governing permissions and limitations
18 # under the License.
19 ##
20
21 """
22 osconnector implements all the methods to interact with openstack using the python-neutronclient.
23
24 For the VNF forwarding graph, The OpenStack VIM connector calls the
25 networking-sfc Neutron extension methods, whose resources are mapped
26 to the VIM connector's SFC resources as follows:
27 - Classification (OSM) -> Flow Classifier (Neutron)
28 - Service Function Instance (OSM) -> Port Pair (Neutron)
29 - Service Function (OSM) -> Port Pair Group (Neutron)
30 - Service Function Path (OSM) -> Port Chain (Neutron)
31 """
32
33 import copy
34 from http.client import HTTPException
35 import json
36 import logging
37 from pprint import pformat
38 import random
39 import re
40 import time
41 from typing import Dict, Optional, Tuple
42
43 from cinderclient import client as cClient
44 from glanceclient import client as glClient
45 import glanceclient.exc as gl1Exceptions
46 from keystoneauth1 import session
47 from keystoneauth1.identity import v2, v3
48 import keystoneclient.exceptions as ksExceptions
49 import keystoneclient.v2_0.client as ksClient_v2
50 import keystoneclient.v3.client as ksClient_v3
51 import netaddr
52 from neutronclient.common import exceptions as neExceptions
53 from neutronclient.neutron import client as neClient
54 from novaclient import client as nClient, exceptions as nvExceptions
55 from osm_ro_plugin import vimconn
56 from requests.exceptions import ConnectionError
57 import yaml
58
__author__ = "Alfonso Tierno, Gerardo Garcia, Pablo Montes, xFlow Research, Igor D.C., Eduardo Sousa"
__date__ = "$22-sep-2017 23:59:59$"

"""contain the openstack virtual machine status to openmano status"""
# Mapping of openstack VM status values to openmano (mano) status values
vmStatus2manoFormat = {
    "ACTIVE": "ACTIVE",
    "PAUSED": "PAUSED",
    "SUSPENDED": "SUSPENDED",
    "SHUTOFF": "INACTIVE",
    "BUILD": "BUILD",
    "ERROR": "ERROR",
    "DELETED": "DELETED",
}
# Mapping of openstack network status values to openmano status values
netStatus2manoFormat = {
    "ACTIVE": "ACTIVE",
    "PAUSED": "PAUSED",
    "INACTIVE": "INACTIVE",
    "BUILD": "BUILD",
    "ERROR": "ERROR",
    "DELETED": "DELETED",
}

# SFC classification types accepted by this connector
supportedClassificationTypes = ["legacy_flow_classifier"]

# global var to have a timeout (seconds) creating and deleting volumes
volume_timeout = 1800
# timeout (seconds) for server (VM) create/delete operations
server_timeout = 1800
87
class SafeDumper(yaml.SafeDumper):
    """YAML safe dumper that also accepts dict subclasses.

    OpenStack API clients return custom subclasses of dict, which PyYAML's
    safe dumper refuses to represent (see pyyaml issue 142); normalize such
    objects to plain dicts before delegating to the base representer.
    """

    def represent_data(self, data):
        if isinstance(data, dict) and type(data) is not dict:
            data = {key: value for key, value in data.items()}

        return super().represent_data(data)
97
98
99 class vimconnector(vimconn.VimConnector):
100 def __init__(
101 self,
102 uuid,
103 name,
104 tenant_id,
105 tenant_name,
106 url,
107 url_admin=None,
108 user=None,
109 passwd=None,
110 log_level=None,
111 config={},
112 persistent_info={},
113 ):
114 """using common constructor parameters. In this case
115 'url' is the keystone authorization url,
116 'url_admin' is not use
117 """
118 api_version = config.get("APIversion")
119
120 if api_version and api_version not in ("v3.3", "v2.0", "2", "3"):
121 raise vimconn.VimConnException(
122 "Invalid value '{}' for config:APIversion. "
123 "Allowed values are 'v3.3', 'v2.0', '2' or '3'".format(api_version)
124 )
125
126 vim_type = config.get("vim_type")
127
128 if vim_type and vim_type not in ("vio", "VIO"):
129 raise vimconn.VimConnException(
130 "Invalid value '{}' for config:vim_type."
131 "Allowed values are 'vio' or 'VIO'".format(vim_type)
132 )
133
134 if config.get("dataplane_net_vlan_range") is not None:
135 # validate vlan ranges provided by user
136 self._validate_vlan_ranges(
137 config.get("dataplane_net_vlan_range"), "dataplane_net_vlan_range"
138 )
139
140 if config.get("multisegment_vlan_range") is not None:
141 # validate vlan ranges provided by user
142 self._validate_vlan_ranges(
143 config.get("multisegment_vlan_range"), "multisegment_vlan_range"
144 )
145
146 vimconn.VimConnector.__init__(
147 self,
148 uuid,
149 name,
150 tenant_id,
151 tenant_name,
152 url,
153 url_admin,
154 user,
155 passwd,
156 log_level,
157 config,
158 )
159
160 if self.config.get("insecure") and self.config.get("ca_cert"):
161 raise vimconn.VimConnException(
162 "options insecure and ca_cert are mutually exclusive"
163 )
164
165 self.verify = True
166
167 if self.config.get("insecure"):
168 self.verify = False
169
170 if self.config.get("ca_cert"):
171 self.verify = self.config.get("ca_cert")
172
173 if not url:
174 raise TypeError("url param can not be NoneType")
175
176 self.persistent_info = persistent_info
177 self.availability_zone = persistent_info.get("availability_zone", None)
178 self.session = persistent_info.get("session", {"reload_client": True})
179 self.my_tenant_id = self.session.get("my_tenant_id")
180 self.nova = self.session.get("nova")
181 self.neutron = self.session.get("neutron")
182 self.cinder = self.session.get("cinder")
183 self.glance = self.session.get("glance")
184 # self.glancev1 = self.session.get("glancev1")
185 self.keystone = self.session.get("keystone")
186 self.api_version3 = self.session.get("api_version3")
187 self.vim_type = self.config.get("vim_type")
188
189 if self.vim_type:
190 self.vim_type = self.vim_type.upper()
191
192 if self.config.get("use_internal_endpoint"):
193 self.endpoint_type = "internalURL"
194 else:
195 self.endpoint_type = None
196
197 logging.getLogger("urllib3").setLevel(logging.WARNING)
198 logging.getLogger("keystoneauth").setLevel(logging.WARNING)
199 logging.getLogger("novaclient").setLevel(logging.WARNING)
200 self.logger = logging.getLogger("ro.vim.openstack")
201
202 # allow security_groups to be a list or a single string
203 if isinstance(self.config.get("security_groups"), str):
204 self.config["security_groups"] = [self.config["security_groups"]]
205
206 self.security_groups_id = None
207
208 # ###### VIO Specific Changes #########
209 if self.vim_type == "VIO":
210 self.logger = logging.getLogger("ro.vim.vio")
211
212 if log_level:
213 self.logger.setLevel(getattr(logging, log_level))
214
215 def __getitem__(self, index):
216 """Get individuals parameters.
217 Throw KeyError"""
218 if index == "project_domain_id":
219 return self.config.get("project_domain_id")
220 elif index == "user_domain_id":
221 return self.config.get("user_domain_id")
222 else:
223 return vimconn.VimConnector.__getitem__(self, index)
224
225 def __setitem__(self, index, value):
226 """Set individuals parameters and it is marked as dirty so to force connection reload.
227 Throw KeyError"""
228 if index == "project_domain_id":
229 self.config["project_domain_id"] = value
230 elif index == "user_domain_id":
231 self.config["user_domain_id"] = value
232 else:
233 vimconn.VimConnector.__setitem__(self, index, value)
234
235 self.session["reload_client"] = True
236
237 def serialize(self, value):
238 """Serialization of python basic types.
239
240 In the case value is not serializable a message will be logged and a
241 simple representation of the data that cannot be converted back to
242 python is returned.
243 """
244 if isinstance(value, str):
245 return value
246
247 try:
248 return yaml.dump(
249 value, Dumper=SafeDumper, default_flow_style=True, width=256
250 )
251 except yaml.representer.RepresenterError:
252 self.logger.debug(
253 "The following entity cannot be serialized in YAML:\n\n%s\n\n",
254 pformat(value),
255 exc_info=True,
256 )
257
258 return str(value)
259
    def _reload_connection(self):
        """Called before any operation, it check if credentials has changed.

        Rebuilds keystone/nova/neutron/cinder/glance clients when the session
        is flagged dirty ("reload_client"), caching them in self.session and
        self.persistent_info for reuse across instantiations.
        Throw keystoneclient.apiclient.exceptions.AuthorizationFailure
        """
        # TODO control the timing and possible token timeout, but it seams that python client does this task for us :-)
        if self.session["reload_client"]:
            # decide keystone API version: explicit config wins, otherwise
            # inferred from the auth url suffix
            if self.config.get("APIversion"):
                self.api_version3 = (
                    self.config["APIversion"] == "v3.3"
                    or self.config["APIversion"] == "3"
                )
            else:  # get from ending auth_url that end with v3 or with v2.0
                self.api_version3 = self.url.endswith("/v3") or self.url.endswith(
                    "/v3/"
                )

            self.session["api_version3"] = self.api_version3

            if self.api_version3:
                # domain defaults only apply when the user did not give any
                # explicit domain id/name
                if self.config.get("project_domain_id") or self.config.get(
                    "project_domain_name"
                ):
                    project_domain_id_default = None
                else:
                    project_domain_id_default = "default"

                if self.config.get("user_domain_id") or self.config.get(
                    "user_domain_name"
                ):
                    user_domain_id_default = None
                else:
                    user_domain_id_default = "default"
                auth = v3.Password(
                    auth_url=self.url,
                    username=self.user,
                    password=self.passwd,
                    project_name=self.tenant_name,
                    project_id=self.tenant_id,
                    project_domain_id=self.config.get(
                        "project_domain_id", project_domain_id_default
                    ),
                    user_domain_id=self.config.get(
                        "user_domain_id", user_domain_id_default
                    ),
                    project_domain_name=self.config.get("project_domain_name"),
                    user_domain_name=self.config.get("user_domain_name"),
                )
            else:
                auth = v2.Password(
                    auth_url=self.url,
                    username=self.user,
                    password=self.passwd,
                    tenant_name=self.tenant_name,
                    tenant_id=self.tenant_id,
                )

            sess = session.Session(auth=auth, verify=self.verify)
            # added region_name to keystone, nova, neutron and cinder to support distributed cloud for Wind River
            # Titanium cloud and StarlingX
            region_name = self.config.get("region_name")

            if self.api_version3:
                self.keystone = ksClient_v3.Client(
                    session=sess,
                    endpoint_type=self.endpoint_type,
                    region_name=region_name,
                )
            else:
                self.keystone = ksClient_v2.Client(
                    session=sess, endpoint_type=self.endpoint_type
                )

            self.session["keystone"] = self.keystone
            # In order to enable microversion functionality an explicit microversion must be specified in "config".
            # This implementation approach is due to the warning message in
            # https://developer.openstack.org/api-guide/compute/microversions.html
            # where it is stated that microversion backwards compatibility is not guaranteed and clients should
            # always require an specific microversion.
            # To be able to use "device role tagging" functionality define "microversion: 2.32" in datacenter config
            version = self.config.get("microversion")

            if not version:
                version = "2.1"

            # added region_name to keystone, nova, neutron and cinder to support distributed cloud for Wind River
            # Titanium cloud and StarlingX
            self.nova = self.session["nova"] = nClient.Client(
                str(version),
                session=sess,
                endpoint_type=self.endpoint_type,
                region_name=region_name,
            )
            self.neutron = self.session["neutron"] = neClient.Client(
                "2.0",
                session=sess,
                endpoint_type=self.endpoint_type,
                region_name=region_name,
            )
            self.cinder = self.session["cinder"] = cClient.Client(
                2,
                session=sess,
                endpoint_type=self.endpoint_type,
                region_name=region_name,
            )

            # best-effort: a missing project id is only logged here; callers
            # that need it (e.g. _get_ids_from_name) raise later
            try:
                self.my_tenant_id = self.session["my_tenant_id"] = sess.get_project_id()
            except Exception:
                self.logger.error("Cannot get project_id from session", exc_info=True)

            # glance has no endpoint_type parameter, so the internal endpoint
            # must be resolved manually through the keystone catalog
            if self.endpoint_type == "internalURL":
                glance_service_id = self.keystone.services.list(name="glance")[0].id
                glance_endpoint = self.keystone.endpoints.list(
                    glance_service_id, interface="internal"
                )[0].url
            else:
                glance_endpoint = None

            self.glance = self.session["glance"] = glClient.Client(
                2, session=sess, endpoint=glance_endpoint
            )
            # using version 1 of glance client in new_image()
            # self.glancev1 = self.session["glancev1"] = glClient.Client("1", session=sess,
            #                                                            endpoint=glance_endpoint)
            self.session["reload_client"] = False
            self.persistent_info["session"] = self.session
            # add availability zone info inside self.persistent_info
            self._set_availablity_zones()
            self.persistent_info["availability_zone"] = self.availability_zone
            # force to get again security_groups_ids next time they are needed
            self.security_groups_id = None
391
392 def __net_os2mano(self, net_list_dict):
393 """Transform the net openstack format to mano format
394 net_list_dict can be a list of dict or a single dict"""
395 if type(net_list_dict) is dict:
396 net_list_ = (net_list_dict,)
397 elif type(net_list_dict) is list:
398 net_list_ = net_list_dict
399 else:
400 raise TypeError("param net_list_dict must be a list or a dictionary")
401 for net in net_list_:
402 if net.get("provider:network_type") == "vlan":
403 net["type"] = "data"
404 else:
405 net["type"] = "bridge"
406
407 def __classification_os2mano(self, class_list_dict):
408 """Transform the openstack format (Flow Classifier) to mano format
409 (Classification) class_list_dict can be a list of dict or a single dict
410 """
411 if isinstance(class_list_dict, dict):
412 class_list_ = [class_list_dict]
413 elif isinstance(class_list_dict, list):
414 class_list_ = class_list_dict
415 else:
416 raise TypeError("param class_list_dict must be a list or a dictionary")
417 for classification in class_list_:
418 id = classification.pop("id")
419 name = classification.pop("name")
420 description = classification.pop("description")
421 project_id = classification.pop("project_id")
422 tenant_id = classification.pop("tenant_id")
423 original_classification = copy.deepcopy(classification)
424 classification.clear()
425 classification["ctype"] = "legacy_flow_classifier"
426 classification["definition"] = original_classification
427 classification["id"] = id
428 classification["name"] = name
429 classification["description"] = description
430 classification["project_id"] = project_id
431 classification["tenant_id"] = tenant_id
432
433 def __sfi_os2mano(self, sfi_list_dict):
434 """Transform the openstack format (Port Pair) to mano format (SFI)
435 sfi_list_dict can be a list of dict or a single dict
436 """
437 if isinstance(sfi_list_dict, dict):
438 sfi_list_ = [sfi_list_dict]
439 elif isinstance(sfi_list_dict, list):
440 sfi_list_ = sfi_list_dict
441 else:
442 raise TypeError("param sfi_list_dict must be a list or a dictionary")
443
444 for sfi in sfi_list_:
445 sfi["ingress_ports"] = []
446 sfi["egress_ports"] = []
447
448 if sfi.get("ingress"):
449 sfi["ingress_ports"].append(sfi["ingress"])
450
451 if sfi.get("egress"):
452 sfi["egress_ports"].append(sfi["egress"])
453
454 del sfi["ingress"]
455 del sfi["egress"]
456 params = sfi.get("service_function_parameters")
457 sfc_encap = False
458
459 if params:
460 correlation = params.get("correlation")
461
462 if correlation:
463 sfc_encap = True
464
465 sfi["sfc_encap"] = sfc_encap
466 del sfi["service_function_parameters"]
467
468 def __sf_os2mano(self, sf_list_dict):
469 """Transform the openstack format (Port Pair Group) to mano format (SF)
470 sf_list_dict can be a list of dict or a single dict
471 """
472 if isinstance(sf_list_dict, dict):
473 sf_list_ = [sf_list_dict]
474 elif isinstance(sf_list_dict, list):
475 sf_list_ = sf_list_dict
476 else:
477 raise TypeError("param sf_list_dict must be a list or a dictionary")
478
479 for sf in sf_list_:
480 del sf["port_pair_group_parameters"]
481 sf["sfis"] = sf["port_pairs"]
482 del sf["port_pairs"]
483
484 def __sfp_os2mano(self, sfp_list_dict):
485 """Transform the openstack format (Port Chain) to mano format (SFP)
486 sfp_list_dict can be a list of dict or a single dict
487 """
488 if isinstance(sfp_list_dict, dict):
489 sfp_list_ = [sfp_list_dict]
490 elif isinstance(sfp_list_dict, list):
491 sfp_list_ = sfp_list_dict
492 else:
493 raise TypeError("param sfp_list_dict must be a list or a dictionary")
494
495 for sfp in sfp_list_:
496 params = sfp.pop("chain_parameters")
497 sfc_encap = False
498
499 if params:
500 correlation = params.get("correlation")
501
502 if correlation:
503 sfc_encap = True
504
505 sfp["sfc_encap"] = sfc_encap
506 sfp["spi"] = sfp.pop("chain_id")
507 sfp["classifications"] = sfp.pop("flow_classifiers")
508 sfp["service_functions"] = sfp.pop("port_pair_groups")
509
    # placeholder for now; read TODO note below
    def _validate_classification(self, type, definition):
        """Validate an SFC classification; only "legacy_flow_classifier" is
        supported at this point, so every input is accepted (returns True).
        NOTE: the parameter named "type" shadows the builtin but is kept for
        interface compatibility."""
        # only legacy_flow_classifier Type is supported at this point
        return True
        # TODO(igordcard): this method should be an abstract method of an
        # abstract Classification class to be implemented by the specific
        # Types. Also, abstract vimconnector should call the validation
        # method before the implemented VIM connectors are called.
518
    def _format_exception(self, exception):
        """Transform a keystone, nova, neutron exception into a vimconn exception discovering the cause.

        Always raises (never returns normally).  The isinstance checks are
        ordered from most to least specific across the client libraries, so
        reordering them would change which vimconn exception is raised.
        """
        message_error = str(exception)
        tip = ""

        # resource does not exist in the VIM
        if isinstance(
            exception,
            (
                neExceptions.NetworkNotFoundClient,
                nvExceptions.NotFound,
                ksExceptions.NotFound,
                gl1Exceptions.HTTPNotFound,
            ),
        ):
            raise vimconn.VimConnNotFoundException(
                type(exception).__name__ + ": " + message_error
            )
        # transport / connection level failures
        elif isinstance(
            exception,
            (
                HTTPException,
                gl1Exceptions.HTTPException,
                gl1Exceptions.CommunicationError,
                ConnectionError,
                ksExceptions.ConnectionError,
                neExceptions.ConnectionFailed,
            ),
        ):
            # SSLError is matched by name because it may come from several libs
            if type(exception).__name__ == "SSLError":
                tip = " (maybe option 'insecure' must be added to the VIM)"

            raise vimconn.VimConnConnectionException(
                "Invalid URL or credentials{}: {}".format(tip, message_error)
            )
        # malformed request / missing data
        elif isinstance(
            exception,
            (
                KeyError,
                nvExceptions.BadRequest,
                ksExceptions.BadRequest,
            ),
        ):
            raise vimconn.VimConnException(
                type(exception).__name__ + ": " + message_error
            )
        # generic client-side errors reported by the VIM
        elif isinstance(
            exception,
            (
                nvExceptions.ClientException,
                ksExceptions.ClientException,
                neExceptions.NeutronException,
            ),
        ):
            raise vimconn.VimConnUnexpectedResponse(
                type(exception).__name__ + ": " + message_error
            )
        elif isinstance(exception, nvExceptions.Conflict):
            raise vimconn.VimConnConflictException(
                type(exception).__name__ + ": " + message_error
            )
        # already a vimconn exception: re-raise untouched
        elif isinstance(exception, vimconn.VimConnException):
            raise exception
        else:  # ()
            # unknown exception type: log with traceback, map to connection error
            self.logger.error("General Exception " + message_error, exc_info=True)

            raise vimconn.VimConnConnectionException(
                type(exception).__name__ + ": " + message_error
            )
587
588 def _get_ids_from_name(self):
589 """
590 Obtain ids from name of tenant and security_groups. Store at self .security_groups_id"
591 :return: None
592 """
593 # get tenant_id if only tenant_name is supplied
594 self._reload_connection()
595
596 if not self.my_tenant_id:
597 raise vimconn.VimConnConnectionException(
598 "Error getting tenant information from name={} id={}".format(
599 self.tenant_name, self.tenant_id
600 )
601 )
602
603 if self.config.get("security_groups") and not self.security_groups_id:
604 # convert from name to id
605 neutron_sg_list = self.neutron.list_security_groups(
606 tenant_id=self.my_tenant_id
607 )["security_groups"]
608
609 self.security_groups_id = []
610 for sg in self.config.get("security_groups"):
611 for neutron_sg in neutron_sg_list:
612 if sg in (neutron_sg["id"], neutron_sg["name"]):
613 self.security_groups_id.append(neutron_sg["id"])
614 break
615 else:
616 self.security_groups_id = None
617
618 raise vimconn.VimConnConnectionException(
619 "Not found security group {} for this tenant".format(sg)
620 )
621
622 def check_vim_connectivity(self):
623 # just get network list to check connectivity and credentials
624 self.get_network_list(filter_dict={})
625
626 def get_tenant_list(self, filter_dict={}):
627 """Obtain tenants of VIM
628 filter_dict can contain the following keys:
629 name: filter by tenant name
630 id: filter by tenant uuid/id
631 <other VIM specific>
632 Returns the tenant list of dictionaries: [{'name':'<name>, 'id':'<id>, ...}, ...]
633 """
634 self.logger.debug("Getting tenants from VIM filter: '%s'", str(filter_dict))
635
636 try:
637 self._reload_connection()
638
639 if self.api_version3:
640 project_class_list = self.keystone.projects.list(
641 name=filter_dict.get("name")
642 )
643 else:
644 project_class_list = self.keystone.tenants.findall(**filter_dict)
645
646 project_list = []
647
648 for project in project_class_list:
649 if filter_dict.get("id") and filter_dict["id"] != project.id:
650 continue
651
652 project_list.append(project.to_dict())
653
654 return project_list
655 except (
656 ksExceptions.ConnectionError,
657 ksExceptions.ClientException,
658 ConnectionError,
659 ) as e:
660 self._format_exception(e)
661
662 def new_tenant(self, tenant_name, tenant_description):
663 """Adds a new tenant to openstack VIM. Returns the tenant identifier"""
664 self.logger.debug("Adding a new tenant name: %s", tenant_name)
665
666 try:
667 self._reload_connection()
668
669 if self.api_version3:
670 project = self.keystone.projects.create(
671 tenant_name,
672 self.config.get("project_domain_id", "default"),
673 description=tenant_description,
674 is_domain=False,
675 )
676 else:
677 project = self.keystone.tenants.create(tenant_name, tenant_description)
678
679 return project.id
680 except (
681 ksExceptions.ConnectionError,
682 ksExceptions.ClientException,
683 ksExceptions.BadRequest,
684 ConnectionError,
685 ) as e:
686 self._format_exception(e)
687
688 def delete_tenant(self, tenant_id):
689 """Delete a tenant from openstack VIM. Returns the old tenant identifier"""
690 self.logger.debug("Deleting tenant %s from VIM", tenant_id)
691
692 try:
693 self._reload_connection()
694
695 if self.api_version3:
696 self.keystone.projects.delete(tenant_id)
697 else:
698 self.keystone.tenants.delete(tenant_id)
699
700 return tenant_id
701 except (
702 ksExceptions.ConnectionError,
703 ksExceptions.ClientException,
704 ksExceptions.NotFound,
705 ConnectionError,
706 ) as e:
707 self._format_exception(e)
708
709 def new_network(
710 self,
711 net_name,
712 net_type,
713 ip_profile=None,
714 shared=False,
715 provider_network_profile=None,
716 ):
717 """Adds a tenant network to VIM
718 Params:
719 'net_name': name of the network
720 'net_type': one of:
721 'bridge': overlay isolated network
722 'data': underlay E-LAN network for Passthrough and SRIOV interfaces
723 'ptp': underlay E-LINE network for Passthrough and SRIOV interfaces.
724 'ip_profile': is a dict containing the IP parameters of the network
725 'ip_version': can be "IPv4" or "IPv6" (Currently only IPv4 is implemented)
726 'subnet_address': ip_prefix_schema, that is X.X.X.X/Y
727 'gateway_address': (Optional) ip_schema, that is X.X.X.X
728 'dns_address': (Optional) comma separated list of ip_schema, e.g. X.X.X.X[,X,X,X,X]
729 'dhcp_enabled': True or False
730 'dhcp_start_address': ip_schema, first IP to grant
731 'dhcp_count': number of IPs to grant.
732 'shared': if this network can be seen/use by other tenants/organization
733 'provider_network_profile': (optional) contains {segmentation-id: vlan, network-type: vlan|vxlan,
734 physical-network: physnet-label}
735 Returns a tuple with the network identifier and created_items, or raises an exception on error
736 created_items can be None or a dictionary where this method can include key-values that will be passed to
737 the method delete_network. Can be used to store created segments, created l2gw connections, etc.
738 Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
739 as not present.
740 """
741 self.logger.debug(
742 "Adding a new network to VIM name '%s', type '%s'", net_name, net_type
743 )
744 # self.logger.debug(">>>>>>>>>>>>>>>>>> IP profile %s", str(ip_profile))
745
746 try:
747 vlan = None
748
749 if provider_network_profile:
750 vlan = provider_network_profile.get("segmentation-id")
751
752 new_net = None
753 created_items = {}
754 self._reload_connection()
755 network_dict = {"name": net_name, "admin_state_up": True}
756
757 if net_type in ("data", "ptp") or provider_network_profile:
758 provider_physical_network = None
759
760 if provider_network_profile and provider_network_profile.get(
761 "physical-network"
762 ):
763 provider_physical_network = provider_network_profile.get(
764 "physical-network"
765 )
766
767 # provider-network must be one of the dataplane_physcial_netowrk if this is a list. If it is string
768 # or not declared, just ignore the checking
769 if (
770 isinstance(
771 self.config.get("dataplane_physical_net"), (tuple, list)
772 )
773 and provider_physical_network
774 not in self.config["dataplane_physical_net"]
775 ):
776 raise vimconn.VimConnConflictException(
777 "Invalid parameter 'provider-network:physical-network' "
778 "for network creation. '{}' is not one of the declared "
779 "list at VIM_config:dataplane_physical_net".format(
780 provider_physical_network
781 )
782 )
783
784 # use the default dataplane_physical_net
785 if not provider_physical_network:
786 provider_physical_network = self.config.get(
787 "dataplane_physical_net"
788 )
789
790 # if it is non empty list, use the first value. If it is a string use the value directly
791 if (
792 isinstance(provider_physical_network, (tuple, list))
793 and provider_physical_network
794 ):
795 provider_physical_network = provider_physical_network[0]
796
797 if not provider_physical_network:
798 raise vimconn.VimConnConflictException(
799 "missing information needed for underlay networks. Provide "
800 "'dataplane_physical_net' configuration at VIM or use the NS "
801 "instantiation parameter 'provider-network.physical-network'"
802 " for the VLD"
803 )
804
805 if not self.config.get("multisegment_support"):
806 network_dict[
807 "provider:physical_network"
808 ] = provider_physical_network
809
810 if (
811 provider_network_profile
812 and "network-type" in provider_network_profile
813 ):
814 network_dict[
815 "provider:network_type"
816 ] = provider_network_profile["network-type"]
817 else:
818 network_dict["provider:network_type"] = self.config.get(
819 "dataplane_network_type", "vlan"
820 )
821
822 if vlan:
823 network_dict["provider:segmentation_id"] = vlan
824 else:
825 # Multi-segment case
826 segment_list = []
827 segment1_dict = {
828 "provider:physical_network": "",
829 "provider:network_type": "vxlan",
830 }
831 segment_list.append(segment1_dict)
832 segment2_dict = {
833 "provider:physical_network": provider_physical_network,
834 "provider:network_type": "vlan",
835 }
836
837 if vlan:
838 segment2_dict["provider:segmentation_id"] = vlan
839 elif self.config.get("multisegment_vlan_range"):
840 vlanID = self._generate_multisegment_vlanID()
841 segment2_dict["provider:segmentation_id"] = vlanID
842
843 # else
844 # raise vimconn.VimConnConflictException(
845 # "You must provide "multisegment_vlan_range" at config dict before creating a multisegment
846 # network")
847 segment_list.append(segment2_dict)
848 network_dict["segments"] = segment_list
849
850 # VIO Specific Changes. It needs a concrete VLAN
851 if self.vim_type == "VIO" and vlan is None:
852 if self.config.get("dataplane_net_vlan_range") is None:
853 raise vimconn.VimConnConflictException(
854 "You must provide 'dataplane_net_vlan_range' in format "
855 "[start_ID - end_ID] at VIM_config for creating underlay "
856 "networks"
857 )
858
859 network_dict["provider:segmentation_id"] = self._generate_vlanID()
860
861 network_dict["shared"] = shared
862
863 if self.config.get("disable_network_port_security"):
864 network_dict["port_security_enabled"] = False
865
866 if self.config.get("neutron_availability_zone_hints"):
867 hints = self.config.get("neutron_availability_zone_hints")
868
869 if isinstance(hints, str):
870 hints = [hints]
871
872 network_dict["availability_zone_hints"] = hints
873
874 new_net = self.neutron.create_network({"network": network_dict})
875 # print new_net
876 # create subnetwork, even if there is no profile
877
878 if not ip_profile:
879 ip_profile = {}
880
881 if not ip_profile.get("subnet_address"):
882 # Fake subnet is required
883 subnet_rand = random.randint(0, 255)
884 ip_profile["subnet_address"] = "192.168.{}.0/24".format(subnet_rand)
885
886 if "ip_version" not in ip_profile:
887 ip_profile["ip_version"] = "IPv4"
888
889 subnet = {
890 "name": net_name + "-subnet",
891 "network_id": new_net["network"]["id"],
892 "ip_version": 4 if ip_profile["ip_version"] == "IPv4" else 6,
893 "cidr": ip_profile["subnet_address"],
894 }
895
896 # Gateway should be set to None if not needed. Otherwise openstack assigns one by default
897 if ip_profile.get("gateway_address"):
898 subnet["gateway_ip"] = ip_profile["gateway_address"]
899 else:
900 subnet["gateway_ip"] = None
901
902 if ip_profile.get("dns_address"):
903 subnet["dns_nameservers"] = ip_profile["dns_address"].split(";")
904
905 if "dhcp_enabled" in ip_profile:
906 subnet["enable_dhcp"] = (
907 False
908 if ip_profile["dhcp_enabled"] == "false"
909 or ip_profile["dhcp_enabled"] is False
910 else True
911 )
912
913 if ip_profile.get("dhcp_start_address"):
914 subnet["allocation_pools"] = []
915 subnet["allocation_pools"].append(dict())
916 subnet["allocation_pools"][0]["start"] = ip_profile[
917 "dhcp_start_address"
918 ]
919
920 if ip_profile.get("dhcp_count"):
921 # parts = ip_profile["dhcp_start_address"].split(".")
922 # ip_int = (int(parts[0]) << 24) + (int(parts[1]) << 16) + (int(parts[2]) << 8) + int(parts[3])
923 ip_int = int(netaddr.IPAddress(ip_profile["dhcp_start_address"]))
924 ip_int += ip_profile["dhcp_count"] - 1
925 ip_str = str(netaddr.IPAddress(ip_int))
926 subnet["allocation_pools"][0]["end"] = ip_str
927
928 # self.logger.debug(">>>>>>>>>>>>>>>>>> Subnet: %s", str(subnet))
929 self.neutron.create_subnet({"subnet": subnet})
930
931 if net_type == "data" and self.config.get("multisegment_support"):
932 if self.config.get("l2gw_support"):
933 l2gw_list = self.neutron.list_l2_gateways().get("l2_gateways", ())
934 for l2gw in l2gw_list:
935 l2gw_conn = {
936 "l2_gateway_id": l2gw["id"],
937 "network_id": new_net["network"]["id"],
938 "segmentation_id": str(vlanID),
939 }
940 new_l2gw_conn = self.neutron.create_l2_gateway_connection(
941 {"l2_gateway_connection": l2gw_conn}
942 )
943 created_items[
944 "l2gwconn:"
945 + str(new_l2gw_conn["l2_gateway_connection"]["id"])
946 ] = True
947
948 return new_net["network"]["id"], created_items
949 except Exception as e:
950 # delete l2gw connections (if any) before deleting the network
951 for k, v in created_items.items():
952 if not v: # skip already deleted
953 continue
954
955 try:
956 k_item, _, k_id = k.partition(":")
957
958 if k_item == "l2gwconn":
959 self.neutron.delete_l2_gateway_connection(k_id)
960 except Exception as e2:
961 self.logger.error(
962 "Error deleting l2 gateway connection: {}: {}".format(
963 type(e2).__name__, e2
964 )
965 )
966
967 if new_net:
968 self.neutron.delete_network(new_net["network"]["id"])
969
970 self._format_exception(e)
971
972 def get_network_list(self, filter_dict={}):
973 """Obtain tenant networks of VIM
974 Filter_dict can be:
975 name: network name
976 id: network uuid
977 shared: boolean
978 tenant_id: tenant
979 admin_state_up: boolean
980 status: 'ACTIVE'
981 Returns the network list of dictionaries
982 """
983 self.logger.debug("Getting network from VIM filter: '%s'", str(filter_dict))
984
985 try:
986 self._reload_connection()
987 filter_dict_os = filter_dict.copy()
988
989 if self.api_version3 and "tenant_id" in filter_dict_os:
990 # TODO check
991 filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")
992
993 net_dict = self.neutron.list_networks(**filter_dict_os)
994 net_list = net_dict["networks"]
995 self.__net_os2mano(net_list)
996
997 return net_list
998 except (
999 neExceptions.ConnectionFailed,
1000 ksExceptions.ClientException,
1001 neExceptions.NeutronException,
1002 ConnectionError,
1003 ) as e:
1004 self._format_exception(e)
1005
1006 def get_network(self, net_id):
1007 """Obtain details of network from VIM
1008 Returns the network information from a network id"""
1009 self.logger.debug(" Getting tenant network %s from VIM", net_id)
1010 filter_dict = {"id": net_id}
1011 net_list = self.get_network_list(filter_dict)
1012
1013 if len(net_list) == 0:
1014 raise vimconn.VimConnNotFoundException(
1015 "Network '{}' not found".format(net_id)
1016 )
1017 elif len(net_list) > 1:
1018 raise vimconn.VimConnConflictException(
1019 "Found more than one network with this criteria"
1020 )
1021
1022 net = net_list[0]
1023 subnets = []
1024 for subnet_id in net.get("subnets", ()):
1025 try:
1026 subnet = self.neutron.show_subnet(subnet_id)
1027 except Exception as e:
1028 self.logger.error(
1029 "osconnector.get_network(): Error getting subnet %s %s"
1030 % (net_id, str(e))
1031 )
1032 subnet = {"id": subnet_id, "fault": str(e)}
1033
1034 subnets.append(subnet)
1035
1036 net["subnets"] = subnets
1037 net["encapsulation"] = net.get("provider:network_type")
1038 net["encapsulation_type"] = net.get("provider:network_type")
1039 net["segmentation_id"] = net.get("provider:segmentation_id")
1040 net["encapsulation_id"] = net.get("provider:segmentation_id")
1041
1042 return net
1043
1044 def delete_network(self, net_id, created_items=None):
1045 """
1046 Removes a tenant network from VIM and its associated elements
1047 :param net_id: VIM identifier of the network, provided by method new_network
1048 :param created_items: dictionary with extra items to be deleted. provided by method new_network
1049 Returns the network identifier or raises an exception upon error or when network is not found
1050 """
1051 self.logger.debug("Deleting network '%s' from VIM", net_id)
1052
1053 if created_items is None:
1054 created_items = {}
1055
1056 try:
1057 self._reload_connection()
1058 # delete l2gw connections (if any) before deleting the network
1059 for k, v in created_items.items():
1060 if not v: # skip already deleted
1061 continue
1062
1063 try:
1064 k_item, _, k_id = k.partition(":")
1065 if k_item == "l2gwconn":
1066 self.neutron.delete_l2_gateway_connection(k_id)
1067 except Exception as e:
1068 self.logger.error(
1069 "Error deleting l2 gateway connection: {}: {}".format(
1070 type(e).__name__, e
1071 )
1072 )
1073
1074 # delete VM ports attached to this networks before the network
1075 ports = self.neutron.list_ports(network_id=net_id)
1076 for p in ports["ports"]:
1077 try:
1078 self.neutron.delete_port(p["id"])
1079 except Exception as e:
1080 self.logger.error("Error deleting port %s: %s", p["id"], str(e))
1081
1082 self.neutron.delete_network(net_id)
1083
1084 return net_id
1085 except (
1086 neExceptions.ConnectionFailed,
1087 neExceptions.NetworkNotFoundClient,
1088 neExceptions.NeutronException,
1089 ksExceptions.ClientException,
1090 neExceptions.NeutronException,
1091 ConnectionError,
1092 ) as e:
1093 self._format_exception(e)
1094
1095 def refresh_nets_status(self, net_list):
1096 """Get the status of the networks
1097 Params: the list of network identifiers
1098 Returns a dictionary with:
1099 net_id: #VIM id of this network
1100 status: #Mandatory. Text with one of:
1101 # DELETED (not found at vim)
1102 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
1103 # OTHER (Vim reported other status not understood)
1104 # ERROR (VIM indicates an ERROR status)
1105 # ACTIVE, INACTIVE, DOWN (admin down),
1106 # BUILD (on building process)
1107 #
1108 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
1109 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
1110 """
1111 net_dict = {}
1112
1113 for net_id in net_list:
1114 net = {}
1115
1116 try:
1117 net_vim = self.get_network(net_id)
1118
1119 if net_vim["status"] in netStatus2manoFormat:
1120 net["status"] = netStatus2manoFormat[net_vim["status"]]
1121 else:
1122 net["status"] = "OTHER"
1123 net["error_msg"] = "VIM status reported " + net_vim["status"]
1124
1125 if net["status"] == "ACTIVE" and not net_vim["admin_state_up"]:
1126 net["status"] = "DOWN"
1127
1128 net["vim_info"] = self.serialize(net_vim)
1129
1130 if net_vim.get("fault"): # TODO
1131 net["error_msg"] = str(net_vim["fault"])
1132 except vimconn.VimConnNotFoundException as e:
1133 self.logger.error("Exception getting net status: %s", str(e))
1134 net["status"] = "DELETED"
1135 net["error_msg"] = str(e)
1136 except vimconn.VimConnException as e:
1137 self.logger.error("Exception getting net status: %s", str(e))
1138 net["status"] = "VIM_ERROR"
1139 net["error_msg"] = str(e)
1140 net_dict[net_id] = net
1141 return net_dict
1142
1143 def get_flavor(self, flavor_id):
1144 """Obtain flavor details from the VIM. Returns the flavor dict details"""
1145 self.logger.debug("Getting flavor '%s'", flavor_id)
1146
1147 try:
1148 self._reload_connection()
1149 flavor = self.nova.flavors.find(id=flavor_id)
1150 # TODO parse input and translate to VIM format (openmano_schemas.new_vminstance_response_schema)
1151
1152 return flavor.to_dict()
1153 except (
1154 nvExceptions.NotFound,
1155 nvExceptions.ClientException,
1156 ksExceptions.ClientException,
1157 ConnectionError,
1158 ) as e:
1159 self._format_exception(e)
1160
    def get_flavor_id_from_data(self, flavor_dict):
        """Obtain flavor id that match the flavor description
        Returns the flavor_id or raises a vimconnNotFoundException
        flavor_dict: contains the required ram, vcpus, disk
        If 'use_existing_flavors' is set to True at config, the closer flavor that provides same or more ram, vcpus
        and disk is returned. Otherwise a flavor with exactly same ram, vcpus and disk is returned or a
        vimconnNotFoundException is raised
        """
        # exact_match is the default; "use_existing_flavors" at config relaxes
        # matching to "same or more resources"
        exact_match = False if self.config.get("use_existing_flavors") else True

        try:
            self._reload_connection()
            flavor_candidate_id = None
            # sentinel candidate: lexicographically larger than any realistic
            # (ram, vcpus, disk) triple so the first acceptable flavor replaces it
            flavor_candidate_data = (10000, 10000, 10000)
            flavor_target = (
                flavor_dict["ram"],
                flavor_dict["vcpus"],
                flavor_dict["disk"],
                flavor_dict.get("ephemeral", 0),
                flavor_dict.get("swap", 0),
            )
            # numa=None
            extended = flavor_dict.get("extended", {})
            if extended:
                # TODO
                raise vimconn.VimConnNotFoundException(
                    "Flavor with EPA still not implemented"
                )
            # if len(numas) > 1:
            #     raise vimconn.VimConnNotFoundException("Cannot find any flavor with more than one numa")
            # numa=numas[0]
            # numas = extended.get("numas")
            for flavor in self.nova.flavors.list():
                epa = flavor.get_keys()

                # skip flavors carrying extra_specs (EPA): they are not
                # comparable by plain ram/vcpus/disk
                if epa:
                    continue
                # TODO

                flavor_data = (
                    flavor.ram,
                    flavor.vcpus,
                    flavor.disk,
                    flavor.ephemeral,
                    # flavor.swap is "" when unset; normalize to 0
                    flavor.swap if isinstance(flavor.swap, int) else 0,
                )
                if flavor_data == flavor_target:
                    return flavor.id
                elif (
                    # NOTE: tuples compare lexicographically, so "closer" is
                    # decided by ram first, then vcpus, disk, ephemeral, swap
                    not exact_match
                    and flavor_target < flavor_data < flavor_candidate_data
                ):
                    flavor_candidate_id = flavor.id
                    flavor_candidate_data = flavor_data

            if not exact_match and flavor_candidate_id:
                return flavor_candidate_id

            raise vimconn.VimConnNotFoundException(
                "Cannot find any flavor matching '{}'".format(flavor_dict)
            )
        except (
            nvExceptions.NotFound,
            nvExceptions.ClientException,
            ksExceptions.ClientException,
            ConnectionError,
        ) as e:
            self._format_exception(e)
1229
1230 def process_resource_quota(self, quota, prefix, extra_specs):
1231 """
1232 :param prefix:
1233 :param extra_specs:
1234 :return:
1235 """
1236 if "limit" in quota:
1237 extra_specs["quota:" + prefix + "_limit"] = quota["limit"]
1238
1239 if "reserve" in quota:
1240 extra_specs["quota:" + prefix + "_reservation"] = quota["reserve"]
1241
1242 if "shares" in quota:
1243 extra_specs["quota:" + prefix + "_shares_level"] = "custom"
1244 extra_specs["quota:" + prefix + "_shares_share"] = quota["shares"]
1245
    def new_flavor(self, flavor_data, change_name_if_used=True):
        """Adds a tenant flavor to openstack VIM
        if change_name_if_used is True, it will change name in case of conflict, because it is not supported name
        repetition
        Returns the flavor identifier
        """
        self.logger.debug("Adding flavor '%s'", str(flavor_data))
        retry = 0
        max_retries = 3
        name_suffix = 0

        try:
            name = flavor_data["name"]
            while retry < max_retries:
                retry += 1
                try:
                    self._reload_connection()

                    if change_name_if_used:
                        # get used names
                        fl_names = []
                        fl = self.nova.flavors.list()

                        for f in fl:
                            fl_names.append(f.name)

                        # append "-<n>" suffixes until the name is unique
                        while name in fl_names:
                            name_suffix += 1
                            name = flavor_data["name"] + "-" + str(name_suffix)

                    ram = flavor_data.get("ram", 64)
                    vcpus = flavor_data.get("vcpus", 1)
                    extra_specs = {}

                    extended = flavor_data.get("extended")
                    if extended:
                        numas = extended.get("numas")

                        if numas:
                            numa_nodes = len(numas)

                            extra_specs["hw:numa_nodes"] = str(numa_nodes)

                            if self.vim_type == "VIO":
                                extra_specs[
                                    "vmware:extra_config"
                                ] = '{"numa.nodeAffinity":"0"}'
                                extra_specs["vmware:latency_sensitivity_level"] = "high"

                            for numa in numas:
                                # NOTE(review): node_id is only (re)assigned when
                                # "id" is present; a numa without "id" reuses the
                                # previous iteration's node_id — confirm intended
                                if "id" in numa:
                                    node_id = numa["id"]

                                if "memory" in numa:
                                    memory_mb = numa["memory"] * 1024
                                    memory = "hw:numa_mem.{}".format(node_id)
                                    # NOTE(review): stored as int while other
                                    # extra_specs are strings — confirm nova accepts it
                                    extra_specs[memory] = int(memory_mb)

                                if "vcpu" in numa:
                                    vcpu = numa["vcpu"]
                                    cpu = "hw:numa_cpus.{}".format(node_id)
                                    vcpu = ",".join(map(str, vcpu))
                                    extra_specs[cpu] = vcpu

                                # overwrite ram and vcpus
                                # check if key "memory" is present in numa else use ram value at flavor
                                # See for reference: https://specs.openstack.org/openstack/nova-specs/specs/mitaka/
                                # implemented/virt-driver-cpu-thread-pinning.html
                                extra_specs["hw:cpu_sockets"] = str(numa_nodes)

                                if "paired-threads" in numa:
                                    vcpus = numa["paired-threads"] * 2
                                    # cpu_thread_policy "require" implies that the compute node must have an
                                    # STM architecture
                                    extra_specs["hw:cpu_thread_policy"] = "require"
                                    extra_specs["hw:cpu_policy"] = "dedicated"
                                elif "cores" in numa:
                                    vcpus = numa["cores"]
                                    # cpu_thread_policy "prefer" implies that the host must not have an SMT
                                    # architecture, or a non-SMT architecture will be emulated
                                    extra_specs["hw:cpu_thread_policy"] = "isolate"
                                    extra_specs["hw:cpu_policy"] = "dedicated"
                                elif "threads" in numa:
                                    vcpus = numa["threads"]
                                    # cpu_thread_policy "prefer" implies that the host may or may not have an SMT
                                    # architecture
                                    extra_specs["hw:cpu_thread_policy"] = "prefer"
                                    extra_specs["hw:cpu_policy"] = "dedicated"
                                # for interface in numa.get("interfaces",() ):
                                #     if interface["dedicated"]=="yes":
                                #         raise vimconn.VimConnException("Passthrough interfaces are not supported
                                #         for the openstack connector", http_code=vimconn.HTTP_Service_Unavailable)
                                #     #TODO, add the key 'pci_passthrough:alias"="<label at config>:<number ifaces>"'
                                #     when a way to connect it is available
                        # NOTE: cpu-quota is only processed when no numas are
                        # given (elif bound to "if numas"); the other quotas
                        # below are processed in both cases
                        elif extended.get("cpu-quota"):
                            self.process_resource_quota(
                                extended.get("cpu-quota"), "cpu", extra_specs
                            )

                        if extended.get("mem-quota"):
                            self.process_resource_quota(
                                extended.get("mem-quota"), "memory", extra_specs
                            )

                        if extended.get("vif-quota"):
                            self.process_resource_quota(
                                extended.get("vif-quota"), "vif", extra_specs
                            )

                        if extended.get("disk-io-quota"):
                            self.process_resource_quota(
                                extended.get("disk-io-quota"), "disk_io", extra_specs
                            )

                        # Set the mempage size as specified in the descriptor
                        if extended.get("mempage-size"):
                            if extended.get("mempage-size") == "LARGE":
                                extra_specs["hw:mem_page_size"] = "large"
                            elif extended.get("mempage-size") == "SMALL":
                                extra_specs["hw:mem_page_size"] = "small"
                            elif extended.get("mempage-size") == "SIZE_2MB":
                                extra_specs["hw:mem_page_size"] = "2MB"
                            elif extended.get("mempage-size") == "SIZE_1GB":
                                extra_specs["hw:mem_page_size"] = "1GB"
                            elif extended.get("mempage-size") == "PREFER_LARGE":
                                extra_specs["hw:mem_page_size"] = "any"
                            else:
                                # The validations in NBI should make reaching here not possible.
                                # If this message is shown, check validations
                                self.logger.debug(
                                    "Invalid mempage-size %s. Will be ignored",
                                    extended.get("mempage-size"),
                                )
                        if extended.get("cpu-pinning-policy"):
                            extra_specs["hw:cpu_policy"] = extended.get(
                                "cpu-pinning-policy"
                            ).lower()

                        # Set the cpu thread pinning policy as specified in the descriptor
                        if extended.get("cpu-thread-pinning-policy"):
                            extra_specs["hw:cpu_thread_policy"] = extended.get(
                                "cpu-thread-pinning-policy"
                            ).lower()

                        # Set the mem policy as specified in the descriptor
                        if extended.get("mem-policy"):
                            extra_specs["hw:numa_mempolicy"] = extended.get(
                                "mem-policy"
                            ).lower()

                    # create flavor
                    new_flavor = self.nova.flavors.create(
                        name=name,
                        ram=ram,
                        vcpus=vcpus,
                        disk=flavor_data.get("disk", 0),
                        ephemeral=flavor_data.get("ephemeral", 0),
                        swap=flavor_data.get("swap", 0),
                        is_public=flavor_data.get("is_public", True),
                    )
                    # add metadata
                    if extra_specs:
                        new_flavor.set_keys(extra_specs)

                    return new_flavor.id
                except nvExceptions.Conflict as e:
                    # name collision: retry with a new suffixed name, unless
                    # retries are exhausted or renaming is disabled
                    if change_name_if_used and retry < max_retries:
                        continue

                    self._format_exception(e)
        # except nvExceptions.BadRequest as e:
        except (
            ksExceptions.ClientException,
            nvExceptions.ClientException,
            ConnectionError,
            KeyError,
        ) as e:
            self._format_exception(e)
1424
1425 def delete_flavor(self, flavor_id):
1426 """Deletes a tenant flavor from openstack VIM. Returns the old flavor_id"""
1427 try:
1428 self._reload_connection()
1429 self.nova.flavors.delete(flavor_id)
1430
1431 return flavor_id
1432 # except nvExceptions.BadRequest as e:
1433 except (
1434 nvExceptions.NotFound,
1435 ksExceptions.ClientException,
1436 nvExceptions.ClientException,
1437 ConnectionError,
1438 ) as e:
1439 self._format_exception(e)
1440
1441 def new_image(self, image_dict):
1442 """
1443 Adds a tenant image to VIM. imge_dict is a dictionary with:
1444 name: name
1445 disk_format: qcow2, vhd, vmdk, raw (by default), ...
1446 location: path or URI
1447 public: "yes" or "no"
1448 metadata: metadata of the image
1449 Returns the image_id
1450 """
1451 retry = 0
1452 max_retries = 3
1453
1454 while retry < max_retries:
1455 retry += 1
1456 try:
1457 self._reload_connection()
1458
1459 # determine format http://docs.openstack.org/developer/glance/formats.html
1460 if "disk_format" in image_dict:
1461 disk_format = image_dict["disk_format"]
1462 else: # autodiscover based on extension
1463 if image_dict["location"].endswith(".qcow2"):
1464 disk_format = "qcow2"
1465 elif image_dict["location"].endswith(".vhd"):
1466 disk_format = "vhd"
1467 elif image_dict["location"].endswith(".vmdk"):
1468 disk_format = "vmdk"
1469 elif image_dict["location"].endswith(".vdi"):
1470 disk_format = "vdi"
1471 elif image_dict["location"].endswith(".iso"):
1472 disk_format = "iso"
1473 elif image_dict["location"].endswith(".aki"):
1474 disk_format = "aki"
1475 elif image_dict["location"].endswith(".ari"):
1476 disk_format = "ari"
1477 elif image_dict["location"].endswith(".ami"):
1478 disk_format = "ami"
1479 else:
1480 disk_format = "raw"
1481
1482 self.logger.debug(
1483 "new_image: '%s' loading from '%s'",
1484 image_dict["name"],
1485 image_dict["location"],
1486 )
1487 if self.vim_type == "VIO":
1488 container_format = "bare"
1489 if "container_format" in image_dict:
1490 container_format = image_dict["container_format"]
1491
1492 new_image = self.glance.images.create(
1493 name=image_dict["name"],
1494 container_format=container_format,
1495 disk_format=disk_format,
1496 )
1497 else:
1498 new_image = self.glance.images.create(name=image_dict["name"])
1499
1500 if image_dict["location"].startswith("http"):
1501 # TODO there is not a method to direct download. It must be downloaded locally with requests
1502 raise vimconn.VimConnNotImplemented("Cannot create image from URL")
1503 else: # local path
1504 with open(image_dict["location"]) as fimage:
1505 self.glance.images.upload(new_image.id, fimage)
1506 # new_image = self.glancev1.images.create(name=image_dict["name"], is_public=
1507 # image_dict.get("public","yes")=="yes",
1508 # container_format="bare", data=fimage, disk_format=disk_format)
1509
1510 metadata_to_load = image_dict.get("metadata")
1511
1512 # TODO location is a reserved word for current openstack versions. fixed for VIO please check
1513 # for openstack
1514 if self.vim_type == "VIO":
1515 metadata_to_load["upload_location"] = image_dict["location"]
1516 else:
1517 metadata_to_load["location"] = image_dict["location"]
1518
1519 self.glance.images.update(new_image.id, **metadata_to_load)
1520
1521 return new_image.id
1522 except (
1523 nvExceptions.Conflict,
1524 ksExceptions.ClientException,
1525 nvExceptions.ClientException,
1526 ) as e:
1527 self._format_exception(e)
1528 except (
1529 HTTPException,
1530 gl1Exceptions.HTTPException,
1531 gl1Exceptions.CommunicationError,
1532 ConnectionError,
1533 ) as e:
1534 if retry == max_retries:
1535 continue
1536
1537 self._format_exception(e)
1538 except IOError as e: # can not open the file
1539 raise vimconn.VimConnConnectionException(
1540 "{}: {} for {}".format(type(e).__name__, e, image_dict["location"]),
1541 http_code=vimconn.HTTP_Bad_Request,
1542 )
1543
1544 def delete_image(self, image_id):
1545 """Deletes a tenant image from openstack VIM. Returns the old id"""
1546 try:
1547 self._reload_connection()
1548 self.glance.images.delete(image_id)
1549
1550 return image_id
1551 except (
1552 nvExceptions.NotFound,
1553 ksExceptions.ClientException,
1554 nvExceptions.ClientException,
1555 gl1Exceptions.CommunicationError,
1556 gl1Exceptions.HTTPNotFound,
1557 ConnectionError,
1558 ) as e: # TODO remove
1559 self._format_exception(e)
1560
1561 def get_image_id_from_path(self, path):
1562 """Get the image id from image path in the VIM database. Returns the image_id"""
1563 try:
1564 self._reload_connection()
1565 images = self.glance.images.list()
1566
1567 for image in images:
1568 if image.metadata.get("location") == path:
1569 return image.id
1570
1571 raise vimconn.VimConnNotFoundException(
1572 "image with location '{}' not found".format(path)
1573 )
1574 except (
1575 ksExceptions.ClientException,
1576 nvExceptions.ClientException,
1577 gl1Exceptions.CommunicationError,
1578 ConnectionError,
1579 ) as e:
1580 self._format_exception(e)
1581
1582 def get_image_list(self, filter_dict={}):
1583 """Obtain tenant images from VIM
1584 Filter_dict can be:
1585 id: image id
1586 name: image name
1587 checksum: image checksum
1588 Returns the image list of dictionaries:
1589 [{<the fields at Filter_dict plus some VIM specific>}, ...]
1590 List can be empty
1591 """
1592 self.logger.debug("Getting image list from VIM filter: '%s'", str(filter_dict))
1593
1594 try:
1595 self._reload_connection()
1596 # filter_dict_os = filter_dict.copy()
1597 # First we filter by the available filter fields: name, id. The others are removed.
1598 image_list = self.glance.images.list()
1599 filtered_list = []
1600
1601 for image in image_list:
1602 try:
1603 if filter_dict.get("name") and image["name"] != filter_dict["name"]:
1604 continue
1605
1606 if filter_dict.get("id") and image["id"] != filter_dict["id"]:
1607 continue
1608
1609 if (
1610 filter_dict.get("checksum")
1611 and image["checksum"] != filter_dict["checksum"]
1612 ):
1613 continue
1614
1615 filtered_list.append(image.copy())
1616 except gl1Exceptions.HTTPNotFound:
1617 pass
1618
1619 return filtered_list
1620 except (
1621 ksExceptions.ClientException,
1622 nvExceptions.ClientException,
1623 gl1Exceptions.CommunicationError,
1624 ConnectionError,
1625 ) as e:
1626 self._format_exception(e)
1627
1628 def __wait_for_vm(self, vm_id, status):
1629 """wait until vm is in the desired status and return True.
1630 If the VM gets in ERROR status, return false.
1631 If the timeout is reached generate an exception"""
1632 elapsed_time = 0
1633 while elapsed_time < server_timeout:
1634 vm_status = self.nova.servers.get(vm_id).status
1635
1636 if vm_status == status:
1637 return True
1638
1639 if vm_status == "ERROR":
1640 return False
1641
1642 time.sleep(5)
1643 elapsed_time += 5
1644
1645 # if we exceeded the timeout rollback
1646 if elapsed_time >= server_timeout:
1647 raise vimconn.VimConnException(
1648 "Timeout waiting for instance " + vm_id + " to get " + status,
1649 http_code=vimconn.HTTP_Request_Timeout,
1650 )
1651
1652 def _get_openstack_availablity_zones(self):
1653 """
1654 Get from openstack availability zones available
1655 :return:
1656 """
1657 try:
1658 openstack_availability_zone = self.nova.availability_zones.list()
1659 openstack_availability_zone = [
1660 str(zone.zoneName)
1661 for zone in openstack_availability_zone
1662 if zone.zoneName != "internal"
1663 ]
1664
1665 return openstack_availability_zone
1666 except Exception:
1667 return None
1668
1669 def _set_availablity_zones(self):
1670 """
1671 Set vim availablity zone
1672 :return:
1673 """
1674 if "availability_zone" in self.config:
1675 vim_availability_zones = self.config.get("availability_zone")
1676
1677 if isinstance(vim_availability_zones, str):
1678 self.availability_zone = [vim_availability_zones]
1679 elif isinstance(vim_availability_zones, list):
1680 self.availability_zone = vim_availability_zones
1681 else:
1682 self.availability_zone = self._get_openstack_availablity_zones()
1683
1684 def _get_vm_availability_zone(
1685 self, availability_zone_index, availability_zone_list
1686 ):
1687 """
1688 Return thge availability zone to be used by the created VM.
1689 :return: The VIM availability zone to be used or None
1690 """
1691 if availability_zone_index is None:
1692 if not self.config.get("availability_zone"):
1693 return None
1694 elif isinstance(self.config.get("availability_zone"), str):
1695 return self.config["availability_zone"]
1696 else:
1697 # TODO consider using a different parameter at config for default AV and AV list match
1698 return self.config["availability_zone"][0]
1699
1700 vim_availability_zones = self.availability_zone
1701 # check if VIM offer enough availability zones describe in the VNFD
1702 if vim_availability_zones and len(availability_zone_list) <= len(
1703 vim_availability_zones
1704 ):
1705 # check if all the names of NFV AV match VIM AV names
1706 match_by_index = False
1707 for av in availability_zone_list:
1708 if av not in vim_availability_zones:
1709 match_by_index = True
1710 break
1711
1712 if match_by_index:
1713 return vim_availability_zones[availability_zone_index]
1714 else:
1715 return availability_zone_list[availability_zone_index]
1716 else:
1717 raise vimconn.VimConnConflictException(
1718 "No enough availability zones at VIM for this deployment"
1719 )
1720
1721 def _prepare_port_dict_security_groups(self, net: dict, port_dict: dict) -> None:
1722 """Fill up the security_groups in the port_dict.
1723
1724 Args:
1725 net (dict): Network details
1726 port_dict (dict): Port details
1727
1728 """
1729 if (
1730 self.config.get("security_groups")
1731 and net.get("port_security") is not False
1732 and not self.config.get("no_port_security_extension")
1733 ):
1734 if not self.security_groups_id:
1735 self._get_ids_from_name()
1736
1737 port_dict["security_groups"] = self.security_groups_id
1738
1739 def _prepare_port_dict_binding(self, net: dict, port_dict: dict) -> None:
1740 """Fill up the network binding depending on network type in the port_dict.
1741
1742 Args:
1743 net (dict): Network details
1744 port_dict (dict): Port details
1745
1746 """
1747 if not net.get("type"):
1748 raise vimconn.VimConnException("Type is missing in the network details.")
1749
1750 if net["type"] == "virtual":
1751 pass
1752
1753 # For VF
1754 elif net["type"] == "VF" or net["type"] == "SR-IOV":
1755 port_dict["binding:vnic_type"] = "direct"
1756
1757 # VIO specific Changes
1758 if self.vim_type == "VIO":
1759 # Need to create port with port_security_enabled = False and no-security-groups
1760 port_dict["port_security_enabled"] = False
1761 port_dict["provider_security_groups"] = []
1762 port_dict["security_groups"] = []
1763
1764 else:
1765 # For PT PCI-PASSTHROUGH
1766 port_dict["binding:vnic_type"] = "direct-physical"
1767
1768 @staticmethod
1769 def _set_fixed_ip(new_port: dict, net: dict) -> None:
1770 """Set the "ip" parameter in net dictionary.
1771
1772 Args:
1773 new_port (dict): New created port
1774 net (dict): Network details
1775
1776 """
1777 fixed_ips = new_port["port"].get("fixed_ips")
1778
1779 if fixed_ips:
1780 net["ip"] = fixed_ips[0].get("ip_address")
1781 else:
1782 net["ip"] = None
1783
1784 @staticmethod
1785 def _prepare_port_dict_mac_ip_addr(net: dict, port_dict: dict) -> None:
1786 """Fill up the mac_address and fixed_ips in port_dict.
1787
1788 Args:
1789 net (dict): Network details
1790 port_dict (dict): Port details
1791
1792 """
1793 if net.get("mac_address"):
1794 port_dict["mac_address"] = net["mac_address"]
1795
1796 if net.get("ip_address"):
1797 port_dict["fixed_ips"] = [{"ip_address": net["ip_address"]}]
1798 # TODO add "subnet_id": <subnet_id>
1799
1800 def _create_new_port(self, port_dict: dict, created_items: dict, net: dict) -> Dict:
1801 """Create new port using neutron.
1802
1803 Args:
1804 port_dict (dict): Port details
1805 created_items (dict): All created items
1806 net (dict): Network details
1807
1808 Returns:
1809 new_port (dict): New created port
1810
1811 """
1812 new_port = self.neutron.create_port({"port": port_dict})
1813 created_items["port:" + str(new_port["port"]["id"])] = True
1814 net["mac_adress"] = new_port["port"]["mac_address"]
1815 net["vim_id"] = new_port["port"]["id"]
1816
1817 return new_port
1818
1819 def _create_port(
1820 self, net: dict, name: str, created_items: dict
1821 ) -> Tuple[dict, dict]:
1822 """Create port using net details.
1823
1824 Args:
1825 net (dict): Network details
1826 name (str): Name to be used as network name if net dict does not include name
1827 created_items (dict): All created items
1828
1829 Returns:
1830 new_port, port New created port, port dictionary
1831
1832 """
1833
1834 port_dict = {
1835 "network_id": net["net_id"],
1836 "name": net.get("name"),
1837 "admin_state_up": True,
1838 }
1839
1840 if not port_dict["name"]:
1841 port_dict["name"] = name
1842
1843 self._prepare_port_dict_security_groups(net, port_dict)
1844
1845 self._prepare_port_dict_binding(net, port_dict)
1846
1847 vimconnector._prepare_port_dict_mac_ip_addr(net, port_dict)
1848
1849 new_port = self._create_new_port(port_dict, created_items, net)
1850
1851 vimconnector._set_fixed_ip(new_port, net)
1852
1853 port = {"port-id": new_port["port"]["id"]}
1854
1855 if float(self.nova.api_version.get_string()) >= 2.32:
1856 port["tag"] = new_port["port"]["name"]
1857
1858 return new_port, port
1859
1860 def _prepare_network_for_vminstance(
1861 self,
1862 name: str,
1863 net_list: list,
1864 created_items: dict,
1865 net_list_vim: list,
1866 external_network: list,
1867 no_secured_ports: list,
1868 ) -> None:
1869 """Create port and fill up net dictionary for new VM instance creation.
1870
1871 Args:
1872 name (str): Name of network
1873 net_list (list): List of networks
1874 created_items (dict): All created items belongs to a VM
1875 net_list_vim (list): List of ports
1876 external_network (list): List of external-networks
1877 no_secured_ports (list): Port security disabled ports
1878 """
1879
1880 self._reload_connection()
1881
1882 for net in net_list:
1883 # Skip non-connected iface
1884 if not net.get("net_id"):
1885 continue
1886
1887 new_port, port = self._create_port(net, name, created_items)
1888
1889 net_list_vim.append(port)
1890
1891 if net.get("floating_ip", False):
1892 net["exit_on_floating_ip_error"] = True
1893 external_network.append(net)
1894
1895 elif net["use"] == "mgmt" and self.config.get("use_floating_ip"):
1896 net["exit_on_floating_ip_error"] = False
1897 external_network.append(net)
1898 net["floating_ip"] = self.config.get("use_floating_ip")
1899
1900 # If port security is disabled when the port has not yet been attached to the VM, then all vm traffic
1901 # is dropped. As a workaround we wait until the VM is active and then disable the port-security
1902 if net.get("port_security") is False and not self.config.get(
1903 "no_port_security_extension"
1904 ):
1905 no_secured_ports.append(
1906 (
1907 new_port["port"]["id"],
1908 net.get("port_security_disable_strategy"),
1909 )
1910 )
1911
1912 def _prepare_persistent_root_volumes(
1913 self,
1914 name: str,
1915 vm_av_zone: list,
1916 disk: dict,
1917 base_disk_index: int,
1918 block_device_mapping: dict,
1919 existing_vim_volumes: list,
1920 created_items: dict,
1921 ) -> Optional[str]:
1922 """Prepare persistent root volumes for new VM instance.
1923
1924 Args:
1925 name (str): Name of VM instance
1926 vm_av_zone (list): List of availability zones
1927 disk (dict): Disk details
1928 base_disk_index (int): Disk index
1929 block_device_mapping (dict): Block device details
1930 existing_vim_volumes (list): Existing disk details
1931 created_items (dict): All created items belongs to VM
1932
1933 Returns:
1934 boot_volume_id (str): ID of boot volume
1935
1936 """
1937 # Disk may include only vim_volume_id or only vim_id."
1938 # Use existing persistent root volume finding with volume_id or vim_id
1939 key_id = "vim_volume_id" if "vim_volume_id" in disk.keys() else "vim_id"
1940
1941 if disk.get(key_id):
1942 block_device_mapping["vd" + chr(base_disk_index)] = disk[key_id]
1943 existing_vim_volumes.append({"id": disk[key_id]})
1944
1945 else:
1946 # Create persistent root volume
1947 volume = self.cinder.volumes.create(
1948 size=disk["size"],
1949 name=name + "vd" + chr(base_disk_index),
1950 imageRef=disk["image_id"],
1951 # Make sure volume is in the same AZ as the VM to be attached to
1952 availability_zone=vm_av_zone,
1953 )
1954 boot_volume_id = volume.id
1955 created_items["volume:" + str(volume.id)] = True
1956 block_device_mapping["vd" + chr(base_disk_index)] = volume.id
1957
1958 return boot_volume_id
1959
1960 def _prepare_non_root_persistent_volumes(
1961 self,
1962 name: str,
1963 disk: dict,
1964 vm_av_zone: list,
1965 block_device_mapping: dict,
1966 base_disk_index: int,
1967 existing_vim_volumes: list,
1968 created_items: dict,
1969 ) -> None:
1970 """Prepare persistent volumes for new VM instance.
1971
1972 Args:
1973 name (str): Name of VM instance
1974 disk (dict): Disk details
1975 vm_av_zone (list): List of availability zones
1976 block_device_mapping (dict): Block device details
1977 base_disk_index (int): Disk index
1978 existing_vim_volumes (list): Existing disk details
1979 created_items (dict): All created items belongs to VM
1980 """
1981 # Non-root persistent volumes
1982 # Disk may include only vim_volume_id or only vim_id."
1983 key_id = "vim_volume_id" if "vim_volume_id" in disk.keys() else "vim_id"
1984
1985 if disk.get(key_id):
1986 # Use existing persistent volume
1987 block_device_mapping["vd" + chr(base_disk_index)] = disk[key_id]
1988 existing_vim_volumes.append({"id": disk[key_id]})
1989
1990 else:
1991 # Create persistent volume
1992 volume = self.cinder.volumes.create(
1993 size=disk["size"],
1994 name=name + "vd" + chr(base_disk_index),
1995 # Make sure volume is in the same AZ as the VM to be attached to
1996 availability_zone=vm_av_zone,
1997 )
1998 created_items["volume:" + str(volume.id)] = True
1999 block_device_mapping["vd" + chr(base_disk_index)] = volume.id
2000
2001 def _wait_for_created_volumes_availability(
2002 self, elapsed_time: int, created_items: dict
2003 ) -> Optional[int]:
2004 """Wait till created volumes become available.
2005
2006 Args:
2007 elapsed_time (int): Passed time while waiting
2008 created_items (dict): All created items belongs to VM
2009
2010 Returns:
2011 elapsed_time (int): Time spent while waiting
2012
2013 """
2014
2015 while elapsed_time < volume_timeout:
2016 for created_item in created_items:
2017 v, _, volume_id = created_item.partition(":")
2018 if v == "volume":
2019 if self.cinder.volumes.get(volume_id).status != "available":
2020 break
2021 else:
2022 # All ready: break from while
2023 break
2024
2025 time.sleep(5)
2026 elapsed_time += 5
2027
2028 return elapsed_time
2029
2030 def _wait_for_existing_volumes_availability(
2031 self, elapsed_time: int, existing_vim_volumes: list
2032 ) -> Optional[int]:
2033 """Wait till existing volumes become available.
2034
2035 Args:
2036 elapsed_time (int): Passed time while waiting
2037 existing_vim_volumes (list): Existing volume details
2038
2039 Returns:
2040 elapsed_time (int): Time spent while waiting
2041
2042 """
2043
2044 while elapsed_time < volume_timeout:
2045 for volume in existing_vim_volumes:
2046 if self.cinder.volumes.get(volume["id"]).status != "available":
2047 break
2048 else: # all ready: break from while
2049 break
2050
2051 time.sleep(5)
2052 elapsed_time += 5
2053
2054 return elapsed_time
2055
2056 def _prepare_disk_for_vminstance(
2057 self,
2058 name: str,
2059 existing_vim_volumes: list,
2060 created_items: dict,
2061 vm_av_zone: list,
2062 disk_list: list = None,
2063 ) -> None:
2064 """Prepare all volumes for new VM instance.
2065
2066 Args:
2067 name (str): Name of Instance
2068 existing_vim_volumes (list): List of existing volumes
2069 created_items (dict): All created items belongs to VM
2070 vm_av_zone (list): VM availability zone
2071 disk_list (list): List of disks
2072
2073 """
2074 # Create additional volumes in case these are present in disk_list
2075 base_disk_index = ord("b")
2076 boot_volume_id = None
2077 elapsed_time = 0
2078
2079 block_device_mapping = {}
2080 for disk in disk_list:
2081 if "image_id" in disk:
2082 # Root persistent volume
2083 base_disk_index = ord("a")
2084 boot_volume_id = self._prepare_persistent_root_volumes(
2085 name=name,
2086 vm_av_zone=vm_av_zone,
2087 disk=disk,
2088 base_disk_index=base_disk_index,
2089 block_device_mapping=block_device_mapping,
2090 existing_vim_volumes=existing_vim_volumes,
2091 created_items=created_items,
2092 )
2093 else:
2094 # Non-root persistent volume
2095 self._prepare_non_root_persistent_volumes(
2096 name=name,
2097 disk=disk,
2098 vm_av_zone=vm_av_zone,
2099 block_device_mapping=block_device_mapping,
2100 base_disk_index=base_disk_index,
2101 existing_vim_volumes=existing_vim_volumes,
2102 created_items=created_items,
2103 )
2104 base_disk_index += 1
2105
2106 # Wait until created volumes are with status available
2107 elapsed_time = self._wait_for_created_volumes_availability(
2108 elapsed_time, created_items
2109 )
2110 # Wait until existing volumes in vim are with status available
2111 elapsed_time = self._wait_for_existing_volumes_availability(
2112 elapsed_time, existing_vim_volumes
2113 )
2114 # If we exceeded the timeout rollback
2115 if elapsed_time >= volume_timeout:
2116 raise vimconn.VimConnException(
2117 "Timeout creating volumes for instance " + name,
2118 http_code=vimconn.HTTP_Request_Timeout,
2119 )
2120 if boot_volume_id:
2121 self.cinder.volumes.set_bootable(boot_volume_id, True)
2122
2123 def _find_the_external_network_for_floating_ip(self):
2124 """Get the external network ip in order to create floating IP.
2125
2126 Returns:
2127 pool_id (str): External network pool ID
2128
2129 """
2130
2131 # Find the external network
2132 external_nets = list()
2133
2134 for net in self.neutron.list_networks()["networks"]:
2135 if net["router:external"]:
2136 external_nets.append(net)
2137
2138 if len(external_nets) == 0:
2139 raise vimconn.VimConnException(
2140 "Cannot create floating_ip automatically since "
2141 "no external network is present",
2142 http_code=vimconn.HTTP_Conflict,
2143 )
2144
2145 if len(external_nets) > 1:
2146 raise vimconn.VimConnException(
2147 "Cannot create floating_ip automatically since "
2148 "multiple external networks are present",
2149 http_code=vimconn.HTTP_Conflict,
2150 )
2151
2152 # Pool ID
2153 return external_nets[0].get("id")
2154
2155 def _neutron_create_float_ip(self, param: dict, created_items: dict) -> None:
2156 """Trigger neutron to create a new floating IP using external network ID.
2157
2158 Args:
2159 param (dict): Input parameters to create a floating IP
2160 created_items (dict): All created items belongs to new VM instance
2161
2162 Raises:
2163
2164 VimConnException
2165 """
2166 try:
2167 self.logger.debug("Creating floating IP")
2168 new_floating_ip = self.neutron.create_floatingip(param)
2169 free_floating_ip = new_floating_ip["floatingip"]["id"]
2170 created_items["floating_ip:" + str(free_floating_ip)] = True
2171
2172 except Exception as e:
2173 raise vimconn.VimConnException(
2174 type(e).__name__ + ": Cannot create new floating_ip " + str(e),
2175 http_code=vimconn.HTTP_Conflict,
2176 )
2177
2178 def _create_floating_ip(
2179 self, floating_network: dict, server: object, created_items: dict
2180 ) -> None:
2181 """Get the available Pool ID and create a new floating IP.
2182
2183 Args:
2184 floating_network (dict): Dict including external network ID
2185 server (object): Server object
2186 created_items (dict): All created items belongs to new VM instance
2187
2188 """
2189
2190 # Pool_id is available
2191 if (
2192 isinstance(floating_network["floating_ip"], str)
2193 and floating_network["floating_ip"].lower() != "true"
2194 ):
2195 pool_id = floating_network["floating_ip"]
2196
2197 # Find the Pool_id
2198 else:
2199 pool_id = self._find_the_external_network_for_floating_ip()
2200
2201 param = {
2202 "floatingip": {
2203 "floating_network_id": pool_id,
2204 "tenant_id": server.tenant_id,
2205 }
2206 }
2207
2208 self._neutron_create_float_ip(param, created_items)
2209
2210 def _find_floating_ip(
2211 self,
2212 server: object,
2213 floating_ips: list,
2214 floating_network: dict,
2215 ) -> Optional[str]:
2216 """Find the available free floating IPs if there are.
2217
2218 Args:
2219 server (object): Server object
2220 floating_ips (list): List of floating IPs
2221 floating_network (dict): Details of floating network such as ID
2222
2223 Returns:
2224 free_floating_ip (str): Free floating ip address
2225
2226 """
2227 for fip in floating_ips:
2228 if fip.get("port_id") or fip.get("tenant_id") != server.tenant_id:
2229 continue
2230
2231 if isinstance(floating_network["floating_ip"], str):
2232 if fip.get("floating_network_id") != floating_network["floating_ip"]:
2233 continue
2234
2235 return fip["id"]
2236
2237 def _assign_floating_ip(
2238 self, free_floating_ip: str, floating_network: dict
2239 ) -> Dict:
2240 """Assign the free floating ip address to port.
2241
2242 Args:
2243 free_floating_ip (str): Floating IP to be assigned
2244 floating_network (dict): ID of floating network
2245
2246 Returns:
2247 fip (dict) (dict): Floating ip details
2248
2249 """
2250 # The vim_id key contains the neutron.port_id
2251 self.neutron.update_floatingip(
2252 free_floating_ip,
2253 {"floatingip": {"port_id": floating_network["vim_id"]}},
2254 )
2255 # For race condition ensure not re-assigned to other VM after 5 seconds
2256 time.sleep(5)
2257
2258 return self.neutron.show_floatingip(free_floating_ip)
2259
2260 def _get_free_floating_ip(
2261 self, server: object, floating_network: dict, created_items: dict
2262 ) -> Optional[str]:
2263 """Get the free floating IP address.
2264
2265 Args:
2266 server (object): Server Object
2267 floating_network (dict): Floating network details
2268 created_items (dict): All created items belongs to new VM instance
2269
2270 Returns:
2271 free_floating_ip (str): Free floating ip addr
2272
2273 """
2274
2275 floating_ips = self.neutron.list_floatingips().get("floatingips", ())
2276
2277 # Randomize
2278 random.shuffle(floating_ips)
2279
2280 return self._find_floating_ip(
2281 server, floating_ips, floating_network, created_items
2282 )
2283
    def _prepare_external_network_for_vminstance(
        self,
        external_network: list,
        server: object,
        created_items: dict,
        vm_start_time: float,
    ) -> None:
        """Assign floating IP address for VM instance.

        Args:
            external_network (list): ID of External network
            server (object): Server Object
            created_items (dict): All created items belongs to new VM instance
            vm_start_time (float): Time as a floating point number expressed in seconds since the epoch, in UTC

        Raises:
            VimConnException

        """
        for floating_network in external_network:
            try:
                assigned = False
                floating_ip_retries = 3
                # In case of RO in HA there can be conflicts, two RO trying to assign same floating IP, so retry
                # several times
                while not assigned:
                    free_floating_ip = self._get_free_floating_ip(
                        server, floating_network, created_items
                    )

                    if not free_floating_ip:
                        # No free IP: create one. NOTE(review): free_floating_ip
                        # stays None this iteration, so show_floatingip(None)
                        # below raises and the except path drives a retry that
                        # picks up the newly created IP — confirm this indirect
                        # flow is intended
                        self._create_floating_ip(
                            floating_network, server, created_items
                        )

                    try:
                        # For race condition ensure not already assigned
                        fip = self.neutron.show_floatingip(free_floating_ip)

                        if fip["floatingip"].get("port_id"):
                            # Taken by someone else meanwhile: pick another IP
                            continue

                        # Assign floating ip
                        fip = self._assign_floating_ip(
                            free_floating_ip, floating_network
                        )

                        # Re-check after the assignment settled (another RO may
                        # have stolen the IP during the 5 s grace period)
                        if fip["floatingip"]["port_id"] != floating_network["vim_id"]:
                            self.logger.warning(
                                "floating_ip {} re-assigned to other port".format(
                                    free_floating_ip
                                )
                            )
                            continue

                        self.logger.debug(
                            "Assigned floating_ip {} to VM {}".format(
                                free_floating_ip, server.id
                            )
                        )

                        assigned = True

                    except Exception as e:
                        # Openstack need some time after VM creation to assign an IP. So retry if fails
                        vm_status = self.nova.servers.get(server.id).status

                        if vm_status not in ("ACTIVE", "ERROR"):
                            # VM still building: keep retrying until server_timeout
                            if time.time() - vm_start_time < server_timeout:
                                time.sleep(5)
                                continue
                        elif floating_ip_retries > 0:
                            # VM is up: burn one of the bounded retries
                            floating_ip_retries -= 1
                            continue

                        raise vimconn.VimConnException(
                            "Cannot create floating_ip: {} {}".format(
                                type(e).__name__, e
                            ),
                            http_code=vimconn.HTTP_Conflict,
                        )

            except Exception as e:
                # exit_on_floating_ip_error decides whether a floating-ip
                # failure is fatal for the whole VM creation or just logged
                if not floating_network["exit_on_floating_ip_error"]:
                    self.logger.error("Cannot create floating_ip. %s", str(e))
                    continue

                raise
2372
2373 def _update_port_security_for_vminstance(
2374 self,
2375 no_secured_ports: list,
2376 server: object,
2377 ) -> None:
2378 """Updates the port security according to no_secured_ports list.
2379
2380 Args:
2381 no_secured_ports (list): List of ports that security will be disabled
2382 server (object): Server Object
2383
2384 Raises:
2385 VimConnException
2386
2387 """
2388 # Wait until the VM is active and then disable the port-security
2389 if no_secured_ports:
2390 self.__wait_for_vm(server.id, "ACTIVE")
2391
2392 for port in no_secured_ports:
2393 port_update = {
2394 "port": {"port_security_enabled": False, "security_groups": None}
2395 }
2396
2397 if port[1] == "allow-address-pairs":
2398 port_update = {
2399 "port": {"allowed_address_pairs": [{"ip_address": "0.0.0.0/0"}]}
2400 }
2401
2402 try:
2403 self.neutron.update_port(port[0], port_update)
2404
2405 except Exception:
2406 raise vimconn.VimConnException(
2407 "It was not possible to disable port security for port {}".format(
2408 port[0]
2409 )
2410 )
2411
    def new_vminstance(
        self,
        name: str,
        description: str,
        start: bool,
        image_id: str,
        flavor_id: str,
        affinity_group_list: list,
        net_list: list,
        cloud_config=None,
        disk_list=None,
        availability_zone_index=None,
        availability_zone_list=None,
    ) -> tuple:
        """Adds a VM instance to VIM.

        Args:
            name (str): name of VM
            description (str): description
            start (bool): indicates if VM must start or boot in pause mode. Ignored
            image_id (str) image uuid
            flavor_id (str) flavor uuid
            affinity_group_list (list): list of affinity groups, each one is a dictionary.Ignore if empty.
            net_list (list): list of interfaces, each one is a dictionary with:
                name: name of network
                net_id: network uuid to connect
                vpci: virtual vcpi to assign, ignored because openstack lack #TODO
                model: interface model, ignored #TODO
                mac_address: used for SR-IOV ifaces #TODO for other types
                use: 'data', 'bridge', 'mgmt'
                type: 'virtual', 'PCI-PASSTHROUGH'('PF'), 'SR-IOV'('VF'), 'VFnotShared'
                vim_id: filled/added by this function
                floating_ip: True/False (or it can be None)
                port_security: True/False
            cloud_config (dict): (optional) dictionary with:
                key-pairs: (optional) list of strings with the public key to be inserted to the default user
                users: (optional) list of users to be inserted, each item is a dict with:
                    name: (mandatory) user name,
                    key-pairs: (optional) list of strings with the public key to be inserted to the user
                user-data: (optional) string is a text script to be passed directly to cloud-init
                config-files: (optional). List of files to be transferred. Each item is a dict with:
                    dest: (mandatory) string with the destination absolute path
                    encoding: (optional, by default text). Can be one of:
                        'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
                    content : (mandatory) string with the content of the file
                    permissions: (optional) string with file permissions, typically octal notation '0644'
                    owner: (optional) file owner, string with the format 'owner:group'
                boot-data-drive: boolean to indicate if user-data must be passed using a boot drive (hard disk)
            disk_list: (optional) list with additional disks to the VM. Each item is a dict with:
                image_id: (optional). VIM id of an existing image. If not provided an empty disk must be mounted
                size: (mandatory) string with the size of the disk in GB
                vim_id: (optional) should use this existing volume id
            availability_zone_index: Index of availability_zone_list to use for this this VM. None if not AV required
            availability_zone_list: list of availability zones given by user in the VNFD descriptor. Ignore if
                availability_zone_index is None
            #TODO ip, security groups

        Returns:
            A tuple with the instance identifier and created_items or raises an exception on error
            created_items can be None or a dictionary where this method can include key-values that will be passed to
            the method delete_vminstance and action_vminstance. Can be used to store created ports, volumes, etc.
            Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
            as not present.
        """
        self.logger.debug(
            "new_vminstance input: image='%s' flavor='%s' nics='%s'",
            image_id,
            flavor_id,
            str(net_list),
        )

        try:
            server = None
            created_items = {}
            net_list_vim = []
            # list of external networks to be connected to instance, later on used to create floating_ip
            external_network = []
            # List of ports with port-security disabled
            no_secured_ports = []
            # NOTE(review): stays None below; _prepare_disk_for_vminstance builds
            # its own local mapping that is not propagated back here — confirm
            # whether the created volumes are expected in nova's block_device_mapping
            block_device_mapping = None
            existing_vim_volumes = []
            server_group_id = None
            # (sic) scheduler hints used for server-group (affinity) placement
            scheduller_hints = {}

            # Check the Openstack Connection
            self._reload_connection()

            # Prepare network list
            self._prepare_network_for_vminstance(
                name=name,
                net_list=net_list,
                created_items=created_items,
                net_list_vim=net_list_vim,
                external_network=external_network,
                no_secured_ports=no_secured_ports,
            )

            # Cloud config
            config_drive, userdata = self._create_user_data(cloud_config)

            # Get availability Zone
            vm_av_zone = self._get_vm_availability_zone(
                availability_zone_index, availability_zone_list
            )

            if disk_list:
                # Prepare disks
                self._prepare_disk_for_vminstance(
                    name=name,
                    existing_vim_volumes=existing_vim_volumes,
                    created_items=created_items,
                    vm_av_zone=vm_av_zone,
                    disk_list=disk_list,
                )

            if affinity_group_list:
                # Only first id on the list will be used. Openstack restriction
                server_group_id = affinity_group_list[0]["affinity_group_id"]
                scheduller_hints["group"] = server_group_id

            self.logger.debug(
                "nova.servers.create({}, {}, {}, nics={}, security_groups={}, "
                "availability_zone={}, key_name={}, userdata={}, config_drive={}, "
                "block_device_mapping={}, server_group={})".format(
                    name,
                    image_id,
                    flavor_id,
                    net_list_vim,
                    self.config.get("security_groups"),
                    vm_av_zone,
                    self.config.get("keypair"),
                    userdata,
                    config_drive,
                    block_device_mapping,
                    server_group_id,
                )
            )

            # Create VM
            server = self.nova.servers.create(
                name=name,
                image=image_id,
                flavor=flavor_id,
                nics=net_list_vim,
                security_groups=self.config.get("security_groups"),
                # TODO remove security_groups in future versions. Already at neutron port
                availability_zone=vm_av_zone,
                key_name=self.config.get("keypair"),
                userdata=userdata,
                config_drive=config_drive,
                block_device_mapping=block_device_mapping,
                scheduler_hints=scheduller_hints,
            )

            # Record the start time: floating-IP assignment retries are bounded
            # by server_timeout relative to this moment
            vm_start_time = time.time()

            self._update_port_security_for_vminstance(no_secured_ports, server)

            self._prepare_external_network_for_vminstance(
                external_network=external_network,
                server=server,
                created_items=created_items,
                vm_start_time=vm_start_time,
            )

            return server.id, created_items

        except Exception as e:
            server_id = None
            if server:
                server_id = server.id

            # Best-effort rollback of everything created so far; the original
            # exception is re-raised via _format_exception below
            try:
                self.delete_vminstance(server_id, created_items)

            except Exception as e2:
                self.logger.error("new_vminstance rollback fail {}".format(e2))

            self._format_exception(e)
2591
2592 def get_vminstance(self, vm_id):
2593 """Returns the VM instance information from VIM"""
2594 # self.logger.debug("Getting VM from VIM")
2595 try:
2596 self._reload_connection()
2597 server = self.nova.servers.find(id=vm_id)
2598 # TODO parse input and translate to VIM format (openmano_schemas.new_vminstance_response_schema)
2599
2600 return server.to_dict()
2601 except (
2602 ksExceptions.ClientException,
2603 nvExceptions.ClientException,
2604 nvExceptions.NotFound,
2605 ConnectionError,
2606 ) as e:
2607 self._format_exception(e)
2608
2609 def get_vminstance_console(self, vm_id, console_type="vnc"):
2610 """
2611 Get a console for the virtual machine
2612 Params:
2613 vm_id: uuid of the VM
2614 console_type, can be:
2615 "novnc" (by default), "xvpvnc" for VNC types,
2616 "rdp-html5" for RDP types, "spice-html5" for SPICE types
2617 Returns dict with the console parameters:
2618 protocol: ssh, ftp, http, https, ...
2619 server: usually ip address
2620 port: the http, ssh, ... port
2621 suffix: extra text, e.g. the http path and query string
2622 """
2623 self.logger.debug("Getting VM CONSOLE from VIM")
2624
2625 try:
2626 self._reload_connection()
2627 server = self.nova.servers.find(id=vm_id)
2628
2629 if console_type is None or console_type == "novnc":
2630 console_dict = server.get_vnc_console("novnc")
2631 elif console_type == "xvpvnc":
2632 console_dict = server.get_vnc_console(console_type)
2633 elif console_type == "rdp-html5":
2634 console_dict = server.get_rdp_console(console_type)
2635 elif console_type == "spice-html5":
2636 console_dict = server.get_spice_console(console_type)
2637 else:
2638 raise vimconn.VimConnException(
2639 "console type '{}' not allowed".format(console_type),
2640 http_code=vimconn.HTTP_Bad_Request,
2641 )
2642
2643 console_dict1 = console_dict.get("console")
2644
2645 if console_dict1:
2646 console_url = console_dict1.get("url")
2647
2648 if console_url:
2649 # parse console_url
2650 protocol_index = console_url.find("//")
2651 suffix_index = (
2652 console_url[protocol_index + 2 :].find("/") + protocol_index + 2
2653 )
2654 port_index = (
2655 console_url[protocol_index + 2 : suffix_index].find(":")
2656 + protocol_index
2657 + 2
2658 )
2659
2660 if protocol_index < 0 or port_index < 0 or suffix_index < 0:
2661 return (
2662 -vimconn.HTTP_Internal_Server_Error,
2663 "Unexpected response from VIM",
2664 )
2665
2666 console_dict = {
2667 "protocol": console_url[0:protocol_index],
2668 "server": console_url[protocol_index + 2 : port_index],
2669 "port": console_url[port_index:suffix_index],
2670 "suffix": console_url[suffix_index + 1 :],
2671 }
2672 protocol_index += 2
2673
2674 return console_dict
2675 raise vimconn.VimConnUnexpectedResponse("Unexpected response from VIM")
2676 except (
2677 nvExceptions.NotFound,
2678 ksExceptions.ClientException,
2679 nvExceptions.ClientException,
2680 nvExceptions.BadRequest,
2681 ConnectionError,
2682 ) as e:
2683 self._format_exception(e)
2684
2685 def delete_vminstance(self, vm_id, created_items=None, volumes_to_hold=None):
2686 """Removes a VM instance from VIM. Returns the old identifier"""
2687 # print "osconnector: Getting VM from VIM"
2688 if created_items is None:
2689 created_items = {}
2690
2691 try:
2692 self._reload_connection()
2693 # delete VM ports attached to this networks before the virtual machine
2694 for k, v in created_items.items():
2695 if not v: # skip already deleted
2696 continue
2697
2698 try:
2699 k_item, _, k_id = k.partition(":")
2700 if k_item == "port":
2701 port_dict = self.neutron.list_ports()
2702 existing_ports = [
2703 port["id"] for port in port_dict["ports"] if port_dict
2704 ]
2705 if k_id in existing_ports:
2706 self.neutron.delete_port(k_id)
2707 except Exception as e:
2708 self.logger.error(
2709 "Error deleting port: {}: {}".format(type(e).__name__, e)
2710 )
2711
2712 # #commented because detaching the volumes makes the servers.delete not work properly ?!?
2713 # #dettach volumes attached
2714 # server = self.nova.servers.get(vm_id)
2715 # volumes_attached_dict = server._info["os-extended-volumes:volumes_attached"] #volume["id"]
2716 # #for volume in volumes_attached_dict:
2717 # # self.cinder.volumes.detach(volume["id"])
2718
2719 if vm_id:
2720 self.nova.servers.delete(vm_id)
2721
2722 # delete volumes. Although having detached, they should have in active status before deleting
2723 # we ensure in this loop
2724 keep_waiting = True
2725 elapsed_time = 0
2726
2727 while keep_waiting and elapsed_time < volume_timeout:
2728 keep_waiting = False
2729
2730 for k, v in created_items.items():
2731 if not v: # skip already deleted
2732 continue
2733
2734 try:
2735 k_item, _, k_id = k.partition(":")
2736 if k_item == "volume":
2737 if self.cinder.volumes.get(k_id).status != "available":
2738 keep_waiting = True
2739 else:
2740 if k_id not in volumes_to_hold:
2741 self.cinder.volumes.delete(k_id)
2742 created_items[k] = None
2743 elif k_item == "floating_ip": # floating ip
2744 self.neutron.delete_floatingip(k_id)
2745 created_items[k] = None
2746
2747 except Exception as e:
2748 self.logger.error("Error deleting {}: {}".format(k, e))
2749
2750 if keep_waiting:
2751 time.sleep(1)
2752 elapsed_time += 1
2753
2754 return None
2755 except (
2756 nvExceptions.NotFound,
2757 ksExceptions.ClientException,
2758 nvExceptions.ClientException,
2759 ConnectionError,
2760 ) as e:
2761 self._format_exception(e)
2762
    def refresh_vms_status(self, vm_list):
        """Get the status of the virtual machines and their interfaces/ports
        Params: the list of VM identifiers
        Returns a dictionary with:
            vm_id:          # VIM id of this Virtual Machine
                status:     # Mandatory. Text with one of:
                            #  DELETED (not found at vim)
                            #  VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
                            #  OTHER (Vim reported other status not understood)
                            #  ERROR (VIM indicates an ERROR status)
                            #  ACTIVE, PAUSED, SUSPENDED, INACTIVE (not running),
                            #  CREATING (on building process), ERROR
                            #  ACTIVE:NoMgmtIP (Active but any of its interface has an IP address
                            #
                error_msg:  # Text with VIM error message, if any. Or the VIM connection ERROR
                vim_info:   # Text with plain information obtained from vim (yaml.safe_dump)
                interfaces:
                 -  vim_info:         # Text with plain information obtained from vim (yaml.safe_dump)
                    mac_address:      # Text format XX:XX:XX:XX:XX:XX
                    vim_net_id:       # network id where this interface is connected
                    vim_interface_id: # interface/port VIM id
                    ip_address:       # null, or text with IPv4, IPv6 address
                    compute_node:     # identification of compute node where PF,VF interface is allocated
                    pci:              # PCI address of the NIC that hosts the PF,VF
                    vlan:             # physical VLAN used for VF
        """
        vm_dict = {}
        self.logger.debug(
            "refresh_vms status: Getting tenant VM instance information from VIM"
        )

        # Each VM is processed independently; a failure on one VM is recorded
        # in its own entry and does not abort the others
        for vm_id in vm_list:
            vm = {}

            try:
                vm_vim = self.get_vminstance(vm_id)

                if vm_vim["status"] in vmStatus2manoFormat:
                    vm["status"] = vmStatus2manoFormat[vm_vim["status"]]
                else:
                    vm["status"] = "OTHER"
                    vm["error_msg"] = "VIM status reported " + vm_vim["status"]

                # Strip user-data before serializing: it may be large/sensitive
                vm_vim.pop("OS-EXT-SRV-ATTR:user_data", None)
                vm_vim.pop("user_data", None)
                vm["vim_info"] = self.serialize(vm_vim)

                vm["interfaces"] = []
                if vm_vim.get("fault"):
                    vm["error_msg"] = str(vm_vim["fault"])

                # get interfaces
                try:
                    self._reload_connection()
                    port_dict = self.neutron.list_ports(device_id=vm_id)

                    for port in port_dict["ports"]:
                        interface = {}
                        interface["vim_info"] = self.serialize(port)
                        interface["mac_address"] = port.get("mac_address")
                        interface["vim_net_id"] = port["network_id"]
                        interface["vim_interface_id"] = port["id"]
                        # check if OS-EXT-SRV-ATTR:host is there,
                        # in case of non-admin credentials, it will be missing

                        if vm_vim.get("OS-EXT-SRV-ATTR:host"):
                            interface["compute_node"] = vm_vim["OS-EXT-SRV-ATTR:host"]

                        interface["pci"] = None

                        # check if binding:profile is there,
                        # in case of non-admin credentials, it will be missing
                        if port.get("binding:profile"):
                            if port["binding:profile"].get("pci_slot"):
                                # TODO: At the moment sr-iov pci addresses are converted to PF pci addresses by setting
                                # the slot to 0x00
                                # TODO: This is just a workaround valid for niantinc. Find a better way to do so
                                # CHANGE DDDD:BB:SS.F to DDDD:BB:00.(F%2) assuming there are 2 ports per nic
                                pci = port["binding:profile"]["pci_slot"]
                                # interface["pci"] = pci[:-4] + "00." + str(int(pci[-1]) % 2)
                                interface["pci"] = pci

                        interface["vlan"] = None

                        if port.get("binding:vif_details"):
                            interface["vlan"] = port["binding:vif_details"].get("vlan")

                        # Get vlan from network in case not present in port for those old openstacks and cases where
                        # it is needed vlan at PT
                        if not interface["vlan"]:
                            # if network is of type vlan and port is of type direct (sr-iov) then set vlan id
                            network = self.neutron.show_network(port["network_id"])

                            if (
                                network["network"].get("provider:network_type")
                                == "vlan"
                            ):
                                # and port.get("binding:vnic_type") in ("direct", "direct-physical"):
                                interface["vlan"] = network["network"].get(
                                    "provider:segmentation_id"
                                )

                        ips = []
                        # look for floating ip address
                        # best-effort: missing floating IPs are silently ignored
                        try:
                            floating_ip_dict = self.neutron.list_floatingips(
                                port_id=port["id"]
                            )

                            if floating_ip_dict.get("floatingips"):
                                ips.append(
                                    floating_ip_dict["floatingips"][0].get(
                                        "floating_ip_address"
                                    )
                                )
                        except Exception:
                            pass

                        for subnet in port["fixed_ips"]:
                            ips.append(subnet["ip_address"])

                        interface["ip_address"] = ";".join(ips)
                        vm["interfaces"].append(interface)
                except Exception as e:
                    self.logger.error(
                        "Error getting vm interface information {}: {}".format(
                            type(e).__name__, e
                        ),
                        exc_info=True,
                    )
            except vimconn.VimConnNotFoundException as e:
                self.logger.error("Exception getting vm status: %s", str(e))
                vm["status"] = "DELETED"
                vm["error_msg"] = str(e)
            except vimconn.VimConnException as e:
                self.logger.error("Exception getting vm status: %s", str(e))
                vm["status"] = "VIM_ERROR"
                vm["error_msg"] = str(e)

            vm_dict[vm_id] = vm

        return vm_dict
2905
2906 def action_vminstance(self, vm_id, action_dict, created_items={}):
2907 """Send and action over a VM instance from VIM
2908 Returns None or the console dict if the action was successfully sent to the VIM
2909 """
2910 self.logger.debug("Action over VM '%s': %s", vm_id, str(action_dict))
2911
2912 try:
2913 self._reload_connection()
2914 server = self.nova.servers.find(id=vm_id)
2915
2916 if "start" in action_dict:
2917 if action_dict["start"] == "rebuild":
2918 server.rebuild()
2919 else:
2920 if server.status == "PAUSED":
2921 server.unpause()
2922 elif server.status == "SUSPENDED":
2923 server.resume()
2924 elif server.status == "SHUTOFF":
2925 server.start()
2926 else:
2927 self.logger.debug(
2928 "ERROR : Instance is not in SHUTOFF/PAUSE/SUSPEND state"
2929 )
2930 raise vimconn.VimConnException(
2931 "Cannot 'start' instance while it is in active state",
2932 http_code=vimconn.HTTP_Bad_Request,
2933 )
2934
2935 elif "pause" in action_dict:
2936 server.pause()
2937 elif "resume" in action_dict:
2938 server.resume()
2939 elif "shutoff" in action_dict or "shutdown" in action_dict:
2940 self.logger.debug("server status %s", server.status)
2941 if server.status == "ACTIVE":
2942 server.stop()
2943 else:
2944 self.logger.debug("ERROR: VM is not in Active state")
2945 raise vimconn.VimConnException(
2946 "VM is not in active state, stop operation is not allowed",
2947 http_code=vimconn.HTTP_Bad_Request,
2948 )
2949 elif "forceOff" in action_dict:
2950 server.stop() # TODO
2951 elif "terminate" in action_dict:
2952 server.delete()
2953 elif "createImage" in action_dict:
2954 server.create_image()
2955 # "path":path_schema,
2956 # "description":description_schema,
2957 # "name":name_schema,
2958 # "metadata":metadata_schema,
2959 # "imageRef": id_schema,
2960 # "disk": {"oneOf":[{"type": "null"}, {"type":"string"}] },
2961 elif "rebuild" in action_dict:
2962 server.rebuild(server.image["id"])
2963 elif "reboot" in action_dict:
2964 server.reboot() # reboot_type="SOFT"
2965 elif "console" in action_dict:
2966 console_type = action_dict["console"]
2967
2968 if console_type is None or console_type == "novnc":
2969 console_dict = server.get_vnc_console("novnc")
2970 elif console_type == "xvpvnc":
2971 console_dict = server.get_vnc_console(console_type)
2972 elif console_type == "rdp-html5":
2973 console_dict = server.get_rdp_console(console_type)
2974 elif console_type == "spice-html5":
2975 console_dict = server.get_spice_console(console_type)
2976 else:
2977 raise vimconn.VimConnException(
2978 "console type '{}' not allowed".format(console_type),
2979 http_code=vimconn.HTTP_Bad_Request,
2980 )
2981
2982 try:
2983 console_url = console_dict["console"]["url"]
2984 # parse console_url
2985 protocol_index = console_url.find("//")
2986 suffix_index = (
2987 console_url[protocol_index + 2 :].find("/") + protocol_index + 2
2988 )
2989 port_index = (
2990 console_url[protocol_index + 2 : suffix_index].find(":")
2991 + protocol_index
2992 + 2
2993 )
2994
2995 if protocol_index < 0 or port_index < 0 or suffix_index < 0:
2996 raise vimconn.VimConnException(
2997 "Unexpected response from VIM " + str(console_dict)
2998 )
2999
3000 console_dict2 = {
3001 "protocol": console_url[0:protocol_index],
3002 "server": console_url[protocol_index + 2 : port_index],
3003 "port": int(console_url[port_index + 1 : suffix_index]),
3004 "suffix": console_url[suffix_index + 1 :],
3005 }
3006
3007 return console_dict2
3008 except Exception:
3009 raise vimconn.VimConnException(
3010 "Unexpected response from VIM " + str(console_dict)
3011 )
3012
3013 return None
3014 except (
3015 ksExceptions.ClientException,
3016 nvExceptions.ClientException,
3017 nvExceptions.NotFound,
3018 ConnectionError,
3019 ) as e:
3020 self._format_exception(e)
3021 # TODO insert exception vimconn.HTTP_Unauthorized
3022
3023 # ###### VIO Specific Changes #########
3024 def _generate_vlanID(self):
3025 """
3026 Method to get unused vlanID
3027 Args:
3028 None
3029 Returns:
3030 vlanID
3031 """
3032 # Get used VLAN IDs
3033 usedVlanIDs = []
3034 networks = self.get_network_list()
3035
3036 for net in networks:
3037 if net.get("provider:segmentation_id"):
3038 usedVlanIDs.append(net.get("provider:segmentation_id"))
3039
3040 used_vlanIDs = set(usedVlanIDs)
3041
3042 # find unused VLAN ID
3043 for vlanID_range in self.config.get("dataplane_net_vlan_range"):
3044 try:
3045 start_vlanid, end_vlanid = map(
3046 int, vlanID_range.replace(" ", "").split("-")
3047 )
3048
3049 for vlanID in range(start_vlanid, end_vlanid + 1):
3050 if vlanID not in used_vlanIDs:
3051 return vlanID
3052 except Exception as exp:
3053 raise vimconn.VimConnException(
3054 "Exception {} occurred while generating VLAN ID.".format(exp)
3055 )
3056 else:
3057 raise vimconn.VimConnConflictException(
3058 "Unable to create the SRIOV VLAN network. All given Vlan IDs {} are in use.".format(
3059 self.config.get("dataplane_net_vlan_range")
3060 )
3061 )
3062
3063 def _generate_multisegment_vlanID(self):
3064 """
3065 Method to get unused vlanID
3066 Args:
3067 None
3068 Returns:
3069 vlanID
3070 """
3071 # Get used VLAN IDs
3072 usedVlanIDs = []
3073 networks = self.get_network_list()
3074 for net in networks:
3075 if net.get("provider:network_type") == "vlan" and net.get(
3076 "provider:segmentation_id"
3077 ):
3078 usedVlanIDs.append(net.get("provider:segmentation_id"))
3079 elif net.get("segments"):
3080 for segment in net.get("segments"):
3081 if segment.get("provider:network_type") == "vlan" and segment.get(
3082 "provider:segmentation_id"
3083 ):
3084 usedVlanIDs.append(segment.get("provider:segmentation_id"))
3085
3086 used_vlanIDs = set(usedVlanIDs)
3087
3088 # find unused VLAN ID
3089 for vlanID_range in self.config.get("multisegment_vlan_range"):
3090 try:
3091 start_vlanid, end_vlanid = map(
3092 int, vlanID_range.replace(" ", "").split("-")
3093 )
3094
3095 for vlanID in range(start_vlanid, end_vlanid + 1):
3096 if vlanID not in used_vlanIDs:
3097 return vlanID
3098 except Exception as exp:
3099 raise vimconn.VimConnException(
3100 "Exception {} occurred while generating VLAN ID.".format(exp)
3101 )
3102 else:
3103 raise vimconn.VimConnConflictException(
3104 "Unable to create the VLAN segment. All VLAN IDs {} are in use.".format(
3105 self.config.get("multisegment_vlan_range")
3106 )
3107 )
3108
3109 def _validate_vlan_ranges(self, input_vlan_range, text_vlan_range):
3110 """
3111 Method to validate user given vlanID ranges
3112 Args: None
3113 Returns: None
3114 """
3115 for vlanID_range in input_vlan_range:
3116 vlan_range = vlanID_range.replace(" ", "")
3117 # validate format
3118 vlanID_pattern = r"(\d)*-(\d)*$"
3119 match_obj = re.match(vlanID_pattern, vlan_range)
3120 if not match_obj:
3121 raise vimconn.VimConnConflictException(
3122 "Invalid VLAN range for {}: {}.You must provide "
3123 "'{}' in format [start_ID - end_ID].".format(
3124 text_vlan_range, vlanID_range, text_vlan_range
3125 )
3126 )
3127
3128 start_vlanid, end_vlanid = map(int, vlan_range.split("-"))
3129 if start_vlanid <= 0:
3130 raise vimconn.VimConnConflictException(
3131 "Invalid VLAN range for {}: {}. Start ID can not be zero. For VLAN "
3132 "networks valid IDs are 1 to 4094 ".format(
3133 text_vlan_range, vlanID_range
3134 )
3135 )
3136
3137 if end_vlanid > 4094:
3138 raise vimconn.VimConnConflictException(
3139 "Invalid VLAN range for {}: {}. End VLAN ID can not be "
3140 "greater than 4094. For VLAN networks valid IDs are 1 to 4094 ".format(
3141 text_vlan_range, vlanID_range
3142 )
3143 )
3144
3145 if start_vlanid > end_vlanid:
3146 raise vimconn.VimConnConflictException(
3147 "Invalid VLAN range for {}: {}. You must provide '{}'"
3148 " in format start_ID - end_ID and start_ID < end_ID ".format(
3149 text_vlan_range, vlanID_range, text_vlan_range
3150 )
3151 )
3152
3153 # NOT USED FUNCTIONS
3154
3155 def new_external_port(self, port_data):
3156 """Adds a external port to VIM
3157 Returns the port identifier"""
3158 # TODO openstack if needed
3159 return (
3160 -vimconn.HTTP_Internal_Server_Error,
3161 "osconnector.new_external_port() not implemented",
3162 )
3163
3164 def connect_port_network(self, port_id, network_id, admin=False):
3165 """Connects a external port to a network
3166 Returns status code of the VIM response"""
3167 # TODO openstack if needed
3168 return (
3169 -vimconn.HTTP_Internal_Server_Error,
3170 "osconnector.connect_port_network() not implemented",
3171 )
3172
3173 def new_user(self, user_name, user_passwd, tenant_id=None):
3174 """Adds a new user to openstack VIM
3175 Returns the user identifier"""
3176 self.logger.debug("osconnector: Adding a new user to VIM")
3177
3178 try:
3179 self._reload_connection()
3180 user = self.keystone.users.create(
3181 user_name, password=user_passwd, default_project=tenant_id
3182 )
3183 # self.keystone.tenants.add_user(self.k_creds["username"], #role)
3184
3185 return user.id
3186 except ksExceptions.ConnectionError as e:
3187 error_value = -vimconn.HTTP_Bad_Request
3188 error_text = (
3189 type(e).__name__
3190 + ": "
3191 + (str(e) if len(e.args) == 0 else str(e.args[0]))
3192 )
3193 except ksExceptions.ClientException as e: # TODO remove
3194 error_value = -vimconn.HTTP_Bad_Request
3195 error_text = (
3196 type(e).__name__
3197 + ": "
3198 + (str(e) if len(e.args) == 0 else str(e.args[0]))
3199 )
3200
3201 # TODO insert exception vimconn.HTTP_Unauthorized
3202 # if reaching here is because an exception
3203 self.logger.debug("new_user " + error_text)
3204
3205 return error_value, error_text
3206
3207 def delete_user(self, user_id):
3208 """Delete a user from openstack VIM
3209 Returns the user identifier"""
3210 if self.debug:
3211 print("osconnector: Deleting a user from VIM")
3212
3213 try:
3214 self._reload_connection()
3215 self.keystone.users.delete(user_id)
3216
3217 return 1, user_id
3218 except ksExceptions.ConnectionError as e:
3219 error_value = -vimconn.HTTP_Bad_Request
3220 error_text = (
3221 type(e).__name__
3222 + ": "
3223 + (str(e) if len(e.args) == 0 else str(e.args[0]))
3224 )
3225 except ksExceptions.NotFound as e:
3226 error_value = -vimconn.HTTP_Not_Found
3227 error_text = (
3228 type(e).__name__
3229 + ": "
3230 + (str(e) if len(e.args) == 0 else str(e.args[0]))
3231 )
3232 except ksExceptions.ClientException as e: # TODO remove
3233 error_value = -vimconn.HTTP_Bad_Request
3234 error_text = (
3235 type(e).__name__
3236 + ": "
3237 + (str(e) if len(e.args) == 0 else str(e.args[0]))
3238 )
3239
3240 # TODO insert exception vimconn.HTTP_Unauthorized
3241 # if reaching here is because an exception
3242 self.logger.debug("delete_tenant " + error_text)
3243
3244 return error_value, error_text
3245
3246 def get_hosts_info(self):
3247 """Get the information of deployed hosts
3248 Returns the hosts content"""
3249 if self.debug:
3250 print("osconnector: Getting Host info from VIM")
3251
3252 try:
3253 h_list = []
3254 self._reload_connection()
3255 hypervisors = self.nova.hypervisors.list()
3256
3257 for hype in hypervisors:
3258 h_list.append(hype.to_dict())
3259
3260 return 1, {"hosts": h_list}
3261 except nvExceptions.NotFound as e:
3262 error_value = -vimconn.HTTP_Not_Found
3263 error_text = str(e) if len(e.args) == 0 else str(e.args[0])
3264 except (ksExceptions.ClientException, nvExceptions.ClientException) as e:
3265 error_value = -vimconn.HTTP_Bad_Request
3266 error_text = (
3267 type(e).__name__
3268 + ": "
3269 + (str(e) if len(e.args) == 0 else str(e.args[0]))
3270 )
3271
3272 # TODO insert exception vimconn.HTTP_Unauthorized
3273 # if reaching here is because an exception
3274 self.logger.debug("get_hosts_info " + error_text)
3275
3276 return error_value, error_text
3277
3278 def get_hosts(self, vim_tenant):
3279 """Get the hosts and deployed instances
3280 Returns the hosts content"""
3281 r, hype_dict = self.get_hosts_info()
3282
3283 if r < 0:
3284 return r, hype_dict
3285
3286 hypervisors = hype_dict["hosts"]
3287
3288 try:
3289 servers = self.nova.servers.list()
3290 for hype in hypervisors:
3291 for server in servers:
3292 if (
3293 server.to_dict()["OS-EXT-SRV-ATTR:hypervisor_hostname"]
3294 == hype["hypervisor_hostname"]
3295 ):
3296 if "vm" in hype:
3297 hype["vm"].append(server.id)
3298 else:
3299 hype["vm"] = [server.id]
3300
3301 return 1, hype_dict
3302 except nvExceptions.NotFound as e:
3303 error_value = -vimconn.HTTP_Not_Found
3304 error_text = str(e) if len(e.args) == 0 else str(e.args[0])
3305 except (ksExceptions.ClientException, nvExceptions.ClientException) as e:
3306 error_value = -vimconn.HTTP_Bad_Request
3307 error_text = (
3308 type(e).__name__
3309 + ": "
3310 + (str(e) if len(e.args) == 0 else str(e.args[0]))
3311 )
3312
3313 # TODO insert exception vimconn.HTTP_Unauthorized
3314 # if reaching here is because an exception
3315 self.logger.debug("get_hosts " + error_text)
3316
3317 return error_value, error_text
3318
3319 def new_classification(self, name, ctype, definition):
3320 self.logger.debug(
3321 "Adding a new (Traffic) Classification to VIM, named %s", name
3322 )
3323
3324 try:
3325 new_class = None
3326 self._reload_connection()
3327
3328 if ctype not in supportedClassificationTypes:
3329 raise vimconn.VimConnNotSupportedException(
3330 "OpenStack VIM connector does not support provided "
3331 "Classification Type {}, supported ones are: {}".format(
3332 ctype, supportedClassificationTypes
3333 )
3334 )
3335
3336 if not self._validate_classification(ctype, definition):
3337 raise vimconn.VimConnException(
3338 "Incorrect Classification definition for the type specified."
3339 )
3340
3341 classification_dict = definition
3342 classification_dict["name"] = name
3343 new_class = self.neutron.create_sfc_flow_classifier(
3344 {"flow_classifier": classification_dict}
3345 )
3346
3347 return new_class["flow_classifier"]["id"]
3348 except (
3349 neExceptions.ConnectionFailed,
3350 ksExceptions.ClientException,
3351 neExceptions.NeutronException,
3352 ConnectionError,
3353 ) as e:
3354 self.logger.error("Creation of Classification failed.")
3355 self._format_exception(e)
3356
3357 def get_classification(self, class_id):
3358 self.logger.debug(" Getting Classification %s from VIM", class_id)
3359 filter_dict = {"id": class_id}
3360 class_list = self.get_classification_list(filter_dict)
3361
3362 if len(class_list) == 0:
3363 raise vimconn.VimConnNotFoundException(
3364 "Classification '{}' not found".format(class_id)
3365 )
3366 elif len(class_list) > 1:
3367 raise vimconn.VimConnConflictException(
3368 "Found more than one Classification with this criteria"
3369 )
3370
3371 classification = class_list[0]
3372
3373 return classification
3374
3375 def get_classification_list(self, filter_dict={}):
3376 self.logger.debug(
3377 "Getting Classifications from VIM filter: '%s'", str(filter_dict)
3378 )
3379
3380 try:
3381 filter_dict_os = filter_dict.copy()
3382 self._reload_connection()
3383
3384 if self.api_version3 and "tenant_id" in filter_dict_os:
3385 filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")
3386
3387 classification_dict = self.neutron.list_sfc_flow_classifiers(
3388 **filter_dict_os
3389 )
3390 classification_list = classification_dict["flow_classifiers"]
3391 self.__classification_os2mano(classification_list)
3392
3393 return classification_list
3394 except (
3395 neExceptions.ConnectionFailed,
3396 ksExceptions.ClientException,
3397 neExceptions.NeutronException,
3398 ConnectionError,
3399 ) as e:
3400 self._format_exception(e)
3401
3402 def delete_classification(self, class_id):
3403 self.logger.debug("Deleting Classification '%s' from VIM", class_id)
3404
3405 try:
3406 self._reload_connection()
3407 self.neutron.delete_sfc_flow_classifier(class_id)
3408
3409 return class_id
3410 except (
3411 neExceptions.ConnectionFailed,
3412 neExceptions.NeutronException,
3413 ksExceptions.ClientException,
3414 neExceptions.NeutronException,
3415 ConnectionError,
3416 ) as e:
3417 self._format_exception(e)
3418
3419 def new_sfi(self, name, ingress_ports, egress_ports, sfc_encap=True):
3420 self.logger.debug(
3421 "Adding a new Service Function Instance to VIM, named '%s'", name
3422 )
3423
3424 try:
3425 new_sfi = None
3426 self._reload_connection()
3427 correlation = None
3428
3429 if sfc_encap:
3430 correlation = "nsh"
3431
3432 if len(ingress_ports) != 1:
3433 raise vimconn.VimConnNotSupportedException(
3434 "OpenStack VIM connector can only have 1 ingress port per SFI"
3435 )
3436
3437 if len(egress_ports) != 1:
3438 raise vimconn.VimConnNotSupportedException(
3439 "OpenStack VIM connector can only have 1 egress port per SFI"
3440 )
3441
3442 sfi_dict = {
3443 "name": name,
3444 "ingress": ingress_ports[0],
3445 "egress": egress_ports[0],
3446 "service_function_parameters": {"correlation": correlation},
3447 }
3448 new_sfi = self.neutron.create_sfc_port_pair({"port_pair": sfi_dict})
3449
3450 return new_sfi["port_pair"]["id"]
3451 except (
3452 neExceptions.ConnectionFailed,
3453 ksExceptions.ClientException,
3454 neExceptions.NeutronException,
3455 ConnectionError,
3456 ) as e:
3457 if new_sfi:
3458 try:
3459 self.neutron.delete_sfc_port_pair(new_sfi["port_pair"]["id"])
3460 except Exception:
3461 self.logger.error(
3462 "Creation of Service Function Instance failed, with "
3463 "subsequent deletion failure as well."
3464 )
3465
3466 self._format_exception(e)
3467
3468 def get_sfi(self, sfi_id):
3469 self.logger.debug("Getting Service Function Instance %s from VIM", sfi_id)
3470 filter_dict = {"id": sfi_id}
3471 sfi_list = self.get_sfi_list(filter_dict)
3472
3473 if len(sfi_list) == 0:
3474 raise vimconn.VimConnNotFoundException(
3475 "Service Function Instance '{}' not found".format(sfi_id)
3476 )
3477 elif len(sfi_list) > 1:
3478 raise vimconn.VimConnConflictException(
3479 "Found more than one Service Function Instance with this criteria"
3480 )
3481
3482 sfi = sfi_list[0]
3483
3484 return sfi
3485
3486 def get_sfi_list(self, filter_dict={}):
3487 self.logger.debug(
3488 "Getting Service Function Instances from VIM filter: '%s'", str(filter_dict)
3489 )
3490
3491 try:
3492 self._reload_connection()
3493 filter_dict_os = filter_dict.copy()
3494
3495 if self.api_version3 and "tenant_id" in filter_dict_os:
3496 filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")
3497
3498 sfi_dict = self.neutron.list_sfc_port_pairs(**filter_dict_os)
3499 sfi_list = sfi_dict["port_pairs"]
3500 self.__sfi_os2mano(sfi_list)
3501
3502 return sfi_list
3503 except (
3504 neExceptions.ConnectionFailed,
3505 ksExceptions.ClientException,
3506 neExceptions.NeutronException,
3507 ConnectionError,
3508 ) as e:
3509 self._format_exception(e)
3510
3511 def delete_sfi(self, sfi_id):
3512 self.logger.debug("Deleting Service Function Instance '%s' from VIM", sfi_id)
3513
3514 try:
3515 self._reload_connection()
3516 self.neutron.delete_sfc_port_pair(sfi_id)
3517
3518 return sfi_id
3519 except (
3520 neExceptions.ConnectionFailed,
3521 neExceptions.NeutronException,
3522 ksExceptions.ClientException,
3523 neExceptions.NeutronException,
3524 ConnectionError,
3525 ) as e:
3526 self._format_exception(e)
3527
3528 def new_sf(self, name, sfis, sfc_encap=True):
3529 self.logger.debug("Adding a new Service Function to VIM, named '%s'", name)
3530
3531 try:
3532 new_sf = None
3533 self._reload_connection()
3534 # correlation = None
3535 # if sfc_encap:
3536 # correlation = "nsh"
3537
3538 for instance in sfis:
3539 sfi = self.get_sfi(instance)
3540
3541 if sfi.get("sfc_encap") != sfc_encap:
3542 raise vimconn.VimConnNotSupportedException(
3543 "OpenStack VIM connector requires all SFIs of the "
3544 "same SF to share the same SFC Encapsulation"
3545 )
3546
3547 sf_dict = {"name": name, "port_pairs": sfis}
3548 new_sf = self.neutron.create_sfc_port_pair_group(
3549 {"port_pair_group": sf_dict}
3550 )
3551
3552 return new_sf["port_pair_group"]["id"]
3553 except (
3554 neExceptions.ConnectionFailed,
3555 ksExceptions.ClientException,
3556 neExceptions.NeutronException,
3557 ConnectionError,
3558 ) as e:
3559 if new_sf:
3560 try:
3561 self.neutron.delete_sfc_port_pair_group(
3562 new_sf["port_pair_group"]["id"]
3563 )
3564 except Exception:
3565 self.logger.error(
3566 "Creation of Service Function failed, with "
3567 "subsequent deletion failure as well."
3568 )
3569
3570 self._format_exception(e)
3571
3572 def get_sf(self, sf_id):
3573 self.logger.debug("Getting Service Function %s from VIM", sf_id)
3574 filter_dict = {"id": sf_id}
3575 sf_list = self.get_sf_list(filter_dict)
3576
3577 if len(sf_list) == 0:
3578 raise vimconn.VimConnNotFoundException(
3579 "Service Function '{}' not found".format(sf_id)
3580 )
3581 elif len(sf_list) > 1:
3582 raise vimconn.VimConnConflictException(
3583 "Found more than one Service Function with this criteria"
3584 )
3585
3586 sf = sf_list[0]
3587
3588 return sf
3589
3590 def get_sf_list(self, filter_dict={}):
3591 self.logger.debug(
3592 "Getting Service Function from VIM filter: '%s'", str(filter_dict)
3593 )
3594
3595 try:
3596 self._reload_connection()
3597 filter_dict_os = filter_dict.copy()
3598
3599 if self.api_version3 and "tenant_id" in filter_dict_os:
3600 filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")
3601
3602 sf_dict = self.neutron.list_sfc_port_pair_groups(**filter_dict_os)
3603 sf_list = sf_dict["port_pair_groups"]
3604 self.__sf_os2mano(sf_list)
3605
3606 return sf_list
3607 except (
3608 neExceptions.ConnectionFailed,
3609 ksExceptions.ClientException,
3610 neExceptions.NeutronException,
3611 ConnectionError,
3612 ) as e:
3613 self._format_exception(e)
3614
3615 def delete_sf(self, sf_id):
3616 self.logger.debug("Deleting Service Function '%s' from VIM", sf_id)
3617
3618 try:
3619 self._reload_connection()
3620 self.neutron.delete_sfc_port_pair_group(sf_id)
3621
3622 return sf_id
3623 except (
3624 neExceptions.ConnectionFailed,
3625 neExceptions.NeutronException,
3626 ksExceptions.ClientException,
3627 neExceptions.NeutronException,
3628 ConnectionError,
3629 ) as e:
3630 self._format_exception(e)
3631
3632 def new_sfp(self, name, classifications, sfs, sfc_encap=True, spi=None):
3633 self.logger.debug("Adding a new Service Function Path to VIM, named '%s'", name)
3634
3635 try:
3636 new_sfp = None
3637 self._reload_connection()
3638 # In networking-sfc the MPLS encapsulation is legacy
3639 # should be used when no full SFC Encapsulation is intended
3640 correlation = "mpls"
3641
3642 if sfc_encap:
3643 correlation = "nsh"
3644
3645 sfp_dict = {
3646 "name": name,
3647 "flow_classifiers": classifications,
3648 "port_pair_groups": sfs,
3649 "chain_parameters": {"correlation": correlation},
3650 }
3651
3652 if spi:
3653 sfp_dict["chain_id"] = spi
3654
3655 new_sfp = self.neutron.create_sfc_port_chain({"port_chain": sfp_dict})
3656
3657 return new_sfp["port_chain"]["id"]
3658 except (
3659 neExceptions.ConnectionFailed,
3660 ksExceptions.ClientException,
3661 neExceptions.NeutronException,
3662 ConnectionError,
3663 ) as e:
3664 if new_sfp:
3665 try:
3666 self.neutron.delete_sfc_port_chain(new_sfp["port_chain"]["id"])
3667 except Exception:
3668 self.logger.error(
3669 "Creation of Service Function Path failed, with "
3670 "subsequent deletion failure as well."
3671 )
3672
3673 self._format_exception(e)
3674
3675 def get_sfp(self, sfp_id):
3676 self.logger.debug(" Getting Service Function Path %s from VIM", sfp_id)
3677
3678 filter_dict = {"id": sfp_id}
3679 sfp_list = self.get_sfp_list(filter_dict)
3680
3681 if len(sfp_list) == 0:
3682 raise vimconn.VimConnNotFoundException(
3683 "Service Function Path '{}' not found".format(sfp_id)
3684 )
3685 elif len(sfp_list) > 1:
3686 raise vimconn.VimConnConflictException(
3687 "Found more than one Service Function Path with this criteria"
3688 )
3689
3690 sfp = sfp_list[0]
3691
3692 return sfp
3693
3694 def get_sfp_list(self, filter_dict={}):
3695 self.logger.debug(
3696 "Getting Service Function Paths from VIM filter: '%s'", str(filter_dict)
3697 )
3698
3699 try:
3700 self._reload_connection()
3701 filter_dict_os = filter_dict.copy()
3702
3703 if self.api_version3 and "tenant_id" in filter_dict_os:
3704 filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")
3705
3706 sfp_dict = self.neutron.list_sfc_port_chains(**filter_dict_os)
3707 sfp_list = sfp_dict["port_chains"]
3708 self.__sfp_os2mano(sfp_list)
3709
3710 return sfp_list
3711 except (
3712 neExceptions.ConnectionFailed,
3713 ksExceptions.ClientException,
3714 neExceptions.NeutronException,
3715 ConnectionError,
3716 ) as e:
3717 self._format_exception(e)
3718
3719 def delete_sfp(self, sfp_id):
3720 self.logger.debug("Deleting Service Function Path '%s' from VIM", sfp_id)
3721
3722 try:
3723 self._reload_connection()
3724 self.neutron.delete_sfc_port_chain(sfp_id)
3725
3726 return sfp_id
3727 except (
3728 neExceptions.ConnectionFailed,
3729 neExceptions.NeutronException,
3730 ksExceptions.ClientException,
3731 neExceptions.NeutronException,
3732 ConnectionError,
3733 ) as e:
3734 self._format_exception(e)
3735
3736 def refresh_sfps_status(self, sfp_list):
3737 """Get the status of the service function path
3738 Params: the list of sfp identifiers
3739 Returns a dictionary with:
3740 vm_id: #VIM id of this service function path
3741 status: #Mandatory. Text with one of:
3742 # DELETED (not found at vim)
3743 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
3744 # OTHER (Vim reported other status not understood)
3745 # ERROR (VIM indicates an ERROR status)
3746 # ACTIVE,
3747 # CREATING (on building process)
3748 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
3749 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)F
3750 """
3751 sfp_dict = {}
3752 self.logger.debug(
3753 "refresh_sfps status: Getting tenant SFP information from VIM"
3754 )
3755
3756 for sfp_id in sfp_list:
3757 sfp = {}
3758
3759 try:
3760 sfp_vim = self.get_sfp(sfp_id)
3761
3762 if sfp_vim["spi"]:
3763 sfp["status"] = vmStatus2manoFormat["ACTIVE"]
3764 else:
3765 sfp["status"] = "OTHER"
3766 sfp["error_msg"] = "VIM status reported " + sfp["status"]
3767
3768 sfp["vim_info"] = self.serialize(sfp_vim)
3769
3770 if sfp_vim.get("fault"):
3771 sfp["error_msg"] = str(sfp_vim["fault"])
3772 except vimconn.VimConnNotFoundException as e:
3773 self.logger.error("Exception getting sfp status: %s", str(e))
3774 sfp["status"] = "DELETED"
3775 sfp["error_msg"] = str(e)
3776 except vimconn.VimConnException as e:
3777 self.logger.error("Exception getting sfp status: %s", str(e))
3778 sfp["status"] = "VIM_ERROR"
3779 sfp["error_msg"] = str(e)
3780
3781 sfp_dict[sfp_id] = sfp
3782
3783 return sfp_dict
3784
3785 def refresh_sfis_status(self, sfi_list):
3786 """Get the status of the service function instances
3787 Params: the list of sfi identifiers
3788 Returns a dictionary with:
3789 vm_id: #VIM id of this service function instance
3790 status: #Mandatory. Text with one of:
3791 # DELETED (not found at vim)
3792 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
3793 # OTHER (Vim reported other status not understood)
3794 # ERROR (VIM indicates an ERROR status)
3795 # ACTIVE,
3796 # CREATING (on building process)
3797 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
3798 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
3799 """
3800 sfi_dict = {}
3801 self.logger.debug(
3802 "refresh_sfis status: Getting tenant sfi information from VIM"
3803 )
3804
3805 for sfi_id in sfi_list:
3806 sfi = {}
3807
3808 try:
3809 sfi_vim = self.get_sfi(sfi_id)
3810
3811 if sfi_vim:
3812 sfi["status"] = vmStatus2manoFormat["ACTIVE"]
3813 else:
3814 sfi["status"] = "OTHER"
3815 sfi["error_msg"] = "VIM status reported " + sfi["status"]
3816
3817 sfi["vim_info"] = self.serialize(sfi_vim)
3818
3819 if sfi_vim.get("fault"):
3820 sfi["error_msg"] = str(sfi_vim["fault"])
3821 except vimconn.VimConnNotFoundException as e:
3822 self.logger.error("Exception getting sfi status: %s", str(e))
3823 sfi["status"] = "DELETED"
3824 sfi["error_msg"] = str(e)
3825 except vimconn.VimConnException as e:
3826 self.logger.error("Exception getting sfi status: %s", str(e))
3827 sfi["status"] = "VIM_ERROR"
3828 sfi["error_msg"] = str(e)
3829
3830 sfi_dict[sfi_id] = sfi
3831
3832 return sfi_dict
3833
3834 def refresh_sfs_status(self, sf_list):
3835 """Get the status of the service functions
3836 Params: the list of sf identifiers
3837 Returns a dictionary with:
3838 vm_id: #VIM id of this service function
3839 status: #Mandatory. Text with one of:
3840 # DELETED (not found at vim)
3841 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
3842 # OTHER (Vim reported other status not understood)
3843 # ERROR (VIM indicates an ERROR status)
3844 # ACTIVE,
3845 # CREATING (on building process)
3846 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
3847 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
3848 """
3849 sf_dict = {}
3850 self.logger.debug("refresh_sfs status: Getting tenant sf information from VIM")
3851
3852 for sf_id in sf_list:
3853 sf = {}
3854
3855 try:
3856 sf_vim = self.get_sf(sf_id)
3857
3858 if sf_vim:
3859 sf["status"] = vmStatus2manoFormat["ACTIVE"]
3860 else:
3861 sf["status"] = "OTHER"
3862 sf["error_msg"] = "VIM status reported " + sf_vim["status"]
3863
3864 sf["vim_info"] = self.serialize(sf_vim)
3865
3866 if sf_vim.get("fault"):
3867 sf["error_msg"] = str(sf_vim["fault"])
3868 except vimconn.VimConnNotFoundException as e:
3869 self.logger.error("Exception getting sf status: %s", str(e))
3870 sf["status"] = "DELETED"
3871 sf["error_msg"] = str(e)
3872 except vimconn.VimConnException as e:
3873 self.logger.error("Exception getting sf status: %s", str(e))
3874 sf["status"] = "VIM_ERROR"
3875 sf["error_msg"] = str(e)
3876
3877 sf_dict[sf_id] = sf
3878
3879 return sf_dict
3880
3881 def refresh_classifications_status(self, classification_list):
3882 """Get the status of the classifications
3883 Params: the list of classification identifiers
3884 Returns a dictionary with:
3885 vm_id: #VIM id of this classifier
3886 status: #Mandatory. Text with one of:
3887 # DELETED (not found at vim)
3888 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
3889 # OTHER (Vim reported other status not understood)
3890 # ERROR (VIM indicates an ERROR status)
3891 # ACTIVE,
3892 # CREATING (on building process)
3893 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
3894 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
3895 """
3896 classification_dict = {}
3897 self.logger.debug(
3898 "refresh_classifications status: Getting tenant classification information from VIM"
3899 )
3900
3901 for classification_id in classification_list:
3902 classification = {}
3903
3904 try:
3905 classification_vim = self.get_classification(classification_id)
3906
3907 if classification_vim:
3908 classification["status"] = vmStatus2manoFormat["ACTIVE"]
3909 else:
3910 classification["status"] = "OTHER"
3911 classification["error_msg"] = (
3912 "VIM status reported " + classification["status"]
3913 )
3914
3915 classification["vim_info"] = self.serialize(classification_vim)
3916
3917 if classification_vim.get("fault"):
3918 classification["error_msg"] = str(classification_vim["fault"])
3919 except vimconn.VimConnNotFoundException as e:
3920 self.logger.error("Exception getting classification status: %s", str(e))
3921 classification["status"] = "DELETED"
3922 classification["error_msg"] = str(e)
3923 except vimconn.VimConnException as e:
3924 self.logger.error("Exception getting classification status: %s", str(e))
3925 classification["status"] = "VIM_ERROR"
3926 classification["error_msg"] = str(e)
3927
3928 classification_dict[classification_id] = classification
3929
3930 return classification_dict
3931
3932 def new_affinity_group(self, affinity_group_data):
3933 """Adds a server group to VIM
3934 affinity_group_data contains a dictionary with information, keys:
3935 name: name in VIM for the server group
3936 type: affinity or anti-affinity
3937 scope: Only nfvi-node allowed
3938 Returns the server group identifier"""
3939 self.logger.debug("Adding Server Group '%s'", str(affinity_group_data))
3940
3941 try:
3942 name = affinity_group_data["name"]
3943 policy = affinity_group_data["type"]
3944
3945 self._reload_connection()
3946 new_server_group = self.nova.server_groups.create(name, policy)
3947
3948 return new_server_group.id
3949 except (
3950 ksExceptions.ClientException,
3951 nvExceptions.ClientException,
3952 ConnectionError,
3953 KeyError,
3954 ) as e:
3955 self._format_exception(e)
3956
3957 def get_affinity_group(self, affinity_group_id):
3958 """Obtain server group details from the VIM. Returns the server group detais as a dict"""
3959 self.logger.debug("Getting flavor '%s'", affinity_group_id)
3960 try:
3961 self._reload_connection()
3962 server_group = self.nova.server_groups.find(id=affinity_group_id)
3963
3964 return server_group.to_dict()
3965 except (
3966 nvExceptions.NotFound,
3967 nvExceptions.ClientException,
3968 ksExceptions.ClientException,
3969 ConnectionError,
3970 ) as e:
3971 self._format_exception(e)
3972
3973 def delete_affinity_group(self, affinity_group_id):
3974 """Deletes a server group from the VIM. Returns the old affinity_group_id"""
3975 self.logger.debug("Getting server group '%s'", affinity_group_id)
3976 try:
3977 self._reload_connection()
3978 self.nova.server_groups.delete(affinity_group_id)
3979
3980 return affinity_group_id
3981 except (
3982 nvExceptions.NotFound,
3983 ksExceptions.ClientException,
3984 nvExceptions.ClientException,
3985 ConnectionError,
3986 ) as e:
3987 self._format_exception(e)
3988
3989 def get_vdu_state(self, vm_id):
3990 """
3991 Getting the state of a vdu
3992 param:
3993 vm_id: ID of an instance
3994 """
3995 self.logger.debug("Getting the status of VM")
3996 self.logger.debug("VIM VM ID %s", vm_id)
3997 self._reload_connection()
3998 server = self.nova.servers.find(id=vm_id)
3999 server_dict = server.to_dict()
4000 vdu_data = [
4001 server_dict["status"],
4002 server_dict["flavor"]["id"],
4003 server_dict["OS-EXT-SRV-ATTR:host"],
4004 server_dict["OS-EXT-AZ:availability_zone"],
4005 ]
4006 self.logger.debug("vdu_data %s", vdu_data)
4007 return vdu_data
4008
4009 def check_compute_availability(self, host, server_flavor_details):
4010 self._reload_connection()
4011 hypervisor_search = self.nova.hypervisors.search(
4012 hypervisor_match=host, servers=True
4013 )
4014 for hypervisor in hypervisor_search:
4015 hypervisor_id = hypervisor.to_dict()["id"]
4016 hypervisor_details = self.nova.hypervisors.get(hypervisor=hypervisor_id)
4017 hypervisor_dict = hypervisor_details.to_dict()
4018 hypervisor_temp = json.dumps(hypervisor_dict)
4019 hypervisor_json = json.loads(hypervisor_temp)
4020 resources_available = [
4021 hypervisor_json["free_ram_mb"],
4022 hypervisor_json["disk_available_least"],
4023 hypervisor_json["vcpus"] - hypervisor_json["vcpus_used"],
4024 ]
4025 compute_available = all(
4026 x > y for x, y in zip(resources_available, server_flavor_details)
4027 )
4028 if compute_available:
4029 return host
4030
4031 def check_availability_zone(
4032 self, old_az, server_flavor_details, old_host, host=None
4033 ):
4034 self._reload_connection()
4035 az_check = {"zone_check": False, "compute_availability": None}
4036 aggregates_list = self.nova.aggregates.list()
4037 for aggregate in aggregates_list:
4038 aggregate_details = aggregate.to_dict()
4039 aggregate_temp = json.dumps(aggregate_details)
4040 aggregate_json = json.loads(aggregate_temp)
4041 if aggregate_json["availability_zone"] == old_az:
4042 hosts_list = aggregate_json["hosts"]
4043 if host is not None:
4044 if host in hosts_list:
4045 az_check["zone_check"] = True
4046 available_compute_id = self.check_compute_availability(
4047 host, server_flavor_details
4048 )
4049 if available_compute_id is not None:
4050 az_check["compute_availability"] = available_compute_id
4051 else:
4052 for check_host in hosts_list:
4053 if check_host != old_host:
4054 available_compute_id = self.check_compute_availability(
4055 check_host, server_flavor_details
4056 )
4057 if available_compute_id is not None:
4058 az_check["zone_check"] = True
4059 az_check["compute_availability"] = available_compute_id
4060 break
4061 else:
4062 az_check["zone_check"] = True
4063 return az_check
4064
4065 def migrate_instance(self, vm_id, compute_host=None):
4066 """
4067 Migrate a vdu
4068 param:
4069 vm_id: ID of an instance
4070 compute_host: Host to migrate the vdu to
4071 """
4072 self._reload_connection()
4073 vm_state = False
4074 instance_state = self.get_vdu_state(vm_id)
4075 server_flavor_id = instance_state[1]
4076 server_hypervisor_name = instance_state[2]
4077 server_availability_zone = instance_state[3]
4078 try:
4079 server_flavor = self.nova.flavors.find(id=server_flavor_id).to_dict()
4080 server_flavor_details = [
4081 server_flavor["ram"],
4082 server_flavor["disk"],
4083 server_flavor["vcpus"],
4084 ]
4085 if compute_host == server_hypervisor_name:
4086 raise vimconn.VimConnException(
4087 "Unable to migrate instance '{}' to the same host '{}'".format(
4088 vm_id, compute_host
4089 ),
4090 http_code=vimconn.HTTP_Bad_Request,
4091 )
4092 az_status = self.check_availability_zone(
4093 server_availability_zone,
4094 server_flavor_details,
4095 server_hypervisor_name,
4096 compute_host,
4097 )
4098 availability_zone_check = az_status["zone_check"]
4099 available_compute_id = az_status.get("compute_availability")
4100
4101 if availability_zone_check is False:
4102 raise vimconn.VimConnException(
4103 "Unable to migrate instance '{}' to a different availability zone".format(
4104 vm_id
4105 ),
4106 http_code=vimconn.HTTP_Bad_Request,
4107 )
4108 if available_compute_id is not None:
4109 self.nova.servers.live_migrate(
4110 server=vm_id,
4111 host=available_compute_id,
4112 block_migration=True,
4113 disk_over_commit=False,
4114 )
4115 state = "MIGRATING"
4116 changed_compute_host = ""
4117 if state == "MIGRATING":
4118 vm_state = self.__wait_for_vm(vm_id, "ACTIVE")
4119 changed_compute_host = self.get_vdu_state(vm_id)[2]
4120 if vm_state and changed_compute_host == available_compute_id:
4121 self.logger.debug(
4122 "Instance '{}' migrated to the new compute host '{}'".format(
4123 vm_id, changed_compute_host
4124 )
4125 )
4126 return state, available_compute_id
4127 else:
4128 raise vimconn.VimConnException(
4129 "Migration Failed. Instance '{}' not moved to the new host {}".format(
4130 vm_id, available_compute_id
4131 ),
4132 http_code=vimconn.HTTP_Bad_Request,
4133 )
4134 else:
4135 raise vimconn.VimConnException(
4136 "Compute '{}' not available or does not have enough resources to migrate the instance".format(
4137 available_compute_id
4138 ),
4139 http_code=vimconn.HTTP_Bad_Request,
4140 )
4141 except (
4142 nvExceptions.BadRequest,
4143 nvExceptions.ClientException,
4144 nvExceptions.NotFound,
4145 ) as e:
4146 self._format_exception(e)
4147
4148 def resize_instance(self, vm_id, new_flavor_id):
4149 """
4150 For resizing the vm based on the given
4151 flavor details
4152 param:
4153 vm_id : ID of an instance
4154 new_flavor_id : Flavor id to be resized
4155 Return the status of a resized instance
4156 """
4157 self._reload_connection()
4158 self.logger.debug("resize the flavor of an instance")
4159 instance_status, old_flavor_id, compute_host, az = self.get_vdu_state(vm_id)
4160 old_flavor_disk = self.nova.flavors.find(id=old_flavor_id).to_dict()["disk"]
4161 new_flavor_disk = self.nova.flavors.find(id=new_flavor_id).to_dict()["disk"]
4162 try:
4163 if instance_status == "ACTIVE" or instance_status == "SHUTOFF":
4164 if old_flavor_disk > new_flavor_disk:
4165 raise nvExceptions.BadRequest(
4166 400,
4167 message="Server disk resize failed. Resize to lower disk flavor is not allowed",
4168 )
4169 else:
4170 self.nova.servers.resize(server=vm_id, flavor=new_flavor_id)
4171 vm_state = self.__wait_for_vm(vm_id, "VERIFY_RESIZE")
4172 if vm_state:
4173 instance_resized_status = self.confirm_resize(vm_id)
4174 return instance_resized_status
4175 else:
4176 raise nvExceptions.BadRequest(
4177 409,
4178 message="Cannot 'resize' vm_state is in ERROR",
4179 )
4180
4181 else:
4182 self.logger.debug("ERROR : Instance is not in ACTIVE or SHUTOFF state")
4183 raise nvExceptions.BadRequest(
4184 409,
4185 message="Cannot 'resize' instance while it is in vm_state resized",
4186 )
4187 except (
4188 nvExceptions.BadRequest,
4189 nvExceptions.ClientException,
4190 nvExceptions.NotFound,
4191 ) as e:
4192 self._format_exception(e)
4193
4194 def confirm_resize(self, vm_id):
4195 """
4196 Confirm the resize of an instance
4197 param:
4198 vm_id: ID of an instance
4199 """
4200 self._reload_connection()
4201 self.nova.servers.confirm_resize(server=vm_id)
4202 if self.get_vdu_state(vm_id)[0] == "VERIFY_RESIZE":
4203 self.__wait_for_vm(vm_id, "ACTIVE")
4204 instance_status = self.get_vdu_state(vm_id)[0]
4205 return instance_status