Commit: "Find the interface ports regardless of VM's existence"
Path: RO-VIM-openstack/osm_rovim_openstack/vimconn_openstack.py (repository osm/RO.git)
1 # -*- coding: utf-8 -*-
2
3 ##
4 # Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
5 # This file is part of openmano
6 # All Rights Reserved.
7 #
8 # Licensed under the Apache License, Version 2.0 (the "License"); you may
9 # not use this file except in compliance with the License. You may obtain
10 # a copy of the License at
11 #
12 # http://www.apache.org/licenses/LICENSE-2.0
13 #
14 # Unless required by applicable law or agreed to in writing, software
15 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
16 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
17 # License for the specific language governing permissions and limitations
18 # under the License.
19 ##
20
21 """
22 osconnector implements all the methods to interact with openstack using the python-neutronclient.
23
24 For the VNF forwarding graph, The OpenStack VIM connector calls the
25 networking-sfc Neutron extension methods, whose resources are mapped
26 to the VIM connector's SFC resources as follows:
27 - Classification (OSM) -> Flow Classifier (Neutron)
28 - Service Function Instance (OSM) -> Port Pair (Neutron)
29 - Service Function (OSM) -> Port Pair Group (Neutron)
30 - Service Function Path (OSM) -> Port Chain (Neutron)
31 """
32
33 import copy
34 from http.client import HTTPException
35 import json
36 import logging
37 from pprint import pformat
38 import random
39 import re
40 import time
41
42 from cinderclient import client as cClient
43 from glanceclient import client as glClient
44 import glanceclient.exc as gl1Exceptions
45 from keystoneauth1 import session
46 from keystoneauth1.identity import v2, v3
47 import keystoneclient.exceptions as ksExceptions
48 import keystoneclient.v2_0.client as ksClient_v2
49 import keystoneclient.v3.client as ksClient_v3
50 import netaddr
51 from neutronclient.common import exceptions as neExceptions
52 from neutronclient.neutron import client as neClient
53 from novaclient import client as nClient, exceptions as nvExceptions
54 from osm_ro_plugin import vimconn
55 from requests.exceptions import ConnectionError
56 import yaml
57
__author__ = "Alfonso Tierno, Gerardo Garcia, Pablo Montes, xFlow Research, Igor D.C., Eduardo Sousa"
__date__ = "$22-sep-2017 23:59:59$"

"""contain the openstack virtual machine status to openmano status"""
# Map of OpenStack server (VM) status -> OSM/openmano status
vmStatus2manoFormat = {
    "ACTIVE": "ACTIVE",
    "PAUSED": "PAUSED",
    "SUSPENDED": "SUSPENDED",
    "SHUTOFF": "INACTIVE",
    "BUILD": "BUILD",
    "ERROR": "ERROR",
    "DELETED": "DELETED",
}
# Map of OpenStack network status -> OSM/openmano status
netStatus2manoFormat = {
    "ACTIVE": "ACTIVE",
    "PAUSED": "PAUSED",
    "INACTIVE": "INACTIVE",
    "BUILD": "BUILD",
    "ERROR": "ERROR",
    "DELETED": "DELETED",
}

# Classification (SFC flow classifier) types this connector accepts
supportedClassificationTypes = ["legacy_flow_classifier"]

# global var to have a timeout creating and deleting volumes (seconds)
volume_timeout = 1800
server_timeout = 1800
85
86
class SafeDumper(yaml.SafeDumper):
    """YAML dumper that tolerates the dict subclasses returned by OpenStack APIs."""

    def represent_data(self, data):
        # PyYAML's safe dumper refuses custom subclasses of dict (pyyaml
        # issue 142); normalize any dict subclass back to a plain dict
        # before handing it to the base representer.
        if isinstance(data, dict) and type(data) is not dict:
            data = dict(data.items())

        return super().represent_data(data)
96
97
98 class vimconnector(vimconn.VimConnector):
99 def __init__(
100 self,
101 uuid,
102 name,
103 tenant_id,
104 tenant_name,
105 url,
106 url_admin=None,
107 user=None,
108 passwd=None,
109 log_level=None,
110 config={},
111 persistent_info={},
112 ):
113 """using common constructor parameters. In this case
114 'url' is the keystone authorization url,
115 'url_admin' is not use
116 """
117 api_version = config.get("APIversion")
118
119 if api_version and api_version not in ("v3.3", "v2.0", "2", "3"):
120 raise vimconn.VimConnException(
121 "Invalid value '{}' for config:APIversion. "
122 "Allowed values are 'v3.3', 'v2.0', '2' or '3'".format(api_version)
123 )
124
125 vim_type = config.get("vim_type")
126
127 if vim_type and vim_type not in ("vio", "VIO"):
128 raise vimconn.VimConnException(
129 "Invalid value '{}' for config:vim_type."
130 "Allowed values are 'vio' or 'VIO'".format(vim_type)
131 )
132
133 if config.get("dataplane_net_vlan_range") is not None:
134 # validate vlan ranges provided by user
135 self._validate_vlan_ranges(
136 config.get("dataplane_net_vlan_range"), "dataplane_net_vlan_range"
137 )
138
139 if config.get("multisegment_vlan_range") is not None:
140 # validate vlan ranges provided by user
141 self._validate_vlan_ranges(
142 config.get("multisegment_vlan_range"), "multisegment_vlan_range"
143 )
144
145 vimconn.VimConnector.__init__(
146 self,
147 uuid,
148 name,
149 tenant_id,
150 tenant_name,
151 url,
152 url_admin,
153 user,
154 passwd,
155 log_level,
156 config,
157 )
158
159 if self.config.get("insecure") and self.config.get("ca_cert"):
160 raise vimconn.VimConnException(
161 "options insecure and ca_cert are mutually exclusive"
162 )
163
164 self.verify = True
165
166 if self.config.get("insecure"):
167 self.verify = False
168
169 if self.config.get("ca_cert"):
170 self.verify = self.config.get("ca_cert")
171
172 if not url:
173 raise TypeError("url param can not be NoneType")
174
175 self.persistent_info = persistent_info
176 self.availability_zone = persistent_info.get("availability_zone", None)
177 self.session = persistent_info.get("session", {"reload_client": True})
178 self.my_tenant_id = self.session.get("my_tenant_id")
179 self.nova = self.session.get("nova")
180 self.neutron = self.session.get("neutron")
181 self.cinder = self.session.get("cinder")
182 self.glance = self.session.get("glance")
183 # self.glancev1 = self.session.get("glancev1")
184 self.keystone = self.session.get("keystone")
185 self.api_version3 = self.session.get("api_version3")
186 self.vim_type = self.config.get("vim_type")
187
188 if self.vim_type:
189 self.vim_type = self.vim_type.upper()
190
191 if self.config.get("use_internal_endpoint"):
192 self.endpoint_type = "internalURL"
193 else:
194 self.endpoint_type = None
195
196 logging.getLogger("urllib3").setLevel(logging.WARNING)
197 logging.getLogger("keystoneauth").setLevel(logging.WARNING)
198 logging.getLogger("novaclient").setLevel(logging.WARNING)
199 self.logger = logging.getLogger("ro.vim.openstack")
200
201 # allow security_groups to be a list or a single string
202 if isinstance(self.config.get("security_groups"), str):
203 self.config["security_groups"] = [self.config["security_groups"]]
204
205 self.security_groups_id = None
206
207 # ###### VIO Specific Changes #########
208 if self.vim_type == "VIO":
209 self.logger = logging.getLogger("ro.vim.vio")
210
211 if log_level:
212 self.logger.setLevel(getattr(logging, log_level))
213
214 def __getitem__(self, index):
215 """Get individuals parameters.
216 Throw KeyError"""
217 if index == "project_domain_id":
218 return self.config.get("project_domain_id")
219 elif index == "user_domain_id":
220 return self.config.get("user_domain_id")
221 else:
222 return vimconn.VimConnector.__getitem__(self, index)
223
224 def __setitem__(self, index, value):
225 """Set individuals parameters and it is marked as dirty so to force connection reload.
226 Throw KeyError"""
227 if index == "project_domain_id":
228 self.config["project_domain_id"] = value
229 elif index == "user_domain_id":
230 self.config["user_domain_id"] = value
231 else:
232 vimconn.VimConnector.__setitem__(self, index, value)
233
234 self.session["reload_client"] = True
235
236 def serialize(self, value):
237 """Serialization of python basic types.
238
239 In the case value is not serializable a message will be logged and a
240 simple representation of the data that cannot be converted back to
241 python is returned.
242 """
243 if isinstance(value, str):
244 return value
245
246 try:
247 return yaml.dump(
248 value, Dumper=SafeDumper, default_flow_style=True, width=256
249 )
250 except yaml.representer.RepresenterError:
251 self.logger.debug(
252 "The following entity cannot be serialized in YAML:\n\n%s\n\n",
253 pformat(value),
254 exc_info=True,
255 )
256
257 return str(value)
258
    def _reload_connection(self):
        """Called before any operation; rebuilds all OpenStack clients if the
        credentials changed (session marked with reload_client=True).

        Throw keystoneclient.apiclient.exceptions.AuthorizationFailure
        """
        # TODO control the timing and possible token timeout, but it seams that python client does this task for us :-)
        if self.session["reload_client"]:
            # determine identity API version: explicit config wins, otherwise
            # deduce it from the auth_url suffix
            if self.config.get("APIversion"):
                self.api_version3 = (
                    self.config["APIversion"] == "v3.3"
                    or self.config["APIversion"] == "3"
                )
            else:  # get from ending auth_url that end with v3 or with v2.0
                self.api_version3 = self.url.endswith("/v3") or self.url.endswith(
                    "/v3/"
                )

            self.session["api_version3"] = self.api_version3

            if self.api_version3:
                # only fall back to the "default" domain when the user gave
                # neither a domain id nor a domain name
                if self.config.get("project_domain_id") or self.config.get(
                    "project_domain_name"
                ):
                    project_domain_id_default = None
                else:
                    project_domain_id_default = "default"

                if self.config.get("user_domain_id") or self.config.get(
                    "user_domain_name"
                ):
                    user_domain_id_default = None
                else:
                    user_domain_id_default = "default"
                auth = v3.Password(
                    auth_url=self.url,
                    username=self.user,
                    password=self.passwd,
                    project_name=self.tenant_name,
                    project_id=self.tenant_id,
                    project_domain_id=self.config.get(
                        "project_domain_id", project_domain_id_default
                    ),
                    user_domain_id=self.config.get(
                        "user_domain_id", user_domain_id_default
                    ),
                    project_domain_name=self.config.get("project_domain_name"),
                    user_domain_name=self.config.get("user_domain_name"),
                )
            else:
                auth = v2.Password(
                    auth_url=self.url,
                    username=self.user,
                    password=self.passwd,
                    tenant_name=self.tenant_name,
                    tenant_id=self.tenant_id,
                )

            sess = session.Session(auth=auth, verify=self.verify)
            # addedd region_name to keystone, nova, neutron and cinder to support distributed cloud for Wind River
            # Titanium cloud and StarlingX
            region_name = self.config.get("region_name")

            if self.api_version3:
                self.keystone = ksClient_v3.Client(
                    session=sess,
                    endpoint_type=self.endpoint_type,
                    region_name=region_name,
                )
            else:
                self.keystone = ksClient_v2.Client(
                    session=sess, endpoint_type=self.endpoint_type
                )

            self.session["keystone"] = self.keystone
            # In order to enable microversion functionality an explicit microversion must be specified in "config".
            # This implementation approach is due to the warning message in
            # https://developer.openstack.org/api-guide/compute/microversions.html
            # where it is stated that microversion backwards compatibility is not guaranteed and clients should
            # always require an specific microversion.
            # To be able to use "device role tagging" functionality define "microversion: 2.32" in datacenter config
            version = self.config.get("microversion")

            if not version:
                version = "2.1"

            # addedd region_name to keystone, nova, neutron and cinder to support distributed cloud for Wind River
            # Titanium cloud and StarlingX
            self.nova = self.session["nova"] = nClient.Client(
                str(version),
                session=sess,
                endpoint_type=self.endpoint_type,
                region_name=region_name,
            )
            self.neutron = self.session["neutron"] = neClient.Client(
                "2.0",
                session=sess,
                endpoint_type=self.endpoint_type,
                region_name=region_name,
            )
            self.cinder = self.session["cinder"] = cClient.Client(
                2,
                session=sess,
                endpoint_type=self.endpoint_type,
                region_name=region_name,
            )

            # best effort: a missing project id is logged but does not abort
            try:
                self.my_tenant_id = self.session["my_tenant_id"] = sess.get_project_id()
            except Exception:
                self.logger.error("Cannot get project_id from session", exc_info=True)

            # glance client does not take endpoint_type; resolve the internal
            # endpoint manually when requested
            if self.endpoint_type == "internalURL":
                glance_service_id = self.keystone.services.list(name="glance")[0].id
                glance_endpoint = self.keystone.endpoints.list(
                    glance_service_id, interface="internal"
                )[0].url
            else:
                glance_endpoint = None

            self.glance = self.session["glance"] = glClient.Client(
                2, session=sess, endpoint=glance_endpoint
            )
            # using version 1 of glance client in new_image()
            # self.glancev1 = self.session["glancev1"] = glClient.Client("1", session=sess,
            #                                                            endpoint=glance_endpoint)
            self.session["reload_client"] = False
            self.persistent_info["session"] = self.session
            # add availablity zone info inside  self.persistent_info
            self._set_availablity_zones()
            self.persistent_info["availability_zone"] = self.availability_zone
            # force to get again security_groups_ids next time they are needed
            self.security_groups_id = None
390
391 def __net_os2mano(self, net_list_dict):
392 """Transform the net openstack format to mano format
393 net_list_dict can be a list of dict or a single dict"""
394 if type(net_list_dict) is dict:
395 net_list_ = (net_list_dict,)
396 elif type(net_list_dict) is list:
397 net_list_ = net_list_dict
398 else:
399 raise TypeError("param net_list_dict must be a list or a dictionary")
400 for net in net_list_:
401 if net.get("provider:network_type") == "vlan":
402 net["type"] = "data"
403 else:
404 net["type"] = "bridge"
405
406 def __classification_os2mano(self, class_list_dict):
407 """Transform the openstack format (Flow Classifier) to mano format
408 (Classification) class_list_dict can be a list of dict or a single dict
409 """
410 if isinstance(class_list_dict, dict):
411 class_list_ = [class_list_dict]
412 elif isinstance(class_list_dict, list):
413 class_list_ = class_list_dict
414 else:
415 raise TypeError("param class_list_dict must be a list or a dictionary")
416 for classification in class_list_:
417 id = classification.pop("id")
418 name = classification.pop("name")
419 description = classification.pop("description")
420 project_id = classification.pop("project_id")
421 tenant_id = classification.pop("tenant_id")
422 original_classification = copy.deepcopy(classification)
423 classification.clear()
424 classification["ctype"] = "legacy_flow_classifier"
425 classification["definition"] = original_classification
426 classification["id"] = id
427 classification["name"] = name
428 classification["description"] = description
429 classification["project_id"] = project_id
430 classification["tenant_id"] = tenant_id
431
432 def __sfi_os2mano(self, sfi_list_dict):
433 """Transform the openstack format (Port Pair) to mano format (SFI)
434 sfi_list_dict can be a list of dict or a single dict
435 """
436 if isinstance(sfi_list_dict, dict):
437 sfi_list_ = [sfi_list_dict]
438 elif isinstance(sfi_list_dict, list):
439 sfi_list_ = sfi_list_dict
440 else:
441 raise TypeError("param sfi_list_dict must be a list or a dictionary")
442
443 for sfi in sfi_list_:
444 sfi["ingress_ports"] = []
445 sfi["egress_ports"] = []
446
447 if sfi.get("ingress"):
448 sfi["ingress_ports"].append(sfi["ingress"])
449
450 if sfi.get("egress"):
451 sfi["egress_ports"].append(sfi["egress"])
452
453 del sfi["ingress"]
454 del sfi["egress"]
455 params = sfi.get("service_function_parameters")
456 sfc_encap = False
457
458 if params:
459 correlation = params.get("correlation")
460
461 if correlation:
462 sfc_encap = True
463
464 sfi["sfc_encap"] = sfc_encap
465 del sfi["service_function_parameters"]
466
467 def __sf_os2mano(self, sf_list_dict):
468 """Transform the openstack format (Port Pair Group) to mano format (SF)
469 sf_list_dict can be a list of dict or a single dict
470 """
471 if isinstance(sf_list_dict, dict):
472 sf_list_ = [sf_list_dict]
473 elif isinstance(sf_list_dict, list):
474 sf_list_ = sf_list_dict
475 else:
476 raise TypeError("param sf_list_dict must be a list or a dictionary")
477
478 for sf in sf_list_:
479 del sf["port_pair_group_parameters"]
480 sf["sfis"] = sf["port_pairs"]
481 del sf["port_pairs"]
482
483 def __sfp_os2mano(self, sfp_list_dict):
484 """Transform the openstack format (Port Chain) to mano format (SFP)
485 sfp_list_dict can be a list of dict or a single dict
486 """
487 if isinstance(sfp_list_dict, dict):
488 sfp_list_ = [sfp_list_dict]
489 elif isinstance(sfp_list_dict, list):
490 sfp_list_ = sfp_list_dict
491 else:
492 raise TypeError("param sfp_list_dict must be a list or a dictionary")
493
494 for sfp in sfp_list_:
495 params = sfp.pop("chain_parameters")
496 sfc_encap = False
497
498 if params:
499 correlation = params.get("correlation")
500
501 if correlation:
502 sfc_encap = True
503
504 sfp["sfc_encap"] = sfc_encap
505 sfp["spi"] = sfp.pop("chain_id")
506 sfp["classifications"] = sfp.pop("flow_classifiers")
507 sfp["service_functions"] = sfp.pop("port_pair_groups")
508
509 # placeholder for now; read TODO note below
510 def _validate_classification(self, type, definition):
511 # only legacy_flow_classifier Type is supported at this point
512 return True
513 # TODO(igordcard): this method should be an abstract method of an
514 # abstract Classification class to be implemented by the specific
515 # Types. Also, abstract vimconnector should call the validation
516 # method before the implemented VIM connectors are called.
517
    def _format_exception(self, exception):
        """Transform a keystone, nova, neutron exception into a vimconn exception discovering the cause.

        Always re-raises one of the vimconn.VimConn* exception types; it
        never returns normally.
        """
        message_error = str(exception)
        tip = ""

        # NOTE: the isinstance chains below are checked in order, from the
        # most specific mapping to the generic fallback; reordering them
        # would change which vimconn exception wins
        if isinstance(
            exception,
            (
                neExceptions.NetworkNotFoundClient,
                nvExceptions.NotFound,
                ksExceptions.NotFound,
                gl1Exceptions.HTTPNotFound,
            ),
        ):
            # the requested resource does not exist at the VIM
            raise vimconn.VimConnNotFoundException(
                type(exception).__name__ + ": " + message_error
            )
        elif isinstance(
            exception,
            (
                HTTPException,
                gl1Exceptions.HTTPException,
                gl1Exceptions.CommunicationError,
                ConnectionError,
                ksExceptions.ConnectionError,
                neExceptions.ConnectionFailed,
            ),
        ):
            # transport-level failure: wrong URL, TLS problem, bad credentials
            if type(exception).__name__ == "SSLError":
                tip = " (maybe option 'insecure' must be added to the VIM)"

            raise vimconn.VimConnConnectionException(
                "Invalid URL or credentials{}: {}".format(tip, message_error)
            )
        elif isinstance(
            exception,
            (
                KeyError,
                nvExceptions.BadRequest,
                ksExceptions.BadRequest,
            ),
        ):
            # malformed request or missing mandatory data
            raise vimconn.VimConnException(
                type(exception).__name__ + ": " + message_error
            )
        elif isinstance(
            exception,
            (
                nvExceptions.ClientException,
                ksExceptions.ClientException,
                neExceptions.NeutronException,
            ),
        ):
            raise vimconn.VimConnUnexpectedResponse(
                type(exception).__name__ + ": " + message_error
            )
        elif isinstance(exception, nvExceptions.Conflict):
            raise vimconn.VimConnConflictException(
                type(exception).__name__ + ": " + message_error
            )
        elif isinstance(exception, vimconn.VimConnException):
            # already a vimconn exception: propagate unchanged
            raise exception
        else:  # ()
            # anything unexpected: log with traceback and wrap generically
            self.logger.error("General Exception " + message_error, exc_info=True)

            raise vimconn.VimConnConnectionException(
                type(exception).__name__ + ": " + message_error
            )
586
    def _get_ids_from_name(self):
        """
        Obtain ids from name of tenant and security_groups. Store at self.security_groups_id
        :return: None
        :raises vimconn.VimConnConnectionException: if the tenant id or a
            configured security group cannot be resolved
        """
        # get tenant_id if only tenant_name is supplied
        self._reload_connection()

        if not self.my_tenant_id:
            raise vimconn.VimConnConnectionException(
                "Error getting tenant information from name={} id={}".format(
                    self.tenant_name, self.tenant_id
                )
            )

        # resolve configured security groups (given by name or id) only once;
        # self.security_groups_id acts as the cache
        if self.config.get("security_groups") and not self.security_groups_id:
            # convert from name to id
            neutron_sg_list = self.neutron.list_security_groups(
                tenant_id=self.my_tenant_id
            )["security_groups"]

            self.security_groups_id = []
            for sg in self.config.get("security_groups"):
                # each configured entry may match either the neutron id or name
                for neutron_sg in neutron_sg_list:
                    if sg in (neutron_sg["id"], neutron_sg["name"]):
                        self.security_groups_id.append(neutron_sg["id"])
                        break
                else:
                    # for/else: no neutron group matched this entry; clear the
                    # cache so a later call retries the full resolution
                    self.security_groups_id = None

                    raise vimconn.VimConnConnectionException(
                        "Not found security group {} for this tenant".format(sg)
                    )
620
621 def check_vim_connectivity(self):
622 # just get network list to check connectivity and credentials
623 self.get_network_list(filter_dict={})
624
625 def get_tenant_list(self, filter_dict={}):
626 """Obtain tenants of VIM
627 filter_dict can contain the following keys:
628 name: filter by tenant name
629 id: filter by tenant uuid/id
630 <other VIM specific>
631 Returns the tenant list of dictionaries: [{'name':'<name>, 'id':'<id>, ...}, ...]
632 """
633 self.logger.debug("Getting tenants from VIM filter: '%s'", str(filter_dict))
634
635 try:
636 self._reload_connection()
637
638 if self.api_version3:
639 project_class_list = self.keystone.projects.list(
640 name=filter_dict.get("name")
641 )
642 else:
643 project_class_list = self.keystone.tenants.findall(**filter_dict)
644
645 project_list = []
646
647 for project in project_class_list:
648 if filter_dict.get("id") and filter_dict["id"] != project.id:
649 continue
650
651 project_list.append(project.to_dict())
652
653 return project_list
654 except (
655 ksExceptions.ConnectionError,
656 ksExceptions.ClientException,
657 ConnectionError,
658 ) as e:
659 self._format_exception(e)
660
661 def new_tenant(self, tenant_name, tenant_description):
662 """Adds a new tenant to openstack VIM. Returns the tenant identifier"""
663 self.logger.debug("Adding a new tenant name: %s", tenant_name)
664
665 try:
666 self._reload_connection()
667
668 if self.api_version3:
669 project = self.keystone.projects.create(
670 tenant_name,
671 self.config.get("project_domain_id", "default"),
672 description=tenant_description,
673 is_domain=False,
674 )
675 else:
676 project = self.keystone.tenants.create(tenant_name, tenant_description)
677
678 return project.id
679 except (
680 ksExceptions.ConnectionError,
681 ksExceptions.ClientException,
682 ksExceptions.BadRequest,
683 ConnectionError,
684 ) as e:
685 self._format_exception(e)
686
687 def delete_tenant(self, tenant_id):
688 """Delete a tenant from openstack VIM. Returns the old tenant identifier"""
689 self.logger.debug("Deleting tenant %s from VIM", tenant_id)
690
691 try:
692 self._reload_connection()
693
694 if self.api_version3:
695 self.keystone.projects.delete(tenant_id)
696 else:
697 self.keystone.tenants.delete(tenant_id)
698
699 return tenant_id
700 except (
701 ksExceptions.ConnectionError,
702 ksExceptions.ClientException,
703 ksExceptions.NotFound,
704 ConnectionError,
705 ) as e:
706 self._format_exception(e)
707
708 def new_network(
709 self,
710 net_name,
711 net_type,
712 ip_profile=None,
713 shared=False,
714 provider_network_profile=None,
715 ):
716 """Adds a tenant network to VIM
717 Params:
718 'net_name': name of the network
719 'net_type': one of:
720 'bridge': overlay isolated network
721 'data': underlay E-LAN network for Passthrough and SRIOV interfaces
722 'ptp': underlay E-LINE network for Passthrough and SRIOV interfaces.
723 'ip_profile': is a dict containing the IP parameters of the network
724 'ip_version': can be "IPv4" or "IPv6" (Currently only IPv4 is implemented)
725 'subnet_address': ip_prefix_schema, that is X.X.X.X/Y
726 'gateway_address': (Optional) ip_schema, that is X.X.X.X
727 'dns_address': (Optional) comma separated list of ip_schema, e.g. X.X.X.X[,X,X,X,X]
728 'dhcp_enabled': True or False
729 'dhcp_start_address': ip_schema, first IP to grant
730 'dhcp_count': number of IPs to grant.
731 'shared': if this network can be seen/use by other tenants/organization
732 'provider_network_profile': (optional) contains {segmentation-id: vlan, network-type: vlan|vxlan,
733 physical-network: physnet-label}
734 Returns a tuple with the network identifier and created_items, or raises an exception on error
735 created_items can be None or a dictionary where this method can include key-values that will be passed to
736 the method delete_network. Can be used to store created segments, created l2gw connections, etc.
737 Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
738 as not present.
739 """
740 self.logger.debug(
741 "Adding a new network to VIM name '%s', type '%s'", net_name, net_type
742 )
743 # self.logger.debug(">>>>>>>>>>>>>>>>>> IP profile %s", str(ip_profile))
744
745 try:
746 vlan = None
747
748 if provider_network_profile:
749 vlan = provider_network_profile.get("segmentation-id")
750
751 new_net = None
752 created_items = {}
753 self._reload_connection()
754 network_dict = {"name": net_name, "admin_state_up": True}
755
756 if net_type in ("data", "ptp"):
757 provider_physical_network = None
758
759 if provider_network_profile and provider_network_profile.get(
760 "physical-network"
761 ):
762 provider_physical_network = provider_network_profile.get(
763 "physical-network"
764 )
765
766 # provider-network must be one of the dataplane_physcial_netowrk if this is a list. If it is string
767 # or not declared, just ignore the checking
768 if (
769 isinstance(
770 self.config.get("dataplane_physical_net"), (tuple, list)
771 )
772 and provider_physical_network
773 not in self.config["dataplane_physical_net"]
774 ):
775 raise vimconn.VimConnConflictException(
776 "Invalid parameter 'provider-network:physical-network' "
777 "for network creation. '{}' is not one of the declared "
778 "list at VIM_config:dataplane_physical_net".format(
779 provider_physical_network
780 )
781 )
782
783 # use the default dataplane_physical_net
784 if not provider_physical_network:
785 provider_physical_network = self.config.get(
786 "dataplane_physical_net"
787 )
788
789 # if it is non empty list, use the first value. If it is a string use the value directly
790 if (
791 isinstance(provider_physical_network, (tuple, list))
792 and provider_physical_network
793 ):
794 provider_physical_network = provider_physical_network[0]
795
796 if not provider_physical_network:
797 raise vimconn.VimConnConflictException(
798 "missing information needed for underlay networks. Provide "
799 "'dataplane_physical_net' configuration at VIM or use the NS "
800 "instantiation parameter 'provider-network.physical-network'"
801 " for the VLD"
802 )
803
804 if not self.config.get("multisegment_support"):
805 network_dict[
806 "provider:physical_network"
807 ] = provider_physical_network
808
809 if (
810 provider_network_profile
811 and "network-type" in provider_network_profile
812 ):
813 network_dict[
814 "provider:network_type"
815 ] = provider_network_profile["network-type"]
816 else:
817 network_dict["provider:network_type"] = self.config.get(
818 "dataplane_network_type", "vlan"
819 )
820
821 if vlan:
822 network_dict["provider:segmentation_id"] = vlan
823 else:
824 # Multi-segment case
825 segment_list = []
826 segment1_dict = {
827 "provider:physical_network": "",
828 "provider:network_type": "vxlan",
829 }
830 segment_list.append(segment1_dict)
831 segment2_dict = {
832 "provider:physical_network": provider_physical_network,
833 "provider:network_type": "vlan",
834 }
835
836 if vlan:
837 segment2_dict["provider:segmentation_id"] = vlan
838 elif self.config.get("multisegment_vlan_range"):
839 vlanID = self._generate_multisegment_vlanID()
840 segment2_dict["provider:segmentation_id"] = vlanID
841
842 # else
843 # raise vimconn.VimConnConflictException(
844 # "You must provide "multisegment_vlan_range" at config dict before creating a multisegment
845 # network")
846 segment_list.append(segment2_dict)
847 network_dict["segments"] = segment_list
848
849 # VIO Specific Changes. It needs a concrete VLAN
850 if self.vim_type == "VIO" and vlan is None:
851 if self.config.get("dataplane_net_vlan_range") is None:
852 raise vimconn.VimConnConflictException(
853 "You must provide 'dataplane_net_vlan_range' in format "
854 "[start_ID - end_ID] at VIM_config for creating underlay "
855 "networks"
856 )
857
858 network_dict["provider:segmentation_id"] = self._generate_vlanID()
859
860 network_dict["shared"] = shared
861
862 if self.config.get("disable_network_port_security"):
863 network_dict["port_security_enabled"] = False
864
865 if self.config.get("neutron_availability_zone_hints"):
866 hints = self.config.get("neutron_availability_zone_hints")
867
868 if isinstance(hints, str):
869 hints = [hints]
870
871 network_dict["availability_zone_hints"] = hints
872
873 new_net = self.neutron.create_network({"network": network_dict})
874 # print new_net
875 # create subnetwork, even if there is no profile
876
877 if not ip_profile:
878 ip_profile = {}
879
880 if not ip_profile.get("subnet_address"):
881 # Fake subnet is required
882 subnet_rand = random.randint(0, 255)
883 ip_profile["subnet_address"] = "192.168.{}.0/24".format(subnet_rand)
884
885 if "ip_version" not in ip_profile:
886 ip_profile["ip_version"] = "IPv4"
887
888 subnet = {
889 "name": net_name + "-subnet",
890 "network_id": new_net["network"]["id"],
891 "ip_version": 4 if ip_profile["ip_version"] == "IPv4" else 6,
892 "cidr": ip_profile["subnet_address"],
893 }
894
895 # Gateway should be set to None if not needed. Otherwise openstack assigns one by default
896 if ip_profile.get("gateway_address"):
897 subnet["gateway_ip"] = ip_profile["gateway_address"]
898 else:
899 subnet["gateway_ip"] = None
900
901 if ip_profile.get("dns_address"):
902 subnet["dns_nameservers"] = ip_profile["dns_address"].split(";")
903
904 if "dhcp_enabled" in ip_profile:
905 subnet["enable_dhcp"] = (
906 False
907 if ip_profile["dhcp_enabled"] == "false"
908 or ip_profile["dhcp_enabled"] is False
909 else True
910 )
911
912 if ip_profile.get("dhcp_start_address"):
913 subnet["allocation_pools"] = []
914 subnet["allocation_pools"].append(dict())
915 subnet["allocation_pools"][0]["start"] = ip_profile[
916 "dhcp_start_address"
917 ]
918
919 if ip_profile.get("dhcp_count"):
920 # parts = ip_profile["dhcp_start_address"].split(".")
921 # ip_int = (int(parts[0]) << 24) + (int(parts[1]) << 16) + (int(parts[2]) << 8) + int(parts[3])
922 ip_int = int(netaddr.IPAddress(ip_profile["dhcp_start_address"]))
923 ip_int += ip_profile["dhcp_count"] - 1
924 ip_str = str(netaddr.IPAddress(ip_int))
925 subnet["allocation_pools"][0]["end"] = ip_str
926
927 # self.logger.debug(">>>>>>>>>>>>>>>>>> Subnet: %s", str(subnet))
928 self.neutron.create_subnet({"subnet": subnet})
929
930 if net_type == "data" and self.config.get("multisegment_support"):
931 if self.config.get("l2gw_support"):
932 l2gw_list = self.neutron.list_l2_gateways().get("l2_gateways", ())
933 for l2gw in l2gw_list:
934 l2gw_conn = {
935 "l2_gateway_id": l2gw["id"],
936 "network_id": new_net["network"]["id"],
937 "segmentation_id": str(vlanID),
938 }
939 new_l2gw_conn = self.neutron.create_l2_gateway_connection(
940 {"l2_gateway_connection": l2gw_conn}
941 )
942 created_items[
943 "l2gwconn:"
944 + str(new_l2gw_conn["l2_gateway_connection"]["id"])
945 ] = True
946
947 return new_net["network"]["id"], created_items
948 except Exception as e:
949 # delete l2gw connections (if any) before deleting the network
950 for k, v in created_items.items():
951 if not v: # skip already deleted
952 continue
953
954 try:
955 k_item, _, k_id = k.partition(":")
956
957 if k_item == "l2gwconn":
958 self.neutron.delete_l2_gateway_connection(k_id)
959 except Exception as e2:
960 self.logger.error(
961 "Error deleting l2 gateway connection: {}: {}".format(
962 type(e2).__name__, e2
963 )
964 )
965
966 if new_net:
967 self.neutron.delete_network(new_net["network"]["id"])
968
969 self._format_exception(e)
970
971 def get_network_list(self, filter_dict={}):
972 """Obtain tenant networks of VIM
973 Filter_dict can be:
974 name: network name
975 id: network uuid
976 shared: boolean
977 tenant_id: tenant
978 admin_state_up: boolean
979 status: 'ACTIVE'
980 Returns the network list of dictionaries
981 """
982 self.logger.debug("Getting network from VIM filter: '%s'", str(filter_dict))
983
984 try:
985 self._reload_connection()
986 filter_dict_os = filter_dict.copy()
987
988 if self.api_version3 and "tenant_id" in filter_dict_os:
989 # TODO check
990 filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")
991
992 net_dict = self.neutron.list_networks(**filter_dict_os)
993 net_list = net_dict["networks"]
994 self.__net_os2mano(net_list)
995
996 return net_list
997 except (
998 neExceptions.ConnectionFailed,
999 ksExceptions.ClientException,
1000 neExceptions.NeutronException,
1001 ConnectionError,
1002 ) as e:
1003 self._format_exception(e)
1004
1005 def get_network(self, net_id):
1006 """Obtain details of network from VIM
1007 Returns the network information from a network id"""
1008 self.logger.debug(" Getting tenant network %s from VIM", net_id)
1009 filter_dict = {"id": net_id}
1010 net_list = self.get_network_list(filter_dict)
1011
1012 if len(net_list) == 0:
1013 raise vimconn.VimConnNotFoundException(
1014 "Network '{}' not found".format(net_id)
1015 )
1016 elif len(net_list) > 1:
1017 raise vimconn.VimConnConflictException(
1018 "Found more than one network with this criteria"
1019 )
1020
1021 net = net_list[0]
1022 subnets = []
1023 for subnet_id in net.get("subnets", ()):
1024 try:
1025 subnet = self.neutron.show_subnet(subnet_id)
1026 except Exception as e:
1027 self.logger.error(
1028 "osconnector.get_network(): Error getting subnet %s %s"
1029 % (net_id, str(e))
1030 )
1031 subnet = {"id": subnet_id, "fault": str(e)}
1032
1033 subnets.append(subnet)
1034
1035 net["subnets"] = subnets
1036 net["encapsulation"] = net.get("provider:network_type")
1037 net["encapsulation_type"] = net.get("provider:network_type")
1038 net["segmentation_id"] = net.get("provider:segmentation_id")
1039 net["encapsulation_id"] = net.get("provider:segmentation_id")
1040
1041 return net
1042
1043 def delete_network(self, net_id, created_items=None):
1044 """
1045 Removes a tenant network from VIM and its associated elements
1046 :param net_id: VIM identifier of the network, provided by method new_network
1047 :param created_items: dictionary with extra items to be deleted. provided by method new_network
1048 Returns the network identifier or raises an exception upon error or when network is not found
1049 """
1050 self.logger.debug("Deleting network '%s' from VIM", net_id)
1051
1052 if created_items is None:
1053 created_items = {}
1054
1055 try:
1056 self._reload_connection()
1057 # delete l2gw connections (if any) before deleting the network
1058 for k, v in created_items.items():
1059 if not v: # skip already deleted
1060 continue
1061
1062 try:
1063 k_item, _, k_id = k.partition(":")
1064 if k_item == "l2gwconn":
1065 self.neutron.delete_l2_gateway_connection(k_id)
1066 except Exception as e:
1067 self.logger.error(
1068 "Error deleting l2 gateway connection: {}: {}".format(
1069 type(e).__name__, e
1070 )
1071 )
1072
1073 # delete VM ports attached to this networks before the network
1074 ports = self.neutron.list_ports(network_id=net_id)
1075 for p in ports["ports"]:
1076 try:
1077 self.neutron.delete_port(p["id"])
1078 except Exception as e:
1079 self.logger.error("Error deleting port %s: %s", p["id"], str(e))
1080
1081 self.neutron.delete_network(net_id)
1082
1083 return net_id
1084 except (
1085 neExceptions.ConnectionFailed,
1086 neExceptions.NetworkNotFoundClient,
1087 neExceptions.NeutronException,
1088 ksExceptions.ClientException,
1089 neExceptions.NeutronException,
1090 ConnectionError,
1091 ) as e:
1092 self._format_exception(e)
1093
1094 def refresh_nets_status(self, net_list):
1095 """Get the status of the networks
1096 Params: the list of network identifiers
1097 Returns a dictionary with:
1098 net_id: #VIM id of this network
1099 status: #Mandatory. Text with one of:
1100 # DELETED (not found at vim)
1101 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
1102 # OTHER (Vim reported other status not understood)
1103 # ERROR (VIM indicates an ERROR status)
1104 # ACTIVE, INACTIVE, DOWN (admin down),
1105 # BUILD (on building process)
1106 #
1107 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
1108 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
1109 """
1110 net_dict = {}
1111
1112 for net_id in net_list:
1113 net = {}
1114
1115 try:
1116 net_vim = self.get_network(net_id)
1117
1118 if net_vim["status"] in netStatus2manoFormat:
1119 net["status"] = netStatus2manoFormat[net_vim["status"]]
1120 else:
1121 net["status"] = "OTHER"
1122 net["error_msg"] = "VIM status reported " + net_vim["status"]
1123
1124 if net["status"] == "ACTIVE" and not net_vim["admin_state_up"]:
1125 net["status"] = "DOWN"
1126
1127 net["vim_info"] = self.serialize(net_vim)
1128
1129 if net_vim.get("fault"): # TODO
1130 net["error_msg"] = str(net_vim["fault"])
1131 except vimconn.VimConnNotFoundException as e:
1132 self.logger.error("Exception getting net status: %s", str(e))
1133 net["status"] = "DELETED"
1134 net["error_msg"] = str(e)
1135 except vimconn.VimConnException as e:
1136 self.logger.error("Exception getting net status: %s", str(e))
1137 net["status"] = "VIM_ERROR"
1138 net["error_msg"] = str(e)
1139 net_dict[net_id] = net
1140 return net_dict
1141
1142 def get_flavor(self, flavor_id):
1143 """Obtain flavor details from the VIM. Returns the flavor dict details"""
1144 self.logger.debug("Getting flavor '%s'", flavor_id)
1145
1146 try:
1147 self._reload_connection()
1148 flavor = self.nova.flavors.find(id=flavor_id)
1149 # TODO parse input and translate to VIM format (openmano_schemas.new_vminstance_response_schema)
1150
1151 return flavor.to_dict()
1152 except (
1153 nvExceptions.NotFound,
1154 nvExceptions.ClientException,
1155 ksExceptions.ClientException,
1156 ConnectionError,
1157 ) as e:
1158 self._format_exception(e)
1159
1160 def get_flavor_id_from_data(self, flavor_dict):
1161 """Obtain flavor id that match the flavor description
1162 Returns the flavor_id or raises a vimconnNotFoundException
1163 flavor_dict: contains the required ram, vcpus, disk
1164 If 'use_existing_flavors' is set to True at config, the closer flavor that provides same or more ram, vcpus
1165 and disk is returned. Otherwise a flavor with exactly same ram, vcpus and disk is returned or a
1166 vimconnNotFoundException is raised
1167 """
1168 exact_match = False if self.config.get("use_existing_flavors") else True
1169
1170 try:
1171 self._reload_connection()
1172 flavor_candidate_id = None
1173 flavor_candidate_data = (10000, 10000, 10000)
1174 flavor_target = (
1175 flavor_dict["ram"],
1176 flavor_dict["vcpus"],
1177 flavor_dict["disk"],
1178 flavor_dict.get("ephemeral", 0),
1179 flavor_dict.get("swap", 0),
1180 )
1181 # numa=None
1182 extended = flavor_dict.get("extended", {})
1183 if extended:
1184 # TODO
1185 raise vimconn.VimConnNotFoundException(
1186 "Flavor with EPA still not implemented"
1187 )
1188 # if len(numas) > 1:
1189 # raise vimconn.VimConnNotFoundException("Cannot find any flavor with more than one numa")
1190 # numa=numas[0]
1191 # numas = extended.get("numas")
1192 for flavor in self.nova.flavors.list():
1193 epa = flavor.get_keys()
1194
1195 if epa:
1196 continue
1197 # TODO
1198
1199 flavor_data = (
1200 flavor.ram,
1201 flavor.vcpus,
1202 flavor.disk,
1203 flavor.ephemeral,
1204 flavor.swap if isinstance(flavor.swap, int) else 0,
1205 )
1206 if flavor_data == flavor_target:
1207 return flavor.id
1208 elif (
1209 not exact_match
1210 and flavor_target < flavor_data < flavor_candidate_data
1211 ):
1212 flavor_candidate_id = flavor.id
1213 flavor_candidate_data = flavor_data
1214
1215 if not exact_match and flavor_candidate_id:
1216 return flavor_candidate_id
1217
1218 raise vimconn.VimConnNotFoundException(
1219 "Cannot find any flavor matching '{}'".format(flavor_dict)
1220 )
1221 except (
1222 nvExceptions.NotFound,
1223 nvExceptions.ClientException,
1224 ksExceptions.ClientException,
1225 ConnectionError,
1226 ) as e:
1227 self._format_exception(e)
1228
1229 def process_resource_quota(self, quota, prefix, extra_specs):
1230 """
1231 :param prefix:
1232 :param extra_specs:
1233 :return:
1234 """
1235 if "limit" in quota:
1236 extra_specs["quota:" + prefix + "_limit"] = quota["limit"]
1237
1238 if "reserve" in quota:
1239 extra_specs["quota:" + prefix + "_reservation"] = quota["reserve"]
1240
1241 if "shares" in quota:
1242 extra_specs["quota:" + prefix + "_shares_level"] = "custom"
1243 extra_specs["quota:" + prefix + "_shares_share"] = quota["shares"]
1244
    def new_flavor(self, flavor_data, change_name_if_used=True):
        """Adds a tenant flavor to openstack VIM
        if change_name_if_used is True, it will change name in case of conflict, because it is not supported name
        repetition
        Returns the flavor identifier

        flavor_data may contain: name (mandatory), ram, vcpus, disk, ephemeral, swap,
        is_public and an optional "extended" section with numas and *-quota entries.
        """
        self.logger.debug("Adding flavor '%s'", str(flavor_data))
        # retry loop handles name conflicts: on Conflict a new suffixed name is tried
        retry = 0
        max_retries = 3
        name_suffix = 0

        try:
            name = flavor_data["name"]
            while retry < max_retries:
                retry += 1
                try:
                    self._reload_connection()

                    if change_name_if_used:
                        # get used names
                        fl_names = []
                        fl = self.nova.flavors.list()

                        for f in fl:
                            fl_names.append(f.name)

                        # append "-<n>" until the name is unique among existing flavors
                        while name in fl_names:
                            name_suffix += 1
                            name = flavor_data["name"] + "-" + str(name_suffix)

                    # defaults when not provided: 64 MB ram, 1 vcpu
                    ram = flavor_data.get("ram", 64)
                    vcpus = flavor_data.get("vcpus", 1)
                    extra_specs = {}

                    extended = flavor_data.get("extended")
                    if extended:
                        numas = extended.get("numas")

                        if numas:
                            numa_nodes = len(numas)

                            # NOTE(review): returns a tuple instead of raising; callers
                            # expecting a flavor id should verify this path — TODO confirm
                            if numa_nodes > 1:
                                return -1, "Can not add flavor with more than one numa"

                            extra_specs["hw:numa_nodes"] = str(numa_nodes)
                            extra_specs["hw:mem_page_size"] = "large"
                            extra_specs["hw:cpu_policy"] = "dedicated"
                            extra_specs["hw:numa_mempolicy"] = "strict"

                            # VIO (VMware Integrated OpenStack) needs vendor extra_specs
                            if self.vim_type == "VIO":
                                extra_specs[
                                    "vmware:extra_config"
                                ] = '{"numa.nodeAffinity":"0"}'
                                extra_specs["vmware:latency_sensitivity_level"] = "high"

                            for numa in numas:
                                # overwrite ram and vcpus
                                # check if key "memory" is present in numa else use ram value at flavor
                                if "memory" in numa:
                                    ram = numa["memory"] * 1024
                                # See for reference: https://specs.openstack.org/openstack/nova-specs/specs/mitaka/
                                # implemented/virt-driver-cpu-thread-pinning.html
                                extra_specs["hw:cpu_sockets"] = 1

                                if "paired-threads" in numa:
                                    vcpus = numa["paired-threads"] * 2
                                    # cpu_thread_policy "require" implies that the compute node must have an
                                    # STM architecture
                                    extra_specs["hw:cpu_thread_policy"] = "require"
                                    extra_specs["hw:cpu_policy"] = "dedicated"
                                elif "cores" in numa:
                                    vcpus = numa["cores"]
                                    # cpu_thread_policy "prefer" implies that the host must not have an SMT
                                    # architecture, or a non-SMT architecture will be emulated
                                    extra_specs["hw:cpu_thread_policy"] = "isolate"
                                    extra_specs["hw:cpu_policy"] = "dedicated"
                                elif "threads" in numa:
                                    vcpus = numa["threads"]
                                    # cpu_thread_policy "prefer" implies that the host may or may not have an SMT
                                    # architecture
                                    extra_specs["hw:cpu_thread_policy"] = "prefer"
                                    extra_specs["hw:cpu_policy"] = "dedicated"
                                # for interface in numa.get("interfaces",() ):
                                #     if interface["dedicated"]=="yes":
                                #         raise vimconn.VimConnException("Passthrough interfaces are not supported
                                #         for the openstack connector", http_code=vimconn.HTTP_Service_Unavailable)
                                #     #TODO, add the key 'pci_passthrough:alias"="<label at config>:<number ifaces>"'
                                #     when a way to connect it is available
                        elif extended.get("cpu-quota"):
                            # no numas: apply resource quotas instead
                            self.process_resource_quota(
                                extended.get("cpu-quota"), "cpu", extra_specs
                            )

                        if extended.get("mem-quota"):
                            self.process_resource_quota(
                                extended.get("mem-quota"), "memory", extra_specs
                            )

                        if extended.get("vif-quota"):
                            self.process_resource_quota(
                                extended.get("vif-quota"), "vif", extra_specs
                            )

                        if extended.get("disk-io-quota"):
                            self.process_resource_quota(
                                extended.get("disk-io-quota"), "disk_io", extra_specs
                            )

                        # Set the mempage size as specified in the descriptor
                        if extended.get("mempage-size"):
                            if extended.get("mempage-size") == "LARGE":
                                extra_specs["hw:mem_page_size"] = "large"
                            elif extended.get("mempage-size") == "SMALL":
                                extra_specs["hw:mem_page_size"] = "small"
                            elif extended.get("mempage-size") == "SIZE_2MB":
                                extra_specs["hw:mem_page_size"] = "2MB"
                            elif extended.get("mempage-size") == "SIZE_1GB":
                                extra_specs["hw:mem_page_size"] = "1GB"
                            elif extended.get("mempage-size") == "PREFER_LARGE":
                                extra_specs["hw:mem_page_size"] = "any"
                            else:
                                # The validations in NBI should make reaching here not possible.
                                # If this message is shown, check validations
                                self.logger.debug(
                                    "Invalid mempage-size %s. Will be ignored",
                                    extended.get("mempage-size"),
                                )

                    # create flavor
                    new_flavor = self.nova.flavors.create(
                        name=name,
                        ram=ram,
                        vcpus=vcpus,
                        disk=flavor_data.get("disk", 0),
                        ephemeral=flavor_data.get("ephemeral", 0),
                        swap=flavor_data.get("swap", 0),
                        is_public=flavor_data.get("is_public", True),
                    )
                    # add metadata
                    if extra_specs:
                        new_flavor.set_keys(extra_specs)

                    return new_flavor.id
                except nvExceptions.Conflict as e:
                    # name collision: retry with a new suffixed name (if allowed)
                    if change_name_if_used and retry < max_retries:
                        continue

                    self._format_exception(e)
        # except nvExceptions.BadRequest as e:
        except (
            ksExceptions.ClientException,
            nvExceptions.ClientException,
            ConnectionError,
            KeyError,
        ) as e:
            self._format_exception(e)
1401
1402 def delete_flavor(self, flavor_id):
1403 """Deletes a tenant flavor from openstack VIM. Returns the old flavor_id"""
1404 try:
1405 self._reload_connection()
1406 self.nova.flavors.delete(flavor_id)
1407
1408 return flavor_id
1409 # except nvExceptions.BadRequest as e:
1410 except (
1411 nvExceptions.NotFound,
1412 ksExceptions.ClientException,
1413 nvExceptions.ClientException,
1414 ConnectionError,
1415 ) as e:
1416 self._format_exception(e)
1417
1418 def new_image(self, image_dict):
1419 """
1420 Adds a tenant image to VIM. imge_dict is a dictionary with:
1421 name: name
1422 disk_format: qcow2, vhd, vmdk, raw (by default), ...
1423 location: path or URI
1424 public: "yes" or "no"
1425 metadata: metadata of the image
1426 Returns the image_id
1427 """
1428 retry = 0
1429 max_retries = 3
1430
1431 while retry < max_retries:
1432 retry += 1
1433 try:
1434 self._reload_connection()
1435
1436 # determine format http://docs.openstack.org/developer/glance/formats.html
1437 if "disk_format" in image_dict:
1438 disk_format = image_dict["disk_format"]
1439 else: # autodiscover based on extension
1440 if image_dict["location"].endswith(".qcow2"):
1441 disk_format = "qcow2"
1442 elif image_dict["location"].endswith(".vhd"):
1443 disk_format = "vhd"
1444 elif image_dict["location"].endswith(".vmdk"):
1445 disk_format = "vmdk"
1446 elif image_dict["location"].endswith(".vdi"):
1447 disk_format = "vdi"
1448 elif image_dict["location"].endswith(".iso"):
1449 disk_format = "iso"
1450 elif image_dict["location"].endswith(".aki"):
1451 disk_format = "aki"
1452 elif image_dict["location"].endswith(".ari"):
1453 disk_format = "ari"
1454 elif image_dict["location"].endswith(".ami"):
1455 disk_format = "ami"
1456 else:
1457 disk_format = "raw"
1458
1459 self.logger.debug(
1460 "new_image: '%s' loading from '%s'",
1461 image_dict["name"],
1462 image_dict["location"],
1463 )
1464 if self.vim_type == "VIO":
1465 container_format = "bare"
1466 if "container_format" in image_dict:
1467 container_format = image_dict["container_format"]
1468
1469 new_image = self.glance.images.create(
1470 name=image_dict["name"],
1471 container_format=container_format,
1472 disk_format=disk_format,
1473 )
1474 else:
1475 new_image = self.glance.images.create(name=image_dict["name"])
1476
1477 if image_dict["location"].startswith("http"):
1478 # TODO there is not a method to direct download. It must be downloaded locally with requests
1479 raise vimconn.VimConnNotImplemented("Cannot create image from URL")
1480 else: # local path
1481 with open(image_dict["location"]) as fimage:
1482 self.glance.images.upload(new_image.id, fimage)
1483 # new_image = self.glancev1.images.create(name=image_dict["name"], is_public=
1484 # image_dict.get("public","yes")=="yes",
1485 # container_format="bare", data=fimage, disk_format=disk_format)
1486
1487 metadata_to_load = image_dict.get("metadata")
1488
1489 # TODO location is a reserved word for current openstack versions. fixed for VIO please check
1490 # for openstack
1491 if self.vim_type == "VIO":
1492 metadata_to_load["upload_location"] = image_dict["location"]
1493 else:
1494 metadata_to_load["location"] = image_dict["location"]
1495
1496 self.glance.images.update(new_image.id, **metadata_to_load)
1497
1498 return new_image.id
1499 except (
1500 nvExceptions.Conflict,
1501 ksExceptions.ClientException,
1502 nvExceptions.ClientException,
1503 ) as e:
1504 self._format_exception(e)
1505 except (
1506 HTTPException,
1507 gl1Exceptions.HTTPException,
1508 gl1Exceptions.CommunicationError,
1509 ConnectionError,
1510 ) as e:
1511 if retry == max_retries:
1512 continue
1513
1514 self._format_exception(e)
1515 except IOError as e: # can not open the file
1516 raise vimconn.VimConnConnectionException(
1517 "{}: {} for {}".format(type(e).__name__, e, image_dict["location"]),
1518 http_code=vimconn.HTTP_Bad_Request,
1519 )
1520
1521 def delete_image(self, image_id):
1522 """Deletes a tenant image from openstack VIM. Returns the old id"""
1523 try:
1524 self._reload_connection()
1525 self.glance.images.delete(image_id)
1526
1527 return image_id
1528 except (
1529 nvExceptions.NotFound,
1530 ksExceptions.ClientException,
1531 nvExceptions.ClientException,
1532 gl1Exceptions.CommunicationError,
1533 gl1Exceptions.HTTPNotFound,
1534 ConnectionError,
1535 ) as e: # TODO remove
1536 self._format_exception(e)
1537
1538 def get_image_id_from_path(self, path):
1539 """Get the image id from image path in the VIM database. Returns the image_id"""
1540 try:
1541 self._reload_connection()
1542 images = self.glance.images.list()
1543
1544 for image in images:
1545 if image.metadata.get("location") == path:
1546 return image.id
1547
1548 raise vimconn.VimConnNotFoundException(
1549 "image with location '{}' not found".format(path)
1550 )
1551 except (
1552 ksExceptions.ClientException,
1553 nvExceptions.ClientException,
1554 gl1Exceptions.CommunicationError,
1555 ConnectionError,
1556 ) as e:
1557 self._format_exception(e)
1558
1559 def get_image_list(self, filter_dict={}):
1560 """Obtain tenant images from VIM
1561 Filter_dict can be:
1562 id: image id
1563 name: image name
1564 checksum: image checksum
1565 Returns the image list of dictionaries:
1566 [{<the fields at Filter_dict plus some VIM specific>}, ...]
1567 List can be empty
1568 """
1569 self.logger.debug("Getting image list from VIM filter: '%s'", str(filter_dict))
1570
1571 try:
1572 self._reload_connection()
1573 # filter_dict_os = filter_dict.copy()
1574 # First we filter by the available filter fields: name, id. The others are removed.
1575 image_list = self.glance.images.list()
1576 filtered_list = []
1577
1578 for image in image_list:
1579 try:
1580 if filter_dict.get("name") and image["name"] != filter_dict["name"]:
1581 continue
1582
1583 if filter_dict.get("id") and image["id"] != filter_dict["id"]:
1584 continue
1585
1586 if (
1587 filter_dict.get("checksum")
1588 and image["checksum"] != filter_dict["checksum"]
1589 ):
1590 continue
1591
1592 filtered_list.append(image.copy())
1593 except gl1Exceptions.HTTPNotFound:
1594 pass
1595
1596 return filtered_list
1597 except (
1598 ksExceptions.ClientException,
1599 nvExceptions.ClientException,
1600 gl1Exceptions.CommunicationError,
1601 ConnectionError,
1602 ) as e:
1603 self._format_exception(e)
1604
1605 def __wait_for_vm(self, vm_id, status):
1606 """wait until vm is in the desired status and return True.
1607 If the VM gets in ERROR status, return false.
1608 If the timeout is reached generate an exception"""
1609 elapsed_time = 0
1610 while elapsed_time < server_timeout:
1611 vm_status = self.nova.servers.get(vm_id).status
1612
1613 if vm_status == status:
1614 return True
1615
1616 if vm_status == "ERROR":
1617 return False
1618
1619 time.sleep(5)
1620 elapsed_time += 5
1621
1622 # if we exceeded the timeout rollback
1623 if elapsed_time >= server_timeout:
1624 raise vimconn.VimConnException(
1625 "Timeout waiting for instance " + vm_id + " to get " + status,
1626 http_code=vimconn.HTTP_Request_Timeout,
1627 )
1628
1629 def _get_openstack_availablity_zones(self):
1630 """
1631 Get from openstack availability zones available
1632 :return:
1633 """
1634 try:
1635 openstack_availability_zone = self.nova.availability_zones.list()
1636 openstack_availability_zone = [
1637 str(zone.zoneName)
1638 for zone in openstack_availability_zone
1639 if zone.zoneName != "internal"
1640 ]
1641
1642 return openstack_availability_zone
1643 except Exception:
1644 return None
1645
1646 def _set_availablity_zones(self):
1647 """
1648 Set vim availablity zone
1649 :return:
1650 """
1651 if "availability_zone" in self.config:
1652 vim_availability_zones = self.config.get("availability_zone")
1653
1654 if isinstance(vim_availability_zones, str):
1655 self.availability_zone = [vim_availability_zones]
1656 elif isinstance(vim_availability_zones, list):
1657 self.availability_zone = vim_availability_zones
1658 else:
1659 self.availability_zone = self._get_openstack_availablity_zones()
1660
1661 def _get_vm_availability_zone(
1662 self, availability_zone_index, availability_zone_list
1663 ):
1664 """
1665 Return thge availability zone to be used by the created VM.
1666 :return: The VIM availability zone to be used or None
1667 """
1668 if availability_zone_index is None:
1669 if not self.config.get("availability_zone"):
1670 return None
1671 elif isinstance(self.config.get("availability_zone"), str):
1672 return self.config["availability_zone"]
1673 else:
1674 # TODO consider using a different parameter at config for default AV and AV list match
1675 return self.config["availability_zone"][0]
1676
1677 vim_availability_zones = self.availability_zone
1678 # check if VIM offer enough availability zones describe in the VNFD
1679 if vim_availability_zones and len(availability_zone_list) <= len(
1680 vim_availability_zones
1681 ):
1682 # check if all the names of NFV AV match VIM AV names
1683 match_by_index = False
1684 for av in availability_zone_list:
1685 if av not in vim_availability_zones:
1686 match_by_index = True
1687 break
1688
1689 if match_by_index:
1690 return vim_availability_zones[availability_zone_index]
1691 else:
1692 return availability_zone_list[availability_zone_index]
1693 else:
1694 raise vimconn.VimConnConflictException(
1695 "No enough availability zones at VIM for this deployment"
1696 )
1697
1698 def new_vminstance(
1699 self,
1700 name,
1701 description,
1702 start,
1703 image_id,
1704 flavor_id,
1705 affinity_group_list,
1706 net_list,
1707 cloud_config=None,
1708 disk_list=None,
1709 availability_zone_index=None,
1710 availability_zone_list=None,
1711 ):
1712 """Adds a VM instance to VIM
1713 Params:
1714 start: indicates if VM must start or boot in pause mode. Ignored
1715 image_id,flavor_id: image and flavor uuid
1716 affinity_group_list: list of affinity groups, each one is a dictionary.
1717 Ignore if empty.
1718 net_list: list of interfaces, each one is a dictionary with:
1719 name:
1720 net_id: network uuid to connect
1721 vpci: virtual vcpi to assign, ignored because openstack lack #TODO
1722 model: interface model, ignored #TODO
1723 mac_address: used for SR-IOV ifaces #TODO for other types
1724 use: 'data', 'bridge', 'mgmt'
1725 type: 'virtual', 'PCI-PASSTHROUGH'('PF'), 'SR-IOV'('VF'), 'VFnotShared'
1726 vim_id: filled/added by this function
1727 floating_ip: True/False (or it can be None)
1728 port_security: True/False
1729 'cloud_config': (optional) dictionary with:
1730 'key-pairs': (optional) list of strings with the public key to be inserted to the default user
1731 'users': (optional) list of users to be inserted, each item is a dict with:
1732 'name': (mandatory) user name,
1733 'key-pairs': (optional) list of strings with the public key to be inserted to the user
1734 'user-data': (optional) string is a text script to be passed directly to cloud-init
1735 'config-files': (optional). List of files to be transferred. Each item is a dict with:
1736 'dest': (mandatory) string with the destination absolute path
1737 'encoding': (optional, by default text). Can be one of:
1738 'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
1739 'content' (mandatory): string with the content of the file
1740 'permissions': (optional) string with file permissions, typically octal notation '0644'
1741 'owner': (optional) file owner, string with the format 'owner:group'
1742 'boot-data-drive': boolean to indicate if user-data must be passed using a boot drive (hard disk)
1743 'disk_list': (optional) list with additional disks to the VM. Each item is a dict with:
1744 'image_id': (optional). VIM id of an existing image. If not provided an empty disk must be mounted
1745 'size': (mandatory) string with the size of the disk in GB
1746 'vim_id' (optional) should use this existing volume id
1747 availability_zone_index: Index of availability_zone_list to use for this this VM. None if not AV required
1748 availability_zone_list: list of availability zones given by user in the VNFD descriptor. Ignore if
1749 availability_zone_index is None
1750 #TODO ip, security groups
1751 Returns a tuple with the instance identifier and created_items or raises an exception on error
1752 created_items can be None or a dictionary where this method can include key-values that will be passed to
1753 the method delete_vminstance and action_vminstance. Can be used to store created ports, volumes, etc.
1754 Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
1755 as not present.
1756 """
1757 self.logger.debug(
1758 "new_vminstance input: image='%s' flavor='%s' nics='%s'",
1759 image_id,
1760 flavor_id,
1761 str(net_list),
1762 )
1763
1764 try:
1765 server = None
1766 created_items = {}
1767 # metadata = {}
1768 net_list_vim = []
1769 external_network = []
1770 # ^list of external networks to be connected to instance, later on used to create floating_ip
1771 no_secured_ports = [] # List of port-is with port-security disabled
1772 self._reload_connection()
1773 # metadata_vpci = {} # For a specific neutron plugin
1774 block_device_mapping = None
1775
1776 for net in net_list:
1777 if not net.get("net_id"): # skip non connected iface
1778 continue
1779
1780 port_dict = {
1781 "network_id": net["net_id"],
1782 "name": net.get("name"),
1783 "admin_state_up": True,
1784 }
1785
1786 if (
1787 self.config.get("security_groups")
1788 and net.get("port_security") is not False
1789 and not self.config.get("no_port_security_extension")
1790 ):
1791 if not self.security_groups_id:
1792 self._get_ids_from_name()
1793
1794 port_dict["security_groups"] = self.security_groups_id
1795
1796 if net["type"] == "virtual":
1797 pass
1798 # if "vpci" in net:
1799 # metadata_vpci[ net["net_id"] ] = [[ net["vpci"], "" ]]
1800 elif net["type"] == "VF" or net["type"] == "SR-IOV": # for VF
1801 # if "vpci" in net:
1802 # if "VF" not in metadata_vpci:
1803 # metadata_vpci["VF"]=[]
1804 # metadata_vpci["VF"].append([ net["vpci"], "" ])
1805 port_dict["binding:vnic_type"] = "direct"
1806
1807 # VIO specific Changes
1808 if self.vim_type == "VIO":
1809 # Need to create port with port_security_enabled = False and no-security-groups
1810 port_dict["port_security_enabled"] = False
1811 port_dict["provider_security_groups"] = []
1812 port_dict["security_groups"] = []
1813 else: # For PT PCI-PASSTHROUGH
1814 # if "vpci" in net:
1815 # if "PF" not in metadata_vpci:
1816 # metadata_vpci["PF"]=[]
1817 # metadata_vpci["PF"].append([ net["vpci"], "" ])
1818 port_dict["binding:vnic_type"] = "direct-physical"
1819
1820 if not port_dict["name"]:
1821 port_dict["name"] = name
1822
1823 if net.get("mac_address"):
1824 port_dict["mac_address"] = net["mac_address"]
1825
1826 if net.get("ip_address"):
1827 port_dict["fixed_ips"] = [{"ip_address": net["ip_address"]}]
1828 # TODO add "subnet_id": <subnet_id>
1829
1830 new_port = self.neutron.create_port({"port": port_dict})
1831 created_items["port:" + str(new_port["port"]["id"])] = True
1832 net["mac_adress"] = new_port["port"]["mac_address"]
1833 net["vim_id"] = new_port["port"]["id"]
1834 # if try to use a network without subnetwork, it will return a emtpy list
1835 fixed_ips = new_port["port"].get("fixed_ips")
1836
1837 if fixed_ips:
1838 net["ip"] = fixed_ips[0].get("ip_address")
1839 else:
1840 net["ip"] = None
1841
1842 port = {"port-id": new_port["port"]["id"]}
1843 if float(self.nova.api_version.get_string()) >= 2.32:
1844 port["tag"] = new_port["port"]["name"]
1845
1846 net_list_vim.append(port)
1847
1848 if net.get("floating_ip", False):
1849 net["exit_on_floating_ip_error"] = True
1850 external_network.append(net)
1851 elif net["use"] == "mgmt" and self.config.get("use_floating_ip"):
1852 net["exit_on_floating_ip_error"] = False
1853 external_network.append(net)
1854 net["floating_ip"] = self.config.get("use_floating_ip")
1855
1856 # If port security is disabled when the port has not yet been attached to the VM, then all vm traffic
1857 # is dropped.
1858 # As a workaround we wait until the VM is active and then disable the port-security
1859 if net.get("port_security") is False and not self.config.get(
1860 "no_port_security_extension"
1861 ):
1862 no_secured_ports.append(
1863 (
1864 new_port["port"]["id"],
1865 net.get("port_security_disable_strategy"),
1866 )
1867 )
1868
1869 # if metadata_vpci:
1870 # metadata = {"pci_assignement": json.dumps(metadata_vpci)}
1871 # if len(metadata["pci_assignement"]) >255:
1872 # #limit the metadata size
1873 # #metadata["pci_assignement"] = metadata["pci_assignement"][0:255]
1874 # self.logger.warn("Metadata deleted since it exceeds the expected length (255) ")
1875 # metadata = {}
1876
1877 self.logger.debug(
1878 "name '%s' image_id '%s'flavor_id '%s' net_list_vim '%s' description '%s'",
1879 name,
1880 image_id,
1881 flavor_id,
1882 str(net_list_vim),
1883 description,
1884 )
1885
1886 # cloud config
1887 config_drive, userdata = self._create_user_data(cloud_config)
1888
1889 # get availability Zone
1890 vm_av_zone = self._get_vm_availability_zone(
1891 availability_zone_index, availability_zone_list
1892 )
1893
1894 # Create additional volumes in case these are present in disk_list
1895 existing_vim_volumes = []
1896 base_disk_index = ord("b")
1897 boot_volume_id = None
1898 if disk_list:
1899 block_device_mapping = {}
1900 for disk in disk_list:
1901 if disk.get("vim_id"):
1902 block_device_mapping["_vd" + chr(base_disk_index)] = disk[
1903 "vim_id"
1904 ]
1905 existing_vim_volumes.append({"id": disk["vim_id"]})
1906 else:
1907 if "image_id" in disk:
1908 base_disk_index = ord("a")
1909 volume = self.cinder.volumes.create(
1910 size=disk["size"],
1911 name=name + "_vd" + chr(base_disk_index),
1912 imageRef=disk["image_id"],
1913 # Make sure volume is in the same AZ as the VM to be attached to
1914 availability_zone=vm_av_zone,
1915 )
1916 boot_volume_id = volume.id
1917 else:
1918 volume = self.cinder.volumes.create(
1919 size=disk["size"],
1920 name=name + "_vd" + chr(base_disk_index),
1921 # Make sure volume is in the same AZ as the VM to be attached to
1922 availability_zone=vm_av_zone,
1923 )
1924
1925 created_items["volume:" + str(volume.id)] = True
1926 block_device_mapping["_vd" + chr(base_disk_index)] = volume.id
1927
1928 base_disk_index += 1
1929
1930 # Wait until created volumes are with status available
1931 elapsed_time = 0
1932 while elapsed_time < volume_timeout:
1933 for created_item in created_items:
1934 v, _, volume_id = created_item.partition(":")
1935 if v == "volume":
1936 if self.cinder.volumes.get(volume_id).status != "available":
1937 break
1938 else: # all ready: break from while
1939 break
1940
1941 time.sleep(5)
1942 elapsed_time += 5
1943
1944 # Wait until existing volumes in vim are with status available
1945 while elapsed_time < volume_timeout:
1946 for volume in existing_vim_volumes:
1947 if self.cinder.volumes.get(volume["id"]).status != "available":
1948 break
1949 else: # all ready: break from while
1950 break
1951
1952 time.sleep(5)
1953 elapsed_time += 5
1954
1955 # If we exceeded the timeout rollback
1956 if elapsed_time >= volume_timeout:
1957 raise vimconn.VimConnException(
1958 "Timeout creating volumes for instance " + name,
1959 http_code=vimconn.HTTP_Request_Timeout,
1960 )
1961 if boot_volume_id:
1962 self.cinder.volumes.set_bootable(boot_volume_id, True)
1963
1964 # Manage affinity groups/server groups
1965 server_group_id = None
1966 scheduller_hints = {}
1967
1968 if affinity_group_list:
1969 # Only first id on the list will be used. Openstack restriction
1970 server_group_id = affinity_group_list[0]["affinity_group_id"]
1971 scheduller_hints["group"] = server_group_id
1972
1973 self.logger.debug(
1974 "nova.servers.create({}, {}, {}, nics={}, security_groups={}, "
1975 "availability_zone={}, key_name={}, userdata={}, config_drive={}, "
1976 "block_device_mapping={}, server_group={})".format(
1977 name,
1978 image_id,
1979 flavor_id,
1980 net_list_vim,
1981 self.config.get("security_groups"),
1982 vm_av_zone,
1983 self.config.get("keypair"),
1984 userdata,
1985 config_drive,
1986 block_device_mapping,
1987 server_group_id,
1988 )
1989 )
1990 server = self.nova.servers.create(
1991 name,
1992 image_id,
1993 flavor_id,
1994 nics=net_list_vim,
1995 security_groups=self.config.get("security_groups"),
1996 # TODO remove security_groups in future versions. Already at neutron port
1997 availability_zone=vm_av_zone,
1998 key_name=self.config.get("keypair"),
1999 userdata=userdata,
2000 config_drive=config_drive,
2001 block_device_mapping=block_device_mapping,
2002 scheduler_hints=scheduller_hints,
2003 ) # , description=description)
2004
2005 vm_start_time = time.time()
2006 # Previously mentioned workaround to wait until the VM is active and then disable the port-security
2007 if no_secured_ports:
2008 self.__wait_for_vm(server.id, "ACTIVE")
2009
2010 for port in no_secured_ports:
2011 port_update = {
2012 "port": {"port_security_enabled": False, "security_groups": None}
2013 }
2014
2015 if port[1] == "allow-address-pairs":
2016 port_update = {
2017 "port": {"allowed_address_pairs": [{"ip_address": "0.0.0.0/0"}]}
2018 }
2019
2020 try:
2021 self.neutron.update_port(port[0], port_update)
2022 except Exception:
2023 raise vimconn.VimConnException(
2024 "It was not possible to disable port security for port {}".format(
2025 port[0]
2026 )
2027 )
2028
2029 # print "DONE :-)", server
2030
2031 # pool_id = None
2032 for floating_network in external_network:
2033 try:
2034 assigned = False
2035 floating_ip_retries = 3
2036 # In case of RO in HA there can be conflicts, two RO trying to assign same floating IP, so retry
2037 # several times
2038 while not assigned:
2039 floating_ips = self.neutron.list_floatingips().get(
2040 "floatingips", ()
2041 )
2042 random.shuffle(floating_ips) # randomize
2043 for fip in floating_ips:
2044 if (
2045 fip.get("port_id")
2046 or fip.get("tenant_id") != server.tenant_id
2047 ):
2048 continue
2049
2050 if isinstance(floating_network["floating_ip"], str):
2051 if (
2052 fip.get("floating_network_id")
2053 != floating_network["floating_ip"]
2054 ):
2055 continue
2056
2057 free_floating_ip = fip["id"]
2058 break
2059 else:
2060 if (
2061 isinstance(floating_network["floating_ip"], str)
2062 and floating_network["floating_ip"].lower() != "true"
2063 ):
2064 pool_id = floating_network["floating_ip"]
2065 else:
2066 # Find the external network
2067 external_nets = list()
2068
2069 for net in self.neutron.list_networks()["networks"]:
2070 if net["router:external"]:
2071 external_nets.append(net)
2072
2073 if len(external_nets) == 0:
2074 raise vimconn.VimConnException(
2075 "Cannot create floating_ip automatically since "
2076 "no external network is present",
2077 http_code=vimconn.HTTP_Conflict,
2078 )
2079
2080 if len(external_nets) > 1:
2081 raise vimconn.VimConnException(
2082 "Cannot create floating_ip automatically since "
2083 "multiple external networks are present",
2084 http_code=vimconn.HTTP_Conflict,
2085 )
2086
2087 pool_id = external_nets[0].get("id")
2088
2089 param = {
2090 "floatingip": {
2091 "floating_network_id": pool_id,
2092 "tenant_id": server.tenant_id,
2093 }
2094 }
2095
2096 try:
2097 # self.logger.debug("Creating floating IP")
2098 new_floating_ip = self.neutron.create_floatingip(param)
2099 free_floating_ip = new_floating_ip["floatingip"]["id"]
2100 created_items[
2101 "floating_ip:" + str(free_floating_ip)
2102 ] = True
2103 except Exception as e:
2104 raise vimconn.VimConnException(
2105 type(e).__name__
2106 + ": Cannot create new floating_ip "
2107 + str(e),
2108 http_code=vimconn.HTTP_Conflict,
2109 )
2110
2111 try:
2112 # for race condition ensure not already assigned
2113 fip = self.neutron.show_floatingip(free_floating_ip)
2114
2115 if fip["floatingip"]["port_id"]:
2116 continue
2117
2118 # the vim_id key contains the neutron.port_id
2119 self.neutron.update_floatingip(
2120 free_floating_ip,
2121 {"floatingip": {"port_id": floating_network["vim_id"]}},
2122 )
2123 # for race condition ensure not re-assigned to other VM after 5 seconds
2124 time.sleep(5)
2125 fip = self.neutron.show_floatingip(free_floating_ip)
2126
2127 if (
2128 fip["floatingip"]["port_id"]
2129 != floating_network["vim_id"]
2130 ):
2131 self.logger.error(
2132 "floating_ip {} re-assigned to other port".format(
2133 free_floating_ip
2134 )
2135 )
2136 continue
2137
2138 self.logger.debug(
2139 "Assigned floating_ip {} to VM {}".format(
2140 free_floating_ip, server.id
2141 )
2142 )
2143 assigned = True
2144 except Exception as e:
2145 # openstack need some time after VM creation to assign an IP. So retry if fails
2146 vm_status = self.nova.servers.get(server.id).status
2147
2148 if vm_status not in ("ACTIVE", "ERROR"):
2149 if time.time() - vm_start_time < server_timeout:
2150 time.sleep(5)
2151 continue
2152 elif floating_ip_retries > 0:
2153 floating_ip_retries -= 1
2154 continue
2155
2156 raise vimconn.VimConnException(
2157 "Cannot create floating_ip: {} {}".format(
2158 type(e).__name__, e
2159 ),
2160 http_code=vimconn.HTTP_Conflict,
2161 )
2162
2163 except Exception as e:
2164 if not floating_network["exit_on_floating_ip_error"]:
2165 self.logger.error("Cannot create floating_ip. %s", str(e))
2166 continue
2167
2168 raise
2169
2170 return server.id, created_items
2171 # except nvExceptions.NotFound as e:
2172 # error_value=-vimconn.HTTP_Not_Found
2173 # error_text= "vm instance %s not found" % vm_id
2174 # except TypeError as e:
2175 # raise vimconn.VimConnException(type(e).__name__ + ": "+ str(e), http_code=vimconn.HTTP_Bad_Request)
2176
2177 except Exception as e:
2178 server_id = None
2179 if server:
2180 server_id = server.id
2181
2182 try:
2183 self.delete_vminstance(server_id, created_items)
2184 except Exception as e2:
2185 self.logger.error("new_vminstance rollback fail {}".format(e2))
2186
2187 self._format_exception(e)
2188
2189 def get_vminstance(self, vm_id):
2190 """Returns the VM instance information from VIM"""
2191 # self.logger.debug("Getting VM from VIM")
2192 try:
2193 self._reload_connection()
2194 server = self.nova.servers.find(id=vm_id)
2195 # TODO parse input and translate to VIM format (openmano_schemas.new_vminstance_response_schema)
2196
2197 return server.to_dict()
2198 except (
2199 ksExceptions.ClientException,
2200 nvExceptions.ClientException,
2201 nvExceptions.NotFound,
2202 ConnectionError,
2203 ) as e:
2204 self._format_exception(e)
2205
2206 def get_vminstance_console(self, vm_id, console_type="vnc"):
2207 """
2208 Get a console for the virtual machine
2209 Params:
2210 vm_id: uuid of the VM
2211 console_type, can be:
2212 "novnc" (by default), "xvpvnc" for VNC types,
2213 "rdp-html5" for RDP types, "spice-html5" for SPICE types
2214 Returns dict with the console parameters:
2215 protocol: ssh, ftp, http, https, ...
2216 server: usually ip address
2217 port: the http, ssh, ... port
2218 suffix: extra text, e.g. the http path and query string
2219 """
2220 self.logger.debug("Getting VM CONSOLE from VIM")
2221
2222 try:
2223 self._reload_connection()
2224 server = self.nova.servers.find(id=vm_id)
2225
2226 if console_type is None or console_type == "novnc":
2227 console_dict = server.get_vnc_console("novnc")
2228 elif console_type == "xvpvnc":
2229 console_dict = server.get_vnc_console(console_type)
2230 elif console_type == "rdp-html5":
2231 console_dict = server.get_rdp_console(console_type)
2232 elif console_type == "spice-html5":
2233 console_dict = server.get_spice_console(console_type)
2234 else:
2235 raise vimconn.VimConnException(
2236 "console type '{}' not allowed".format(console_type),
2237 http_code=vimconn.HTTP_Bad_Request,
2238 )
2239
2240 console_dict1 = console_dict.get("console")
2241
2242 if console_dict1:
2243 console_url = console_dict1.get("url")
2244
2245 if console_url:
2246 # parse console_url
2247 protocol_index = console_url.find("//")
2248 suffix_index = (
2249 console_url[protocol_index + 2 :].find("/") + protocol_index + 2
2250 )
2251 port_index = (
2252 console_url[protocol_index + 2 : suffix_index].find(":")
2253 + protocol_index
2254 + 2
2255 )
2256
2257 if protocol_index < 0 or port_index < 0 or suffix_index < 0:
2258 return (
2259 -vimconn.HTTP_Internal_Server_Error,
2260 "Unexpected response from VIM",
2261 )
2262
2263 console_dict = {
2264 "protocol": console_url[0:protocol_index],
2265 "server": console_url[protocol_index + 2 : port_index],
2266 "port": console_url[port_index:suffix_index],
2267 "suffix": console_url[suffix_index + 1 :],
2268 }
2269 protocol_index += 2
2270
2271 return console_dict
2272 raise vimconn.VimConnUnexpectedResponse("Unexpected response from VIM")
2273 except (
2274 nvExceptions.NotFound,
2275 ksExceptions.ClientException,
2276 nvExceptions.ClientException,
2277 nvExceptions.BadRequest,
2278 ConnectionError,
2279 ) as e:
2280 self._format_exception(e)
2281
2282 def delete_vminstance(self, vm_id, created_items=None, volumes_to_hold=None):
2283 """Removes a VM instance from VIM. Returns the old identifier"""
2284 # print "osconnector: Getting VM from VIM"
2285 if created_items is None:
2286 created_items = {}
2287
2288 try:
2289 self._reload_connection()
2290 # delete VM ports attached to this networks before the virtual machine
2291 for k, v in created_items.items():
2292 if not v: # skip already deleted
2293 continue
2294
2295 try:
2296 k_item, _, k_id = k.partition(":")
2297 if k_item == "port":
2298 port_dict = self.neutron.list_ports()
2299 existing_ports = [
2300 port["id"] for port in port_dict["ports"] if port_dict
2301 ]
2302 if k_id in existing_ports:
2303 self.neutron.delete_port(k_id)
2304 except Exception as e:
2305 self.logger.error(
2306 "Error deleting port: {}: {}".format(type(e).__name__, e)
2307 )
2308
2309 # #commented because detaching the volumes makes the servers.delete not work properly ?!?
2310 # #dettach volumes attached
2311 # server = self.nova.servers.get(vm_id)
2312 # volumes_attached_dict = server._info["os-extended-volumes:volumes_attached"] #volume["id"]
2313 # #for volume in volumes_attached_dict:
2314 # # self.cinder.volumes.detach(volume["id"])
2315
2316 if vm_id:
2317 self.nova.servers.delete(vm_id)
2318
2319 # delete volumes. Although having detached, they should have in active status before deleting
2320 # we ensure in this loop
2321 keep_waiting = True
2322 elapsed_time = 0
2323
2324 while keep_waiting and elapsed_time < volume_timeout:
2325 keep_waiting = False
2326
2327 for k, v in created_items.items():
2328 if not v: # skip already deleted
2329 continue
2330
2331 try:
2332 k_item, _, k_id = k.partition(":")
2333 if k_item == "volume":
2334 if self.cinder.volumes.get(k_id).status != "available":
2335 keep_waiting = True
2336 else:
2337 if k_id not in volumes_to_hold:
2338 self.cinder.volumes.delete(k_id)
2339 created_items[k] = None
2340 elif k_item == "floating_ip": # floating ip
2341 self.neutron.delete_floatingip(k_id)
2342 created_items[k] = None
2343
2344 except Exception as e:
2345 self.logger.error("Error deleting {}: {}".format(k, e))
2346
2347 if keep_waiting:
2348 time.sleep(1)
2349 elapsed_time += 1
2350
2351 return None
2352 except (
2353 nvExceptions.NotFound,
2354 ksExceptions.ClientException,
2355 nvExceptions.ClientException,
2356 ConnectionError,
2357 ) as e:
2358 self._format_exception(e)
2359
    def refresh_vms_status(self, vm_list):
        """Get the status of the virtual machines and their interfaces/ports.

        Params: the list of VM identifiers
        Returns a dictionary with:
            vm_id: #VIM id of this Virtual Machine
                status: #Mandatory. Text with one of:
                    # DELETED (not found at vim)
                    # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
                    # OTHER (Vim reported other status not understood)
                    # ERROR (VIM indicates an ERROR status)
                    # ACTIVE, PAUSED, SUSPENDED, INACTIVE (not running),
                    # CREATING (on building process), ERROR
                    # ACTIVE:NoMgmtIP (Active but any of its interface has an IP address
                    #
                error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
                vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
                interfaces:
                 - vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
                   mac_address: #Text format XX:XX:XX:XX:XX:XX
                   vim_net_id: #network id where this interface is connected
                   vim_interface_id: #interface/port VIM id
                   ip_address: #null, or text with IPv4, IPv6 address
                   compute_node: #identification of compute node where PF,VF interface is allocated
                   pci: #PCI address of the NIC that hosts the PF,VF
                   vlan: #physical VLAN used for VF
        """
        vm_dict = {}
        self.logger.debug(
            "refresh_vms status: Getting tenant VM instance information from VIM"
        )

        for vm_id in vm_list:
            vm = {}

            try:
                vm_vim = self.get_vminstance(vm_id)

                # translate the nova status into the MANO status vocabulary
                if vm_vim["status"] in vmStatus2manoFormat:
                    vm["status"] = vmStatus2manoFormat[vm_vim["status"]]
                else:
                    vm["status"] = "OTHER"
                    vm["error_msg"] = "VIM status reported " + vm_vim["status"]

                # strip (possibly large/sensitive) user_data before serializing vim_info
                vm_vim.pop("OS-EXT-SRV-ATTR:user_data", None)
                vm_vim.pop("user_data", None)
                vm["vim_info"] = self.serialize(vm_vim)

                vm["interfaces"] = []
                if vm_vim.get("fault"):
                    vm["error_msg"] = str(vm_vim["fault"])

                # get interfaces: one entry per neutron port attached to this VM
                try:
                    self._reload_connection()
                    port_dict = self.neutron.list_ports(device_id=vm_id)

                    for port in port_dict["ports"]:
                        interface = {}
                        interface["vim_info"] = self.serialize(port)
                        interface["mac_address"] = port.get("mac_address")
                        interface["vim_net_id"] = port["network_id"]
                        interface["vim_interface_id"] = port["id"]
                        # check if OS-EXT-SRV-ATTR:host is there,
                        # in case of non-admin credentials, it will be missing

                        if vm_vim.get("OS-EXT-SRV-ATTR:host"):
                            interface["compute_node"] = vm_vim["OS-EXT-SRV-ATTR:host"]

                        interface["pci"] = None

                        # check if binding:profile is there,
                        # in case of non-admin credentials, it will be missing
                        if port.get("binding:profile"):
                            if port["binding:profile"].get("pci_slot"):
                                # TODO: At the moment sr-iov pci addresses are converted to PF pci addresses by setting
                                # the slot to 0x00
                                # TODO: This is just a workaround valid for niantinc. Find a better way to do so
                                # CHANGE DDDD:BB:SS.F to DDDD:BB:00.(F%2) assuming there are 2 ports per nic
                                pci = port["binding:profile"]["pci_slot"]
                                # interface["pci"] = pci[:-4] + "00." + str(int(pci[-1]) % 2)
                                interface["pci"] = pci

                        interface["vlan"] = None

                        if port.get("binding:vif_details"):
                            interface["vlan"] = port["binding:vif_details"].get("vlan")

                        # Get vlan from network in case not present in port for those old openstacks and cases where
                        # it is needed vlan at PT
                        if not interface["vlan"]:
                            # if network is of type vlan and port is of type direct (sr-iov) then set vlan id
                            network = self.neutron.show_network(port["network_id"])

                            if (
                                network["network"].get("provider:network_type")
                                == "vlan"
                            ):
                                # and port.get("binding:vnic_type") in ("direct", "direct-physical"):
                                interface["vlan"] = network["network"].get(
                                    "provider:segmentation_id"
                                )

                        ips = []
                        # look for floating ip address; best-effort, failures are ignored
                        try:
                            floating_ip_dict = self.neutron.list_floatingips(
                                port_id=port["id"]
                            )

                            if floating_ip_dict.get("floatingips"):
                                ips.append(
                                    floating_ip_dict["floatingips"][0].get(
                                        "floating_ip_address"
                                    )
                                )
                        except Exception:
                            pass

                        # fixed ips come after the floating ip, joined with ";"
                        for subnet in port["fixed_ips"]:
                            ips.append(subnet["ip_address"])

                        interface["ip_address"] = ";".join(ips)
                        vm["interfaces"].append(interface)
                except Exception as e:
                    # interface retrieval failure does not change the VM status
                    self.logger.error(
                        "Error getting vm interface information {}: {}".format(
                            type(e).__name__, e
                        ),
                        exc_info=True,
                    )
            except vimconn.VimConnNotFoundException as e:
                self.logger.error("Exception getting vm status: %s", str(e))
                vm["status"] = "DELETED"
                vm["error_msg"] = str(e)
            except vimconn.VimConnException as e:
                self.logger.error("Exception getting vm status: %s", str(e))
                vm["status"] = "VIM_ERROR"
                vm["error_msg"] = str(e)

            vm_dict[vm_id] = vm

        return vm_dict
2502
2503 def action_vminstance(self, vm_id, action_dict, created_items={}):
2504 """Send and action over a VM instance from VIM
2505 Returns None or the console dict if the action was successfully sent to the VIM"""
2506 self.logger.debug("Action over VM '%s': %s", vm_id, str(action_dict))
2507
2508 try:
2509 self._reload_connection()
2510 server = self.nova.servers.find(id=vm_id)
2511
2512 if "start" in action_dict:
2513 if action_dict["start"] == "rebuild":
2514 server.rebuild()
2515 else:
2516 if server.status == "PAUSED":
2517 server.unpause()
2518 elif server.status == "SUSPENDED":
2519 server.resume()
2520 elif server.status == "SHUTOFF":
2521 server.start()
2522 else:
2523 self.logger.debug(
2524 "ERROR : Instance is not in SHUTOFF/PAUSE/SUSPEND state"
2525 )
2526 raise vimconn.VimConnException(
2527 "Cannot 'start' instance while it is in active state",
2528 http_code=vimconn.HTTP_Bad_Request,
2529 )
2530
2531 elif "pause" in action_dict:
2532 server.pause()
2533 elif "resume" in action_dict:
2534 server.resume()
2535 elif "shutoff" in action_dict or "shutdown" in action_dict:
2536 self.logger.debug("server status %s", server.status)
2537 if server.status == "ACTIVE":
2538 server.stop()
2539 else:
2540 self.logger.debug("ERROR: VM is not in Active state")
2541 raise vimconn.VimConnException(
2542 "VM is not in active state, stop operation is not allowed",
2543 http_code=vimconn.HTTP_Bad_Request,
2544 )
2545 elif "forceOff" in action_dict:
2546 server.stop() # TODO
2547 elif "terminate" in action_dict:
2548 server.delete()
2549 elif "createImage" in action_dict:
2550 server.create_image()
2551 # "path":path_schema,
2552 # "description":description_schema,
2553 # "name":name_schema,
2554 # "metadata":metadata_schema,
2555 # "imageRef": id_schema,
2556 # "disk": {"oneOf":[{"type": "null"}, {"type":"string"}] },
2557 elif "rebuild" in action_dict:
2558 server.rebuild(server.image["id"])
2559 elif "reboot" in action_dict:
2560 server.reboot() # reboot_type="SOFT"
2561 elif "console" in action_dict:
2562 console_type = action_dict["console"]
2563
2564 if console_type is None or console_type == "novnc":
2565 console_dict = server.get_vnc_console("novnc")
2566 elif console_type == "xvpvnc":
2567 console_dict = server.get_vnc_console(console_type)
2568 elif console_type == "rdp-html5":
2569 console_dict = server.get_rdp_console(console_type)
2570 elif console_type == "spice-html5":
2571 console_dict = server.get_spice_console(console_type)
2572 else:
2573 raise vimconn.VimConnException(
2574 "console type '{}' not allowed".format(console_type),
2575 http_code=vimconn.HTTP_Bad_Request,
2576 )
2577
2578 try:
2579 console_url = console_dict["console"]["url"]
2580 # parse console_url
2581 protocol_index = console_url.find("//")
2582 suffix_index = (
2583 console_url[protocol_index + 2 :].find("/") + protocol_index + 2
2584 )
2585 port_index = (
2586 console_url[protocol_index + 2 : suffix_index].find(":")
2587 + protocol_index
2588 + 2
2589 )
2590
2591 if protocol_index < 0 or port_index < 0 or suffix_index < 0:
2592 raise vimconn.VimConnException(
2593 "Unexpected response from VIM " + str(console_dict)
2594 )
2595
2596 console_dict2 = {
2597 "protocol": console_url[0:protocol_index],
2598 "server": console_url[protocol_index + 2 : port_index],
2599 "port": int(console_url[port_index + 1 : suffix_index]),
2600 "suffix": console_url[suffix_index + 1 :],
2601 }
2602
2603 return console_dict2
2604 except Exception:
2605 raise vimconn.VimConnException(
2606 "Unexpected response from VIM " + str(console_dict)
2607 )
2608
2609 return None
2610 except (
2611 ksExceptions.ClientException,
2612 nvExceptions.ClientException,
2613 nvExceptions.NotFound,
2614 ConnectionError,
2615 ) as e:
2616 self._format_exception(e)
2617 # TODO insert exception vimconn.HTTP_Unauthorized
2618
2619 # ###### VIO Specific Changes #########
2620 def _generate_vlanID(self):
2621 """
2622 Method to get unused vlanID
2623 Args:
2624 None
2625 Returns:
2626 vlanID
2627 """
2628 # Get used VLAN IDs
2629 usedVlanIDs = []
2630 networks = self.get_network_list()
2631
2632 for net in networks:
2633 if net.get("provider:segmentation_id"):
2634 usedVlanIDs.append(net.get("provider:segmentation_id"))
2635
2636 used_vlanIDs = set(usedVlanIDs)
2637
2638 # find unused VLAN ID
2639 for vlanID_range in self.config.get("dataplane_net_vlan_range"):
2640 try:
2641 start_vlanid, end_vlanid = map(
2642 int, vlanID_range.replace(" ", "").split("-")
2643 )
2644
2645 for vlanID in range(start_vlanid, end_vlanid + 1):
2646 if vlanID not in used_vlanIDs:
2647 return vlanID
2648 except Exception as exp:
2649 raise vimconn.VimConnException(
2650 "Exception {} occurred while generating VLAN ID.".format(exp)
2651 )
2652 else:
2653 raise vimconn.VimConnConflictException(
2654 "Unable to create the SRIOV VLAN network. All given Vlan IDs {} are in use.".format(
2655 self.config.get("dataplane_net_vlan_range")
2656 )
2657 )
2658
2659 def _generate_multisegment_vlanID(self):
2660 """
2661 Method to get unused vlanID
2662 Args:
2663 None
2664 Returns:
2665 vlanID
2666 """
2667 # Get used VLAN IDs
2668 usedVlanIDs = []
2669 networks = self.get_network_list()
2670 for net in networks:
2671 if net.get("provider:network_type") == "vlan" and net.get(
2672 "provider:segmentation_id"
2673 ):
2674 usedVlanIDs.append(net.get("provider:segmentation_id"))
2675 elif net.get("segments"):
2676 for segment in net.get("segments"):
2677 if segment.get("provider:network_type") == "vlan" and segment.get(
2678 "provider:segmentation_id"
2679 ):
2680 usedVlanIDs.append(segment.get("provider:segmentation_id"))
2681
2682 used_vlanIDs = set(usedVlanIDs)
2683
2684 # find unused VLAN ID
2685 for vlanID_range in self.config.get("multisegment_vlan_range"):
2686 try:
2687 start_vlanid, end_vlanid = map(
2688 int, vlanID_range.replace(" ", "").split("-")
2689 )
2690
2691 for vlanID in range(start_vlanid, end_vlanid + 1):
2692 if vlanID not in used_vlanIDs:
2693 return vlanID
2694 except Exception as exp:
2695 raise vimconn.VimConnException(
2696 "Exception {} occurred while generating VLAN ID.".format(exp)
2697 )
2698 else:
2699 raise vimconn.VimConnConflictException(
2700 "Unable to create the VLAN segment. All VLAN IDs {} are in use.".format(
2701 self.config.get("multisegment_vlan_range")
2702 )
2703 )
2704
2705 def _validate_vlan_ranges(self, input_vlan_range, text_vlan_range):
2706 """
2707 Method to validate user given vlanID ranges
2708 Args: None
2709 Returns: None
2710 """
2711 for vlanID_range in input_vlan_range:
2712 vlan_range = vlanID_range.replace(" ", "")
2713 # validate format
2714 vlanID_pattern = r"(\d)*-(\d)*$"
2715 match_obj = re.match(vlanID_pattern, vlan_range)
2716 if not match_obj:
2717 raise vimconn.VimConnConflictException(
2718 "Invalid VLAN range for {}: {}.You must provide "
2719 "'{}' in format [start_ID - end_ID].".format(
2720 text_vlan_range, vlanID_range, text_vlan_range
2721 )
2722 )
2723
2724 start_vlanid, end_vlanid = map(int, vlan_range.split("-"))
2725 if start_vlanid <= 0:
2726 raise vimconn.VimConnConflictException(
2727 "Invalid VLAN range for {}: {}. Start ID can not be zero. For VLAN "
2728 "networks valid IDs are 1 to 4094 ".format(
2729 text_vlan_range, vlanID_range
2730 )
2731 )
2732
2733 if end_vlanid > 4094:
2734 raise vimconn.VimConnConflictException(
2735 "Invalid VLAN range for {}: {}. End VLAN ID can not be "
2736 "greater than 4094. For VLAN networks valid IDs are 1 to 4094 ".format(
2737 text_vlan_range, vlanID_range
2738 )
2739 )
2740
2741 if start_vlanid > end_vlanid:
2742 raise vimconn.VimConnConflictException(
2743 "Invalid VLAN range for {}: {}. You must provide '{}'"
2744 " in format start_ID - end_ID and start_ID < end_ID ".format(
2745 text_vlan_range, vlanID_range, text_vlan_range
2746 )
2747 )
2748
2749 # NOT USED FUNCTIONS
2750
2751 def new_external_port(self, port_data):
2752 """Adds a external port to VIM
2753 Returns the port identifier"""
2754 # TODO openstack if needed
2755 return (
2756 -vimconn.HTTP_Internal_Server_Error,
2757 "osconnector.new_external_port() not implemented",
2758 )
2759
2760 def connect_port_network(self, port_id, network_id, admin=False):
2761 """Connects a external port to a network
2762 Returns status code of the VIM response"""
2763 # TODO openstack if needed
2764 return (
2765 -vimconn.HTTP_Internal_Server_Error,
2766 "osconnector.connect_port_network() not implemented",
2767 )
2768
2769 def new_user(self, user_name, user_passwd, tenant_id=None):
2770 """Adds a new user to openstack VIM
2771 Returns the user identifier"""
2772 self.logger.debug("osconnector: Adding a new user to VIM")
2773
2774 try:
2775 self._reload_connection()
2776 user = self.keystone.users.create(
2777 user_name, password=user_passwd, default_project=tenant_id
2778 )
2779 # self.keystone.tenants.add_user(self.k_creds["username"], #role)
2780
2781 return user.id
2782 except ksExceptions.ConnectionError as e:
2783 error_value = -vimconn.HTTP_Bad_Request
2784 error_text = (
2785 type(e).__name__
2786 + ": "
2787 + (str(e) if len(e.args) == 0 else str(e.args[0]))
2788 )
2789 except ksExceptions.ClientException as e: # TODO remove
2790 error_value = -vimconn.HTTP_Bad_Request
2791 error_text = (
2792 type(e).__name__
2793 + ": "
2794 + (str(e) if len(e.args) == 0 else str(e.args[0]))
2795 )
2796
2797 # TODO insert exception vimconn.HTTP_Unauthorized
2798 # if reaching here is because an exception
2799 self.logger.debug("new_user " + error_text)
2800
2801 return error_value, error_text
2802
2803 def delete_user(self, user_id):
2804 """Delete a user from openstack VIM
2805 Returns the user identifier"""
2806 if self.debug:
2807 print("osconnector: Deleting a user from VIM")
2808
2809 try:
2810 self._reload_connection()
2811 self.keystone.users.delete(user_id)
2812
2813 return 1, user_id
2814 except ksExceptions.ConnectionError as e:
2815 error_value = -vimconn.HTTP_Bad_Request
2816 error_text = (
2817 type(e).__name__
2818 + ": "
2819 + (str(e) if len(e.args) == 0 else str(e.args[0]))
2820 )
2821 except ksExceptions.NotFound as e:
2822 error_value = -vimconn.HTTP_Not_Found
2823 error_text = (
2824 type(e).__name__
2825 + ": "
2826 + (str(e) if len(e.args) == 0 else str(e.args[0]))
2827 )
2828 except ksExceptions.ClientException as e: # TODO remove
2829 error_value = -vimconn.HTTP_Bad_Request
2830 error_text = (
2831 type(e).__name__
2832 + ": "
2833 + (str(e) if len(e.args) == 0 else str(e.args[0]))
2834 )
2835
2836 # TODO insert exception vimconn.HTTP_Unauthorized
2837 # if reaching here is because an exception
2838 self.logger.debug("delete_tenant " + error_text)
2839
2840 return error_value, error_text
2841
2842 def get_hosts_info(self):
2843 """Get the information of deployed hosts
2844 Returns the hosts content"""
2845 if self.debug:
2846 print("osconnector: Getting Host info from VIM")
2847
2848 try:
2849 h_list = []
2850 self._reload_connection()
2851 hypervisors = self.nova.hypervisors.list()
2852
2853 for hype in hypervisors:
2854 h_list.append(hype.to_dict())
2855
2856 return 1, {"hosts": h_list}
2857 except nvExceptions.NotFound as e:
2858 error_value = -vimconn.HTTP_Not_Found
2859 error_text = str(e) if len(e.args) == 0 else str(e.args[0])
2860 except (ksExceptions.ClientException, nvExceptions.ClientException) as e:
2861 error_value = -vimconn.HTTP_Bad_Request
2862 error_text = (
2863 type(e).__name__
2864 + ": "
2865 + (str(e) if len(e.args) == 0 else str(e.args[0]))
2866 )
2867
2868 # TODO insert exception vimconn.HTTP_Unauthorized
2869 # if reaching here is because an exception
2870 self.logger.debug("get_hosts_info " + error_text)
2871
2872 return error_value, error_text
2873
2874 def get_hosts(self, vim_tenant):
2875 """Get the hosts and deployed instances
2876 Returns the hosts content"""
2877 r, hype_dict = self.get_hosts_info()
2878
2879 if r < 0:
2880 return r, hype_dict
2881
2882 hypervisors = hype_dict["hosts"]
2883
2884 try:
2885 servers = self.nova.servers.list()
2886 for hype in hypervisors:
2887 for server in servers:
2888 if (
2889 server.to_dict()["OS-EXT-SRV-ATTR:hypervisor_hostname"]
2890 == hype["hypervisor_hostname"]
2891 ):
2892 if "vm" in hype:
2893 hype["vm"].append(server.id)
2894 else:
2895 hype["vm"] = [server.id]
2896
2897 return 1, hype_dict
2898 except nvExceptions.NotFound as e:
2899 error_value = -vimconn.HTTP_Not_Found
2900 error_text = str(e) if len(e.args) == 0 else str(e.args[0])
2901 except (ksExceptions.ClientException, nvExceptions.ClientException) as e:
2902 error_value = -vimconn.HTTP_Bad_Request
2903 error_text = (
2904 type(e).__name__
2905 + ": "
2906 + (str(e) if len(e.args) == 0 else str(e.args[0]))
2907 )
2908
2909 # TODO insert exception vimconn.HTTP_Unauthorized
2910 # if reaching here is because an exception
2911 self.logger.debug("get_hosts " + error_text)
2912
2913 return error_value, error_text
2914
2915 def new_classification(self, name, ctype, definition):
2916 self.logger.debug(
2917 "Adding a new (Traffic) Classification to VIM, named %s", name
2918 )
2919
2920 try:
2921 new_class = None
2922 self._reload_connection()
2923
2924 if ctype not in supportedClassificationTypes:
2925 raise vimconn.VimConnNotSupportedException(
2926 "OpenStack VIM connector does not support provided "
2927 "Classification Type {}, supported ones are: {}".format(
2928 ctype, supportedClassificationTypes
2929 )
2930 )
2931
2932 if not self._validate_classification(ctype, definition):
2933 raise vimconn.VimConnException(
2934 "Incorrect Classification definition for the type specified."
2935 )
2936
2937 classification_dict = definition
2938 classification_dict["name"] = name
2939 new_class = self.neutron.create_sfc_flow_classifier(
2940 {"flow_classifier": classification_dict}
2941 )
2942
2943 return new_class["flow_classifier"]["id"]
2944 except (
2945 neExceptions.ConnectionFailed,
2946 ksExceptions.ClientException,
2947 neExceptions.NeutronException,
2948 ConnectionError,
2949 ) as e:
2950 self.logger.error("Creation of Classification failed.")
2951 self._format_exception(e)
2952
2953 def get_classification(self, class_id):
2954 self.logger.debug(" Getting Classification %s from VIM", class_id)
2955 filter_dict = {"id": class_id}
2956 class_list = self.get_classification_list(filter_dict)
2957
2958 if len(class_list) == 0:
2959 raise vimconn.VimConnNotFoundException(
2960 "Classification '{}' not found".format(class_id)
2961 )
2962 elif len(class_list) > 1:
2963 raise vimconn.VimConnConflictException(
2964 "Found more than one Classification with this criteria"
2965 )
2966
2967 classification = class_list[0]
2968
2969 return classification
2970
2971 def get_classification_list(self, filter_dict={}):
2972 self.logger.debug(
2973 "Getting Classifications from VIM filter: '%s'", str(filter_dict)
2974 )
2975
2976 try:
2977 filter_dict_os = filter_dict.copy()
2978 self._reload_connection()
2979
2980 if self.api_version3 and "tenant_id" in filter_dict_os:
2981 filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")
2982
2983 classification_dict = self.neutron.list_sfc_flow_classifiers(
2984 **filter_dict_os
2985 )
2986 classification_list = classification_dict["flow_classifiers"]
2987 self.__classification_os2mano(classification_list)
2988
2989 return classification_list
2990 except (
2991 neExceptions.ConnectionFailed,
2992 ksExceptions.ClientException,
2993 neExceptions.NeutronException,
2994 ConnectionError,
2995 ) as e:
2996 self._format_exception(e)
2997
2998 def delete_classification(self, class_id):
2999 self.logger.debug("Deleting Classification '%s' from VIM", class_id)
3000
3001 try:
3002 self._reload_connection()
3003 self.neutron.delete_sfc_flow_classifier(class_id)
3004
3005 return class_id
3006 except (
3007 neExceptions.ConnectionFailed,
3008 neExceptions.NeutronException,
3009 ksExceptions.ClientException,
3010 neExceptions.NeutronException,
3011 ConnectionError,
3012 ) as e:
3013 self._format_exception(e)
3014
3015 def new_sfi(self, name, ingress_ports, egress_ports, sfc_encap=True):
3016 self.logger.debug(
3017 "Adding a new Service Function Instance to VIM, named '%s'", name
3018 )
3019
3020 try:
3021 new_sfi = None
3022 self._reload_connection()
3023 correlation = None
3024
3025 if sfc_encap:
3026 correlation = "nsh"
3027
3028 if len(ingress_ports) != 1:
3029 raise vimconn.VimConnNotSupportedException(
3030 "OpenStack VIM connector can only have 1 ingress port per SFI"
3031 )
3032
3033 if len(egress_ports) != 1:
3034 raise vimconn.VimConnNotSupportedException(
3035 "OpenStack VIM connector can only have 1 egress port per SFI"
3036 )
3037
3038 sfi_dict = {
3039 "name": name,
3040 "ingress": ingress_ports[0],
3041 "egress": egress_ports[0],
3042 "service_function_parameters": {"correlation": correlation},
3043 }
3044 new_sfi = self.neutron.create_sfc_port_pair({"port_pair": sfi_dict})
3045
3046 return new_sfi["port_pair"]["id"]
3047 except (
3048 neExceptions.ConnectionFailed,
3049 ksExceptions.ClientException,
3050 neExceptions.NeutronException,
3051 ConnectionError,
3052 ) as e:
3053 if new_sfi:
3054 try:
3055 self.neutron.delete_sfc_port_pair(new_sfi["port_pair"]["id"])
3056 except Exception:
3057 self.logger.error(
3058 "Creation of Service Function Instance failed, with "
3059 "subsequent deletion failure as well."
3060 )
3061
3062 self._format_exception(e)
3063
3064 def get_sfi(self, sfi_id):
3065 self.logger.debug("Getting Service Function Instance %s from VIM", sfi_id)
3066 filter_dict = {"id": sfi_id}
3067 sfi_list = self.get_sfi_list(filter_dict)
3068
3069 if len(sfi_list) == 0:
3070 raise vimconn.VimConnNotFoundException(
3071 "Service Function Instance '{}' not found".format(sfi_id)
3072 )
3073 elif len(sfi_list) > 1:
3074 raise vimconn.VimConnConflictException(
3075 "Found more than one Service Function Instance with this criteria"
3076 )
3077
3078 sfi = sfi_list[0]
3079
3080 return sfi
3081
3082 def get_sfi_list(self, filter_dict={}):
3083 self.logger.debug(
3084 "Getting Service Function Instances from VIM filter: '%s'", str(filter_dict)
3085 )
3086
3087 try:
3088 self._reload_connection()
3089 filter_dict_os = filter_dict.copy()
3090
3091 if self.api_version3 and "tenant_id" in filter_dict_os:
3092 filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")
3093
3094 sfi_dict = self.neutron.list_sfc_port_pairs(**filter_dict_os)
3095 sfi_list = sfi_dict["port_pairs"]
3096 self.__sfi_os2mano(sfi_list)
3097
3098 return sfi_list
3099 except (
3100 neExceptions.ConnectionFailed,
3101 ksExceptions.ClientException,
3102 neExceptions.NeutronException,
3103 ConnectionError,
3104 ) as e:
3105 self._format_exception(e)
3106
3107 def delete_sfi(self, sfi_id):
3108 self.logger.debug("Deleting Service Function Instance '%s' from VIM", sfi_id)
3109
3110 try:
3111 self._reload_connection()
3112 self.neutron.delete_sfc_port_pair(sfi_id)
3113
3114 return sfi_id
3115 except (
3116 neExceptions.ConnectionFailed,
3117 neExceptions.NeutronException,
3118 ksExceptions.ClientException,
3119 neExceptions.NeutronException,
3120 ConnectionError,
3121 ) as e:
3122 self._format_exception(e)
3123
3124 def new_sf(self, name, sfis, sfc_encap=True):
3125 self.logger.debug("Adding a new Service Function to VIM, named '%s'", name)
3126
3127 try:
3128 new_sf = None
3129 self._reload_connection()
3130 # correlation = None
3131 # if sfc_encap:
3132 # correlation = "nsh"
3133
3134 for instance in sfis:
3135 sfi = self.get_sfi(instance)
3136
3137 if sfi.get("sfc_encap") != sfc_encap:
3138 raise vimconn.VimConnNotSupportedException(
3139 "OpenStack VIM connector requires all SFIs of the "
3140 "same SF to share the same SFC Encapsulation"
3141 )
3142
3143 sf_dict = {"name": name, "port_pairs": sfis}
3144 new_sf = self.neutron.create_sfc_port_pair_group(
3145 {"port_pair_group": sf_dict}
3146 )
3147
3148 return new_sf["port_pair_group"]["id"]
3149 except (
3150 neExceptions.ConnectionFailed,
3151 ksExceptions.ClientException,
3152 neExceptions.NeutronException,
3153 ConnectionError,
3154 ) as e:
3155 if new_sf:
3156 try:
3157 self.neutron.delete_sfc_port_pair_group(
3158 new_sf["port_pair_group"]["id"]
3159 )
3160 except Exception:
3161 self.logger.error(
3162 "Creation of Service Function failed, with "
3163 "subsequent deletion failure as well."
3164 )
3165
3166 self._format_exception(e)
3167
3168 def get_sf(self, sf_id):
3169 self.logger.debug("Getting Service Function %s from VIM", sf_id)
3170 filter_dict = {"id": sf_id}
3171 sf_list = self.get_sf_list(filter_dict)
3172
3173 if len(sf_list) == 0:
3174 raise vimconn.VimConnNotFoundException(
3175 "Service Function '{}' not found".format(sf_id)
3176 )
3177 elif len(sf_list) > 1:
3178 raise vimconn.VimConnConflictException(
3179 "Found more than one Service Function with this criteria"
3180 )
3181
3182 sf = sf_list[0]
3183
3184 return sf
3185
3186 def get_sf_list(self, filter_dict={}):
3187 self.logger.debug(
3188 "Getting Service Function from VIM filter: '%s'", str(filter_dict)
3189 )
3190
3191 try:
3192 self._reload_connection()
3193 filter_dict_os = filter_dict.copy()
3194
3195 if self.api_version3 and "tenant_id" in filter_dict_os:
3196 filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")
3197
3198 sf_dict = self.neutron.list_sfc_port_pair_groups(**filter_dict_os)
3199 sf_list = sf_dict["port_pair_groups"]
3200 self.__sf_os2mano(sf_list)
3201
3202 return sf_list
3203 except (
3204 neExceptions.ConnectionFailed,
3205 ksExceptions.ClientException,
3206 neExceptions.NeutronException,
3207 ConnectionError,
3208 ) as e:
3209 self._format_exception(e)
3210
3211 def delete_sf(self, sf_id):
3212 self.logger.debug("Deleting Service Function '%s' from VIM", sf_id)
3213
3214 try:
3215 self._reload_connection()
3216 self.neutron.delete_sfc_port_pair_group(sf_id)
3217
3218 return sf_id
3219 except (
3220 neExceptions.ConnectionFailed,
3221 neExceptions.NeutronException,
3222 ksExceptions.ClientException,
3223 neExceptions.NeutronException,
3224 ConnectionError,
3225 ) as e:
3226 self._format_exception(e)
3227
3228 def new_sfp(self, name, classifications, sfs, sfc_encap=True, spi=None):
3229 self.logger.debug("Adding a new Service Function Path to VIM, named '%s'", name)
3230
3231 try:
3232 new_sfp = None
3233 self._reload_connection()
3234 # In networking-sfc the MPLS encapsulation is legacy
3235 # should be used when no full SFC Encapsulation is intended
3236 correlation = "mpls"
3237
3238 if sfc_encap:
3239 correlation = "nsh"
3240
3241 sfp_dict = {
3242 "name": name,
3243 "flow_classifiers": classifications,
3244 "port_pair_groups": sfs,
3245 "chain_parameters": {"correlation": correlation},
3246 }
3247
3248 if spi:
3249 sfp_dict["chain_id"] = spi
3250
3251 new_sfp = self.neutron.create_sfc_port_chain({"port_chain": sfp_dict})
3252
3253 return new_sfp["port_chain"]["id"]
3254 except (
3255 neExceptions.ConnectionFailed,
3256 ksExceptions.ClientException,
3257 neExceptions.NeutronException,
3258 ConnectionError,
3259 ) as e:
3260 if new_sfp:
3261 try:
3262 self.neutron.delete_sfc_port_chain(new_sfp["port_chain"]["id"])
3263 except Exception:
3264 self.logger.error(
3265 "Creation of Service Function Path failed, with "
3266 "subsequent deletion failure as well."
3267 )
3268
3269 self._format_exception(e)
3270
3271 def get_sfp(self, sfp_id):
3272 self.logger.debug(" Getting Service Function Path %s from VIM", sfp_id)
3273
3274 filter_dict = {"id": sfp_id}
3275 sfp_list = self.get_sfp_list(filter_dict)
3276
3277 if len(sfp_list) == 0:
3278 raise vimconn.VimConnNotFoundException(
3279 "Service Function Path '{}' not found".format(sfp_id)
3280 )
3281 elif len(sfp_list) > 1:
3282 raise vimconn.VimConnConflictException(
3283 "Found more than one Service Function Path with this criteria"
3284 )
3285
3286 sfp = sfp_list[0]
3287
3288 return sfp
3289
3290 def get_sfp_list(self, filter_dict={}):
3291 self.logger.debug(
3292 "Getting Service Function Paths from VIM filter: '%s'", str(filter_dict)
3293 )
3294
3295 try:
3296 self._reload_connection()
3297 filter_dict_os = filter_dict.copy()
3298
3299 if self.api_version3 and "tenant_id" in filter_dict_os:
3300 filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")
3301
3302 sfp_dict = self.neutron.list_sfc_port_chains(**filter_dict_os)
3303 sfp_list = sfp_dict["port_chains"]
3304 self.__sfp_os2mano(sfp_list)
3305
3306 return sfp_list
3307 except (
3308 neExceptions.ConnectionFailed,
3309 ksExceptions.ClientException,
3310 neExceptions.NeutronException,
3311 ConnectionError,
3312 ) as e:
3313 self._format_exception(e)
3314
3315 def delete_sfp(self, sfp_id):
3316 self.logger.debug("Deleting Service Function Path '%s' from VIM", sfp_id)
3317
3318 try:
3319 self._reload_connection()
3320 self.neutron.delete_sfc_port_chain(sfp_id)
3321
3322 return sfp_id
3323 except (
3324 neExceptions.ConnectionFailed,
3325 neExceptions.NeutronException,
3326 ksExceptions.ClientException,
3327 neExceptions.NeutronException,
3328 ConnectionError,
3329 ) as e:
3330 self._format_exception(e)
3331
3332 def refresh_sfps_status(self, sfp_list):
3333 """Get the status of the service function path
3334 Params: the list of sfp identifiers
3335 Returns a dictionary with:
3336 vm_id: #VIM id of this service function path
3337 status: #Mandatory. Text with one of:
3338 # DELETED (not found at vim)
3339 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
3340 # OTHER (Vim reported other status not understood)
3341 # ERROR (VIM indicates an ERROR status)
3342 # ACTIVE,
3343 # CREATING (on building process)
3344 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
3345 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)F
3346 """
3347 sfp_dict = {}
3348 self.logger.debug(
3349 "refresh_sfps status: Getting tenant SFP information from VIM"
3350 )
3351
3352 for sfp_id in sfp_list:
3353 sfp = {}
3354
3355 try:
3356 sfp_vim = self.get_sfp(sfp_id)
3357
3358 if sfp_vim["spi"]:
3359 sfp["status"] = vmStatus2manoFormat["ACTIVE"]
3360 else:
3361 sfp["status"] = "OTHER"
3362 sfp["error_msg"] = "VIM status reported " + sfp["status"]
3363
3364 sfp["vim_info"] = self.serialize(sfp_vim)
3365
3366 if sfp_vim.get("fault"):
3367 sfp["error_msg"] = str(sfp_vim["fault"])
3368 except vimconn.VimConnNotFoundException as e:
3369 self.logger.error("Exception getting sfp status: %s", str(e))
3370 sfp["status"] = "DELETED"
3371 sfp["error_msg"] = str(e)
3372 except vimconn.VimConnException as e:
3373 self.logger.error("Exception getting sfp status: %s", str(e))
3374 sfp["status"] = "VIM_ERROR"
3375 sfp["error_msg"] = str(e)
3376
3377 sfp_dict[sfp_id] = sfp
3378
3379 return sfp_dict
3380
3381 def refresh_sfis_status(self, sfi_list):
3382 """Get the status of the service function instances
3383 Params: the list of sfi identifiers
3384 Returns a dictionary with:
3385 vm_id: #VIM id of this service function instance
3386 status: #Mandatory. Text with one of:
3387 # DELETED (not found at vim)
3388 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
3389 # OTHER (Vim reported other status not understood)
3390 # ERROR (VIM indicates an ERROR status)
3391 # ACTIVE,
3392 # CREATING (on building process)
3393 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
3394 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
3395 """
3396 sfi_dict = {}
3397 self.logger.debug(
3398 "refresh_sfis status: Getting tenant sfi information from VIM"
3399 )
3400
3401 for sfi_id in sfi_list:
3402 sfi = {}
3403
3404 try:
3405 sfi_vim = self.get_sfi(sfi_id)
3406
3407 if sfi_vim:
3408 sfi["status"] = vmStatus2manoFormat["ACTIVE"]
3409 else:
3410 sfi["status"] = "OTHER"
3411 sfi["error_msg"] = "VIM status reported " + sfi["status"]
3412
3413 sfi["vim_info"] = self.serialize(sfi_vim)
3414
3415 if sfi_vim.get("fault"):
3416 sfi["error_msg"] = str(sfi_vim["fault"])
3417 except vimconn.VimConnNotFoundException as e:
3418 self.logger.error("Exception getting sfi status: %s", str(e))
3419 sfi["status"] = "DELETED"
3420 sfi["error_msg"] = str(e)
3421 except vimconn.VimConnException as e:
3422 self.logger.error("Exception getting sfi status: %s", str(e))
3423 sfi["status"] = "VIM_ERROR"
3424 sfi["error_msg"] = str(e)
3425
3426 sfi_dict[sfi_id] = sfi
3427
3428 return sfi_dict
3429
3430 def refresh_sfs_status(self, sf_list):
3431 """Get the status of the service functions
3432 Params: the list of sf identifiers
3433 Returns a dictionary with:
3434 vm_id: #VIM id of this service function
3435 status: #Mandatory. Text with one of:
3436 # DELETED (not found at vim)
3437 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
3438 # OTHER (Vim reported other status not understood)
3439 # ERROR (VIM indicates an ERROR status)
3440 # ACTIVE,
3441 # CREATING (on building process)
3442 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
3443 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
3444 """
3445 sf_dict = {}
3446 self.logger.debug("refresh_sfs status: Getting tenant sf information from VIM")
3447
3448 for sf_id in sf_list:
3449 sf = {}
3450
3451 try:
3452 sf_vim = self.get_sf(sf_id)
3453
3454 if sf_vim:
3455 sf["status"] = vmStatus2manoFormat["ACTIVE"]
3456 else:
3457 sf["status"] = "OTHER"
3458 sf["error_msg"] = "VIM status reported " + sf_vim["status"]
3459
3460 sf["vim_info"] = self.serialize(sf_vim)
3461
3462 if sf_vim.get("fault"):
3463 sf["error_msg"] = str(sf_vim["fault"])
3464 except vimconn.VimConnNotFoundException as e:
3465 self.logger.error("Exception getting sf status: %s", str(e))
3466 sf["status"] = "DELETED"
3467 sf["error_msg"] = str(e)
3468 except vimconn.VimConnException as e:
3469 self.logger.error("Exception getting sf status: %s", str(e))
3470 sf["status"] = "VIM_ERROR"
3471 sf["error_msg"] = str(e)
3472
3473 sf_dict[sf_id] = sf
3474
3475 return sf_dict
3476
3477 def refresh_classifications_status(self, classification_list):
3478 """Get the status of the classifications
3479 Params: the list of classification identifiers
3480 Returns a dictionary with:
3481 vm_id: #VIM id of this classifier
3482 status: #Mandatory. Text with one of:
3483 # DELETED (not found at vim)
3484 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
3485 # OTHER (Vim reported other status not understood)
3486 # ERROR (VIM indicates an ERROR status)
3487 # ACTIVE,
3488 # CREATING (on building process)
3489 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
3490 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
3491 """
3492 classification_dict = {}
3493 self.logger.debug(
3494 "refresh_classifications status: Getting tenant classification information from VIM"
3495 )
3496
3497 for classification_id in classification_list:
3498 classification = {}
3499
3500 try:
3501 classification_vim = self.get_classification(classification_id)
3502
3503 if classification_vim:
3504 classification["status"] = vmStatus2manoFormat["ACTIVE"]
3505 else:
3506 classification["status"] = "OTHER"
3507 classification["error_msg"] = (
3508 "VIM status reported " + classification["status"]
3509 )
3510
3511 classification["vim_info"] = self.serialize(classification_vim)
3512
3513 if classification_vim.get("fault"):
3514 classification["error_msg"] = str(classification_vim["fault"])
3515 except vimconn.VimConnNotFoundException as e:
3516 self.logger.error("Exception getting classification status: %s", str(e))
3517 classification["status"] = "DELETED"
3518 classification["error_msg"] = str(e)
3519 except vimconn.VimConnException as e:
3520 self.logger.error("Exception getting classification status: %s", str(e))
3521 classification["status"] = "VIM_ERROR"
3522 classification["error_msg"] = str(e)
3523
3524 classification_dict[classification_id] = classification
3525
3526 return classification_dict
3527
3528 def new_affinity_group(self, affinity_group_data):
3529 """Adds a server group to VIM
3530 affinity_group_data contains a dictionary with information, keys:
3531 name: name in VIM for the server group
3532 type: affinity or anti-affinity
3533 scope: Only nfvi-node allowed
3534 Returns the server group identifier"""
3535 self.logger.debug("Adding Server Group '%s'", str(affinity_group_data))
3536
3537 try:
3538 name = affinity_group_data["name"]
3539 policy = affinity_group_data["type"]
3540
3541 self._reload_connection()
3542 new_server_group = self.nova.server_groups.create(name, policy)
3543
3544 return new_server_group.id
3545 except (
3546 ksExceptions.ClientException,
3547 nvExceptions.ClientException,
3548 ConnectionError,
3549 KeyError,
3550 ) as e:
3551 self._format_exception(e)
3552
3553 def get_affinity_group(self, affinity_group_id):
3554 """Obtain server group details from the VIM. Returns the server group detais as a dict"""
3555 self.logger.debug("Getting flavor '%s'", affinity_group_id)
3556 try:
3557 self._reload_connection()
3558 server_group = self.nova.server_groups.find(id=affinity_group_id)
3559
3560 return server_group.to_dict()
3561 except (
3562 nvExceptions.NotFound,
3563 nvExceptions.ClientException,
3564 ksExceptions.ClientException,
3565 ConnectionError,
3566 ) as e:
3567 self._format_exception(e)
3568
3569 def delete_affinity_group(self, affinity_group_id):
3570 """Deletes a server group from the VIM. Returns the old affinity_group_id"""
3571 self.logger.debug("Getting server group '%s'", affinity_group_id)
3572 try:
3573 self._reload_connection()
3574 self.nova.server_groups.delete(affinity_group_id)
3575
3576 return affinity_group_id
3577 except (
3578 nvExceptions.NotFound,
3579 ksExceptions.ClientException,
3580 nvExceptions.ClientException,
3581 ConnectionError,
3582 ) as e:
3583 self._format_exception(e)
3584
3585 def get_vdu_state(self, vm_id):
3586 """
3587 Getting the state of a vdu
3588 param:
3589 vm_id: ID of an instance
3590 """
3591 self.logger.debug("Getting the status of VM")
3592 self.logger.debug("VIM VM ID %s", vm_id)
3593 self._reload_connection()
3594 server = self.nova.servers.find(id=vm_id)
3595 server_dict = server.to_dict()
3596 vdu_data = [
3597 server_dict["status"],
3598 server_dict["flavor"]["id"],
3599 server_dict["OS-EXT-SRV-ATTR:host"],
3600 server_dict["OS-EXT-AZ:availability_zone"],
3601 ]
3602 self.logger.debug("vdu_data %s", vdu_data)
3603 return vdu_data
3604
3605 def check_compute_availability(self, host, server_flavor_details):
3606 self._reload_connection()
3607 hypervisor_search = self.nova.hypervisors.search(
3608 hypervisor_match=host, servers=True
3609 )
3610 for hypervisor in hypervisor_search:
3611 hypervisor_id = hypervisor.to_dict()["id"]
3612 hypervisor_details = self.nova.hypervisors.get(hypervisor=hypervisor_id)
3613 hypervisor_dict = hypervisor_details.to_dict()
3614 hypervisor_temp = json.dumps(hypervisor_dict)
3615 hypervisor_json = json.loads(hypervisor_temp)
3616 resources_available = [
3617 hypervisor_json["free_ram_mb"],
3618 hypervisor_json["disk_available_least"],
3619 hypervisor_json["vcpus"] - hypervisor_json["vcpus_used"],
3620 ]
3621 compute_available = all(
3622 x > y for x, y in zip(resources_available, server_flavor_details)
3623 )
3624 if compute_available:
3625 return host
3626
3627 def check_availability_zone(
3628 self, old_az, server_flavor_details, old_host, host=None
3629 ):
3630 self._reload_connection()
3631 az_check = {"zone_check": False, "compute_availability": None}
3632 aggregates_list = self.nova.aggregates.list()
3633 for aggregate in aggregates_list:
3634 aggregate_details = aggregate.to_dict()
3635 aggregate_temp = json.dumps(aggregate_details)
3636 aggregate_json = json.loads(aggregate_temp)
3637 if aggregate_json["availability_zone"] == old_az:
3638 hosts_list = aggregate_json["hosts"]
3639 if host is not None:
3640 if host in hosts_list:
3641 az_check["zone_check"] = True
3642 available_compute_id = self.check_compute_availability(
3643 host, server_flavor_details
3644 )
3645 if available_compute_id is not None:
3646 az_check["compute_availability"] = available_compute_id
3647 else:
3648 for check_host in hosts_list:
3649 if check_host != old_host:
3650 available_compute_id = self.check_compute_availability(
3651 check_host, server_flavor_details
3652 )
3653 if available_compute_id is not None:
3654 az_check["zone_check"] = True
3655 az_check["compute_availability"] = available_compute_id
3656 break
3657 else:
3658 az_check["zone_check"] = True
3659 return az_check
3660
3661 def migrate_instance(self, vm_id, compute_host=None):
3662 """
3663 Migrate a vdu
3664 param:
3665 vm_id: ID of an instance
3666 compute_host: Host to migrate the vdu to
3667 """
3668 self._reload_connection()
3669 vm_state = False
3670 instance_state = self.get_vdu_state(vm_id)
3671 server_flavor_id = instance_state[1]
3672 server_hypervisor_name = instance_state[2]
3673 server_availability_zone = instance_state[3]
3674 try:
3675 server_flavor = self.nova.flavors.find(id=server_flavor_id).to_dict()
3676 server_flavor_details = [
3677 server_flavor["ram"],
3678 server_flavor["disk"],
3679 server_flavor["vcpus"],
3680 ]
3681 if compute_host == server_hypervisor_name:
3682 raise vimconn.VimConnException(
3683 "Unable to migrate instance '{}' to the same host '{}'".format(
3684 vm_id, compute_host
3685 ),
3686 http_code=vimconn.HTTP_Bad_Request,
3687 )
3688 az_status = self.check_availability_zone(
3689 server_availability_zone,
3690 server_flavor_details,
3691 server_hypervisor_name,
3692 compute_host,
3693 )
3694 availability_zone_check = az_status["zone_check"]
3695 available_compute_id = az_status.get("compute_availability")
3696
3697 if availability_zone_check is False:
3698 raise vimconn.VimConnException(
3699 "Unable to migrate instance '{}' to a different availability zone".format(
3700 vm_id
3701 ),
3702 http_code=vimconn.HTTP_Bad_Request,
3703 )
3704 if available_compute_id is not None:
3705 self.nova.servers.live_migrate(
3706 server=vm_id,
3707 host=available_compute_id,
3708 block_migration=True,
3709 disk_over_commit=False,
3710 )
3711 state = "MIGRATING"
3712 changed_compute_host = ""
3713 if state == "MIGRATING":
3714 vm_state = self.__wait_for_vm(vm_id, "ACTIVE")
3715 changed_compute_host = self.get_vdu_state(vm_id)[2]
3716 if vm_state and changed_compute_host == available_compute_id:
3717 self.logger.debug(
3718 "Instance '{}' migrated to the new compute host '{}'".format(
3719 vm_id, changed_compute_host
3720 )
3721 )
3722 return state, available_compute_id
3723 else:
3724 raise vimconn.VimConnException(
3725 "Migration Failed. Instance '{}' not moved to the new host {}".format(
3726 vm_id, available_compute_id
3727 ),
3728 http_code=vimconn.HTTP_Bad_Request,
3729 )
3730 else:
3731 raise vimconn.VimConnException(
3732 "Compute '{}' not available or does not have enough resources to migrate the instance".format(
3733 available_compute_id
3734 ),
3735 http_code=vimconn.HTTP_Bad_Request,
3736 )
3737 except (
3738 nvExceptions.BadRequest,
3739 nvExceptions.ClientException,
3740 nvExceptions.NotFound,
3741 ) as e:
3742 self._format_exception(e)
3743
3744 def resize_instance(self, vm_id, new_flavor_id):
3745 """
3746 For resizing the vm based on the given
3747 flavor details
3748 param:
3749 vm_id : ID of an instance
3750 new_flavor_id : Flavor id to be resized
3751 Return the status of a resized instance
3752 """
3753 self._reload_connection()
3754 self.logger.debug("resize the flavor of an instance")
3755 instance_status, old_flavor_id, compute_host, az = self.get_vdu_state(vm_id)
3756 old_flavor_disk = self.nova.flavors.find(id=old_flavor_id).to_dict()["disk"]
3757 new_flavor_disk = self.nova.flavors.find(id=new_flavor_id).to_dict()["disk"]
3758 try:
3759 if instance_status == "ACTIVE" or instance_status == "SHUTOFF":
3760 if old_flavor_disk > new_flavor_disk:
3761 raise nvExceptions.BadRequest(
3762 400,
3763 message="Server disk resize failed. Resize to lower disk flavor is not allowed",
3764 )
3765 else:
3766 self.nova.servers.resize(server=vm_id, flavor=new_flavor_id)
3767 vm_state = self.__wait_for_vm(vm_id, "VERIFY_RESIZE")
3768 if vm_state:
3769 instance_resized_status = self.confirm_resize(vm_id)
3770 return instance_resized_status
3771 else:
3772 raise nvExceptions.BadRequest(
3773 409,
3774 message="Cannot 'resize' vm_state is in ERROR",
3775 )
3776
3777 else:
3778 self.logger.debug("ERROR : Instance is not in ACTIVE or SHUTOFF state")
3779 raise nvExceptions.BadRequest(
3780 409,
3781 message="Cannot 'resize' instance while it is in vm_state resized",
3782 )
3783 except (
3784 nvExceptions.BadRequest,
3785 nvExceptions.ClientException,
3786 nvExceptions.NotFound,
3787 ) as e:
3788 self._format_exception(e)
3789
3790 def confirm_resize(self, vm_id):
3791 """
3792 Confirm the resize of an instance
3793 param:
3794 vm_id: ID of an instance
3795 """
3796 self._reload_connection()
3797 self.nova.servers.confirm_resize(server=vm_id)
3798 if self.get_vdu_state(vm_id)[0] == "VERIFY_RESIZE":
3799 self.__wait_for_vm(vm_id, "ACTIVE")
3800 instance_status = self.get_vdu_state(vm_id)[0]
3801 return instance_status