a3c25ca019000ee2183aaf43652d61fc1767f7c0
[osm/RO.git] / RO-VIM-vmware / osm_rovim_vmware / vimconn_vmware.py
1 # -*- coding: utf-8 -*-
2
3 # #
4 # Copyright 2016-2019 VMware Inc.
5 # This file is part of ETSI OSM
6 # All Rights Reserved.
7 #
8 # Licensed under the Apache License, Version 2.0 (the "License"); you may
9 # not use this file except in compliance with the License. You may obtain
10 # a copy of the License at
11 #
12 # http://www.apache.org/licenses/LICENSE-2.0
13 #
14 # Unless required by applicable law or agreed to in writing, software
15 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
16 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
17 # License for the specific language governing permissions and limitations
18 # under the License.
19 #
20 # For those usages not covered by the Apache License, Version 2.0 please
21 # contact: osslegalrouting@vmware.com
22 # #
23
24 """
25 vimconn_vmware implementation an Abstract class in order to interact with VMware vCloud Director.
26 """
27
28 from lxml import etree as lxmlElementTree
29 from osm_ro_plugin import vimconn
30 from progressbar import Percentage, Bar, ETA, FileTransferSpeed, ProgressBar
31 from pyVim.connect import SmartConnect, Disconnect
32 from pyVmomi import vim, vmodl # @UnresolvedImport
33 from pyvcloud.vcd.client import BasicLoginCredentials, Client
34 from pyvcloud.vcd.org import Org
35 from pyvcloud.vcd.vapp import VApp
36 from pyvcloud.vcd.vdc import VDC
37 from xml.etree import ElementTree as XmlElementTree
38 from xml.sax.saxutils import escape
39 import atexit
40 import hashlib
41 import json
42 import logging
43 import netaddr
44 import os
45 import random
46 import re
47 import requests
48 import shutil
49 import socket
50 import ssl
51 import struct
52 import subprocess
53 import tempfile
54 import time
55 import traceback
56 import uuid
57 import yaml
58
59 # global variable for vcd connector type
60 STANDALONE = "standalone"
61
62 # key for flavor dicts
63 FLAVOR_RAM_KEY = "ram"
64 FLAVOR_VCPUS_KEY = "vcpus"
65 FLAVOR_DISK_KEY = "disk"
66 DEFAULT_IP_PROFILE = {"dhcp_count": 50, "dhcp_enabled": True, "ip_version": "IPv4"}
67 # global variable for wait time
68 INTERVAL_TIME = 5
69 MAX_WAIT_TIME = 1800
70
71 API_VERSION = "27.0"
72
73 # -1: "Could not be created",
74 # 0: "Unresolved",
75 # 1: "Resolved",
76 # 2: "Deployed",
77 # 3: "Suspended",
78 # 4: "Powered on",
79 # 5: "Waiting for user input",
80 # 6: "Unknown state",
81 # 7: "Unrecognized state",
82 # 8: "Powered off",
83 # 9: "Inconsistent state",
84 # 10: "Children do not all have the same status",
85 # 11: "Upload initiated, OVF descriptor pending",
86 # 12: "Upload initiated, copying contents",
87 # 13: "Upload initiated , disk contents pending",
88 # 14: "Upload has been quarantined",
89 # 15: "Upload quarantine period has expired"
90
91 # mapping vCD status to MANO
92 vcdStatusCode2manoFormat = {
93 4: "ACTIVE",
94 7: "PAUSED",
95 3: "SUSPENDED",
96 8: "INACTIVE",
97 12: "BUILD",
98 -1: "ERROR",
99 14: "DELETED",
100 }
101
102 #
103 netStatus2manoFormat = {
104 "ACTIVE": "ACTIVE",
105 "PAUSED": "PAUSED",
106 "INACTIVE": "INACTIVE",
107 "BUILD": "BUILD",
108 "ERROR": "ERROR",
109 "DELETED": "DELETED",
110 }
111
112
113 class vimconnector(vimconn.VimConnector):
114 # dict used to store flavor in memory
115 flavorlist = {}
116
117 def __init__(
118 self,
119 uuid=None,
120 name=None,
121 tenant_id=None,
122 tenant_name=None,
123 url=None,
124 url_admin=None,
125 user=None,
126 passwd=None,
127 log_level=None,
128 config={},
129 persistent_info={},
130 ):
131 """
132 Constructor create vmware connector to vCloud director.
133
134 By default construct doesn't validate connection state. So client can create object with None arguments.
135 If client specified username , password and host and VDC name. Connector initialize other missing attributes.
136
137 a) It initialize organization UUID
138 b) Initialize tenant_id/vdc ID. (This information derived from tenant name)
139
140 Args:
141 uuid - is organization uuid.
142 name - is organization name that must be presented in vCloud director.
143 tenant_id - is VDC uuid it must be presented in vCloud director
144 tenant_name - is VDC name.
145 url - is hostname or ip address of vCloud director
146 url_admin - same as above.
147 user - is user that administrator for organization. Caller must make sure that
148 username has right privileges.
149
150 password - is password for a user.
151
152 VMware connector also requires PVDC administrative privileges and separate account.
153 This variables must be passed via config argument dict contains keys
154
155 dict['admin_username']
156 dict['admin_password']
157 config - Provide NSX and vCenter information
158
159 Returns:
160 Nothing.
161 """
162
163 vimconn.VimConnector.__init__(
164 self,
165 uuid,
166 name,
167 tenant_id,
168 tenant_name,
169 url,
170 url_admin,
171 user,
172 passwd,
173 log_level,
174 config,
175 )
176
177 self.logger = logging.getLogger("ro.vim.vmware")
178 self.logger.setLevel(10)
179 self.persistent_info = persistent_info
180
181 self.name = name
182 self.id = uuid
183 self.url = url
184 self.url_admin = url_admin
185 self.tenant_id = tenant_id
186 self.tenant_name = tenant_name
187 self.user = user
188 self.passwd = passwd
189 self.config = config
190 self.admin_password = None
191 self.admin_user = None
192 self.org_name = ""
193 self.nsx_manager = None
194 self.nsx_user = None
195 self.nsx_password = None
196 self.availability_zone = None
197
198 # Disable warnings from self-signed certificates.
199 requests.packages.urllib3.disable_warnings()
200
201 if tenant_name is not None:
202 orgnameandtenant = tenant_name.split(":")
203
204 if len(orgnameandtenant) == 2:
205 self.tenant_name = orgnameandtenant[1]
206 self.org_name = orgnameandtenant[0]
207 else:
208 self.tenant_name = tenant_name
209
210 if "orgname" in config:
211 self.org_name = config["orgname"]
212
213 if log_level:
214 self.logger.setLevel(getattr(logging, log_level))
215
216 try:
217 self.admin_user = config["admin_username"]
218 self.admin_password = config["admin_password"]
219 except KeyError:
220 raise vimconn.VimConnException(
221 message="Error admin username or admin password is empty."
222 )
223
224 try:
225 self.nsx_manager = config["nsx_manager"]
226 self.nsx_user = config["nsx_user"]
227 self.nsx_password = config["nsx_password"]
228 except KeyError:
229 raise vimconn.VimConnException(
230 message="Error: nsx manager or nsx user or nsx password is empty in Config"
231 )
232
233 self.vcenter_ip = config.get("vcenter_ip", None)
234 self.vcenter_port = config.get("vcenter_port", None)
235 self.vcenter_user = config.get("vcenter_user", None)
236 self.vcenter_password = config.get("vcenter_password", None)
237
238 # Set availability zone for Affinity rules
239 self.availability_zone = self.set_availability_zones()
240
241 # ############# Stub code for SRIOV #################
242 # try:
243 # self.dvs_name = config['dv_switch_name']
244 # except KeyError:
245 # raise vimconn.VimConnException(message="Error:
246 # distributed virtaul switch name is empty in Config")
247 #
248 # self.vlanID_range = config.get("vlanID_range", None)
249
250 self.org_uuid = None
251 self.client = None
252
253 if not url:
254 raise vimconn.VimConnException("url param can not be NoneType")
255
256 if not self.url_admin: # try to use normal url
257 self.url_admin = self.url
258
259 logging.debug(
260 "UUID: {} name: {} tenant_id: {} tenant name {}".format(
261 self.id, self.org_name, self.tenant_id, self.tenant_name
262 )
263 )
264 logging.debug(
265 "vcd url {} vcd username: {} vcd password: {}".format(
266 self.url, self.user, self.passwd
267 )
268 )
269 logging.debug(
270 "vcd admin username {} vcd admin passowrd {}".format(
271 self.admin_user, self.admin_password
272 )
273 )
274
275 # initialize organization
276 if self.user is not None and self.passwd is not None and self.url:
277 self.init_organization()
278
279 def __getitem__(self, index):
280 if index == "name":
281 return self.name
282
283 if index == "tenant_id":
284 return self.tenant_id
285
286 if index == "tenant_name":
287 return self.tenant_name
288 elif index == "id":
289 return self.id
290 elif index == "org_name":
291 return self.org_name
292 elif index == "org_uuid":
293 return self.org_uuid
294 elif index == "user":
295 return self.user
296 elif index == "passwd":
297 return self.passwd
298 elif index == "url":
299 return self.url
300 elif index == "url_admin":
301 return self.url_admin
302 elif index == "config":
303 return self.config
304 else:
305 raise KeyError("Invalid key '{}'".format(index))
306
307 def __setitem__(self, index, value):
308 if index == "name":
309 self.name = value
310
311 if index == "tenant_id":
312 self.tenant_id = value
313
314 if index == "tenant_name":
315 self.tenant_name = value
316 elif index == "id":
317 self.id = value
318 elif index == "org_name":
319 self.org_name = value
320 elif index == "org_uuid":
321 self.org_uuid = value
322 elif index == "user":
323 self.user = value
324 elif index == "passwd":
325 self.passwd = value
326 elif index == "url":
327 self.url = value
328 elif index == "url_admin":
329 self.url_admin = value
330 else:
331 raise KeyError("Invalid key '{}'".format(index))
332
333 def connect_as_admin(self):
334 """Method connect as pvdc admin user to vCloud director.
335 There are certain action that can be done only by provider vdc admin user.
336 Organization creation / provider network creation etc.
337
338 Returns:
339 The return client object that latter can be used to connect to vcloud director as admin for provider vdc
340 """
341 self.logger.debug("Logging into vCD {} as admin.".format(self.org_name))
342
343 try:
344 host = self.url
345 org = "System"
346 client_as_admin = Client(
347 host, verify_ssl_certs=False, api_version=API_VERSION
348 )
349 client_as_admin.set_credentials(
350 BasicLoginCredentials(self.admin_user, org, self.admin_password)
351 )
352 except Exception as e:
353 raise vimconn.VimConnException(
354 "Can't connect to vCloud director as: {} with exception {}".format(
355 self.admin_user, e
356 )
357 )
358
359 return client_as_admin
360
361 def connect(self):
362 """Method connect as normal user to vCloud director.
363
364 Returns:
365 The return client object that latter can be used to connect to vCloud director as admin for VDC
366 """
367 try:
368 self.logger.debug(
369 "Logging into vCD {} as {} to datacenter {}.".format(
370 self.org_name, self.user, self.org_name
371 )
372 )
373 host = self.url
374 client = Client(host, verify_ssl_certs=False, api_version=API_VERSION)
375 client.set_credentials(
376 BasicLoginCredentials(self.user, self.org_name, self.passwd)
377 )
378 except Exception as e:
379 raise vimconn.VimConnConnectionException(
380 "Can't connect to vCloud director org: "
381 "{} as user {} with exception: {}".format(self.org_name, self.user, e)
382 )
383
384 return client
385
386 def init_organization(self):
387 """Method initialize organization UUID and VDC parameters.
388
389 At bare minimum client must provide organization name that present in vCloud director and VDC.
390
391 The VDC - UUID ( tenant_id) will be initialized at the run time if client didn't call constructor.
392 The Org - UUID will be initialized at the run time if data center present in vCloud director.
393
394 Returns:
395 The return vca object that letter can be used to connect to vcloud direct as admin
396 """
397 client = self.connect()
398
399 if not client:
400 raise vimconn.VimConnConnectionException("Failed to connect vCD.")
401
402 self.client = client
403 try:
404 if self.org_uuid is None:
405 org_list = client.get_org_list()
406 for org in org_list.Org:
407 # we set org UUID at the init phase but we can do it only when we have valid credential.
408 if org.get("name") == self.org_name:
409 self.org_uuid = org.get("href").split("/")[-1]
410 self.logger.debug(
411 "Setting organization UUID {}".format(self.org_uuid)
412 )
413 break
414 else:
415 raise vimconn.VimConnException(
416 "Vcloud director organization {} not found".format(
417 self.org_name
418 )
419 )
420
421 # if well good we require for org details
422 org_details_dict = self.get_org(org_uuid=self.org_uuid)
423
424 # we have two case if we want to initialize VDC ID or VDC name at run time
425 # tenant_name provided but no tenant id
426 if (
427 self.tenant_id is None
428 and self.tenant_name is not None
429 and "vdcs" in org_details_dict
430 ):
431 vdcs_dict = org_details_dict["vdcs"]
432 for vdc in vdcs_dict:
433 if vdcs_dict[vdc] == self.tenant_name:
434 self.tenant_id = vdc
435 self.logger.debug(
436 "Setting vdc uuid {} for organization UUID {}".format(
437 self.tenant_id, self.org_name
438 )
439 )
440 break
441 else:
442 raise vimconn.VimConnException(
443 "Tenant name indicated but not present in vcloud director."
444 )
445
446 # case two we have tenant_id but we don't have tenant name so we find and set it.
447 if (
448 self.tenant_id is not None
449 and self.tenant_name is None
450 and "vdcs" in org_details_dict
451 ):
452 vdcs_dict = org_details_dict["vdcs"]
453 for vdc in vdcs_dict:
454 if vdc == self.tenant_id:
455 self.tenant_name = vdcs_dict[vdc]
456 self.logger.debug(
457 "Setting vdc uuid {} for organization UUID {}".format(
458 self.tenant_id, self.org_name
459 )
460 )
461 break
462 else:
463 raise vimconn.VimConnException(
464 "Tenant id indicated but not present in vcloud director"
465 )
466
467 self.logger.debug("Setting organization uuid {}".format(self.org_uuid))
468 except Exception as e:
469 self.logger.debug(
470 "Failed initialize organization UUID for org {}: {}".format(
471 self.org_name, e
472 ),
473 )
474 self.logger.debug(traceback.format_exc())
475 self.org_uuid = None
476
477 def new_tenant(self, tenant_name=None, tenant_description=None):
478 """Method adds a new tenant to VIM with this name.
479 This action requires access to create VDC action in vCloud director.
480
481 Args:
482 tenant_name is tenant_name to be created.
483 tenant_description not used for this call
484
485 Return:
486 returns the tenant identifier in UUID format.
487 If action is failed method will throw vimconn.VimConnException method
488 """
489 vdc_task = self.create_vdc(vdc_name=tenant_name)
490 if vdc_task is not None:
491 vdc_uuid, _ = vdc_task.popitem()
492 self.logger.info(
493 "Created new vdc {} and uuid: {}".format(tenant_name, vdc_uuid)
494 )
495
496 return vdc_uuid
497 else:
498 raise vimconn.VimConnException(
499 "Failed create tenant {}".format(tenant_name)
500 )
501
502 def delete_tenant(self, tenant_id=None):
503 """Delete a tenant from VIM
504 Args:
505 tenant_id is tenant_id to be deleted.
506
507 Return:
508 returns the tenant identifier in UUID format.
509 If action is failed method will throw exception
510 """
511 vca = self.connect_as_admin()
512 if not vca:
513 raise vimconn.VimConnConnectionException("Failed to connect vCD")
514
515 if tenant_id is not None:
516 if vca._session:
517 # Get OrgVDC
518 url_list = [self.url, "/api/vdc/", tenant_id]
519 orgvdc_herf = "".join(url_list)
520
521 headers = {
522 "Accept": "application/*+xml;version=" + API_VERSION,
523 "x-vcloud-authorization": vca._session.headers[
524 "x-vcloud-authorization"
525 ],
526 }
527 response = self.perform_request(
528 req_type="GET", url=orgvdc_herf, headers=headers
529 )
530
531 if response.status_code != requests.codes.ok:
532 self.logger.debug(
533 "delete_tenant():GET REST API call {} failed. "
534 "Return status code {}".format(
535 orgvdc_herf, response.status_code
536 )
537 )
538
539 raise vimconn.VimConnNotFoundException(
540 "Fail to get tenant {}".format(tenant_id)
541 )
542
543 lxmlroot_respond = lxmlElementTree.fromstring(response.content)
544 namespaces = {
545 prefix: uri
546 for prefix, uri in lxmlroot_respond.nsmap.items()
547 if prefix
548 }
549 namespaces["xmlns"] = "http://www.vmware.com/vcloud/v1.5"
550 vdc_remove_href = lxmlroot_respond.find(
551 "xmlns:Link[@rel='remove']", namespaces
552 ).attrib["href"]
553 vdc_remove_href = vdc_remove_href + "?recursive=true&force=true"
554
555 response = self.perform_request(
556 req_type="DELETE", url=vdc_remove_href, headers=headers
557 )
558
559 if response.status_code == 202:
560 time.sleep(5)
561
562 return tenant_id
563 else:
564 self.logger.debug(
565 "delete_tenant(): DELETE REST API call {} failed. "
566 "Return status code {}".format(
567 vdc_remove_href, response.status_code
568 )
569 )
570
571 raise vimconn.VimConnException(
572 "Fail to delete tenant with ID {}".format(tenant_id)
573 )
574 else:
575 self.logger.debug(
576 "delete_tenant():Incorrect tenant ID {}".format(tenant_id)
577 )
578
579 raise vimconn.VimConnNotFoundException(
580 "Fail to get tenant {}".format(tenant_id)
581 )
582
583 def get_tenant_list(self, filter_dict={}):
584 """Obtain tenants of VIM
585 filter_dict can contain the following keys:
586 name: filter by tenant name
587 id: filter by tenant uuid/id
588 <other VIM specific>
589 Returns the tenant list of dictionaries:
590 [{'name':'<name>, 'id':'<id>, ...}, ...]
591
592 """
593 org_dict = self.get_org(self.org_uuid)
594 vdcs_dict = org_dict["vdcs"]
595
596 vdclist = []
597 try:
598 for k in vdcs_dict:
599 entry = {"name": vdcs_dict[k], "id": k}
600 # if caller didn't specify dictionary we return all tenants.
601
602 if filter_dict is not None and filter_dict:
603 filtered_entry = entry.copy()
604 filtered_dict = set(entry.keys()) - set(filter_dict)
605
606 for unwanted_key in filtered_dict:
607 del entry[unwanted_key]
608
609 if filter_dict == entry:
610 vdclist.append(filtered_entry)
611 else:
612 vdclist.append(entry)
613 except Exception:
614 self.logger.debug("Error in get_tenant_list()")
615 self.logger.debug(traceback.format_exc())
616
617 raise vimconn.VimConnException("Incorrect state. {}")
618
619 return vdclist
620
621 def new_network(
622 self,
623 net_name,
624 net_type,
625 ip_profile=None,
626 shared=False,
627 provider_network_profile=None,
628 ):
629 """Adds a tenant network to VIM
630 Params:
631 'net_name': name of the network
632 'net_type': one of:
633 'bridge': overlay isolated network
634 'data': underlay E-LAN network for Passthrough and SRIOV interfaces
635 'ptp': underlay E-LINE network for Passthrough and SRIOV interfaces.
636 'ip_profile': is a dict containing the IP parameters of the network
637 'ip_version': can be "IPv4" or "IPv6" (Currently only IPv4 is implemented)
638 'subnet_address': ip_prefix_schema, that is X.X.X.X/Y
639 'gateway_address': (Optional) ip_schema, that is X.X.X.X
640 'dns_address': (Optional) comma separated list of ip_schema, e.g. X.X.X.X[,X,X,X,X]
641 'dhcp_enabled': True or False
642 'dhcp_start_address': ip_schema, first IP to grant
643 'dhcp_count': number of IPs to grant.
644 'shared': if this network can be seen/use by other tenants/organization
645 'provider_network_profile': (optional) contains {segmentation-id: vlan, provider-network: vim_netowrk}
646 Returns a tuple with the network identifier and created_items, or raises an exception on error
647 created_items can be None or a dictionary where this method can include key-values that will be passed to
648 the method delete_network. Can be used to store created segments, created l2gw connections, etc.
649 Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
650 as not present.
651 """
652
653 self.logger.debug(
654 "new_network tenant {} net_type {} ip_profile {} shared {} provider_network_profile {}".format(
655 net_name, net_type, ip_profile, shared, provider_network_profile
656 )
657 )
658 # vlan = None
659 # if provider_network_profile:
660 # vlan = provider_network_profile.get("segmentation-id")
661
662 created_items = {}
663 isshared = "false"
664
665 if shared:
666 isshared = "true"
667
668 # ############# Stub code for SRIOV #################
669 # if net_type == "data" or net_type == "ptp":
670 # if self.config.get('dv_switch_name') == None:
671 # raise vimconn.VimConnConflictException("You must provide 'dv_switch_name' at config value")
672 # network_uuid = self.create_dvPort_group(net_name)
673 parent_network_uuid = None
674
675 if provider_network_profile is not None:
676 for k, v in provider_network_profile.items():
677 if k == "physical_network":
678 parent_network_uuid = self.get_physical_network_by_name(v)
679
680 network_uuid = self.create_network(
681 network_name=net_name,
682 net_type=net_type,
683 ip_profile=ip_profile,
684 isshared=isshared,
685 parent_network_uuid=parent_network_uuid,
686 )
687
688 if network_uuid is not None:
689 return network_uuid, created_items
690 else:
691 raise vimconn.VimConnUnexpectedResponse(
692 "Failed create a new network {}".format(net_name)
693 )
694
695 def get_vcd_network_list(self):
696 """Method available organization for a logged in tenant
697
698 Returns:
699 The return vca object that letter can be used to connect to vcloud direct as admin
700 """
701
702 self.logger.debug(
703 "get_vcd_network_list(): retrieving network list for vcd {}".format(
704 self.tenant_name
705 )
706 )
707
708 if not self.tenant_name:
709 raise vimconn.VimConnConnectionException("Tenant name is empty.")
710
711 _, vdc = self.get_vdc_details()
712 if vdc is None:
713 raise vimconn.VimConnConnectionException(
714 "Can't retrieve information for a VDC {}".format(self.tenant_name)
715 )
716
717 vdc_uuid = vdc.get("id").split(":")[3]
718 if self.client._session:
719 headers = {
720 "Accept": "application/*+xml;version=" + API_VERSION,
721 "x-vcloud-authorization": self.client._session.headers[
722 "x-vcloud-authorization"
723 ],
724 }
725 response = self.perform_request(
726 req_type="GET", url=vdc.get("href"), headers=headers
727 )
728
729 if response.status_code != 200:
730 self.logger.error("Failed to get vdc content")
731 raise vimconn.VimConnNotFoundException("Failed to get vdc content")
732 else:
733 content = XmlElementTree.fromstring(response.text)
734
735 network_list = []
736 try:
737 for item in content:
738 if item.tag.split("}")[-1] == "AvailableNetworks":
739 for net in item:
740 response = self.perform_request(
741 req_type="GET", url=net.get("href"), headers=headers
742 )
743
744 if response.status_code != 200:
745 self.logger.error("Failed to get network content")
746 raise vimconn.VimConnNotFoundException(
747 "Failed to get network content"
748 )
749 else:
750 net_details = XmlElementTree.fromstring(response.text)
751
752 filter_dict = {}
753 net_uuid = net_details.get("id").split(":")
754
755 if len(net_uuid) != 4:
756 continue
757 else:
758 net_uuid = net_uuid[3]
759 # create dict entry
760 self.logger.debug(
761 "get_vcd_network_list(): Adding network {} "
762 "to a list vcd id {} network {}".format(
763 net_uuid, vdc_uuid, net_details.get("name")
764 )
765 )
766 filter_dict["name"] = net_details.get("name")
767 filter_dict["id"] = net_uuid
768
769 if [
770 i.text
771 for i in net_details
772 if i.tag.split("}")[-1] == "IsShared"
773 ][0] == "true":
774 shared = True
775 else:
776 shared = False
777
778 filter_dict["shared"] = shared
779 filter_dict["tenant_id"] = vdc_uuid
780
781 if int(net_details.get("status")) == 1:
782 filter_dict["admin_state_up"] = True
783 else:
784 filter_dict["admin_state_up"] = False
785
786 filter_dict["status"] = "ACTIVE"
787 filter_dict["type"] = "bridge"
788 network_list.append(filter_dict)
789 self.logger.debug(
790 "get_vcd_network_list adding entry {}".format(
791 filter_dict
792 )
793 )
794 except Exception:
795 self.logger.debug("Error in get_vcd_network_list", exc_info=True)
796 pass
797
798 self.logger.debug("get_vcd_network_list returning {}".format(network_list))
799
800 return network_list
801
802 def get_network_list(self, filter_dict={}):
803 """Obtain tenant networks of VIM
804 Filter_dict can be:
805 name: network name OR/AND
806 id: network uuid OR/AND
807 shared: boolean OR/AND
808 tenant_id: tenant OR/AND
809 admin_state_up: boolean
810 status: 'ACTIVE'
811
812 [{key : value , key : value}]
813
814 Returns the network list of dictionaries:
815 [{<the fields at Filter_dict plus some VIM specific>}, ...]
816 List can be empty
817 """
818
819 self.logger.debug(
820 "get_network_list(): retrieving network list for vcd {}".format(
821 self.tenant_name
822 )
823 )
824
825 if not self.tenant_name:
826 raise vimconn.VimConnConnectionException("Tenant name is empty.")
827
828 _, vdc = self.get_vdc_details()
829 if vdc is None:
830 raise vimconn.VimConnConnectionException(
831 "Can't retrieve information for a VDC {}.".format(self.tenant_name)
832 )
833
834 try:
835 vdcid = vdc.get("id").split(":")[3]
836
837 if self.client._session:
838 headers = {
839 "Accept": "application/*+xml;version=" + API_VERSION,
840 "x-vcloud-authorization": self.client._session.headers[
841 "x-vcloud-authorization"
842 ],
843 }
844 response = self.perform_request(
845 req_type="GET", url=vdc.get("href"), headers=headers
846 )
847
848 if response.status_code != 200:
849 self.logger.error("Failed to get vdc content")
850 raise vimconn.VimConnNotFoundException("Failed to get vdc content")
851 else:
852 content = XmlElementTree.fromstring(response.text)
853
854 network_list = []
855 for item in content:
856 if item.tag.split("}")[-1] == "AvailableNetworks":
857 for net in item:
858 response = self.perform_request(
859 req_type="GET", url=net.get("href"), headers=headers
860 )
861
862 if response.status_code != 200:
863 self.logger.error("Failed to get network content")
864 raise vimconn.VimConnNotFoundException(
865 "Failed to get network content"
866 )
867 else:
868 net_details = XmlElementTree.fromstring(response.text)
869
870 filter_entry = {}
871 net_uuid = net_details.get("id").split(":")
872
873 if len(net_uuid) != 4:
874 continue
875 else:
876 net_uuid = net_uuid[3]
877 # create dict entry
878 self.logger.debug(
879 "get_network_list(): Adding net {}"
880 " to a list vcd id {} network {}".format(
881 net_uuid, vdcid, net_details.get("name")
882 )
883 )
884 filter_entry["name"] = net_details.get("name")
885 filter_entry["id"] = net_uuid
886
887 if [
888 i.text
889 for i in net_details
890 if i.tag.split("}")[-1] == "IsShared"
891 ][0] == "true":
892 shared = True
893 else:
894 shared = False
895
896 filter_entry["shared"] = shared
897 filter_entry["tenant_id"] = vdcid
898
899 if int(net_details.get("status")) == 1:
900 filter_entry["admin_state_up"] = True
901 else:
902 filter_entry["admin_state_up"] = False
903
904 filter_entry["status"] = "ACTIVE"
905 filter_entry["type"] = "bridge"
906 filtered_entry = filter_entry.copy()
907
908 if filter_dict is not None and filter_dict:
909 # we remove all the key : value we don't care and match only
910 # respected field
911 filtered_dict = set(filter_entry.keys()) - set(
912 filter_dict
913 )
914
915 for unwanted_key in filtered_dict:
916 del filter_entry[unwanted_key]
917
918 if filter_dict == filter_entry:
919 network_list.append(filtered_entry)
920 else:
921 network_list.append(filtered_entry)
922 except Exception as e:
923 self.logger.debug("Error in get_network_list", exc_info=True)
924
925 if isinstance(e, vimconn.VimConnException):
926 raise
927 else:
928 raise vimconn.VimConnNotFoundException(
929 "Failed : Networks list not found {} ".format(e)
930 )
931
932 self.logger.debug("Returning {}".format(network_list))
933
934 return network_list
935
936 def get_network(self, net_id):
937 """Method obtains network details of net_id VIM network
938 Return a dict with the fields at filter_dict (see get_network_list) plus some VIM specific>}, ...]"""
939 try:
940 _, vdc = self.get_vdc_details()
941 vdc_id = vdc.get("id").split(":")[3]
942
943 if self.client._session:
944 headers = {
945 "Accept": "application/*+xml;version=" + API_VERSION,
946 "x-vcloud-authorization": self.client._session.headers[
947 "x-vcloud-authorization"
948 ],
949 }
950 response = self.perform_request(
951 req_type="GET", url=vdc.get("href"), headers=headers
952 )
953
954 if response.status_code != 200:
955 self.logger.error("Failed to get vdc content")
956 raise vimconn.VimConnNotFoundException("Failed to get vdc content")
957 else:
958 content = XmlElementTree.fromstring(response.text)
959
960 filter_dict = {}
961
962 for item in content:
963 if item.tag.split("}")[-1] == "AvailableNetworks":
964 for net in item:
965 response = self.perform_request(
966 req_type="GET", url=net.get("href"), headers=headers
967 )
968
969 if response.status_code != 200:
970 self.logger.error("Failed to get network content")
971 raise vimconn.VimConnNotFoundException(
972 "Failed to get network content"
973 )
974 else:
975 net_details = XmlElementTree.fromstring(response.text)
976
977 vdc_network_id = net_details.get("id").split(":")
978 if len(vdc_network_id) == 4 and vdc_network_id[3] == net_id:
979 filter_dict["name"] = net_details.get("name")
980 filter_dict["id"] = vdc_network_id[3]
981
982 if [
983 i.text
984 for i in net_details
985 if i.tag.split("}")[-1] == "IsShared"
986 ][0] == "true":
987 shared = True
988 else:
989 shared = False
990
991 filter_dict["shared"] = shared
992 filter_dict["tenant_id"] = vdc_id
993
994 if int(net_details.get("status")) == 1:
995 filter_dict["admin_state_up"] = True
996 else:
997 filter_dict["admin_state_up"] = False
998
999 filter_dict["status"] = "ACTIVE"
1000 filter_dict["type"] = "bridge"
1001 self.logger.debug("Returning {}".format(filter_dict))
1002
1003 return filter_dict
1004 else:
1005 raise vimconn.VimConnNotFoundException(
1006 "Network {} not found".format(net_id)
1007 )
1008 except Exception as e:
1009 self.logger.debug("Error in get_network")
1010 self.logger.debug(traceback.format_exc())
1011
1012 if isinstance(e, vimconn.VimConnException):
1013 raise
1014 else:
1015 raise vimconn.VimConnNotFoundException(
1016 "Failed : Network not found {} ".format(e)
1017 )
1018
1019 return filter_dict
1020
1021 def delete_network(self, net_id, created_items=None):
1022 """
1023 Removes a tenant network from VIM and its associated elements
1024 :param net_id: VIM identifier of the network, provided by method new_network
1025 :param created_items: dictionary with extra items to be deleted. provided by method new_network
1026 Returns the network identifier or raises an exception upon error or when network is not found
1027 """
1028
1029 # ############# Stub code for SRIOV #################
1030 # dvport_group = self.get_dvport_group(net_id)
1031 # if dvport_group:
1032 # #delete portgroup
1033 # status = self.destroy_dvport_group(net_id)
1034 # if status:
1035 # # Remove vlanID from persistent info
1036 # if net_id in self.persistent_info["used_vlanIDs"]:
1037 # del self.persistent_info["used_vlanIDs"][net_id]
1038 #
1039 # return net_id
1040
1041 vcd_network = self.get_vcd_network(network_uuid=net_id)
1042 if vcd_network is not None and vcd_network:
1043 if self.delete_network_action(network_uuid=net_id):
1044 return net_id
1045 else:
1046 raise vimconn.VimConnNotFoundException(
1047 "Network {} not found".format(net_id)
1048 )
1049
1050 def refresh_nets_status(self, net_list):
1051 """Get the status of the networks
1052 Params: the list of network identifiers
1053 Returns a dictionary with:
1054 net_id: #VIM id of this network
1055 status: #Mandatory. Text with one of:
1056 # DELETED (not found at vim)
1057 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
1058 # OTHER (Vim reported other status not understood)
1059 # ERROR (VIM indicates an ERROR status)
1060 # ACTIVE, INACTIVE, DOWN (admin down),
1061 # BUILD (on building process)
1062 #
1063 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
1064 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
1065
1066 """
1067 dict_entry = {}
1068 try:
1069 for net in net_list:
1070 errormsg = ""
1071 vcd_network = self.get_vcd_network(network_uuid=net)
1072 if vcd_network is not None and vcd_network:
1073 if vcd_network["status"] == "1":
1074 status = "ACTIVE"
1075 else:
1076 status = "DOWN"
1077 else:
1078 status = "DELETED"
1079 errormsg = "Network not found."
1080
1081 dict_entry[net] = {
1082 "status": status,
1083 "error_msg": errormsg,
1084 "vim_info": yaml.safe_dump(vcd_network),
1085 }
1086 except Exception:
1087 self.logger.debug("Error in refresh_nets_status")
1088 self.logger.debug(traceback.format_exc())
1089
1090 return dict_entry
1091
1092 def get_flavor(self, flavor_id):
1093 """Obtain flavor details from the VIM
1094 Returns the flavor dict details {'id':<>, 'name':<>, other vim specific } #TODO to concrete
1095 """
1096 if flavor_id not in vimconnector.flavorlist:
1097 raise vimconn.VimConnNotFoundException("Flavor not found.")
1098
1099 return vimconnector.flavorlist[flavor_id]
1100
1101 def new_flavor(self, flavor_data):
1102 """Adds a tenant flavor to VIM
1103 flavor_data contains a dictionary with information, keys:
1104 name: flavor name
1105 ram: memory (cloud type) in MBytes
1106 vpcus: cpus (cloud type)
1107 extended: EPA parameters
1108 - numas: #items requested in same NUMA
1109 memory: number of 1G huge pages memory
1110 paired-threads|cores|threads: number of paired hyperthreads, complete cores OR individual
1111 threads
1112 interfaces: # passthrough(PT) or SRIOV interfaces attached to this numa
1113 - name: interface name
1114 dedicated: yes|no|yes:sriov; for PT, SRIOV or only one SRIOV for the physical NIC
1115 bandwidth: X Gbps; requested guarantee bandwidth
1116 vpci: requested virtual PCI address
1117 disk: disk size
1118 is_public:
1119 #TODO to concrete
1120 Returns the flavor identifier"""
1121
1122 # generate a new uuid put to internal dict and return it.
1123 self.logger.debug("Creating new flavor - flavor_data: {}".format(flavor_data))
1124 new_flavor = flavor_data
1125 ram = flavor_data.get(FLAVOR_RAM_KEY, 1024)
1126 cpu = flavor_data.get(FLAVOR_VCPUS_KEY, 1)
1127 disk = flavor_data.get(FLAVOR_DISK_KEY, 0)
1128
1129 if not isinstance(ram, int):
1130 raise vimconn.VimConnException("Non-integer value for ram")
1131 elif not isinstance(cpu, int):
1132 raise vimconn.VimConnException("Non-integer value for cpu")
1133 elif not isinstance(disk, int):
1134 raise vimconn.VimConnException("Non-integer value for disk")
1135
1136 extended_flv = flavor_data.get("extended")
1137 if extended_flv:
1138 numas = extended_flv.get("numas")
1139 if numas:
1140 for numa in numas:
1141 # overwrite ram and vcpus
1142 if "memory" in numa:
1143 ram = numa["memory"] * 1024
1144
1145 if "paired-threads" in numa:
1146 cpu = numa["paired-threads"] * 2
1147 elif "cores" in numa:
1148 cpu = numa["cores"]
1149 elif "threads" in numa:
1150 cpu = numa["threads"]
1151
1152 new_flavor[FLAVOR_RAM_KEY] = ram
1153 new_flavor[FLAVOR_VCPUS_KEY] = cpu
1154 new_flavor[FLAVOR_DISK_KEY] = disk
1155 # generate a new uuid put to internal dict and return it.
1156 flavor_id = uuid.uuid4()
1157 vimconnector.flavorlist[str(flavor_id)] = new_flavor
1158 self.logger.debug("Created flavor - {} : {}".format(flavor_id, new_flavor))
1159
1160 return str(flavor_id)
1161
1162 def delete_flavor(self, flavor_id):
1163 """Deletes a tenant flavor from VIM identify by its id
1164
1165 Returns the used id or raise an exception
1166 """
1167 if flavor_id not in vimconnector.flavorlist:
1168 raise vimconn.VimConnNotFoundException("Flavor not found.")
1169
1170 vimconnector.flavorlist.pop(flavor_id, None)
1171
1172 return flavor_id
1173
1174 def new_image(self, image_dict):
1175 """
1176 Adds a tenant image to VIM
1177 Returns:
1178 200, image-id if the image is created
1179 <0, message if there is an error
1180 """
1181 return self.get_image_id_from_path(image_dict["location"])
1182
1183 def delete_image(self, image_id):
1184 """
1185 Deletes a tenant image from VIM
1186 Args:
1187 image_id is ID of Image to be deleted
1188 Return:
1189 returns the image identifier in UUID format or raises an exception on error
1190 """
1191 conn = self.connect_as_admin()
1192
1193 if not conn:
1194 raise vimconn.VimConnConnectionException("Failed to connect vCD")
1195
1196 # Get Catalog details
1197 url_list = [self.url, "/api/catalog/", image_id]
1198 catalog_herf = "".join(url_list)
1199
1200 headers = {
1201 "Accept": "application/*+xml;version=" + API_VERSION,
1202 "x-vcloud-authorization": conn._session.headers["x-vcloud-authorization"],
1203 }
1204
1205 response = self.perform_request(
1206 req_type="GET", url=catalog_herf, headers=headers
1207 )
1208
1209 if response.status_code != requests.codes.ok:
1210 self.logger.debug(
1211 "delete_image():GET REST API call {} failed. "
1212 "Return status code {}".format(catalog_herf, response.status_code)
1213 )
1214
1215 raise vimconn.VimConnNotFoundException(
1216 "Fail to get image {}".format(image_id)
1217 )
1218
1219 lxmlroot_respond = lxmlElementTree.fromstring(response.content)
1220 namespaces = {
1221 prefix: uri for prefix, uri in lxmlroot_respond.nsmap.items() if prefix
1222 }
1223 namespaces["xmlns"] = "http://www.vmware.com/vcloud/v1.5"
1224
1225 catalogItems_section = lxmlroot_respond.find("xmlns:CatalogItems", namespaces)
1226 catalogItems = catalogItems_section.iterfind("xmlns:CatalogItem", namespaces)
1227
1228 for catalogItem in catalogItems:
1229 catalogItem_href = catalogItem.attrib["href"]
1230
1231 response = self.perform_request(
1232 req_type="GET", url=catalogItem_href, headers=headers
1233 )
1234
1235 if response.status_code != requests.codes.ok:
1236 self.logger.debug(
1237 "delete_image():GET REST API call {} failed. "
1238 "Return status code {}".format(catalog_herf, response.status_code)
1239 )
1240 raise vimconn.VimConnNotFoundException(
1241 "Fail to get catalogItem {} for catalog {}".format(
1242 catalogItem, image_id
1243 )
1244 )
1245
1246 lxmlroot_respond = lxmlElementTree.fromstring(response.content)
1247 namespaces = {
1248 prefix: uri for prefix, uri in lxmlroot_respond.nsmap.items() if prefix
1249 }
1250 namespaces["xmlns"] = "http://www.vmware.com/vcloud/v1.5"
1251 catalogitem_remove_href = lxmlroot_respond.find(
1252 "xmlns:Link[@rel='remove']", namespaces
1253 ).attrib["href"]
1254
1255 # Remove catalogItem
1256 response = self.perform_request(
1257 req_type="DELETE", url=catalogitem_remove_href, headers=headers
1258 )
1259
1260 if response.status_code == requests.codes.no_content:
1261 self.logger.debug("Deleted Catalog item {}".format(catalogItem))
1262 else:
1263 raise vimconn.VimConnException(
1264 "Fail to delete Catalog Item {}".format(catalogItem)
1265 )
1266
1267 # Remove catalog
1268 url_list = [self.url, "/api/admin/catalog/", image_id]
1269 catalog_remove_herf = "".join(url_list)
1270 response = self.perform_request(
1271 req_type="DELETE", url=catalog_remove_herf, headers=headers
1272 )
1273
1274 if response.status_code == requests.codes.no_content:
1275 self.logger.debug("Deleted Catalog {}".format(image_id))
1276
1277 return image_id
1278 else:
1279 raise vimconn.VimConnException("Fail to delete Catalog {}".format(image_id))
1280
1281 def catalog_exists(self, catalog_name, catalogs):
1282 """
1283
1284 :param catalog_name:
1285 :param catalogs:
1286 :return:
1287 """
1288 for catalog in catalogs:
1289 if catalog["name"] == catalog_name:
1290 return catalog["id"]
1291
1292 def create_vimcatalog(self, vca=None, catalog_name=None):
1293 """Create new catalog entry in vCloud director.
1294
1295 Args
1296 vca: vCloud director.
1297 catalog_name catalog that client wish to create. Note no validation done for a name.
1298 Client must make sure that provide valid string representation.
1299
1300 Returns catalog id if catalog created else None.
1301
1302 """
1303 try:
1304 lxml_catalog_element = vca.create_catalog(catalog_name, catalog_name)
1305
1306 if lxml_catalog_element:
1307 id_attr_value = lxml_catalog_element.get("id")
1308 return id_attr_value.split(":")[-1]
1309
1310 catalogs = vca.list_catalogs()
1311 except Exception as ex:
1312 self.logger.error(
1313 'create_vimcatalog(): Creation of catalog "{}" failed with error: {}'.format(
1314 catalog_name, ex
1315 )
1316 )
1317 raise
1318 return self.catalog_exists(catalog_name, catalogs)
1319
1320 # noinspection PyIncorrectDocstring
1321 def upload_ovf(
1322 self,
1323 vca=None,
1324 catalog_name=None,
1325 image_name=None,
1326 media_file_name=None,
1327 description="",
1328 progress=False,
1329 chunk_bytes=128 * 1024,
1330 ):
1331 """
1332 Uploads a OVF file to a vCloud catalog
1333
1334 :param chunk_bytes:
1335 :param progress:
1336 :param description:
1337 :param image_name:
1338 :param vca:
1339 :param catalog_name: (str): The name of the catalog to upload the media.
1340 :param media_file_name: (str): The name of the local media file to upload.
1341 :return: (bool) True if the media file was successfully uploaded, false otherwise.
1342 """
1343 os.path.isfile(media_file_name)
1344 statinfo = os.stat(media_file_name)
1345
1346 # find a catalog entry where we upload OVF.
1347 # create vApp Template and check the status if vCD able to read OVF it will respond with appropirate
1348 # status change.
1349 # if VCD can parse OVF we upload VMDK file
1350 try:
1351 for catalog in vca.list_catalogs():
1352 if catalog_name != catalog["name"]:
1353 continue
1354 catalog_href = "{}/api/catalog/{}/action/upload".format(
1355 self.url, catalog["id"]
1356 )
1357 data = """
1358 <UploadVAppTemplateParams name="{}"
1359 xmlns="http://www.vmware.com/vcloud/v1.5"
1360 xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1">
1361 <Description>{} vApp Template</Description>
1362 </UploadVAppTemplateParams>
1363 """.format(
1364 catalog_name, description
1365 )
1366
1367 if self.client:
1368 headers = {
1369 "Accept": "application/*+xml;version=" + API_VERSION,
1370 "x-vcloud-authorization": self.client._session.headers[
1371 "x-vcloud-authorization"
1372 ],
1373 }
1374 headers[
1375 "Content-Type"
1376 ] = "application/vnd.vmware.vcloud.uploadVAppTemplateParams+xml"
1377
1378 response = self.perform_request(
1379 req_type="POST", url=catalog_href, headers=headers, data=data
1380 )
1381
1382 if response.status_code == requests.codes.created:
1383 catalogItem = XmlElementTree.fromstring(response.text)
1384 entity = [
1385 child
1386 for child in catalogItem
1387 if child.get("type")
1388 == "application/vnd.vmware.vcloud.vAppTemplate+xml"
1389 ][0]
1390 href = entity.get("href")
1391 template = href
1392
1393 response = self.perform_request(
1394 req_type="GET", url=href, headers=headers
1395 )
1396
1397 if response.status_code == requests.codes.ok:
1398 headers["Content-Type"] = "Content-Type text/xml"
1399 result = re.search(
1400 'rel="upload:default"\shref="(.*?\/descriptor.ovf)"',
1401 response.text,
1402 )
1403
1404 if result:
1405 transfer_href = result.group(1)
1406
1407 response = self.perform_request(
1408 req_type="PUT",
1409 url=transfer_href,
1410 headers=headers,
1411 data=open(media_file_name, "rb"),
1412 )
1413
1414 if response.status_code != requests.codes.ok:
1415 self.logger.debug(
1416 "Failed create vApp template for catalog name {} and image {}".format(
1417 catalog_name, media_file_name
1418 )
1419 )
1420 return False
1421
1422 # TODO fix this with aync block
1423 time.sleep(5)
1424
1425 self.logger.debug(
1426 "vApp template for catalog name {} and image {}".format(
1427 catalog_name, media_file_name
1428 )
1429 )
1430
1431 # uploading VMDK file
1432 # check status of OVF upload and upload remaining files.
1433 response = self.perform_request(
1434 req_type="GET", url=template, headers=headers
1435 )
1436
1437 if response.status_code == requests.codes.ok:
1438 result = re.search(
1439 'rel="upload:default"\s*href="(.*?vmdk)"', response.text
1440 )
1441
1442 if result:
1443 link_href = result.group(1)
1444
1445 # we skip ovf since it already uploaded.
1446 if "ovf" in link_href:
1447 continue
1448
1449 # The OVF file and VMDK must be in a same directory
1450 head, _ = os.path.split(media_file_name)
1451 file_vmdk = head + "/" + link_href.split("/")[-1]
1452
1453 if not os.path.isfile(file_vmdk):
1454 return False
1455
1456 statinfo = os.stat(file_vmdk)
1457 if statinfo.st_size == 0:
1458 return False
1459
1460 hrefvmdk = link_href
1461
1462 if progress:
1463 widgets = [
1464 "Uploading file: ",
1465 Percentage(),
1466 " ",
1467 Bar(),
1468 " ",
1469 ETA(),
1470 " ",
1471 FileTransferSpeed(),
1472 ]
1473 progress_bar = ProgressBar(
1474 widgets=widgets, maxval=statinfo.st_size
1475 ).start()
1476
1477 bytes_transferred = 0
1478 f = open(file_vmdk, "rb")
1479
1480 while bytes_transferred < statinfo.st_size:
1481 my_bytes = f.read(chunk_bytes)
1482 if len(my_bytes) <= chunk_bytes:
1483 headers["Content-Range"] = "bytes {}-{}/{}".format(
1484 bytes_transferred,
1485 len(my_bytes) - 1,
1486 statinfo.st_size,
1487 )
1488 headers["Content-Length"] = str(len(my_bytes))
1489 response = requests.put(
1490 url=hrefvmdk,
1491 headers=headers,
1492 data=my_bytes,
1493 verify=False,
1494 )
1495
1496 if response.status_code == requests.codes.ok:
1497 bytes_transferred += len(my_bytes)
1498 if progress:
1499 progress_bar.update(bytes_transferred)
1500 else:
1501 self.logger.debug(
1502 "file upload failed with error: [{}] {}".format(
1503 response.status_code, response.text
1504 )
1505 )
1506
1507 f.close()
1508
1509 return False
1510
1511 f.close()
1512 if progress:
1513 progress_bar.finish()
1514 time.sleep(10)
1515
1516 return True
1517 else:
1518 self.logger.debug(
1519 "Failed retrieve vApp template for catalog name {} for OVF {}".format(
1520 catalog_name, media_file_name
1521 )
1522 )
1523 return False
1524 except Exception as exp:
1525 self.logger.debug(
1526 "Failed while uploading OVF to catalog {} for OVF file {} with Exception {}".format(
1527 catalog_name, media_file_name, exp
1528 )
1529 )
1530
1531 raise vimconn.VimConnException(
1532 "Failed while uploading OVF to catalog {} for OVF file {} with Exception {}".format(
1533 catalog_name, media_file_name, exp
1534 )
1535 )
1536
1537 self.logger.debug(
1538 "Failed retrieve catalog name {} for OVF file {}".format(
1539 catalog_name, media_file_name
1540 )
1541 )
1542
1543 return False
1544
1545 def upload_vimimage(
1546 self,
1547 vca=None,
1548 catalog_name=None,
1549 media_name=None,
1550 medial_file_name=None,
1551 progress=False,
1552 ):
1553 """Upload media file"""
1554 # TODO add named parameters for readability
1555 return self.upload_ovf(
1556 vca=vca,
1557 catalog_name=catalog_name,
1558 image_name=media_name.split(".")[0],
1559 media_file_name=medial_file_name,
1560 description="medial_file_name",
1561 progress=progress,
1562 )
1563
1564 def validate_uuid4(self, uuid_string=None):
1565 """Method validate correct format of UUID.
1566
1567 Return: true if string represent valid uuid
1568 """
1569 try:
1570 uuid.UUID(uuid_string, version=4)
1571 except ValueError:
1572 return False
1573
1574 return True
1575
1576 def get_catalogid(self, catalog_name=None, catalogs=None):
1577 """Method check catalog and return catalog ID in UUID format.
1578
1579 Args
1580 catalog_name: catalog name as string
1581 catalogs: list of catalogs.
1582
1583 Return: catalogs uuid
1584 """
1585 for catalog in catalogs:
1586 if catalog["name"] == catalog_name:
1587 catalog_id = catalog["id"]
1588 return catalog_id
1589
1590 return None
1591
1592 def get_catalogbyid(self, catalog_uuid=None, catalogs=None):
1593 """Method check catalog and return catalog name lookup done by catalog UUID.
1594
1595 Args
1596 catalog_name: catalog name as string
1597 catalogs: list of catalogs.
1598
1599 Return: catalogs name or None
1600 """
1601 if not self.validate_uuid4(uuid_string=catalog_uuid):
1602 return None
1603
1604 for catalog in catalogs:
1605 catalog_id = catalog.get("id")
1606
1607 if catalog_id == catalog_uuid:
1608 return catalog.get("name")
1609
1610 return None
1611
1612 def get_catalog_obj(self, catalog_uuid=None, catalogs=None):
1613 """Method check catalog and return catalog name lookup done by catalog UUID.
1614
1615 Args
1616 catalog_name: catalog name as string
1617 catalogs: list of catalogs.
1618
1619 Return: catalogs name or None
1620 """
1621 if not self.validate_uuid4(uuid_string=catalog_uuid):
1622 return None
1623
1624 for catalog in catalogs:
1625 catalog_id = catalog.get("id")
1626
1627 if catalog_id == catalog_uuid:
1628 return catalog
1629
1630 return None
1631
1632 def get_image_id_from_path(self, path=None, progress=False):
1633 """Method upload OVF image to vCloud director.
1634
1635 Each OVF image represented as single catalog entry in vcloud director.
1636 The method check for existing catalog entry. The check done by file name without file extension.
1637
1638 if given catalog name already present method will respond with existing catalog uuid otherwise
1639 it will create new catalog entry and upload OVF file to newly created catalog.
1640
1641 If method can't create catalog entry or upload a file it will throw exception.
1642
1643 Method accept boolean flag progress that will output progress bar. It useful method
1644 for standalone upload use case. In case to test large file upload.
1645
1646 Args
1647 path: - valid path to OVF file.
1648 progress - boolean progress bar show progress bar.
1649
1650 Return: if image uploaded correct method will provide image catalog UUID.
1651 """
1652 if not path:
1653 raise vimconn.VimConnException("Image path can't be None.")
1654
1655 if not os.path.isfile(path):
1656 raise vimconn.VimConnException("Can't read file. File not found.")
1657
1658 if not os.access(path, os.R_OK):
1659 raise vimconn.VimConnException(
1660 "Can't read file. Check file permission to read."
1661 )
1662
1663 self.logger.debug("get_image_id_from_path() client requesting {} ".format(path))
1664
1665 _, filename = os.path.split(path)
1666 _, file_extension = os.path.splitext(path)
1667 if file_extension != ".ovf":
1668 self.logger.debug(
1669 "Wrong file extension {} connector support only OVF container.".format(
1670 file_extension
1671 )
1672 )
1673
1674 raise vimconn.VimConnException(
1675 "Wrong container. vCloud director supports only OVF."
1676 )
1677
1678 catalog_name = os.path.splitext(filename)[0]
1679 catalog_md5_name = hashlib.md5(path.encode("utf-8")).hexdigest()
1680 self.logger.debug(
1681 "File name {} Catalog Name {} file path {} "
1682 "vdc catalog name {}".format(filename, catalog_name, path, catalog_md5_name)
1683 )
1684
1685 try:
1686 org, _ = self.get_vdc_details()
1687 catalogs = org.list_catalogs()
1688 except Exception as exp:
1689 self.logger.debug("Failed get catalogs() with Exception {} ".format(exp))
1690
1691 raise vimconn.VimConnException(
1692 "Failed get catalogs() with Exception {} ".format(exp)
1693 )
1694
1695 if len(catalogs) == 0:
1696 self.logger.info(
1697 "Creating a new catalog entry {} in vcloud director".format(
1698 catalog_name
1699 )
1700 )
1701
1702 if self.create_vimcatalog(org, catalog_md5_name) is None:
1703 raise vimconn.VimConnException(
1704 "Failed create new catalog {} ".format(catalog_md5_name)
1705 )
1706
1707 result = self.upload_vimimage(
1708 vca=org,
1709 catalog_name=catalog_md5_name,
1710 media_name=filename,
1711 medial_file_name=path,
1712 progress=progress,
1713 )
1714
1715 if not result:
1716 raise vimconn.VimConnException(
1717 "Failed create vApp template for catalog {} ".format(catalog_name)
1718 )
1719
1720 return self.get_catalogid(catalog_name, catalogs)
1721 else:
1722 for catalog in catalogs:
1723 # search for existing catalog if we find same name we return ID
1724 # TODO optimize this
1725 if catalog["name"] == catalog_md5_name:
1726 self.logger.debug(
1727 "Found existing catalog entry for {} "
1728 "catalog id {}".format(
1729 catalog_name, self.get_catalogid(catalog_md5_name, catalogs)
1730 )
1731 )
1732
1733 return self.get_catalogid(catalog_md5_name, catalogs)
1734
1735 # if we didn't find existing catalog we create a new one and upload image.
1736 self.logger.debug(
1737 "Creating new catalog entry {} - {}".format(catalog_name, catalog_md5_name)
1738 )
1739 if self.create_vimcatalog(org, catalog_md5_name) is None:
1740 raise vimconn.VimConnException(
1741 "Failed create new catalog {} ".format(catalog_md5_name)
1742 )
1743
1744 result = self.upload_vimimage(
1745 vca=org,
1746 catalog_name=catalog_md5_name,
1747 media_name=filename,
1748 medial_file_name=path,
1749 progress=progress,
1750 )
1751 if not result:
1752 raise vimconn.VimConnException(
1753 "Failed create vApp template for catalog {} ".format(catalog_md5_name)
1754 )
1755
1756 return self.get_catalogid(catalog_md5_name, org.list_catalogs())
1757
1758 def get_image_list(self, filter_dict={}):
1759 """Obtain tenant images from VIM
1760 Filter_dict can be:
1761 name: image name
1762 id: image uuid
1763 checksum: image checksum
1764 location: image path
1765 Returns the image list of dictionaries:
1766 [{<the fields at Filter_dict plus some VIM specific>}, ...]
1767 List can be empty
1768 """
1769 try:
1770 org, _ = self.get_vdc_details()
1771 image_list = []
1772 catalogs = org.list_catalogs()
1773
1774 if len(catalogs) == 0:
1775 return image_list
1776 else:
1777 for catalog in catalogs:
1778 catalog_uuid = catalog.get("id")
1779 name = catalog.get("name")
1780 filtered_dict = {}
1781
1782 if filter_dict.get("name") and filter_dict["name"] != name:
1783 continue
1784
1785 if filter_dict.get("id") and filter_dict["id"] != catalog_uuid:
1786 continue
1787
1788 filtered_dict["name"] = name
1789 filtered_dict["id"] = catalog_uuid
1790 image_list.append(filtered_dict)
1791
1792 self.logger.debug(
1793 "List of already created catalog items: {}".format(image_list)
1794 )
1795
1796 return image_list
1797 except Exception as exp:
1798 raise vimconn.VimConnException(
1799 "Exception occured while retriving catalog items {}".format(exp)
1800 )
1801
1802 def get_vappid(self, vdc=None, vapp_name=None):
1803 """Method takes vdc object and vApp name and returns vapp uuid or None
1804
1805 Args:
1806 vdc: The VDC object.
1807 vapp_name: is application vappp name identifier
1808
1809 Returns:
1810 The return vApp name otherwise None
1811 """
1812 if vdc is None or vapp_name is None:
1813 return None
1814
1815 # UUID has following format https://host/api/vApp/vapp-30da58a3-e7c7-4d09-8f68-d4c8201169cf
1816 try:
1817 refs = [
1818 ref
1819 for ref in vdc.ResourceEntities.ResourceEntity
1820 if ref.name == vapp_name
1821 and ref.type_ == "application/vnd.vmware.vcloud.vApp+xml"
1822 ]
1823
1824 if len(refs) == 1:
1825 return refs[0].href.split("vapp")[1][1:]
1826 except Exception as e:
1827 self.logger.exception(e)
1828 return False
1829
1830 return None
1831
1832 def check_vapp(self, vdc=None, vapp_uuid=None):
1833 """Method Method returns True or False if vapp deployed in vCloud director
1834
1835 Args:
1836 vca: Connector to VCA
1837 vdc: The VDC object.
1838 vappid: vappid is application identifier
1839
1840 Returns:
1841 The return True if vApp deployed
1842 :param vdc:
1843 :param vapp_uuid:
1844 """
1845 try:
1846 refs = [
1847 ref
1848 for ref in vdc.ResourceEntities.ResourceEntity
1849 if ref.type_ == "application/vnd.vmware.vcloud.vApp+xml"
1850 ]
1851
1852 for ref in refs:
1853 vappid = ref.href.split("vapp")[1][1:]
1854 # find vapp with respected vapp uuid
1855
1856 if vappid == vapp_uuid:
1857 return True
1858 except Exception as e:
1859 self.logger.exception(e)
1860
1861 return False
1862
1863 return False
1864
1865 def get_namebyvappid(self, vapp_uuid=None):
1866 """Method returns vApp name from vCD and lookup done by vapp_id.
1867
1868 Args:
1869 vapp_uuid: vappid is application identifier
1870
1871 Returns:
1872 The return vApp name otherwise None
1873 """
1874 try:
1875 if self.client and vapp_uuid:
1876 vapp_call = "{}/api/vApp/vapp-{}".format(self.url, vapp_uuid)
1877 headers = {
1878 "Accept": "application/*+xml;version=" + API_VERSION,
1879 "x-vcloud-authorization": self.client._session.headers[
1880 "x-vcloud-authorization"
1881 ],
1882 }
1883
1884 response = self.perform_request(
1885 req_type="GET", url=vapp_call, headers=headers
1886 )
1887
1888 # Retry login if session expired & retry sending request
1889 if response.status_code == 403:
1890 response = self.retry_rest("GET", vapp_call)
1891
1892 tree = XmlElementTree.fromstring(response.text)
1893
1894 return tree.attrib["name"] if "name" in tree.attrib else None
1895 except Exception as e:
1896 self.logger.exception(e)
1897
1898 return None
1899
1900 return None
1901
1902 def new_vminstance(
1903 self,
1904 name=None,
1905 description="",
1906 start=False,
1907 image_id=None,
1908 flavor_id=None,
1909 net_list=[],
1910 cloud_config=None,
1911 disk_list=None,
1912 availability_zone_index=None,
1913 availability_zone_list=None,
1914 ):
1915 """Adds a VM instance to VIM
1916 Params:
1917 'start': (boolean) indicates if VM must start or created in pause mode.
1918 'image_id','flavor_id': image and flavor VIM id to use for the VM
1919 'net_list': list of interfaces, each one is a dictionary with:
1920 'name': (optional) name for the interface.
1921 'net_id': VIM network id where this interface must be connect to. Mandatory for type==virtual
1922 'vpci': (optional) virtual vPCI address to assign at the VM. Can be ignored depending on VIM
1923 capabilities
1924 'model': (optional and only have sense for type==virtual) interface model: virtio, e1000, ...
1925 'mac_address': (optional) mac address to assign to this interface
1926 #TODO: CHECK if an optional 'vlan' parameter is needed for VIMs when type if VF and net_id is not
1927 provided, the VLAN tag to be used. In case net_id is provided, the internal network vlan is used
1928 for tagging VF
1929 'type': (mandatory) can be one of:
1930 'virtual', in this case always connected to a network of type 'net_type=bridge'
1931 'PCI-PASSTHROUGH' or 'PF' (passthrough): depending on VIM capabilities it can be connected to a
1932 data/ptp network or it can created unconnected
1933 'SR-IOV' or 'VF' (SRIOV with VLAN tag): same as PF for network connectivity.
1934 'VFnotShared'(SRIOV without VLAN tag) same as PF for network connectivity. VF where no other VFs
1935 are allocated on the same physical NIC
1936 'bw': (optional) only for PF/VF/VFnotShared. Minimal Bandwidth required for the interface in GBPS
1937 'port_security': (optional) If False it must avoid any traffic filtering at this interface. If missing
1938 or True, it must apply the default VIM behaviour
1939 After execution the method will add the key:
1940 'vim_id': must be filled/added by this method with the VIM identifier generated by the VIM for this
1941 interface. 'net_list' is modified
1942 'cloud_config': (optional) dictionary with:
1943 'key-pairs': (optional) list of strings with the public key to be inserted to the default user
1944 'users': (optional) list of users to be inserted, each item is a dict with:
1945 'name': (mandatory) user name,
1946 'key-pairs': (optional) list of strings with the public key to be inserted to the user
1947 'user-data': (optional) can be a string with the text script to be passed directly to cloud-init,
1948 or a list of strings, each one contains a script to be passed, usually with a MIMEmultipart file
1949 'config-files': (optional). List of files to be transferred. Each item is a dict with:
1950 'dest': (mandatory) string with the destination absolute path
1951 'encoding': (optional, by default text). Can be one of:
1952 'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
1953 'content' (mandatory): string with the content of the file
1954 'permissions': (optional) string with file permissions, typically octal notation '0644'
1955 'owner': (optional) file owner, string with the format 'owner:group'
1956 'boot-data-drive': boolean to indicate if user-data must be passed using a boot drive (hard disk)
1957 'disk_list': (optional) list with additional disks to the VM. Each item is a dict with:
1958 'image_id': (optional). VIM id of an existing image. If not provided an empty disk must be mounted
1959 'size': (mandatory) string with the size of the disk in GB
1960 availability_zone_index: Index of availability_zone_list to use for this this VM. None if not AV required
1961 availability_zone_list: list of availability zones given by user in the VNFD descriptor. Ignore if
1962 availability_zone_index is None
1963 Returns a tuple with the instance identifier and created_items or raises an exception on error
1964 created_items can be None or a dictionary where this method can include key-values that will be passed to
1965 the method delete_vminstance and action_vminstance. Can be used to store created ports, volumes, etc.
1966 Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
1967 as not present.
1968 """
1969 self.logger.info("Creating new instance for entry {}".format(name))
1970 self.logger.debug(
1971 "desc {} boot {} image_id: {} flavor_id: {} net_list: {} cloud_config {} disk_list {} "
1972 "availability_zone_index {} availability_zone_list {}".format(
1973 description,
1974 start,
1975 image_id,
1976 flavor_id,
1977 net_list,
1978 cloud_config,
1979 disk_list,
1980 availability_zone_index,
1981 availability_zone_list,
1982 )
1983 )
1984
1985 # new vm name = vmname + tenant_id + uuid
1986 new_vm_name = [name, "-", str(uuid.uuid4())]
1987 vmname_andid = "".join(new_vm_name)
1988
1989 for net in net_list:
1990 if net["type"] == "PCI-PASSTHROUGH":
1991 raise vimconn.VimConnNotSupportedException(
1992 "Current vCD version does not support type : {}".format(net["type"])
1993 )
1994
1995 if len(net_list) > 10:
1996 raise vimconn.VimConnNotSupportedException(
1997 "The VM hardware versions 7 and above support upto 10 NICs only"
1998 )
1999
2000 # if vm already deployed we return existing uuid
2001 # we check for presence of VDC, Catalog entry and Flavor.
2002 org, vdc = self.get_vdc_details()
2003 if vdc is None:
2004 raise vimconn.VimConnNotFoundException(
2005 "new_vminstance(): Failed create vApp {}: (Failed retrieve VDC information)".format(
2006 name
2007 )
2008 )
2009
2010 catalogs = org.list_catalogs()
2011 if catalogs is None:
2012 # Retry once, if failed by refreshing token
2013 self.get_token()
2014 org = Org(self.client, resource=self.client.get_org())
2015 catalogs = org.list_catalogs()
2016
2017 if catalogs is None:
2018 raise vimconn.VimConnNotFoundException(
2019 "new_vminstance(): Failed create vApp {}: (Failed retrieve catalogs list)".format(
2020 name
2021 )
2022 )
2023
2024 catalog_hash_name = self.get_catalogbyid(
2025 catalog_uuid=image_id, catalogs=catalogs
2026 )
2027 if catalog_hash_name:
2028 self.logger.info(
2029 "Found catalog entry {} for image id {}".format(
2030 catalog_hash_name, image_id
2031 )
2032 )
2033 else:
2034 raise vimconn.VimConnNotFoundException(
2035 "new_vminstance(): Failed create vApp {}: "
2036 "(Failed retrieve catalog information {})".format(name, image_id)
2037 )
2038
2039 # Set vCPU and Memory based on flavor.
2040 vm_cpus = None
2041 vm_memory = None
2042 vm_disk = None
2043 numas = None
2044
2045 if flavor_id is not None:
2046 if flavor_id not in vimconnector.flavorlist:
2047 raise vimconn.VimConnNotFoundException(
2048 "new_vminstance(): Failed create vApp {}: "
2049 "Failed retrieve flavor information "
2050 "flavor id {}".format(name, flavor_id)
2051 )
2052 else:
2053 try:
2054 flavor = vimconnector.flavorlist[flavor_id]
2055 vm_cpus = flavor[FLAVOR_VCPUS_KEY]
2056 vm_memory = flavor[FLAVOR_RAM_KEY]
2057 vm_disk = flavor[FLAVOR_DISK_KEY]
2058 extended = flavor.get("extended", None)
2059
2060 if extended:
2061 numas = extended.get("numas", None)
2062 except Exception as exp:
2063 raise vimconn.VimConnException(
2064 "Corrupted flavor. {}.Exception: {}".format(flavor_id, exp)
2065 )
2066
2067 # image upload creates template name as catalog name space Template.
2068 templateName = self.get_catalogbyid(catalog_uuid=image_id, catalogs=catalogs)
2069 # power_on = 'false'
2070 # if start:
2071 # power_on = 'true'
2072
2073 # client must provide at least one entry in net_list if not we report error
2074 # If net type is mgmt, then configure it as primary net & use its NIC index as primary NIC
2075 # If no mgmt, then the 1st NN in netlist is considered as primary net.
2076 primary_net = None
2077 primary_netname = None
2078 primary_net_href = None
2079 # network_mode = 'bridged'
2080 if net_list is not None and len(net_list) > 0:
2081 for net in net_list:
2082 if "use" in net and net["use"] == "mgmt" and not primary_net:
2083 primary_net = net
2084
2085 if primary_net is None:
2086 primary_net = net_list[0]
2087
2088 try:
2089 primary_net_id = primary_net["net_id"]
2090 url_list = [self.url, "/api/network/", primary_net_id]
2091 primary_net_href = "".join(url_list)
2092 network_dict = self.get_vcd_network(network_uuid=primary_net_id)
2093
2094 if "name" in network_dict:
2095 primary_netname = network_dict["name"]
2096 except KeyError:
2097 raise vimconn.VimConnException(
2098 "Corrupted flavor. {}".format(primary_net)
2099 )
2100 else:
2101 raise vimconn.VimConnUnexpectedResponse(
2102 "new_vminstance(): Failed network list is empty."
2103 )
2104
2105 # use: 'data', 'bridge', 'mgmt'
2106 # create vApp. Set vcpu and ram based on flavor id.
2107 try:
2108 vdc_obj = VDC(self.client, resource=org.get_vdc(self.tenant_name))
2109 if not vdc_obj:
2110 raise vimconn.VimConnNotFoundException(
2111 "new_vminstance(): Failed to get VDC object"
2112 )
2113
2114 for retry in (1, 2):
2115 items = org.get_catalog_item(catalog_hash_name, catalog_hash_name)
2116 catalog_items = [items.attrib]
2117
2118 if len(catalog_items) == 1:
2119 if self.client:
2120 headers = {
2121 "Accept": "application/*+xml;version=" + API_VERSION,
2122 "x-vcloud-authorization": self.client._session.headers[
2123 "x-vcloud-authorization"
2124 ],
2125 }
2126
2127 response = self.perform_request(
2128 req_type="GET",
2129 url=catalog_items[0].get("href"),
2130 headers=headers,
2131 )
2132 catalogItem = XmlElementTree.fromstring(response.text)
2133 entity = [
2134 child
2135 for child in catalogItem
2136 if child.get("type")
2137 == "application/vnd.vmware.vcloud.vAppTemplate+xml"
2138 ][0]
2139 vapp_tempalte_href = entity.get("href")
2140
2141 response = self.perform_request(
2142 req_type="GET", url=vapp_tempalte_href, headers=headers
2143 )
2144
2145 if response.status_code != requests.codes.ok:
2146 self.logger.debug(
2147 "REST API call {} failed. Return status code {}".format(
2148 vapp_tempalte_href, response.status_code
2149 )
2150 )
2151 else:
2152 result = (response.text).replace("\n", " ")
2153
2154 vapp_template_tree = XmlElementTree.fromstring(response.text)
2155 children_element = [
2156 child for child in vapp_template_tree if "Children" in child.tag
2157 ][0]
2158 vm_element = [child for child in children_element if "Vm" in child.tag][
2159 0
2160 ]
2161 vm_name = vm_element.get("name")
2162 vm_id = vm_element.get("id")
2163 vm_href = vm_element.get("href")
2164
2165 # cpus = re.search('<rasd:Description>Number of Virtual CPUs</.*?>(\d+)</rasd:VirtualQuantity>',
2166 # result).group(1)
2167 memory_mb = re.search(
2168 "<rasd:Description>Memory Size</.*?>(\d+)</rasd:VirtualQuantity>",
2169 result,
2170 ).group(1)
2171 # cores = re.search('<vmw:CoresPerSocket ovf:required.*?>(\d+)</vmw:CoresPerSocket>', result).group(1)
2172
2173 headers[
2174 "Content-Type"
2175 ] = "application/vnd.vmware.vcloud.instantiateVAppTemplateParams+xml"
2176 vdc_id = vdc.get("id").split(":")[-1]
2177 instantiate_vapp_href = (
2178 "{}/api/vdc/{}/action/instantiateVAppTemplate".format(
2179 self.url, vdc_id
2180 )
2181 )
2182
2183 with open(
2184 os.path.join(
2185 os.path.dirname(__file__), "InstantiateVAppTemplateParams.xml"
2186 ),
2187 "r",
2188 ) as f:
2189 template = f.read()
2190
2191 data = template.format(
2192 vmname_andid,
2193 primary_netname,
2194 primary_net_href,
2195 vapp_tempalte_href,
2196 vm_href,
2197 vm_id,
2198 vm_name,
2199 primary_netname,
2200 cpu=vm_cpus,
2201 core=1,
2202 memory=vm_memory,
2203 )
2204
2205 response = self.perform_request(
2206 req_type="POST",
2207 url=instantiate_vapp_href,
2208 headers=headers,
2209 data=data,
2210 )
2211
2212 if response.status_code != 201:
2213 self.logger.error(
2214 "REST call {} failed reason : {}"
2215 "status code : {}".format(
2216 instantiate_vapp_href, response.text, response.status_code
2217 )
2218 )
2219 raise vimconn.VimConnException(
2220 "new_vminstance(): Failed to create"
2221 "vAapp {}".format(vmname_andid)
2222 )
2223 else:
2224 vapptask = self.get_task_from_response(response.text)
2225
2226 if vapptask is None and retry == 1:
2227 self.get_token() # Retry getting token
2228 continue
2229 else:
2230 break
2231
2232 if vapptask is None or vapptask is False:
2233 raise vimconn.VimConnUnexpectedResponse(
2234 "new_vminstance(): failed to create vApp {}".format(vmname_andid)
2235 )
2236
2237 # wait for task to complete
2238 result = self.client.get_task_monitor().wait_for_success(task=vapptask)
2239
2240 if result.get("status") == "success":
2241 self.logger.debug(
2242 "new_vminstance(): Sucessfully created Vapp {}".format(vmname_andid)
2243 )
2244 else:
2245 raise vimconn.VimConnUnexpectedResponse(
2246 "new_vminstance(): failed to create vApp {}".format(vmname_andid)
2247 )
2248 except Exception as exp:
2249 raise vimconn.VimConnUnexpectedResponse(
2250 "new_vminstance(): failed to create vApp {} with Exception:{}".format(
2251 vmname_andid, exp
2252 )
2253 )
2254
2255 # we should have now vapp in undeployed state.
2256 try:
2257 vdc_obj = VDC(self.client, href=vdc.get("href"))
2258 vapp_resource = vdc_obj.get_vapp(vmname_andid)
2259 vapp_uuid = vapp_resource.get("id").split(":")[-1]
2260 vapp = VApp(self.client, resource=vapp_resource)
2261 except Exception as exp:
2262 raise vimconn.VimConnUnexpectedResponse(
2263 "new_vminstance(): Failed to retrieve vApp {} after creation: Exception:{}".format(
2264 vmname_andid, exp
2265 )
2266 )
2267
2268 if vapp_uuid is None:
2269 raise vimconn.VimConnUnexpectedResponse(
2270 "new_vminstance(): Failed to retrieve vApp {} after creation".format(
2271 vmname_andid
2272 )
2273 )
2274
2275 # Add PCI passthrough/SRIOV configrations
2276 pci_devices_info = []
2277 reserve_memory = False
2278
2279 for net in net_list:
2280 if net["type"] == "PF" or net["type"] == "PCI-PASSTHROUGH":
2281 pci_devices_info.append(net)
2282 elif (
2283 net["type"] == "VF"
2284 or net["type"] == "SR-IOV"
2285 or net["type"] == "VFnotShared"
2286 ) and "net_id" in net:
2287 reserve_memory = True
2288
2289 # Add PCI
2290 if len(pci_devices_info) > 0:
2291 self.logger.info(
2292 "Need to add PCI devices {} into VM {}".format(
2293 pci_devices_info, vmname_andid
2294 )
2295 )
2296 PCI_devices_status, _, _ = self.add_pci_devices(
2297 vapp_uuid, pci_devices_info, vmname_andid
2298 )
2299
2300 if PCI_devices_status:
2301 self.logger.info(
2302 "Added PCI devives {} to VM {}".format(
2303 pci_devices_info, vmname_andid
2304 )
2305 )
2306 reserve_memory = True
2307 else:
2308 self.logger.info(
2309 "Fail to add PCI devives {} to VM {}".format(
2310 pci_devices_info, vmname_andid
2311 )
2312 )
2313
2314 # Add serial console - this allows cloud images to boot as if we are running under OpenStack
2315 self.add_serial_device(vapp_uuid)
2316
2317 if vm_disk:
2318 # Assuming there is only one disk in ovf and fast provisioning in organization vDC is disabled
2319 result = self.modify_vm_disk(vapp_uuid, vm_disk)
2320 if result:
2321 self.logger.debug("Modified Disk size of VM {} ".format(vmname_andid))
2322
2323 # Add new or existing disks to vApp
2324 if disk_list:
2325 added_existing_disk = False
2326 for disk in disk_list:
2327 if "device_type" in disk and disk["device_type"] == "cdrom":
2328 image_id = disk["image_id"]
2329 # Adding CD-ROM to VM
2330 # will revisit code once specification ready to support this feature
2331 self.insert_media_to_vm(vapp, image_id)
2332 elif "image_id" in disk and disk["image_id"] is not None:
2333 self.logger.debug(
2334 "Adding existing disk from image {} to vm {} ".format(
2335 disk["image_id"], vapp_uuid
2336 )
2337 )
2338 self.add_existing_disk(
2339 catalogs=catalogs,
2340 image_id=disk["image_id"],
2341 size=disk["size"],
2342 template_name=templateName,
2343 vapp_uuid=vapp_uuid,
2344 )
2345 added_existing_disk = True
2346 else:
2347 # Wait till added existing disk gets reflected into vCD database/API
2348 if added_existing_disk:
2349 time.sleep(5)
2350 added_existing_disk = False
2351 self.add_new_disk(vapp_uuid, disk["size"])
2352
2353 if numas:
2354 # Assigning numa affinity setting
2355 for numa in numas:
2356 if "paired-threads-id" in numa:
2357 paired_threads_id = numa["paired-threads-id"]
2358 self.set_numa_affinity(vapp_uuid, paired_threads_id)
2359
2360 # add NICs & connect to networks in netlist
2361 try:
2362 vdc_obj = VDC(self.client, href=vdc.get("href"))
2363 vapp_resource = vdc_obj.get_vapp(vmname_andid)
2364 vapp = VApp(self.client, resource=vapp_resource)
2365 vapp_id = vapp_resource.get("id").split(":")[-1]
2366
2367 self.logger.info("Removing primary NIC: ")
2368 # First remove all NICs so that NIC properties can be adjusted as needed
2369 self.remove_primary_network_adapter_from_all_vms(vapp)
2370
2371 self.logger.info("Request to connect VM to a network: {}".format(net_list))
2372 primary_nic_index = 0
2373 nicIndex = 0
2374 for net in net_list:
2375 # openmano uses network id in UUID format.
2376 # vCloud Director need a name so we do reverse operation from provided UUID we lookup a name
2377 # [{'use': 'bridge', 'net_id': '527d4bf7-566a-41e7-a9e7-ca3cdd9cef4f', 'type': 'virtual',
2378 # 'vpci': '0000:00:11.0', 'name': 'eth0'}]
2379
2380 if "net_id" not in net:
2381 continue
2382
2383 # Using net_id as a vim_id i.e. vim interface id, as do not have saperate vim interface id
2384 # Same will be returned in refresh_vms_status() as vim_interface_id
2385 net["vim_id"] = net[
2386 "net_id"
2387 ] # Provide the same VIM identifier as the VIM network
2388
2389 interface_net_id = net["net_id"]
2390 interface_net_name = self.get_network_name_by_id(
2391 network_uuid=interface_net_id
2392 )
2393 interface_network_mode = net["use"]
2394
2395 if interface_network_mode == "mgmt":
2396 primary_nic_index = nicIndex
2397
2398 """- POOL (A static IP address is allocated automatically from a pool of addresses.)
2399 - DHCP (The IP address is obtained from a DHCP service.)
2400 - MANUAL (The IP address is assigned manually in the IpAddress element.)
2401 - NONE (No IP addressing mode specified.)"""
2402
2403 if primary_netname is not None:
2404 self.logger.debug(
2405 "new_vminstance(): Filtering by net name {}".format(
2406 interface_net_name
2407 )
2408 )
2409 nets = [
2410 n
2411 for n in self.get_network_list()
2412 if n.get("name") == interface_net_name
2413 ]
2414
2415 if len(nets) == 1:
2416 self.logger.info(
2417 "new_vminstance(): Found requested network: {}".format(
2418 nets[0].get("name")
2419 )
2420 )
2421
2422 if interface_net_name != primary_netname:
2423 # connect network to VM - with all DHCP by default
2424 self.logger.info(
2425 "new_vminstance(): Attaching net {} to vapp".format(
2426 interface_net_name
2427 )
2428 )
2429 self.connect_vapp_to_org_vdc_network(
2430 vapp_id, nets[0].get("name")
2431 )
2432
2433 type_list = ("PF", "PCI-PASSTHROUGH", "VFnotShared")
2434 nic_type = "VMXNET3"
2435 if "type" in net and net["type"] not in type_list:
2436 # fetching nic type from vnf
2437 if "model" in net:
2438 if net["model"] is not None:
2439 if (
2440 net["model"].lower() == "paravirt"
2441 or net["model"].lower() == "virtio"
2442 ):
2443 nic_type = "VMXNET3"
2444 else:
2445 nic_type = net["model"]
2446
2447 self.logger.info(
2448 "new_vminstance(): adding network adapter "
2449 "to a network {}".format(nets[0].get("name"))
2450 )
2451 self.add_network_adapter_to_vms(
2452 vapp,
2453 nets[0].get("name"),
2454 primary_nic_index,
2455 nicIndex,
2456 net,
2457 nic_type=nic_type,
2458 )
2459 else:
2460 self.logger.info(
2461 "new_vminstance(): adding network adapter "
2462 "to a network {}".format(nets[0].get("name"))
2463 )
2464
2465 if net["type"] in ["SR-IOV", "VF"]:
2466 nic_type = net["type"]
2467 self.add_network_adapter_to_vms(
2468 vapp,
2469 nets[0].get("name"),
2470 primary_nic_index,
2471 nicIndex,
2472 net,
2473 nic_type=nic_type,
2474 )
2475 nicIndex += 1
2476
2477 # cloud-init for ssh-key injection
2478 if cloud_config:
2479 # Create a catalog which will be carrying the config drive ISO
2480 # This catalog is deleted during vApp deletion. The catalog name carries
2481 # vApp UUID and thats how it gets identified during its deletion.
2482 config_drive_catalog_name = "cfg_drv-" + vapp_uuid
2483 self.logger.info(
2484 'new_vminstance(): Creating catalog "{}" to carry config drive ISO'.format(
2485 config_drive_catalog_name
2486 )
2487 )
2488 config_drive_catalog_id = self.create_vimcatalog(
2489 org, config_drive_catalog_name
2490 )
2491
2492 if config_drive_catalog_id is None:
2493 error_msg = (
2494 "new_vminstance(): Failed to create new catalog '{}' to carry the config drive "
2495 "ISO".format(config_drive_catalog_name)
2496 )
2497 raise Exception(error_msg)
2498
2499 # Create config-drive ISO
2500 _, userdata = self._create_user_data(cloud_config)
2501 # self.logger.debug('new_vminstance(): The userdata for cloud-init: {}'.format(userdata))
2502 iso_path = self.create_config_drive_iso(userdata)
2503 self.logger.debug(
2504 "new_vminstance(): The ISO is successfully created. Path: {}".format(
2505 iso_path
2506 )
2507 )
2508
2509 self.logger.info(
2510 "new_vminstance(): uploading iso to catalog {}".format(
2511 config_drive_catalog_name
2512 )
2513 )
2514 self.upload_iso_to_catalog(config_drive_catalog_id, iso_path)
2515 # Attach the config-drive ISO to the VM
2516 self.logger.info(
2517 "new_vminstance(): Attaching the config-drive ISO to the VM"
2518 )
2519 self.insert_media_to_vm(vapp, config_drive_catalog_id)
2520 shutil.rmtree(os.path.dirname(iso_path), ignore_errors=True)
2521
2522 # If VM has PCI devices or SRIOV reserve memory for VM
2523 if reserve_memory:
2524 self.reserve_memory_for_all_vms(vapp, memory_mb)
2525
2526 self.logger.debug(
2527 "new_vminstance(): starting power on vApp {} ".format(vmname_andid)
2528 )
2529
2530 poweron_task = self.power_on_vapp(vapp_id, vmname_andid)
2531 result = self.client.get_task_monitor().wait_for_success(task=poweron_task)
2532 if result.get("status") == "success":
2533 self.logger.info(
2534 "new_vminstance(): Successfully power on "
2535 "vApp {}".format(vmname_andid)
2536 )
2537 else:
2538 self.logger.error(
2539 "new_vminstance(): failed to power on vApp "
2540 "{}".format(vmname_andid)
2541 )
2542
2543 except Exception as exp:
2544 try:
2545 self.delete_vminstance(vapp_uuid)
2546 except Exception as exp2:
2547 self.logger.error("new_vminstance rollback fail {}".format(exp2))
2548 # it might be a case if specific mandatory entry in dict is empty or some other pyVcloud exception
2549 self.logger.error(
2550 "new_vminstance(): Failed create new vm instance {} with exception {}".format(
2551 name, exp
2552 )
2553 )
2554 raise vimconn.VimConnException(
2555 "new_vminstance(): Failed create new vm instance {} with exception {}".format(
2556 name, exp
2557 )
2558 )
2559 # check if vApp deployed and if that the case return vApp UUID otherwise -1
2560 wait_time = 0
2561 vapp_uuid = None
2562 while wait_time <= MAX_WAIT_TIME:
2563 try:
2564 vapp_resource = vdc_obj.get_vapp(vmname_andid)
2565 vapp = VApp(self.client, resource=vapp_resource)
2566 except Exception as exp:
2567 raise vimconn.VimConnUnexpectedResponse(
2568 "new_vminstance(): Failed to retrieve vApp {} after creation: Exception:{}".format(
2569 vmname_andid, exp
2570 )
2571 )
2572
2573 # if vapp and vapp.me.deployed:
2574 if vapp and vapp_resource.get("deployed") == "true":
2575 vapp_uuid = vapp_resource.get("id").split(":")[-1]
2576 break
2577 else:
2578 self.logger.debug(
2579 "new_vminstance(): Wait for vApp {} to deploy".format(name)
2580 )
2581 time.sleep(INTERVAL_TIME)
2582
2583 wait_time += INTERVAL_TIME
2584
2585 # SET Affinity Rule for VM
2586 # Pre-requisites: User has created Hosh Groups in vCenter with respective Hosts to be used
2587 # While creating VIM account user has to pass the Host Group names in availability_zone list
2588 # "availability_zone" is a part of VIM "config" parameters
2589 # For example, in VIM config: "availability_zone":["HG_170","HG_174","HG_175"]
2590 # Host groups are referred as availability zones
2591 # With following procedure, deployed VM will be added into a VM group.
2592 # Then A VM to Host Affinity rule will be created using the VM group & Host group.
2593 if availability_zone_list:
2594 self.logger.debug(
2595 "Existing Host Groups in VIM {}".format(
2596 self.config.get("availability_zone")
2597 )
2598 )
2599 # Admin access required for creating Affinity rules
2600 client = self.connect_as_admin()
2601
2602 if not client:
2603 raise vimconn.VimConnConnectionException(
2604 "Failed to connect vCD as admin"
2605 )
2606 else:
2607 self.client = client
2608
2609 if self.client:
2610 headers = {
2611 "Accept": "application/*+xml;version=27.0",
2612 "x-vcloud-authorization": self.client._session.headers[
2613 "x-vcloud-authorization"
2614 ],
2615 }
2616
2617 # Step1: Get provider vdc details from organization
2618 pvdc_href = self.get_pvdc_for_org(self.tenant_name, headers)
2619 if pvdc_href is not None:
2620 # Step2: Found required pvdc, now get resource pool information
2621 respool_href = self.get_resource_pool_details(pvdc_href, headers)
2622 if respool_href is None:
2623 # Raise error if respool_href not found
2624 msg = "new_vminstance():Error in finding resource pool details in pvdc {}".format(
2625 pvdc_href
2626 )
2627 self.log_message(msg)
2628
2629 # Step3: Verify requested availability zone(hostGroup) is present in vCD
2630 # get availability Zone
2631 vm_az = self.get_vm_availability_zone(
2632 availability_zone_index, availability_zone_list
2633 )
2634
2635 # check if provided av zone(hostGroup) is present in vCD VIM
2636 status = self.check_availibility_zone(vm_az, respool_href, headers)
2637 if status is False:
2638 msg = (
2639 "new_vminstance(): Error in finding availability zone(Host Group): {} in "
2640 "resource pool {} status: {}"
2641 ).format(vm_az, respool_href, status)
2642 self.log_message(msg)
2643 else:
2644 self.logger.debug(
2645 "new_vminstance(): Availability zone {} found in VIM".format(vm_az)
2646 )
2647
2648 # Step4: Find VM group references to create vm group
2649 vmgrp_href = self.find_vmgroup_reference(respool_href, headers)
2650 if vmgrp_href is None:
2651 msg = "new_vminstance(): No reference to VmGroup found in resource pool"
2652 self.log_message(msg)
2653
2654 # Step5: Create a VmGroup with name az_VmGroup
2655 vmgrp_name = (
2656 vm_az + "_" + name
2657 ) # Formed VM Group name = Host Group name + VM name
2658 status = self.create_vmgroup(vmgrp_name, vmgrp_href, headers)
2659 if status is not True:
2660 msg = "new_vminstance(): Error in creating VM group {}".format(
2661 vmgrp_name
2662 )
2663 self.log_message(msg)
2664
2665 # VM Group url to add vms to vm group
2666 vmgrpname_url = self.url + "/api/admin/extension/vmGroup/name/" + vmgrp_name
2667
2668 # Step6: Add VM to VM Group
2669 # Find VM uuid from vapp_uuid
2670 vm_details = self.get_vapp_details_rest(vapp_uuid)
2671 vm_uuid = vm_details["vmuuid"]
2672
2673 status = self.add_vm_to_vmgroup(vm_uuid, vmgrpname_url, vmgrp_name, headers)
2674 if status is not True:
2675 msg = "new_vminstance(): Error in adding VM to VM group {}".format(
2676 vmgrp_name
2677 )
2678 self.log_message(msg)
2679
2680 # Step7: Create VM to Host affinity rule
2681 addrule_href = self.get_add_rule_reference(respool_href, headers)
2682 if addrule_href is None:
2683 msg = "new_vminstance(): Error in finding href to add rule in resource pool: {}".format(
2684 respool_href
2685 )
2686 self.log_message(msg)
2687
2688 status = self.create_vm_to_host_affinity_rule(
2689 addrule_href, vmgrp_name, vm_az, "Affinity", headers
2690 )
2691 if status is False:
2692 msg = "new_vminstance(): Error in creating affinity rule for VM {} in Host group {}".format(
2693 name, vm_az
2694 )
2695 self.log_message(msg)
2696 else:
2697 self.logger.debug(
2698 "new_vminstance(): Affinity rule created successfully. Added {} in Host group {}".format(
2699 name, vm_az
2700 )
2701 )
2702 # Reset token to a normal user to perform other operations
2703 self.get_token()
2704
2705 if vapp_uuid is not None:
2706 return vapp_uuid, None
2707 else:
2708 raise vimconn.VimConnUnexpectedResponse(
2709 "new_vminstance(): Failed create new vm instance {}".format(name)
2710 )
2711
2712 def create_config_drive_iso(self, user_data):
2713 tmpdir = tempfile.mkdtemp()
2714 iso_path = os.path.join(tmpdir, "ConfigDrive.iso")
2715 latest_dir = os.path.join(tmpdir, "openstack", "latest")
2716 os.makedirs(latest_dir)
2717 with open(
2718 os.path.join(latest_dir, "meta_data.json"), "w"
2719 ) as meta_file_obj, open(
2720 os.path.join(latest_dir, "user_data"), "w"
2721 ) as userdata_file_obj:
2722 userdata_file_obj.write(user_data)
2723 meta_file_obj.write(
2724 json.dumps(
2725 {
2726 "availability_zone": "nova",
2727 "launch_index": 0,
2728 "name": "ConfigDrive",
2729 "uuid": str(uuid.uuid4()),
2730 }
2731 )
2732 )
2733 genisoimage_cmd = (
2734 "genisoimage -J -r -V config-2 -o {iso_path} {source_dir_path}".format(
2735 iso_path=iso_path, source_dir_path=tmpdir
2736 )
2737 )
2738 self.logger.info(
2739 'create_config_drive_iso(): Creating ISO by running command "{}"'.format(
2740 genisoimage_cmd
2741 )
2742 )
2743
2744 try:
2745 FNULL = open(os.devnull, "w")
2746 subprocess.check_call(genisoimage_cmd, shell=True, stdout=FNULL)
2747 except subprocess.CalledProcessError as e:
2748 shutil.rmtree(tmpdir, ignore_errors=True)
2749 error_msg = "create_config_drive_iso(): Exception while running genisoimage command: {}".format(
2750 e
2751 )
2752 self.logger.error(error_msg)
2753 raise Exception(error_msg)
2754
2755 return iso_path
2756
2757 def upload_iso_to_catalog(self, catalog_id, iso_file_path):
2758 if not os.path.isfile(iso_file_path):
2759 error_msg = "upload_iso_to_catalog(): Given iso file is not present. Given path: {}".format(
2760 iso_file_path
2761 )
2762 self.logger.error(error_msg)
2763 raise Exception(error_msg)
2764
2765 iso_file_stat = os.stat(iso_file_path)
2766 xml_media_elem = """<?xml version="1.0" encoding="UTF-8"?>
2767 <Media
2768 xmlns="http://www.vmware.com/vcloud/v1.5"
2769 name="{iso_name}"
2770 size="{iso_size}"
2771 imageType="iso">
2772 <Description>ISO image for config-drive</Description>
2773 </Media>""".format(
2774 iso_name=os.path.basename(iso_file_path), iso_size=iso_file_stat.st_size
2775 )
2776 headers = {
2777 "Accept": "application/*+xml;version=" + API_VERSION,
2778 "x-vcloud-authorization": self.client._session.headers[
2779 "x-vcloud-authorization"
2780 ],
2781 }
2782 headers["Content-Type"] = "application/vnd.vmware.vcloud.media+xml"
2783 catalog_href = self.url + "/api/catalog/" + catalog_id + "/action/upload"
2784 response = self.perform_request(
2785 req_type="POST", url=catalog_href, headers=headers, data=xml_media_elem
2786 )
2787
2788 if response.status_code != 201:
2789 error_msg = "upload_iso_to_catalog(): Failed to POST an action/upload request to {}".format(
2790 catalog_href
2791 )
2792 self.logger.error(error_msg)
2793 raise Exception(error_msg)
2794
2795 catalogItem = XmlElementTree.fromstring(response.text)
2796 entity = [
2797 child
2798 for child in catalogItem
2799 if child.get("type") == "application/vnd.vmware.vcloud.media+xml"
2800 ][0]
2801 entity_href = entity.get("href")
2802
2803 response = self.perform_request(
2804 req_type="GET", url=entity_href, headers=headers
2805 )
2806 if response.status_code != 200:
2807 raise Exception(
2808 "upload_iso_to_catalog(): Failed to GET entity href {}".format(
2809 entity_href
2810 )
2811 )
2812
2813 match = re.search(
2814 r'<Files>\s+?<File.+?href="(.+?)"/>\s+?</File>\s+?</Files>',
2815 response.text,
2816 re.DOTALL,
2817 )
2818 if match:
2819 media_upload_href = match.group(1)
2820 else:
2821 raise Exception(
2822 "Could not parse the upload URL for the media file from the last response"
2823 )
2824 upload_iso_task = self.get_task_from_response(response.text)
2825 headers["Content-Type"] = "application/octet-stream"
2826 response = self.perform_request(
2827 req_type="PUT",
2828 url=media_upload_href,
2829 headers=headers,
2830 data=open(iso_file_path, "rb"),
2831 )
2832
2833 if response.status_code != 200:
2834 raise Exception('PUT request to "{}" failed'.format(media_upload_href))
2835
2836 result = self.client.get_task_monitor().wait_for_success(task=upload_iso_task)
2837 if result.get("status") != "success":
2838 raise Exception(
2839 "The upload iso task failed with status {}".format(result.get("status"))
2840 )
2841
2842 def get_vcd_availibility_zones(self, respool_href, headers):
2843 """Method to find presence of av zone is VIM resource pool
2844
2845 Args:
2846 respool_href - resource pool href
2847 headers - header information
2848
2849 Returns:
2850 vcd_az - list of azone present in vCD
2851 """
2852 vcd_az = []
2853 url = respool_href
2854 resp = self.perform_request(req_type="GET", url=respool_href, headers=headers)
2855
2856 if resp.status_code != requests.codes.ok:
2857 self.logger.debug(
2858 "REST API call {} failed. Return status code {}".format(
2859 url, resp.status_code
2860 )
2861 )
2862 else:
2863 # Get the href to hostGroups and find provided hostGroup is present in it
2864 resp_xml = XmlElementTree.fromstring(resp.content)
2865 for child in resp_xml:
2866 if "VMWProviderVdcResourcePool" in child.tag:
2867 for schild in child:
2868 if "Link" in schild.tag:
2869 if (
2870 schild.attrib.get("type")
2871 == "application/vnd.vmware.admin.vmwHostGroupsType+xml"
2872 ):
2873 hostGroup = schild.attrib.get("href")
2874 hg_resp = self.perform_request(
2875 req_type="GET", url=hostGroup, headers=headers
2876 )
2877
2878 if hg_resp.status_code != requests.codes.ok:
2879 self.logger.debug(
2880 "REST API call {} failed. Return status code {}".format(
2881 hostGroup, hg_resp.status_code
2882 )
2883 )
2884 else:
2885 hg_resp_xml = XmlElementTree.fromstring(
2886 hg_resp.content
2887 )
2888 for hostGroup in hg_resp_xml:
2889 if "HostGroup" in hostGroup.tag:
2890 # append host group name to the list
2891 vcd_az.append(hostGroup.attrib.get("name"))
2892
2893 return vcd_az
2894
2895 def set_availability_zones(self):
2896 """
2897 Set vim availability zone
2898 """
2899 vim_availability_zones = None
2900 availability_zone = None
2901
2902 if "availability_zone" in self.config:
2903 vim_availability_zones = self.config.get("availability_zone")
2904
2905 if isinstance(vim_availability_zones, str):
2906 availability_zone = [vim_availability_zones]
2907 elif isinstance(vim_availability_zones, list):
2908 availability_zone = vim_availability_zones
2909 else:
2910 return availability_zone
2911
2912 return availability_zone
2913
2914 def get_vm_availability_zone(self, availability_zone_index, availability_zone_list):
2915 """
2916 Return the availability zone to be used by the created VM.
2917 returns: The VIM availability zone to be used or None
2918 """
2919 if availability_zone_index is None:
2920 if not self.config.get("availability_zone"):
2921 return None
2922 elif isinstance(self.config.get("availability_zone"), str):
2923 return self.config["availability_zone"]
2924 else:
2925 return self.config["availability_zone"][0]
2926
2927 vim_availability_zones = self.availability_zone
2928
2929 # check if VIM offer enough availability zones describe in the VNFD
2930 if vim_availability_zones and len(availability_zone_list) <= len(
2931 vim_availability_zones
2932 ):
2933 # check if all the names of NFV AV match VIM AV names
2934 match_by_index = False
2935 for av in availability_zone_list:
2936 if av not in vim_availability_zones:
2937 match_by_index = True
2938 break
2939
2940 if match_by_index:
2941 self.logger.debug(
2942 "Required Availability zone or Host Group not found in VIM config"
2943 )
2944 self.logger.debug(
2945 "Input Availability zone list: {}".format(availability_zone_list)
2946 )
2947 self.logger.debug(
2948 "VIM configured Availability zones: {}".format(
2949 vim_availability_zones
2950 )
2951 )
2952 self.logger.debug("VIM Availability zones will be used by index")
2953 return vim_availability_zones[availability_zone_index]
2954 else:
2955 return availability_zone_list[availability_zone_index]
2956 else:
2957 raise vimconn.VimConnConflictException(
2958 "No enough availability zones at VIM for this deployment"
2959 )
2960
2961 def create_vm_to_host_affinity_rule(
2962 self, addrule_href, vmgrpname, hostgrpname, polarity, headers
2963 ):
2964 """Method to create VM to Host Affinity rule in vCD
2965
2966 Args:
2967 addrule_href - href to make a POST request
2968 vmgrpname - name of the VM group created
2969 hostgrpnmae - name of the host group created earlier
2970 polarity - Affinity or Anti-affinity (default: Affinity)
2971 headers - headers to make REST call
2972
2973 Returns:
2974 True- if rule is created
2975 False- Failed to create rule due to some error
2976
2977 """
2978 task_status = False
2979 rule_name = polarity + "_" + vmgrpname
2980 payload = """<?xml version="1.0" encoding="UTF-8"?>
2981 <vmext:VMWVmHostAffinityRule
2982 xmlns:vmext="http://www.vmware.com/vcloud/extension/v1.5"
2983 xmlns:vcloud="http://www.vmware.com/vcloud/v1.5"
2984 type="application/vnd.vmware.admin.vmwVmHostAffinityRule+xml">
2985 <vcloud:Name>{}</vcloud:Name>
2986 <vcloud:IsEnabled>true</vcloud:IsEnabled>
2987 <vcloud:IsMandatory>true</vcloud:IsMandatory>
2988 <vcloud:Polarity>{}</vcloud:Polarity>
2989 <vmext:HostGroupName>{}</vmext:HostGroupName>
2990 <vmext:VmGroupName>{}</vmext:VmGroupName>
2991 </vmext:VMWVmHostAffinityRule>""".format(
2992 rule_name, polarity, hostgrpname, vmgrpname
2993 )
2994
2995 resp = self.perform_request(
2996 req_type="POST", url=addrule_href, headers=headers, data=payload
2997 )
2998
2999 if resp.status_code != requests.codes.accepted:
3000 self.logger.debug(
3001 "REST API call {} failed. Return status code {}".format(
3002 addrule_href, resp.status_code
3003 )
3004 )
3005 task_status = False
3006
3007 return task_status
3008 else:
3009 affinity_task = self.get_task_from_response(resp.content)
3010 self.logger.debug("affinity_task: {}".format(affinity_task))
3011
3012 if affinity_task is None or affinity_task is False:
3013 raise vimconn.VimConnUnexpectedResponse("failed to find affinity task")
3014 # wait for task to complete
3015 result = self.client.get_task_monitor().wait_for_success(task=affinity_task)
3016
3017 if result.get("status") == "success":
3018 self.logger.debug(
3019 "Successfully created affinity rule {}".format(rule_name)
3020 )
3021 return True
3022 else:
3023 raise vimconn.VimConnUnexpectedResponse(
3024 "failed to create affinity rule {}".format(rule_name)
3025 )
3026
3027 def get_add_rule_reference(self, respool_href, headers):
3028 """This method finds href to add vm to host affinity rule to vCD
3029
3030 Args:
3031 respool_href- href to resource pool
3032 headers- header information to make REST call
3033
3034 Returns:
3035 None - if no valid href to add rule found or
3036 addrule_href - href to add vm to host affinity rule of resource pool
3037 """
3038 addrule_href = None
3039 resp = self.perform_request(req_type="GET", url=respool_href, headers=headers)
3040
3041 if resp.status_code != requests.codes.ok:
3042 self.logger.debug(
3043 "REST API call {} failed. Return status code {}".format(
3044 respool_href, resp.status_code
3045 )
3046 )
3047 else:
3048 resp_xml = XmlElementTree.fromstring(resp.content)
3049 for child in resp_xml:
3050 if "VMWProviderVdcResourcePool" in child.tag:
3051 for schild in child:
3052 if "Link" in schild.tag:
3053 if (
3054 schild.attrib.get("type")
3055 == "application/vnd.vmware.admin.vmwVmHostAffinityRule+xml"
3056 and schild.attrib.get("rel") == "add"
3057 ):
3058 addrule_href = schild.attrib.get("href")
3059 break
3060
3061 return addrule_href
3062
3063 def add_vm_to_vmgroup(self, vm_uuid, vmGroupNameURL, vmGroup_name, headers):
3064 """Method to add deployed VM to newly created VM Group.
3065 This is required to create VM to Host affinity in vCD
3066
3067 Args:
3068 vm_uuid- newly created vm uuid
3069 vmGroupNameURL- URL to VM Group name
3070 vmGroup_name- Name of VM group created
3071 headers- Headers for REST request
3072
3073 Returns:
3074 True- if VM added to VM group successfully
3075 False- if any error encounter
3076 """
3077 addvm_resp = self.perform_request(
3078 req_type="GET", url=vmGroupNameURL, headers=headers
3079 ) # , data=payload)
3080
3081 if addvm_resp.status_code != requests.codes.ok:
3082 self.logger.debug(
3083 "REST API call to get VM Group Name url {} failed. Return status code {}".format(
3084 vmGroupNameURL, addvm_resp.status_code
3085 )
3086 )
3087 return False
3088 else:
3089 resp_xml = XmlElementTree.fromstring(addvm_resp.content)
3090 for child in resp_xml:
3091 if child.tag.split("}")[1] == "Link":
3092 if child.attrib.get("rel") == "addVms":
3093 addvmtogrpURL = child.attrib.get("href")
3094
3095 # Get vm details
3096 url_list = [self.url, "/api/vApp/vm-", vm_uuid]
3097 vmdetailsURL = "".join(url_list)
3098
3099 resp = self.perform_request(req_type="GET", url=vmdetailsURL, headers=headers)
3100
3101 if resp.status_code != requests.codes.ok:
3102 self.logger.debug(
3103 "REST API call {} failed. Return status code {}".format(
3104 vmdetailsURL, resp.status_code
3105 )
3106 )
3107 return False
3108
3109 # Parse VM details
3110 resp_xml = XmlElementTree.fromstring(resp.content)
3111 if resp_xml.tag.split("}")[1] == "Vm":
3112 vm_id = resp_xml.attrib.get("id")
3113 vm_name = resp_xml.attrib.get("name")
3114 vm_href = resp_xml.attrib.get("href")
3115 # print vm_id, vm_name, vm_href
3116
3117 # Add VM into VMgroup
3118 payload = """<?xml version="1.0" encoding="UTF-8"?>\
3119 <ns2:Vms xmlns:ns2="http://www.vmware.com/vcloud/v1.5" \
3120 xmlns="http://www.vmware.com/vcloud/versions" \
3121 xmlns:ns3="http://schemas.dmtf.org/ovf/envelope/1" \
3122 xmlns:ns4="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData" \
3123 xmlns:ns5="http://schemas.dmtf.org/wbem/wscim/1/common" \
3124 xmlns:ns6="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData" \
3125 xmlns:ns7="http://www.vmware.com/schema/ovf" \
3126 xmlns:ns8="http://schemas.dmtf.org/ovf/environment/1" \
3127 xmlns:ns9="http://www.vmware.com/vcloud/extension/v1.5">\
3128 <ns2:VmReference href="{}" id="{}" name="{}" \
3129 type="application/vnd.vmware.vcloud.vm+xml" />\
3130 </ns2:Vms>""".format(
3131 vm_href, vm_id, vm_name
3132 )
3133
3134 addvmtogrp_resp = self.perform_request(
3135 req_type="POST", url=addvmtogrpURL, headers=headers, data=payload
3136 )
3137
3138 if addvmtogrp_resp.status_code != requests.codes.accepted:
3139 self.logger.debug(
3140 "REST API call {} failed. Return status code {}".format(
3141 addvmtogrpURL, addvmtogrp_resp.status_code
3142 )
3143 )
3144
3145 return False
3146 else:
3147 self.logger.debug(
3148 "Done adding VM {} to VMgroup {}".format(vm_name, vmGroup_name)
3149 )
3150
3151 return True
3152
3153 def create_vmgroup(self, vmgroup_name, vmgroup_href, headers):
3154 """Method to create a VM group in vCD
3155
3156 Args:
3157 vmgroup_name : Name of VM group to be created
3158 vmgroup_href : href for vmgroup
3159 headers- Headers for REST request
3160 """
3161 # POST to add URL with required data
3162 vmgroup_status = False
3163 payload = """<VMWVmGroup xmlns="http://www.vmware.com/vcloud/extension/v1.5" \
3164 xmlns:vcloud_v1.5="http://www.vmware.com/vcloud/v1.5" name="{}">\
3165 <vmCount>1</vmCount>\
3166 </VMWVmGroup>""".format(
3167 vmgroup_name
3168 )
3169 resp = self.perform_request(
3170 req_type="POST", url=vmgroup_href, headers=headers, data=payload
3171 )
3172
3173 if resp.status_code != requests.codes.accepted:
3174 self.logger.debug(
3175 "REST API call {} failed. Return status code {}".format(
3176 vmgroup_href, resp.status_code
3177 )
3178 )
3179
3180 return vmgroup_status
3181 else:
3182 vmgroup_task = self.get_task_from_response(resp.content)
3183 if vmgroup_task is None or vmgroup_task is False:
3184 raise vimconn.VimConnUnexpectedResponse(
3185 "create_vmgroup(): failed to create VM group {}".format(
3186 vmgroup_name
3187 )
3188 )
3189
3190 # wait for task to complete
3191 result = self.client.get_task_monitor().wait_for_success(task=vmgroup_task)
3192
3193 if result.get("status") == "success":
3194 self.logger.debug(
3195 "create_vmgroup(): Successfully created VM group {}".format(
3196 vmgroup_name
3197 )
3198 )
3199 # time.sleep(10)
3200 vmgroup_status = True
3201
3202 return vmgroup_status
3203 else:
3204 raise vimconn.VimConnUnexpectedResponse(
3205 "create_vmgroup(): failed to create VM group {}".format(
3206 vmgroup_name
3207 )
3208 )
3209
3210 def find_vmgroup_reference(self, url, headers):
3211 """Method to create a new VMGroup which is required to add created VM
3212 Args:
3213 url- resource pool href
3214 headers- header information
3215
3216 Returns:
3217 returns href to VM group to create VM group
3218 """
3219 # Perform GET on resource pool to find 'add' link to create VMGroup
3220 # https://vcd-ip/api/admin/extension/providervdc/<providervdc id>/resourcePools
3221 vmgrp_href = None
3222 resp = self.perform_request(req_type="GET", url=url, headers=headers)
3223
3224 if resp.status_code != requests.codes.ok:
3225 self.logger.debug(
3226 "REST API call {} failed. Return status code {}".format(
3227 url, resp.status_code
3228 )
3229 )
3230 else:
3231 # Get the href to add vmGroup to vCD
3232 resp_xml = XmlElementTree.fromstring(resp.content)
3233 for child in resp_xml:
3234 if "VMWProviderVdcResourcePool" in child.tag:
3235 for schild in child:
3236 if "Link" in schild.tag:
3237 # Find href with type VMGroup and rel with add
3238 if (
3239 schild.attrib.get("type")
3240 == "application/vnd.vmware.admin.vmwVmGroupType+xml"
3241 and schild.attrib.get("rel") == "add"
3242 ):
3243 vmgrp_href = schild.attrib.get("href")
3244
3245 return vmgrp_href
3246
3247 def check_availibility_zone(self, az, respool_href, headers):
3248 """Method to verify requested av zone is present or not in provided
3249 resource pool
3250
3251 Args:
3252 az - name of hostgroup (availibility_zone)
3253 respool_href - Resource Pool href
3254 headers - Headers to make REST call
3255 Returns:
3256 az_found - True if availibility_zone is found else False
3257 """
3258 az_found = False
3259 headers["Accept"] = "application/*+xml;version=27.0"
3260 resp = self.perform_request(req_type="GET", url=respool_href, headers=headers)
3261
3262 if resp.status_code != requests.codes.ok:
3263 self.logger.debug(
3264 "REST API call {} failed. Return status code {}".format(
3265 respool_href, resp.status_code
3266 )
3267 )
3268 else:
3269 # Get the href to hostGroups and find provided hostGroup is present in it
3270 resp_xml = XmlElementTree.fromstring(resp.content)
3271
3272 for child in resp_xml:
3273 if "VMWProviderVdcResourcePool" in child.tag:
3274 for schild in child:
3275 if "Link" in schild.tag:
3276 if (
3277 schild.attrib.get("type")
3278 == "application/vnd.vmware.admin.vmwHostGroupsType+xml"
3279 ):
3280 hostGroup_href = schild.attrib.get("href")
3281 hg_resp = self.perform_request(
3282 req_type="GET", url=hostGroup_href, headers=headers
3283 )
3284
3285 if hg_resp.status_code != requests.codes.ok:
3286 self.logger.debug(
3287 "REST API call {} failed. Return status code {}".format(
3288 hostGroup_href, hg_resp.status_code
3289 )
3290 )
3291 else:
3292 hg_resp_xml = XmlElementTree.fromstring(
3293 hg_resp.content
3294 )
3295 for hostGroup in hg_resp_xml:
3296 if "HostGroup" in hostGroup.tag:
3297 if hostGroup.attrib.get("name") == az:
3298 az_found = True
3299 break
3300
3301 return az_found
3302
3303 def get_pvdc_for_org(self, org_vdc, headers):
3304 """This method gets provider vdc references from organisation
3305
3306 Args:
3307 org_vdc - name of the organisation VDC to find pvdc
3308 headers - headers to make REST call
3309
3310 Returns:
3311 None - if no pvdc href found else
3312 pvdc_href - href to pvdc
3313 """
3314 # Get provider VDC references from vCD
3315 pvdc_href = None
3316 # url = '<vcd url>/api/admin/extension/providerVdcReferences'
3317 url_list = [self.url, "/api/admin/extension/providerVdcReferences"]
3318 url = "".join(url_list)
3319
3320 response = self.perform_request(req_type="GET", url=url, headers=headers)
3321 if response.status_code != requests.codes.ok:
3322 self.logger.debug(
3323 "REST API call {} failed. Return status code {}".format(
3324 url, response.status_code
3325 )
3326 )
3327 else:
3328 xmlroot_response = XmlElementTree.fromstring(response.text)
3329 for child in xmlroot_response:
3330 if "ProviderVdcReference" in child.tag:
3331 pvdc_href = child.attrib.get("href")
3332 # Get vdcReferences to find org
3333 pvdc_resp = self.perform_request(
3334 req_type="GET", url=pvdc_href, headers=headers
3335 )
3336
3337 if pvdc_resp.status_code != requests.codes.ok:
3338 raise vimconn.VimConnException(
3339 "REST API call {} failed. "
3340 "Return status code {}".format(url, pvdc_resp.status_code)
3341 )
3342
3343 pvdc_resp_xml = XmlElementTree.fromstring(pvdc_resp.content)
3344 for child in pvdc_resp_xml:
3345 if "Link" in child.tag:
3346 if (
3347 child.attrib.get("type")
3348 == "application/vnd.vmware.admin.vdcReferences+xml"
3349 ):
3350 vdc_href = child.attrib.get("href")
3351
3352 # Check if provided org is present in vdc
3353 vdc_resp = self.perform_request(
3354 req_type="GET", url=vdc_href, headers=headers
3355 )
3356
3357 if vdc_resp.status_code != requests.codes.ok:
3358 raise vimconn.VimConnException(
3359 "REST API call {} failed. "
3360 "Return status code {}".format(
3361 url, vdc_resp.status_code
3362 )
3363 )
3364 vdc_resp_xml = XmlElementTree.fromstring(
3365 vdc_resp.content
3366 )
3367
3368 for child in vdc_resp_xml:
3369 if "VdcReference" in child.tag:
3370 if child.attrib.get("name") == org_vdc:
3371 return pvdc_href
3372
3373 def get_resource_pool_details(self, pvdc_href, headers):
3374 """Method to get resource pool information.
3375 Host groups are property of resource group.
3376 To get host groups, we need to GET details of resource pool.
3377
3378 Args:
3379 pvdc_href: href to pvdc details
3380 headers: headers
3381
3382 Returns:
3383 respool_href - Returns href link reference to resource pool
3384 """
3385 respool_href = None
3386 resp = self.perform_request(req_type="GET", url=pvdc_href, headers=headers)
3387
3388 if resp.status_code != requests.codes.ok:
3389 self.logger.debug(
3390 "REST API call {} failed. Return status code {}".format(
3391 pvdc_href, resp.status_code
3392 )
3393 )
3394 else:
3395 respool_resp_xml = XmlElementTree.fromstring(resp.content)
3396 for child in respool_resp_xml:
3397 if "Link" in child.tag:
3398 if (
3399 child.attrib.get("type")
3400 == "application/vnd.vmware.admin.vmwProviderVdcResourcePoolSet+xml"
3401 ):
3402 respool_href = child.attrib.get("href")
3403 break
3404
3405 return respool_href
3406
3407 def log_message(self, msg):
3408 """
3409 Method to log error messages related to Affinity rule creation
3410 in new_vminstance & raise Exception
3411 Args :
3412 msg - Error message to be logged
3413
3414 """
3415 # get token to connect vCD as a normal user
3416 self.get_token()
3417 self.logger.debug(msg)
3418
3419 raise vimconn.VimConnException(msg)
3420
3421 # #
3422 # #
3423 # # based on current discussion
3424 # #
3425 # #
3426 # # server:
3427 # created: '2016-09-08T11:51:58'
3428 # description: simple-instance.linux1.1
3429 # flavor: ddc6776e-75a9-11e6-ad5f-0800273e724c
3430 # hostId: e836c036-74e7-11e6-b249-0800273e724c
3431 # image: dde30fe6-75a9-11e6-ad5f-0800273e724c
3432 # status: ACTIVE
3433 # error_msg:
3434 # interfaces: …
3435 #
3436 def get_vminstance(self, vim_vm_uuid=None):
3437 """Returns the VM instance information from VIM"""
3438 self.logger.debug("Client requesting vm instance {} ".format(vim_vm_uuid))
3439
3440 _, vdc = self.get_vdc_details()
3441 if vdc is None:
3442 raise vimconn.VimConnConnectionException(
3443 "Failed to get a reference of VDC for a tenant {}".format(
3444 self.tenant_name
3445 )
3446 )
3447
3448 vm_info_dict = self.get_vapp_details_rest(vapp_uuid=vim_vm_uuid)
3449 if not vm_info_dict:
3450 self.logger.debug(
3451 "get_vminstance(): Failed to get vApp name by UUID {}".format(
3452 vim_vm_uuid
3453 )
3454 )
3455 raise vimconn.VimConnNotFoundException(
3456 "Failed to get vApp name by UUID {}".format(vim_vm_uuid)
3457 )
3458
3459 status_key = vm_info_dict["status"]
3460 error = ""
3461 try:
3462 vm_dict = {
3463 "created": vm_info_dict["created"],
3464 "description": vm_info_dict["name"],
3465 "status": vcdStatusCode2manoFormat[int(status_key)],
3466 "hostId": vm_info_dict["vmuuid"],
3467 "error_msg": error,
3468 "vim_info": yaml.safe_dump(vm_info_dict),
3469 "interfaces": [],
3470 }
3471
3472 if "interfaces" in vm_info_dict:
3473 vm_dict["interfaces"] = vm_info_dict["interfaces"]
3474 else:
3475 vm_dict["interfaces"] = []
3476 except KeyError:
3477 vm_dict = {
3478 "created": "",
3479 "description": "",
3480 "status": vcdStatusCode2manoFormat[int(-1)],
3481 "hostId": vm_info_dict["vmuuid"],
3482 "error_msg": "Inconsistency state",
3483 "vim_info": yaml.safe_dump(vm_info_dict),
3484 "interfaces": [],
3485 }
3486
3487 return vm_dict
3488
3489 def delete_vminstance(self, vm__vim_uuid, created_items=None):
3490 """Method poweroff and remove VM instance from vcloud director network.
3491
3492 Args:
3493 vm__vim_uuid: VM UUID
3494
3495 Returns:
3496 Returns the instance identifier
3497 """
3498 self.logger.debug(
3499 "Client requesting delete vm instance {} ".format(vm__vim_uuid)
3500 )
3501
3502 _, vdc = self.get_vdc_details()
3503 vdc_obj = VDC(self.client, href=vdc.get("href"))
3504 if vdc_obj is None:
3505 self.logger.debug(
3506 "delete_vminstance(): Failed to get a reference of VDC for a tenant {}".format(
3507 self.tenant_name
3508 )
3509 )
3510 raise vimconn.VimConnException(
3511 "delete_vminstance(): Failed to get a reference of VDC for a tenant {}".format(
3512 self.tenant_name
3513 )
3514 )
3515
3516 try:
3517 vapp_name = self.get_namebyvappid(vm__vim_uuid)
3518 if vapp_name is None:
3519 self.logger.debug(
3520 "delete_vminstance(): Failed to get vm by given {} vm uuid".format(
3521 vm__vim_uuid
3522 )
3523 )
3524
3525 return (
3526 -1,
3527 "delete_vminstance(): Failed to get vm by given {} vm uuid".format(
3528 vm__vim_uuid
3529 ),
3530 )
3531
3532 self.logger.info(
3533 "Deleting vApp {} and UUID {}".format(vapp_name, vm__vim_uuid)
3534 )
3535 vapp_resource = vdc_obj.get_vapp(vapp_name)
3536 vapp = VApp(self.client, resource=vapp_resource)
3537
3538 # Delete vApp and wait for status change if task executed and vApp is None.
3539 if vapp:
3540 if vapp_resource.get("deployed") == "true":
3541 self.logger.info("Powering off vApp {}".format(vapp_name))
3542 # Power off vApp
3543 powered_off = False
3544 wait_time = 0
3545
3546 while wait_time <= MAX_WAIT_TIME:
3547 power_off_task = vapp.power_off()
3548 result = self.client.get_task_monitor().wait_for_success(
3549 task=power_off_task
3550 )
3551
3552 if result.get("status") == "success":
3553 powered_off = True
3554 break
3555 else:
3556 self.logger.info(
3557 "Wait for vApp {} to power off".format(vapp_name)
3558 )
3559 time.sleep(INTERVAL_TIME)
3560
3561 wait_time += INTERVAL_TIME
3562
3563 if not powered_off:
3564 self.logger.debug(
3565 "delete_vminstance(): Failed to power off VM instance {} ".format(
3566 vm__vim_uuid
3567 )
3568 )
3569 else:
3570 self.logger.info(
3571 "delete_vminstance(): Powered off VM instance {} ".format(
3572 vm__vim_uuid
3573 )
3574 )
3575
3576 # Undeploy vApp
3577 self.logger.info("Undeploy vApp {}".format(vapp_name))
3578 wait_time = 0
3579 undeployed = False
3580 while wait_time <= MAX_WAIT_TIME:
3581 vapp = VApp(self.client, resource=vapp_resource)
3582 if not vapp:
3583 self.logger.debug(
3584 "delete_vminstance(): Failed to get vm by given {} vm uuid".format(
3585 vm__vim_uuid
3586 )
3587 )
3588
3589 return (
3590 -1,
3591 "delete_vminstance(): Failed to get vm by given {} vm uuid".format(
3592 vm__vim_uuid
3593 ),
3594 )
3595
3596 undeploy_task = vapp.undeploy()
3597 result = self.client.get_task_monitor().wait_for_success(
3598 task=undeploy_task
3599 )
3600
3601 if result.get("status") == "success":
3602 undeployed = True
3603 break
3604 else:
3605 self.logger.debug(
3606 "Wait for vApp {} to undeploy".format(vapp_name)
3607 )
3608 time.sleep(INTERVAL_TIME)
3609
3610 wait_time += INTERVAL_TIME
3611
3612 if not undeployed:
3613 self.logger.debug(
3614 "delete_vminstance(): Failed to undeploy vApp {} ".format(
3615 vm__vim_uuid
3616 )
3617 )
3618
3619 # delete vapp
3620 self.logger.info("Start deletion of vApp {} ".format(vapp_name))
3621 if vapp is not None:
3622 wait_time = 0
3623 result = False
3624
3625 while wait_time <= MAX_WAIT_TIME:
3626 vapp = VApp(self.client, resource=vapp_resource)
3627 if not vapp:
3628 self.logger.debug(
3629 "delete_vminstance(): Failed to get vm by given {} vm uuid".format(
3630 vm__vim_uuid
3631 )
3632 )
3633
3634 return (
3635 -1,
3636 "delete_vminstance(): Failed to get vm by given {} vm uuid".format(
3637 vm__vim_uuid
3638 ),
3639 )
3640
3641 delete_task = vdc_obj.delete_vapp(vapp.name, force=True)
3642 result = self.client.get_task_monitor().wait_for_success(
3643 task=delete_task
3644 )
3645 if result.get("status") == "success":
3646 break
3647 else:
3648 self.logger.debug(
3649 "Wait for vApp {} to delete".format(vapp_name)
3650 )
3651 time.sleep(INTERVAL_TIME)
3652
3653 wait_time += INTERVAL_TIME
3654
3655 if result is None:
3656 self.logger.debug(
3657 "delete_vminstance(): Failed delete uuid {} ".format(
3658 vm__vim_uuid
3659 )
3660 )
3661 else:
3662 self.logger.info(
3663 "Deleted vm instance {} sccessfully".format(vm__vim_uuid)
3664 )
3665 config_drive_catalog_name, config_drive_catalog_id = (
3666 "cfg_drv-" + vm__vim_uuid,
3667 None,
3668 )
3669 catalog_list = self.get_image_list()
3670
3671 try:
3672 config_drive_catalog_id = [
3673 catalog_["id"]
3674 for catalog_ in catalog_list
3675 if catalog_["name"] == config_drive_catalog_name
3676 ][0]
3677 except IndexError:
3678 pass
3679
3680 if config_drive_catalog_id:
3681 self.logger.debug(
3682 "delete_vminstance(): Found a config drive catalog {} matching "
3683 'vapp_name"{}". Deleting it.'.format(
3684 config_drive_catalog_id, vapp_name
3685 )
3686 )
3687 self.delete_image(config_drive_catalog_id)
3688
3689 return vm__vim_uuid
3690 except Exception:
3691 self.logger.debug(traceback.format_exc())
3692
3693 raise vimconn.VimConnException(
3694 "delete_vminstance(): Failed delete vm instance {}".format(vm__vim_uuid)
3695 )
3696
3697 def refresh_vms_status(self, vm_list):
3698 """Get the status of the virtual machines and their interfaces/ports
3699 Params: the list of VM identifiers
3700 Returns a dictionary with:
3701 vm_id: #VIM id of this Virtual Machine
3702 status: #Mandatory. Text with one of:
3703 # DELETED (not found at vim)
3704 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
3705 # OTHER (Vim reported other status not understood)
3706 # ERROR (VIM indicates an ERROR status)
3707 # ACTIVE, PAUSED, SUSPENDED, INACTIVE (not running),
3708 # CREATING (on building process), ERROR
3709 # ACTIVE:NoMgmtIP (Active but any of its interface has an IP address
3710 #
3711 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
3712 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
3713 interfaces:
3714 - vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
3715 mac_address: #Text format XX:XX:XX:XX:XX:XX
3716 vim_net_id: #network id where this interface is connected
3717 vim_interface_id: #interface/port VIM id
3718 ip_address: #null, or text with IPv4, IPv6 address
3719 """
3720 self.logger.debug("Client requesting refresh vm status for {} ".format(vm_list))
3721
3722 _, vdc = self.get_vdc_details()
3723 if vdc is None:
3724 raise vimconn.VimConnException(
3725 "Failed to get a reference of VDC for a tenant {}".format(
3726 self.tenant_name
3727 )
3728 )
3729
3730 vms_dict = {}
3731 nsx_edge_list = []
3732 for vmuuid in vm_list:
3733 vapp_name = self.get_namebyvappid(vmuuid)
3734 if vapp_name is not None:
3735 try:
3736 vm_pci_details = self.get_vm_pci_details(vmuuid)
3737 vdc_obj = VDC(self.client, href=vdc.get("href"))
3738 vapp_resource = vdc_obj.get_vapp(vapp_name)
3739 the_vapp = VApp(self.client, resource=vapp_resource)
3740
3741 vm_details = {}
3742 for vm in the_vapp.get_all_vms():
3743 headers = {
3744 "Accept": "application/*+xml;version=" + API_VERSION,
3745 "x-vcloud-authorization": self.client._session.headers[
3746 "x-vcloud-authorization"
3747 ],
3748 }
3749 response = self.perform_request(
3750 req_type="GET", url=vm.get("href"), headers=headers
3751 )
3752
3753 if response.status_code != 200:
3754 self.logger.error(
3755 "refresh_vms_status : REST call {} failed reason : {}"
3756 "status code : {}".format(
3757 vm.get("href"), response.text, response.status_code
3758 )
3759 )
3760 raise vimconn.VimConnException(
3761 "refresh_vms_status : Failed to get VM details"
3762 )
3763
3764 xmlroot = XmlElementTree.fromstring(response.text)
3765 result = response.text.replace("\n", " ")
3766 hdd_match = re.search(
3767 'vcloud:capacity="(\d+)"\svcloud:storageProfileOverrideVmDefault=',
3768 result,
3769 )
3770
3771 if hdd_match:
3772 hdd_mb = hdd_match.group(1)
3773 vm_details["hdd_mb"] = int(hdd_mb) if hdd_mb else None
3774
3775 cpus_match = re.search(
3776 "<rasd:Description>Number of Virtual CPUs</.*?>(\d+)</rasd:VirtualQuantity>",
3777 result,
3778 )
3779
3780 if cpus_match:
3781 cpus = cpus_match.group(1)
3782 vm_details["cpus"] = int(cpus) if cpus else None
3783
3784 memory_mb = re.search(
3785 "<rasd:Description>Memory Size</.*?>(\d+)</rasd:VirtualQuantity>",
3786 result,
3787 ).group(1)
3788 vm_details["memory_mb"] = int(memory_mb) if memory_mb else None
3789 vm_details["status"] = vcdStatusCode2manoFormat[
3790 int(xmlroot.get("status"))
3791 ]
3792 vm_details["id"] = xmlroot.get("id")
3793 vm_details["name"] = xmlroot.get("name")
3794 vm_info = [vm_details]
3795
3796 if vm_pci_details:
3797 vm_info[0].update(vm_pci_details)
3798
3799 vm_dict = {
3800 "status": vcdStatusCode2manoFormat[
3801 int(vapp_resource.get("status"))
3802 ],
3803 "error_msg": vcdStatusCode2manoFormat[
3804 int(vapp_resource.get("status"))
3805 ],
3806 "vim_info": yaml.safe_dump(vm_info),
3807 "interfaces": [],
3808 }
3809
3810 # get networks
3811 vm_ip = None
3812 vm_mac = None
3813 networks = re.findall(
3814 "<NetworkConnection needsCustomization=.*?</NetworkConnection>",
3815 result,
3816 )
3817
3818 for network in networks:
3819 mac_s = re.search("<MACAddress>(.*?)</MACAddress>", network)
3820 vm_mac = mac_s.group(1) if mac_s else None
3821 ip_s = re.search("<IpAddress>(.*?)</IpAddress>", network)
3822 vm_ip = ip_s.group(1) if ip_s else None
3823
3824 if vm_ip is None:
3825 if not nsx_edge_list:
3826 nsx_edge_list = self.get_edge_details()
3827 if nsx_edge_list is None:
3828 raise vimconn.VimConnException(
3829 "refresh_vms_status:"
3830 "Failed to get edge details from NSX Manager"
3831 )
3832
3833 if vm_mac is not None:
3834 vm_ip = self.get_ipaddr_from_NSXedge(
3835 nsx_edge_list, vm_mac
3836 )
3837
3838 net_s = re.search('network="(.*?)"', network)
3839 network_name = net_s.group(1) if net_s else None
3840 vm_net_id = self.get_network_id_by_name(network_name)
3841 interface = {
3842 "mac_address": vm_mac,
3843 "vim_net_id": vm_net_id,
3844 "vim_interface_id": vm_net_id,
3845 "ip_address": vm_ip,
3846 }
3847 vm_dict["interfaces"].append(interface)
3848
3849 # add a vm to vm dict
3850 vms_dict.setdefault(vmuuid, vm_dict)
3851 self.logger.debug("refresh_vms_status : vm info {}".format(vm_dict))
3852 except Exception as exp:
3853 self.logger.debug("Error in response {}".format(exp))
3854 self.logger.debug(traceback.format_exc())
3855
3856 return vms_dict
3857
3858 def get_edge_details(self):
3859 """Get the NSX edge list from NSX Manager
3860 Returns list of NSX edges
3861 """
3862 edge_list = []
3863 rheaders = {"Content-Type": "application/xml"}
3864 nsx_api_url = "/api/4.0/edges"
3865
3866 self.logger.debug(
3867 "Get edge details from NSX Manager {} {}".format(
3868 self.nsx_manager, nsx_api_url
3869 )
3870 )
3871
3872 try:
3873 resp = requests.get(
3874 self.nsx_manager + nsx_api_url,
3875 auth=(self.nsx_user, self.nsx_password),
3876 verify=False,
3877 headers=rheaders,
3878 )
3879 if resp.status_code == requests.codes.ok:
3880 paged_Edge_List = XmlElementTree.fromstring(resp.text)
3881 for edge_pages in paged_Edge_List:
3882 if edge_pages.tag == "edgePage":
3883 for edge_summary in edge_pages:
3884 if edge_summary.tag == "pagingInfo":
3885 for element in edge_summary:
3886 if (
3887 element.tag == "totalCount"
3888 and element.text == "0"
3889 ):
3890 raise vimconn.VimConnException(
3891 "get_edge_details: No NSX edges details found: {}".format(
3892 self.nsx_manager
3893 )
3894 )
3895
3896 if edge_summary.tag == "edgeSummary":
3897 for element in edge_summary:
3898 if element.tag == "id":
3899 edge_list.append(element.text)
3900 else:
3901 raise vimconn.VimConnException(
3902 "get_edge_details: No NSX edge details found: {}".format(
3903 self.nsx_manager
3904 )
3905 )
3906
3907 if not edge_list:
3908 raise vimconn.VimConnException(
3909 "get_edge_details: "
3910 "No NSX edge details found: {}".format(self.nsx_manager)
3911 )
3912 else:
3913 self.logger.debug(
3914 "get_edge_details: Found NSX edges {}".format(edge_list)
3915 )
3916
3917 return edge_list
3918 else:
3919 self.logger.debug(
3920 "get_edge_details: "
3921 "Failed to get NSX edge details from NSX Manager: {}".format(
3922 resp.content
3923 )
3924 )
3925
3926 return None
3927
3928 except Exception as exp:
3929 self.logger.debug(
3930 "get_edge_details: "
3931 "Failed to get NSX edge details from NSX Manager: {}".format(exp)
3932 )
3933 raise vimconn.VimConnException(
3934 "get_edge_details: "
3935 "Failed to get NSX edge details from NSX Manager: {}".format(exp)
3936 )
3937
3938 def get_ipaddr_from_NSXedge(self, nsx_edges, mac_address):
3939 """Get IP address details from NSX edges, using the MAC address
3940 PARAMS: nsx_edges : List of NSX edges
3941 mac_address : Find IP address corresponding to this MAC address
3942 Returns: IP address corrresponding to the provided MAC address
3943 """
3944 ip_addr = None
3945 rheaders = {"Content-Type": "application/xml"}
3946
3947 self.logger.debug("get_ipaddr_from_NSXedge: Finding IP addr from NSX edge")
3948
3949 try:
3950 for edge in nsx_edges:
3951 nsx_api_url = "/api/4.0/edges/" + edge + "/dhcp/leaseInfo"
3952
3953 resp = requests.get(
3954 self.nsx_manager + nsx_api_url,
3955 auth=(self.nsx_user, self.nsx_password),
3956 verify=False,
3957 headers=rheaders,
3958 )
3959
3960 if resp.status_code == requests.codes.ok:
3961 dhcp_leases = XmlElementTree.fromstring(resp.text)
3962 for child in dhcp_leases:
3963 if child.tag == "dhcpLeaseInfo":
3964 dhcpLeaseInfo = child
3965 for leaseInfo in dhcpLeaseInfo:
3966 for elem in leaseInfo:
3967 if (elem.tag) == "macAddress":
3968 edge_mac_addr = elem.text
3969
3970 if (elem.tag) == "ipAddress":
3971 ip_addr = elem.text
3972
3973 if edge_mac_addr is not None:
3974 if edge_mac_addr == mac_address:
3975 self.logger.debug(
3976 "Found ip addr {} for mac {} at NSX edge {}".format(
3977 ip_addr, mac_address, edge
3978 )
3979 )
3980
3981 return ip_addr
3982 else:
3983 self.logger.debug(
3984 "get_ipaddr_from_NSXedge: "
3985 "Error occurred while getting DHCP lease info from NSX Manager: {}".format(
3986 resp.content
3987 )
3988 )
3989
3990 self.logger.debug(
3991 "get_ipaddr_from_NSXedge: No IP addr found in any NSX edge"
3992 )
3993
3994 return None
3995
3996 except XmlElementTree.ParseError as Err:
3997 self.logger.debug(
3998 "ParseError in response from NSX Manager {}".format(Err.message),
3999 exc_info=True,
4000 )
4001
4002 def action_vminstance(self, vm__vim_uuid=None, action_dict=None, created_items={}):
4003 """Send and action over a VM instance from VIM
4004 Returns the vm_id if the action was successfully sent to the VIM"""
4005
4006 self.logger.debug(
4007 "Received action for vm {} and action dict {}".format(
4008 vm__vim_uuid, action_dict
4009 )
4010 )
4011
4012 if vm__vim_uuid is None or action_dict is None:
4013 raise vimconn.VimConnException("Invalid request. VM id or action is None.")
4014
4015 _, vdc = self.get_vdc_details()
4016 if vdc is None:
4017 raise vimconn.VimConnException(
4018 "Failed to get a reference of VDC for a tenant {}".format(
4019 self.tenant_name
4020 )
4021 )
4022
4023 vapp_name = self.get_namebyvappid(vm__vim_uuid)
4024 if vapp_name is None:
4025 self.logger.debug(
4026 "action_vminstance(): Failed to get vm by given {} vm uuid".format(
4027 vm__vim_uuid
4028 )
4029 )
4030
4031 raise vimconn.VimConnException(
4032 "Failed to get vm by given {} vm uuid".format(vm__vim_uuid)
4033 )
4034 else:
4035 self.logger.info(
4036 "Action_vminstance vApp {} and UUID {}".format(vapp_name, vm__vim_uuid)
4037 )
4038
4039 try:
4040 vdc_obj = VDC(self.client, href=vdc.get("href"))
4041 vapp_resource = vdc_obj.get_vapp(vapp_name)
4042 vapp = VApp(self.client, resource=vapp_resource)
4043
4044 if "start" in action_dict:
4045 self.logger.info(
4046 "action_vminstance: Power on vApp: {}".format(vapp_name)
4047 )
4048 poweron_task = self.power_on_vapp(vm__vim_uuid, vapp_name)
4049 result = self.client.get_task_monitor().wait_for_success(
4050 task=poweron_task
4051 )
4052 self.instance_actions_result("start", result, vapp_name)
4053 elif "rebuild" in action_dict:
4054 self.logger.info(
4055 "action_vminstance: Rebuild vApp: {}".format(vapp_name)
4056 )
4057 rebuild_task = vapp.deploy(power_on=True)
4058 result = self.client.get_task_monitor().wait_for_success(
4059 task=rebuild_task
4060 )
4061 self.instance_actions_result("rebuild", result, vapp_name)
4062 elif "pause" in action_dict:
4063 self.logger.info("action_vminstance: pause vApp: {}".format(vapp_name))
4064 pause_task = vapp.undeploy(action="suspend")
4065 result = self.client.get_task_monitor().wait_for_success(
4066 task=pause_task
4067 )
4068 self.instance_actions_result("pause", result, vapp_name)
4069 elif "resume" in action_dict:
4070 self.logger.info("action_vminstance: resume vApp: {}".format(vapp_name))
4071 poweron_task = self.power_on_vapp(vm__vim_uuid, vapp_name)
4072 result = self.client.get_task_monitor().wait_for_success(
4073 task=poweron_task
4074 )
4075 self.instance_actions_result("resume", result, vapp_name)
4076 elif "shutoff" in action_dict or "shutdown" in action_dict:
4077 action_name, _ = list(action_dict.items())[0]
4078 self.logger.info(
4079 "action_vminstance: {} vApp: {}".format(action_name, vapp_name)
4080 )
4081 shutdown_task = vapp.shutdown()
4082 result = self.client.get_task_monitor().wait_for_success(
4083 task=shutdown_task
4084 )
4085 if action_name == "shutdown":
4086 self.instance_actions_result("shutdown", result, vapp_name)
4087 else:
4088 self.instance_actions_result("shutoff", result, vapp_name)
4089 elif "forceOff" in action_dict:
4090 result = vapp.undeploy(action="powerOff")
4091 self.instance_actions_result("forceOff", result, vapp_name)
4092 elif "reboot" in action_dict:
4093 self.logger.info("action_vminstance: reboot vApp: {}".format(vapp_name))
4094 reboot_task = vapp.reboot()
4095 self.client.get_task_monitor().wait_for_success(task=reboot_task)
4096 else:
4097 raise vimconn.VimConnException(
4098 "action_vminstance: Invalid action {} or action is None.".format(
4099 action_dict
4100 )
4101 )
4102
4103 return vm__vim_uuid
4104 except Exception as exp:
4105 self.logger.debug("action_vminstance: Failed with Exception {}".format(exp))
4106
4107 raise vimconn.VimConnException(
4108 "action_vminstance: Failed with Exception {}".format(exp)
4109 )
4110
4111 def instance_actions_result(self, action, result, vapp_name):
4112 if result.get("status") == "success":
4113 self.logger.info(
4114 "action_vminstance: Sucessfully {} the vApp: {}".format(
4115 action, vapp_name
4116 )
4117 )
4118 else:
4119 self.logger.error(
4120 "action_vminstance: Failed to {} vApp: {}".format(action, vapp_name)
4121 )
4122
4123 def get_vminstance_console(self, vm_id, console_type="novnc"):
4124 """
4125 Get a console for the virtual machine
4126 Params:
4127 vm_id: uuid of the VM
4128 console_type, can be:
4129 "novnc" (by default), "xvpvnc" for VNC types,
4130 "rdp-html5" for RDP types, "spice-html5" for SPICE types
4131 Returns dict with the console parameters:
4132 protocol: ssh, ftp, http, https, ...
4133 server: usually ip address
4134 port: the http, ssh, ... port
4135 suffix: extra text, e.g. the http path and query string
4136 """
4137 console_dict = {}
4138
4139 if console_type is None or console_type == "novnc":
4140 url_rest_call = "{}/api/vApp/vm-{}/screen/action/acquireMksTicket".format(
4141 self.url, vm_id
4142 )
4143 headers = {
4144 "Accept": "application/*+xml;version=" + API_VERSION,
4145 "x-vcloud-authorization": self.client._session.headers[
4146 "x-vcloud-authorization"
4147 ],
4148 }
4149 response = self.perform_request(
4150 req_type="POST", url=url_rest_call, headers=headers
4151 )
4152
4153 if response.status_code == 403:
4154 response = self.retry_rest("GET", url_rest_call)
4155
4156 if response.status_code != 200:
4157 self.logger.error(
4158 "REST call {} failed reason : {}"
4159 "status code : {}".format(
4160 url_rest_call, response.text, response.status_code
4161 )
4162 )
4163 raise vimconn.VimConnException(
4164 "get_vminstance_console : Failed to get " "VM Mks ticket details"
4165 )
4166
4167 s = re.search("<Host>(.*?)</Host>", response.text)
4168 console_dict["server"] = s.group(1) if s else None
4169 s1 = re.search("<Port>(\d+)</Port>", response.text)
4170 console_dict["port"] = s1.group(1) if s1 else None
4171 url_rest_call = "{}/api/vApp/vm-{}/screen/action/acquireTicket".format(
4172 self.url, vm_id
4173 )
4174 headers = {
4175 "Accept": "application/*+xml;version=" + API_VERSION,
4176 "x-vcloud-authorization": self.client._session.headers[
4177 "x-vcloud-authorization"
4178 ],
4179 }
4180 response = self.perform_request(
4181 req_type="POST", url=url_rest_call, headers=headers
4182 )
4183
4184 if response.status_code == 403:
4185 response = self.retry_rest("GET", url_rest_call)
4186
4187 if response.status_code != 200:
4188 self.logger.error(
4189 "REST call {} failed reason : {}"
4190 "status code : {}".format(
4191 url_rest_call, response.text, response.status_code
4192 )
4193 )
4194 raise vimconn.VimConnException(
4195 "get_vminstance_console : Failed to get " "VM console details"
4196 )
4197
4198 s = re.search(">.*?/(vm-\d+.*)</", response.text)
4199 console_dict["suffix"] = s.group(1) if s else None
4200 console_dict["protocol"] = "https"
4201
4202 return console_dict
4203
4204 # NOT USED METHODS in current version
4205
4206 def host_vim2gui(self, host, server_dict):
4207 """Transform host dictionary from VIM format to GUI format,
4208 and append to the server_dict
4209 """
4210 raise vimconn.VimConnNotImplemented("Should have implemented this")
4211
4212 def get_hosts_info(self):
4213 """Get the information of deployed hosts
4214 Returns the hosts content"""
4215 raise vimconn.VimConnNotImplemented("Should have implemented this")
4216
4217 def get_hosts(self, vim_tenant):
4218 """Get the hosts and deployed instances
4219 Returns the hosts content"""
4220 raise vimconn.VimConnNotImplemented("Should have implemented this")
4221
4222 def get_processor_rankings(self):
4223 """Get the processor rankings in the VIM database"""
4224 raise vimconn.VimConnNotImplemented("Should have implemented this")
4225
4226 def new_host(self, host_data):
4227 """Adds a new host to VIM"""
4228 """Returns status code of the VIM response"""
4229 raise vimconn.VimConnNotImplemented("Should have implemented this")
4230
4231 def new_external_port(self, port_data):
4232 """Adds a external port to VIM"""
4233 """Returns the port identifier"""
4234 raise vimconn.VimConnNotImplemented("Should have implemented this")
4235
4236 def new_external_network(self, net_name, net_type):
4237 """Adds a external network to VIM (shared)"""
4238 """Returns the network identifier"""
4239 raise vimconn.VimConnNotImplemented("Should have implemented this")
4240
4241 def connect_port_network(self, port_id, network_id, admin=False):
4242 """Connects a external port to a network"""
4243 """Returns status code of the VIM response"""
4244 raise vimconn.VimConnNotImplemented("Should have implemented this")
4245
4246 def new_vminstancefromJSON(self, vm_data):
4247 """Adds a VM instance to VIM"""
4248 """Returns the instance identifier"""
4249 raise vimconn.VimConnNotImplemented("Should have implemented this")
4250
4251 def get_network_name_by_id(self, network_uuid=None):
4252 """Method gets vcloud director network named based on supplied uuid.
4253
4254 Args:
4255 network_uuid: network_id
4256
4257 Returns:
4258 The return network name.
4259 """
4260
4261 if not network_uuid:
4262 return None
4263
4264 try:
4265 org_dict = self.get_org(self.org_uuid)
4266 if "networks" in org_dict:
4267 org_network_dict = org_dict["networks"]
4268
4269 for net_uuid in org_network_dict:
4270 if net_uuid == network_uuid:
4271 return org_network_dict[net_uuid]
4272 except Exception:
4273 self.logger.debug("Exception in get_network_name_by_id")
4274 self.logger.debug(traceback.format_exc())
4275
4276 return None
4277
4278 def get_network_id_by_name(self, network_name=None):
4279 """Method gets vcloud director network uuid based on supplied name.
4280
4281 Args:
4282 network_name: network_name
4283 Returns:
4284 The return network uuid.
4285 network_uuid: network_id
4286 """
4287 if not network_name:
4288 self.logger.debug("get_network_id_by_name() : Network name is empty")
4289 return None
4290
4291 try:
4292 org_dict = self.get_org(self.org_uuid)
4293 if org_dict and "networks" in org_dict:
4294 org_network_dict = org_dict["networks"]
4295
4296 for net_uuid, net_name in org_network_dict.items():
4297 if net_name == network_name:
4298 return net_uuid
4299
4300 except KeyError as exp:
4301 self.logger.debug("get_network_id_by_name() : KeyError- {} ".format(exp))
4302
4303 return None
4304
4305 def get_physical_network_by_name(self, physical_network_name):
4306 """
4307 Methos returns uuid of physical network which passed
4308 Args:
4309 physical_network_name: physical network name
4310 Returns:
4311 UUID of physical_network_name
4312 """
4313 try:
4314 client_as_admin = self.connect_as_admin()
4315
4316 if not client_as_admin:
4317 raise vimconn.VimConnConnectionException("Failed to connect vCD.")
4318
4319 url_list = [self.url, "/api/admin/vdc/", self.tenant_id]
4320 vm_list_rest_call = "".join(url_list)
4321
4322 if client_as_admin._session:
4323 headers = {
4324 "Accept": "application/*+xml;version=" + API_VERSION,
4325 "x-vcloud-authorization": client_as_admin._session.headers[
4326 "x-vcloud-authorization"
4327 ],
4328 }
4329 response = self.perform_request(
4330 req_type="GET", url=vm_list_rest_call, headers=headers
4331 )
4332 provider_network = None
4333 available_network = None
4334 # add_vdc_rest_url = None
4335
4336 if response.status_code != requests.codes.ok:
4337 self.logger.debug(
4338 "REST API call {} failed. Return status code {}".format(
4339 vm_list_rest_call, response.status_code
4340 )
4341 )
4342 return None
4343 else:
4344 try:
4345 vm_list_xmlroot = XmlElementTree.fromstring(response.text)
4346 for child in vm_list_xmlroot:
4347 if child.tag.split("}")[1] == "ProviderVdcReference":
4348 provider_network = child.attrib.get("href")
4349 # application/vnd.vmware.admin.providervdc+xml
4350
4351 if child.tag.split("}")[1] == "Link":
4352 if (
4353 child.attrib.get("type")
4354 == "application/vnd.vmware.vcloud.orgVdcNetwork+xml"
4355 and child.attrib.get("rel") == "add"
4356 ):
4357 child.attrib.get("href")
4358 except Exception:
4359 self.logger.debug(
4360 "Failed parse respond for rest api call {}".format(
4361 vm_list_rest_call
4362 )
4363 )
4364 self.logger.debug("Respond body {}".format(response.text))
4365
4366 return None
4367
4368 # find pvdc provided available network
4369 response = self.perform_request(
4370 req_type="GET", url=provider_network, headers=headers
4371 )
4372
4373 if response.status_code != requests.codes.ok:
4374 self.logger.debug(
4375 "REST API call {} failed. Return status code {}".format(
4376 vm_list_rest_call, response.status_code
4377 )
4378 )
4379
4380 return None
4381
4382 try:
4383 vm_list_xmlroot = XmlElementTree.fromstring(response.text)
4384 for child in vm_list_xmlroot.iter():
4385 if child.tag.split("}")[1] == "AvailableNetworks":
4386 for networks in child.iter():
4387 if (
4388 networks.attrib.get("href") is not None
4389 and networks.attrib.get("name") is not None
4390 ):
4391 if (
4392 networks.attrib.get("name")
4393 == physical_network_name
4394 ):
4395 network_url = networks.attrib.get("href")
4396 available_network = network_url[
4397 network_url.rindex("/") + 1 :
4398 ]
4399 break
4400 except Exception:
4401 return None
4402
4403 return available_network
4404 except Exception as e:
4405 self.logger.error("Error while getting physical network: {}".format(e))
4406
4407 def list_org_action(self):
4408 """
4409 Method leverages vCloud director and query for available organization for particular user
4410
4411 Args:
4412 vca - is active VCA connection.
4413 vdc_name - is a vdc name that will be used to query vms action
4414
4415 Returns:
4416 The return XML respond
4417 """
4418 url_list = [self.url, "/api/org"]
4419 vm_list_rest_call = "".join(url_list)
4420
4421 if self.client._session:
4422 headers = {
4423 "Accept": "application/*+xml;version=" + API_VERSION,
4424 "x-vcloud-authorization": self.client._session.headers[
4425 "x-vcloud-authorization"
4426 ],
4427 }
4428
4429 response = self.perform_request(
4430 req_type="GET", url=vm_list_rest_call, headers=headers
4431 )
4432
4433 if response.status_code == 403:
4434 response = self.retry_rest("GET", vm_list_rest_call)
4435
4436 if response.status_code == requests.codes.ok:
4437 return response.text
4438
4439 return None
4440
4441 def get_org_action(self, org_uuid=None):
4442 """
4443 Method leverages vCloud director and retrieve available object for organization.
4444
4445 Args:
4446 org_uuid - vCD organization uuid
4447 self.client - is active connection.
4448
4449 Returns:
4450 The return XML respond
4451 """
4452
4453 if org_uuid is None:
4454 return None
4455
4456 url_list = [self.url, "/api/org/", org_uuid]
4457 vm_list_rest_call = "".join(url_list)
4458
4459 if self.client._session:
4460 headers = {
4461 "Accept": "application/*+xml;version=" + API_VERSION,
4462 "x-vcloud-authorization": self.client._session.headers[
4463 "x-vcloud-authorization"
4464 ],
4465 }
4466
4467 # response = requests.get(vm_list_rest_call, headers=headers, verify=False)
4468 response = self.perform_request(
4469 req_type="GET", url=vm_list_rest_call, headers=headers
4470 )
4471
4472 if response.status_code == 403:
4473 response = self.retry_rest("GET", vm_list_rest_call)
4474
4475 if response.status_code == requests.codes.ok:
4476 return response.text
4477
4478 return None
4479
4480 def get_org(self, org_uuid=None):
4481 """
4482 Method retrieves available organization in vCloud Director
4483
4484 Args:
4485 org_uuid - is a organization uuid.
4486
4487 Returns:
4488 The return dictionary with following key
4489 "network" - for network list under the org
4490 "catalogs" - for network list under the org
4491 "vdcs" - for vdc list under org
4492 """
4493
4494 org_dict = {}
4495
4496 if org_uuid is None:
4497 return org_dict
4498
4499 content = self.get_org_action(org_uuid=org_uuid)
4500 try:
4501 vdc_list = {}
4502 network_list = {}
4503 catalog_list = {}
4504 vm_list_xmlroot = XmlElementTree.fromstring(content)
4505 for child in vm_list_xmlroot:
4506 if child.attrib["type"] == "application/vnd.vmware.vcloud.vdc+xml":
4507 vdc_list[child.attrib["href"].split("/")[-1:][0]] = child.attrib[
4508 "name"
4509 ]
4510 org_dict["vdcs"] = vdc_list
4511
4512 if (
4513 child.attrib["type"]
4514 == "application/vnd.vmware.vcloud.orgNetwork+xml"
4515 ):
4516 network_list[
4517 child.attrib["href"].split("/")[-1:][0]
4518 ] = child.attrib["name"]
4519 org_dict["networks"] = network_list
4520
4521 if child.attrib["type"] == "application/vnd.vmware.vcloud.catalog+xml":
4522 catalog_list[
4523 child.attrib["href"].split("/")[-1:][0]
4524 ] = child.attrib["name"]
4525 org_dict["catalogs"] = catalog_list
4526 except Exception:
4527 pass
4528
4529 return org_dict
4530
4531 def get_org_list(self):
4532 """
4533 Method retrieves available organization in vCloud Director
4534
4535 Args:
4536 vca - is active VCA connection.
4537
4538 Returns:
4539 The return dictionary and key for each entry VDC UUID
4540 """
4541 org_dict = {}
4542
4543 content = self.list_org_action()
4544 try:
4545 vm_list_xmlroot = XmlElementTree.fromstring(content)
4546
4547 for vm_xml in vm_list_xmlroot:
4548 if vm_xml.tag.split("}")[1] == "Org":
4549 org_uuid = vm_xml.attrib["href"].split("/")[-1:]
4550 org_dict[org_uuid[0]] = vm_xml.attrib["name"]
4551 except Exception:
4552 pass
4553
4554 return org_dict
4555
4556 def vms_view_action(self, vdc_name=None):
4557 """Method leverages vCloud director vms query call
4558
4559 Args:
4560 vca - is active VCA connection.
4561 vdc_name - is a vdc name that will be used to query vms action
4562
4563 Returns:
4564 The return XML respond
4565 """
4566 vca = self.connect()
4567 if vdc_name is None:
4568 return None
4569
4570 url_list = [vca.host, "/api/vms/query"]
4571 vm_list_rest_call = "".join(url_list)
4572
4573 if not (not vca.vcloud_session or not vca.vcloud_session.organization):
4574 refs = [
4575 ref
4576 for ref in vca.vcloud_session.organization.Link
4577 if ref.name == vdc_name
4578 and ref.type_ == "application/vnd.vmware.vcloud.vdc+xml"
4579 ]
4580
4581 if len(refs) == 1:
4582 response = self.perform_request(
4583 req_type="GET",
4584 url=vm_list_rest_call,
4585 headers=vca.vcloud_session.get_vcloud_headers(),
4586 verify=vca.verify,
4587 logger=vca.logger,
4588 )
4589
4590 if response.status_code == requests.codes.ok:
4591 return response.text
4592
4593 return None
4594
4595 def get_vapp_list(self, vdc_name=None):
4596 """
4597 Method retrieves vApp list deployed vCloud director and returns a dictionary
4598 contains a list of all vapp deployed for queried VDC.
4599 The key for a dictionary is vApp UUID
4600
4601
4602 Args:
4603 vca - is active VCA connection.
4604 vdc_name - is a vdc name that will be used to query vms action
4605
4606 Returns:
4607 The return dictionary and key for each entry vapp UUID
4608 """
4609 vapp_dict = {}
4610
4611 if vdc_name is None:
4612 return vapp_dict
4613
4614 content = self.vms_view_action(vdc_name=vdc_name)
4615 try:
4616 vm_list_xmlroot = XmlElementTree.fromstring(content)
4617 for vm_xml in vm_list_xmlroot:
4618 if vm_xml.tag.split("}")[1] == "VMRecord":
4619 if vm_xml.attrib["isVAppTemplate"] == "true":
4620 rawuuid = vm_xml.attrib["container"].split("/")[-1:]
4621 if "vappTemplate-" in rawuuid[0]:
4622 # vm in format vappTemplate-e63d40e7-4ff5-4c6d-851f-96c1e4da86a5 we remove
4623 # vm and use raw UUID as key
4624 vapp_dict[rawuuid[0][13:]] = vm_xml.attrib
4625 except Exception:
4626 pass
4627
4628 return vapp_dict
4629
4630 def get_vm_list(self, vdc_name=None):
4631 """
4632 Method retrieves VM's list deployed vCloud director. It returns a dictionary
4633 contains a list of all VM's deployed for queried VDC.
4634 The key for a dictionary is VM UUID
4635
4636
4637 Args:
4638 vca - is active VCA connection.
4639 vdc_name - is a vdc name that will be used to query vms action
4640
4641 Returns:
4642 The return dictionary and key for each entry vapp UUID
4643 """
4644 vm_dict = {}
4645
4646 if vdc_name is None:
4647 return vm_dict
4648
4649 content = self.vms_view_action(vdc_name=vdc_name)
4650 try:
4651 vm_list_xmlroot = XmlElementTree.fromstring(content)
4652 for vm_xml in vm_list_xmlroot:
4653 if vm_xml.tag.split("}")[1] == "VMRecord":
4654 if vm_xml.attrib["isVAppTemplate"] == "false":
4655 rawuuid = vm_xml.attrib["href"].split("/")[-1:]
4656 if "vm-" in rawuuid[0]:
4657 # vm in format vm-e63d40e7-4ff5-4c6d-851f-96c1e4da86a5 we remove
4658 # vm and use raw UUID as key
4659 vm_dict[rawuuid[0][3:]] = vm_xml.attrib
4660 except Exception:
4661 pass
4662
4663 return vm_dict
4664
4665 def get_vapp(self, vdc_name=None, vapp_name=None, isuuid=False):
4666 """
4667 Method retrieves VM deployed vCloud director. It returns VM attribute as dictionary
4668 contains a list of all VM's deployed for queried VDC.
4669 The key for a dictionary is VM UUID
4670
4671
4672 Args:
4673 vca - is active VCA connection.
4674 vdc_name - is a vdc name that will be used to query vms action
4675
4676 Returns:
4677 The return dictionary and key for each entry vapp UUID
4678 """
4679 vm_dict = {}
4680 vca = self.connect()
4681
4682 if not vca:
4683 raise vimconn.VimConnConnectionException("self.connect() is failed")
4684
4685 if vdc_name is None:
4686 return vm_dict
4687
4688 content = self.vms_view_action(vdc_name=vdc_name)
4689 try:
4690 vm_list_xmlroot = XmlElementTree.fromstring(content)
4691 for vm_xml in vm_list_xmlroot:
4692 if (
4693 vm_xml.tag.split("}")[1] == "VMRecord"
4694 and vm_xml.attrib["isVAppTemplate"] == "false"
4695 ):
4696 # lookup done by UUID
4697 if isuuid:
4698 if vapp_name in vm_xml.attrib["container"]:
4699 rawuuid = vm_xml.attrib["href"].split("/")[-1:]
4700 if "vm-" in rawuuid[0]:
4701 vm_dict[rawuuid[0][3:]] = vm_xml.attrib
4702 break
4703 # lookup done by Name
4704 else:
4705 if vapp_name in vm_xml.attrib["name"]:
4706 rawuuid = vm_xml.attrib["href"].split("/")[-1:]
4707 if "vm-" in rawuuid[0]:
4708 vm_dict[rawuuid[0][3:]] = vm_xml.attrib
4709 break
4710 except Exception:
4711 pass
4712
4713 return vm_dict
4714
4715 def get_network_action(self, network_uuid=None):
4716 """
4717 Method leverages vCloud director and query network based on network uuid
4718
4719 Args:
4720 vca - is active VCA connection.
4721 network_uuid - is a network uuid
4722
4723 Returns:
4724 The return XML respond
4725 """
4726 if network_uuid is None:
4727 return None
4728
4729 url_list = [self.url, "/api/network/", network_uuid]
4730 vm_list_rest_call = "".join(url_list)
4731
4732 if self.client._session:
4733 headers = {
4734 "Accept": "application/*+xml;version=" + API_VERSION,
4735 "x-vcloud-authorization": self.client._session.headers[
4736 "x-vcloud-authorization"
4737 ],
4738 }
4739 response = self.perform_request(
4740 req_type="GET", url=vm_list_rest_call, headers=headers
4741 )
4742
4743 # Retry login if session expired & retry sending request
4744 if response.status_code == 403:
4745 response = self.retry_rest("GET", vm_list_rest_call)
4746
4747 if response.status_code == requests.codes.ok:
4748 return response.text
4749
4750 return None
4751
4752 def get_vcd_network(self, network_uuid=None):
4753 """
4754 Method retrieves available network from vCloud Director
4755
4756 Args:
4757 network_uuid - is VCD network UUID
4758
4759 Each element serialized as key : value pair
4760
4761 Following keys available for access. network_configuration['Gateway'}
4762 <Configuration>
4763 <IpScopes>
4764 <IpScope>
4765 <IsInherited>true</IsInherited>
4766 <Gateway>172.16.252.100</Gateway>
4767 <Netmask>255.255.255.0</Netmask>
4768 <Dns1>172.16.254.201</Dns1>
4769 <Dns2>172.16.254.202</Dns2>
4770 <DnsSuffix>vmwarelab.edu</DnsSuffix>
4771 <IsEnabled>true</IsEnabled>
4772 <IpRanges>
4773 <IpRange>
4774 <StartAddress>172.16.252.1</StartAddress>
4775 <EndAddress>172.16.252.99</EndAddress>
4776 </IpRange>
4777 </IpRanges>
4778 </IpScope>
4779 </IpScopes>
4780 <FenceMode>bridged</FenceMode>
4781
4782 Returns:
4783 The return dictionary and key for each entry vapp UUID
4784 """
4785 network_configuration = {}
4786
4787 if network_uuid is None:
4788 return network_uuid
4789
4790 try:
4791 content = self.get_network_action(network_uuid=network_uuid)
4792 if content is not None:
4793 vm_list_xmlroot = XmlElementTree.fromstring(content)
4794 network_configuration["status"] = vm_list_xmlroot.get("status")
4795 network_configuration["name"] = vm_list_xmlroot.get("name")
4796 network_configuration["uuid"] = vm_list_xmlroot.get("id").split(":")[3]
4797
4798 for child in vm_list_xmlroot:
4799 if child.tag.split("}")[1] == "IsShared":
4800 network_configuration["isShared"] = child.text.strip()
4801
4802 if child.tag.split("}")[1] == "Configuration":
4803 for configuration in child.iter():
4804 tagKey = configuration.tag.split("}")[1].strip()
4805 if tagKey != "":
4806 network_configuration[
4807 tagKey
4808 ] = configuration.text.strip()
4809 except Exception as exp:
4810 self.logger.debug("get_vcd_network: Failed with Exception {}".format(exp))
4811
4812 raise vimconn.VimConnException(
4813 "get_vcd_network: Failed with Exception {}".format(exp)
4814 )
4815
4816 return network_configuration
4817
4818 def delete_network_action(self, network_uuid=None):
4819 """
4820 Method delete given network from vCloud director
4821
4822 Args:
4823 network_uuid - is a network uuid that client wish to delete
4824
4825 Returns:
4826 The return None or XML respond or false
4827 """
4828 client = self.connect_as_admin()
4829
4830 if not client:
4831 raise vimconn.VimConnConnectionException("Failed to connect vCD as admin")
4832
4833 if network_uuid is None:
4834 return False
4835
4836 url_list = [self.url, "/api/admin/network/", network_uuid]
4837 vm_list_rest_call = "".join(url_list)
4838
4839 if client._session:
4840 headers = {
4841 "Accept": "application/*+xml;version=" + API_VERSION,
4842 "x-vcloud-authorization": client._session.headers[
4843 "x-vcloud-authorization"
4844 ],
4845 }
4846 response = self.perform_request(
4847 req_type="DELETE", url=vm_list_rest_call, headers=headers
4848 )
4849
4850 if response.status_code == 202:
4851 return True
4852
4853 return False
4854
4855 def create_network(
4856 self,
4857 network_name=None,
4858 net_type="bridge",
4859 parent_network_uuid=None,
4860 ip_profile=None,
4861 isshared="true",
4862 ):
4863 """
4864 Method create network in vCloud director
4865
4866 Args:
4867 network_name - is network name to be created.
4868 net_type - can be 'bridge','data','ptp','mgmt'.
4869 ip_profile is a dict containing the IP parameters of the network
4870 isshared - is a boolean
4871 parent_network_uuid - is parent provider vdc network that will be used for mapping.
4872 It optional attribute. by default if no parent network indicate the first available will be used.
4873
4874 Returns:
4875 The return network uuid or return None
4876 """
4877 new_network_name = [network_name, "-", str(uuid.uuid4())]
4878 content = self.create_network_rest(
4879 network_name="".join(new_network_name),
4880 ip_profile=ip_profile,
4881 net_type=net_type,
4882 parent_network_uuid=parent_network_uuid,
4883 isshared=isshared,
4884 )
4885
4886 if content is None:
4887 self.logger.debug("Failed create network {}.".format(network_name))
4888
4889 return None
4890
4891 try:
4892 vm_list_xmlroot = XmlElementTree.fromstring(content)
4893 vcd_uuid = vm_list_xmlroot.get("id").split(":")
4894 if len(vcd_uuid) == 4:
4895 self.logger.info(
4896 "Created new network name: {} uuid: {}".format(
4897 network_name, vcd_uuid[3]
4898 )
4899 )
4900
4901 return vcd_uuid[3]
4902 except Exception:
4903 self.logger.debug("Failed create network {}".format(network_name))
4904
4905 return None
4906
4907 def create_network_rest(
4908 self,
4909 network_name=None,
4910 net_type="bridge",
4911 parent_network_uuid=None,
4912 ip_profile=None,
4913 isshared="true",
4914 ):
4915 """
4916 Method create network in vCloud director
4917
4918 Args:
4919 network_name - is network name to be created.
4920 net_type - can be 'bridge','data','ptp','mgmt'.
4921 ip_profile is a dict containing the IP parameters of the network
4922 isshared - is a boolean
4923 parent_network_uuid - is parent provider vdc network that will be used for mapping.
4924 It optional attribute. by default if no parent network indicate the first available will be used.
4925
4926 Returns:
4927 The return network uuid or return None
4928 """
4929 client_as_admin = self.connect_as_admin()
4930
4931 if not client_as_admin:
4932 raise vimconn.VimConnConnectionException("Failed to connect vCD.")
4933
4934 if network_name is None:
4935 return None
4936
4937 url_list = [self.url, "/api/admin/vdc/", self.tenant_id]
4938 vm_list_rest_call = "".join(url_list)
4939
4940 if client_as_admin._session:
4941 headers = {
4942 "Accept": "application/*+xml;version=" + API_VERSION,
4943 "x-vcloud-authorization": client_as_admin._session.headers[
4944 "x-vcloud-authorization"
4945 ],
4946 }
4947 response = self.perform_request(
4948 req_type="GET", url=vm_list_rest_call, headers=headers
4949 )
4950 provider_network = None
4951 available_networks = None
4952 add_vdc_rest_url = None
4953
4954 if response.status_code != requests.codes.ok:
4955 self.logger.debug(
4956 "REST API call {} failed. Return status code {}".format(
4957 vm_list_rest_call, response.status_code
4958 )
4959 )
4960
4961 return None
4962 else:
4963 try:
4964 vm_list_xmlroot = XmlElementTree.fromstring(response.text)
4965 for child in vm_list_xmlroot:
4966 if child.tag.split("}")[1] == "ProviderVdcReference":
4967 provider_network = child.attrib.get("href")
4968 # application/vnd.vmware.admin.providervdc+xml
4969
4970 if child.tag.split("}")[1] == "Link":
4971 if (
4972 child.attrib.get("type")
4973 == "application/vnd.vmware.vcloud.orgVdcNetwork+xml"
4974 and child.attrib.get("rel") == "add"
4975 ):
4976 add_vdc_rest_url = child.attrib.get("href")
4977 except Exception:
4978 self.logger.debug(
4979 "Failed parse respond for rest api call {}".format(
4980 vm_list_rest_call
4981 )
4982 )
4983 self.logger.debug("Respond body {}".format(response.text))
4984
4985 return None
4986
4987 # find pvdc provided available network
4988 response = self.perform_request(
4989 req_type="GET", url=provider_network, headers=headers
4990 )
4991
4992 if response.status_code != requests.codes.ok:
4993 self.logger.debug(
4994 "REST API call {} failed. Return status code {}".format(
4995 vm_list_rest_call, response.status_code
4996 )
4997 )
4998
4999 return None
5000
5001 if parent_network_uuid is None:
5002 try:
5003 vm_list_xmlroot = XmlElementTree.fromstring(response.text)
5004 for child in vm_list_xmlroot.iter():
5005 if child.tag.split("}")[1] == "AvailableNetworks":
5006 for networks in child.iter():
5007 # application/vnd.vmware.admin.network+xml
5008 if networks.attrib.get("href") is not None:
5009 available_networks = networks.attrib.get("href")
5010 break
5011 except Exception:
5012 return None
5013
5014 try:
5015 # Configure IP profile of the network
5016 ip_profile = (
5017 ip_profile if ip_profile is not None else DEFAULT_IP_PROFILE
5018 )
5019
5020 if (
5021 "subnet_address" not in ip_profile
5022 or ip_profile["subnet_address"] is None
5023 ):
5024 subnet_rand = random.randint(0, 255)
5025 ip_base = "192.168.{}.".format(subnet_rand)
5026 ip_profile["subnet_address"] = ip_base + "0/24"
5027 else:
5028 ip_base = ip_profile["subnet_address"].rsplit(".", 1)[0] + "."
5029
5030 if (
5031 "gateway_address" not in ip_profile
5032 or ip_profile["gateway_address"] is None
5033 ):
5034 ip_profile["gateway_address"] = ip_base + "1"
5035
5036 if "dhcp_count" not in ip_profile or ip_profile["dhcp_count"] is None:
5037 ip_profile["dhcp_count"] = DEFAULT_IP_PROFILE["dhcp_count"]
5038
5039 if (
5040 "dhcp_enabled" not in ip_profile
5041 or ip_profile["dhcp_enabled"] is None
5042 ):
5043 ip_profile["dhcp_enabled"] = DEFAULT_IP_PROFILE["dhcp_enabled"]
5044
5045 if (
5046 "dhcp_start_address" not in ip_profile
5047 or ip_profile["dhcp_start_address"] is None
5048 ):
5049 ip_profile["dhcp_start_address"] = ip_base + "3"
5050
5051 if "ip_version" not in ip_profile or ip_profile["ip_version"] is None:
5052 ip_profile["ip_version"] = DEFAULT_IP_PROFILE["ip_version"]
5053
5054 if "dns_address" not in ip_profile or ip_profile["dns_address"] is None:
5055 ip_profile["dns_address"] = ip_base + "2"
5056
5057 gateway_address = ip_profile["gateway_address"]
5058 dhcp_count = int(ip_profile["dhcp_count"])
5059 subnet_address = self.convert_cidr_to_netmask(
5060 ip_profile["subnet_address"]
5061 )
5062
5063 if ip_profile["dhcp_enabled"] is True:
5064 dhcp_enabled = "true"
5065 else:
5066 dhcp_enabled = "false"
5067
5068 dhcp_start_address = ip_profile["dhcp_start_address"]
5069
5070 # derive dhcp_end_address from dhcp_start_address & dhcp_count
5071 end_ip_int = int(netaddr.IPAddress(dhcp_start_address))
5072 end_ip_int += dhcp_count - 1
5073 dhcp_end_address = str(netaddr.IPAddress(end_ip_int))
5074
5075 # ip_version = ip_profile['ip_version']
5076 dns_address = ip_profile["dns_address"]
5077 except KeyError as exp:
5078 self.logger.debug("Create Network REST: Key error {}".format(exp))
5079
5080 raise vimconn.VimConnException(
5081 "Create Network REST: Key error{}".format(exp)
5082 )
5083
5084 # either use client provided UUID or search for a first available
5085 # if both are not defined we return none
5086 if parent_network_uuid is not None:
5087 provider_network = None
5088 available_networks = None
5089 add_vdc_rest_url = None
5090 url_list = [self.url, "/api/admin/vdc/", self.tenant_id, "/networks"]
5091 add_vdc_rest_url = "".join(url_list)
5092 url_list = [self.url, "/api/admin/network/", parent_network_uuid]
5093 available_networks = "".join(url_list)
5094
5095 # Creating all networks as Direct Org VDC type networks.
5096 # Unused in case of Underlay (data/ptp) network interface.
5097 fence_mode = "isolated"
5098 is_inherited = "false"
5099 dns_list = dns_address.split(";")
5100 dns1 = dns_list[0]
5101 dns2_text = ""
5102
5103 if len(dns_list) >= 2:
5104 dns2_text = "\n <Dns2>{}</Dns2>\n".format(
5105 dns_list[1]
5106 )
5107
5108 if net_type == "isolated":
5109 fence_mode = "isolated"
5110 data = """ <OrgVdcNetwork name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5">
5111 <Description>Openmano created</Description>
5112 <Configuration>
5113 <IpScopes>
5114 <IpScope>
5115 <IsInherited>{1:s}</IsInherited>
5116 <Gateway>{2:s}</Gateway>
5117 <Netmask>{3:s}</Netmask>
5118 <Dns1>{4:s}</Dns1>{5:s}
5119 <IsEnabled>{6:s}</IsEnabled>
5120 <IpRanges>
5121 <IpRange>
5122 <StartAddress>{7:s}</StartAddress>
5123 <EndAddress>{8:s}</EndAddress>
5124 </IpRange>
5125 </IpRanges>
5126 </IpScope>
5127 </IpScopes>
5128 <FenceMode>{9:s}</FenceMode>
5129 </Configuration>
5130 <IsShared>{10:s}</IsShared>
5131 </OrgVdcNetwork> """.format(
5132 escape(network_name),
5133 is_inherited,
5134 gateway_address,
5135 subnet_address,
5136 dns1,
5137 dns2_text,
5138 dhcp_enabled,
5139 dhcp_start_address,
5140 dhcp_end_address,
5141 fence_mode,
5142 isshared,
5143 )
5144 else:
5145 fence_mode = "bridged"
5146 data = """ <OrgVdcNetwork name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5">
5147 <Description>Openmano created</Description>
5148 <Configuration>
5149 <IpScopes>
5150 <IpScope>
5151 <IsInherited>{1:s}</IsInherited>
5152 <Gateway>{2:s}</Gateway>
5153 <Netmask>{3:s}</Netmask>
5154 <Dns1>{4:s}</Dns1>{5:s}
5155 <IsEnabled>{6:s}</IsEnabled>
5156 <IpRanges>
5157 <IpRange>
5158 <StartAddress>{7:s}</StartAddress>
5159 <EndAddress>{8:s}</EndAddress>
5160 </IpRange>
5161 </IpRanges>
5162 </IpScope>
5163 </IpScopes>
5164 <ParentNetwork href="{9:s}"/>
5165 <FenceMode>{10:s}</FenceMode>
5166 </Configuration>
5167 <IsShared>{11:s}</IsShared>
5168 </OrgVdcNetwork> """.format(
5169 escape(network_name),
5170 is_inherited,
5171 gateway_address,
5172 subnet_address,
5173 dns1,
5174 dns2_text,
5175 dhcp_enabled,
5176 dhcp_start_address,
5177 dhcp_end_address,
5178 available_networks,
5179 fence_mode,
5180 isshared,
5181 )
5182
5183 headers["Content-Type"] = "application/vnd.vmware.vcloud.orgVdcNetwork+xml"
5184 try:
5185 response = self.perform_request(
5186 req_type="POST", url=add_vdc_rest_url, headers=headers, data=data
5187 )
5188
5189 if response.status_code != 201:
5190 self.logger.debug(
5191 "Create Network POST REST API call failed. "
5192 "Return status code {}, response.text: {}".format(
5193 response.status_code, response.text
5194 )
5195 )
5196 else:
5197 network_task = self.get_task_from_response(response.text)
5198 self.logger.debug(
5199 "Create Network REST : Waiting for Network creation complete"
5200 )
5201 time.sleep(5)
5202 result = self.client.get_task_monitor().wait_for_success(
5203 task=network_task
5204 )
5205
5206 if result.get("status") == "success":
5207 return response.text
5208 else:
5209 self.logger.debug(
5210 "create_network_rest task failed. Network Create response : {}".format(
5211 response.text
5212 )
5213 )
5214 except Exception as exp:
5215 self.logger.debug("create_network_rest : Exception : {} ".format(exp))
5216
5217 return None
5218
5219 def convert_cidr_to_netmask(self, cidr_ip=None):
5220 """
5221 Method sets convert CIDR netmask address to normal IP format
5222 Args:
5223 cidr_ip : CIDR IP address
5224 Returns:
5225 netmask : Converted netmask
5226 """
5227 if cidr_ip is not None:
5228 if "/" in cidr_ip:
5229 _, net_bits = cidr_ip.split("/")
5230 netmask = socket.inet_ntoa(
5231 struct.pack(">I", (0xFFFFFFFF << (32 - int(net_bits))) & 0xFFFFFFFF)
5232 )
5233 else:
5234 netmask = cidr_ip
5235
5236 return netmask
5237
5238 return None
5239
5240 def get_provider_rest(self, vca=None):
5241 """
5242 Method gets provider vdc view from vcloud director
5243
5244 Args:
5245 network_name - is network name to be created.
5246 parent_network_uuid - is parent provider vdc network that will be used for mapping.
5247 It optional attribute. by default if no parent network indicate the first available will be used.
5248
5249 Returns:
5250 The return xml content of respond or None
5251 """
5252 url_list = [self.url, "/api/admin"]
5253
5254 if vca:
5255 headers = {
5256 "Accept": "application/*+xml;version=" + API_VERSION,
5257 "x-vcloud-authorization": self.client._session.headers[
5258 "x-vcloud-authorization"
5259 ],
5260 }
5261 response = self.perform_request(
5262 req_type="GET", url="".join(url_list), headers=headers
5263 )
5264
5265 if response.status_code == requests.codes.ok:
5266 return response.text
5267
5268 return None
5269
5270 def create_vdc(self, vdc_name=None):
5271 vdc_dict = {}
5272 xml_content = self.create_vdc_from_tmpl_rest(vdc_name=vdc_name)
5273
5274 if xml_content is not None:
5275 try:
5276 task_resp_xmlroot = XmlElementTree.fromstring(xml_content)
5277 for child in task_resp_xmlroot:
5278 if child.tag.split("}")[1] == "Owner":
5279 vdc_id = child.attrib.get("href").split("/")[-1]
5280 vdc_dict[vdc_id] = task_resp_xmlroot.get("href")
5281
5282 return vdc_dict
5283 except Exception:
5284 self.logger.debug("Respond body {}".format(xml_content))
5285
5286 return None
5287
5288 def create_vdc_from_tmpl_rest(self, vdc_name=None):
5289 """
5290 Method create vdc in vCloud director based on VDC template.
5291 it uses pre-defined template.
5292
5293 Args:
5294 vdc_name - name of a new vdc.
5295
5296 Returns:
5297 The return xml content of respond or None
5298 """
5299 # pre-requesite atleast one vdc template should be available in vCD
5300 self.logger.info("Creating new vdc {}".format(vdc_name))
5301 vca = self.connect_as_admin()
5302
5303 if not vca:
5304 raise vimconn.VimConnConnectionException("Failed to connect vCD")
5305
5306 if vdc_name is None:
5307 return None
5308
5309 url_list = [self.url, "/api/vdcTemplates"]
5310 vm_list_rest_call = "".join(url_list)
5311 headers = {
5312 "Accept": "application/*+xml;version=" + API_VERSION,
5313 "x-vcloud-authorization": vca._session.headers["x-vcloud-authorization"],
5314 }
5315 response = self.perform_request(
5316 req_type="GET", url=vm_list_rest_call, headers=headers
5317 )
5318
5319 # container url to a template
5320 vdc_template_ref = None
5321 try:
5322 vm_list_xmlroot = XmlElementTree.fromstring(response.text)
5323 for child in vm_list_xmlroot:
5324 # application/vnd.vmware.admin.providervdc+xml
5325 # we need find a template from witch we instantiate VDC
5326 if child.tag.split("}")[1] == "VdcTemplate":
5327 if (
5328 child.attrib.get("type")
5329 == "application/vnd.vmware.admin.vdcTemplate+xml"
5330 ):
5331 vdc_template_ref = child.attrib.get("href")
5332 except Exception:
5333 self.logger.debug(
5334 "Failed parse respond for rest api call {}".format(vm_list_rest_call)
5335 )
5336 self.logger.debug("Respond body {}".format(response.text))
5337
5338 return None
5339
5340 # if we didn't found required pre defined template we return None
5341 if vdc_template_ref is None:
5342 return None
5343
5344 try:
5345 # instantiate vdc
5346 url_list = [self.url, "/api/org/", self.org_uuid, "/action/instantiate"]
5347 vm_list_rest_call = "".join(url_list)
5348 data = """<InstantiateVdcTemplateParams name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5">
5349 <Source href="{1:s}"></Source>
5350 <Description>opnemano</Description>
5351 </InstantiateVdcTemplateParams>""".format(
5352 vdc_name, vdc_template_ref
5353 )
5354 headers[
5355 "Content-Type"
5356 ] = "application/vnd.vmware.vcloud.instantiateVdcTemplateParams+xml"
5357 response = self.perform_request(
5358 req_type="POST", url=vm_list_rest_call, headers=headers, data=data
5359 )
5360 vdc_task = self.get_task_from_response(response.text)
5361 self.client.get_task_monitor().wait_for_success(task=vdc_task)
5362
5363 # if we all ok we respond with content otherwise by default None
5364 if response.status_code >= 200 and response.status_code < 300:
5365 return response.text
5366
5367 return None
5368 except Exception:
5369 self.logger.debug(
5370 "Failed parse respond for rest api call {}".format(vm_list_rest_call)
5371 )
5372 self.logger.debug("Respond body {}".format(response.text))
5373
5374 return None
5375
5376 def create_vdc_rest(self, vdc_name=None):
5377 """
5378 Method create network in vCloud director
5379
5380 Args:
5381 vdc_name - vdc name to be created
5382 Returns:
5383 The return response
5384 """
5385 self.logger.info("Creating new vdc {}".format(vdc_name))
5386 vca = self.connect_as_admin()
5387
5388 if not vca:
5389 raise vimconn.VimConnConnectionException("Failed to connect vCD")
5390
5391 if vdc_name is None:
5392 return None
5393
5394 url_list = [self.url, "/api/admin/org/", self.org_uuid]
5395 vm_list_rest_call = "".join(url_list)
5396
5397 if vca._session:
5398 headers = {
5399 "Accept": "application/*+xml;version=" + API_VERSION,
5400 "x-vcloud-authorization": self.client._session.headers[
5401 "x-vcloud-authorization"
5402 ],
5403 }
5404 response = self.perform_request(
5405 req_type="GET", url=vm_list_rest_call, headers=headers
5406 )
5407 provider_vdc_ref = None
5408 add_vdc_rest_url = None
5409 # available_networks = None
5410
5411 if response.status_code != requests.codes.ok:
5412 self.logger.debug(
5413 "REST API call {} failed. Return status code {}".format(
5414 vm_list_rest_call, response.status_code
5415 )
5416 )
5417
5418 return None
5419 else:
5420 try:
5421 vm_list_xmlroot = XmlElementTree.fromstring(response.text)
5422 for child in vm_list_xmlroot:
5423 # application/vnd.vmware.admin.providervdc+xml
5424 if child.tag.split("}")[1] == "Link":
5425 if (
5426 child.attrib.get("type")
5427 == "application/vnd.vmware.admin.createVdcParams+xml"
5428 and child.attrib.get("rel") == "add"
5429 ):
5430 add_vdc_rest_url = child.attrib.get("href")
5431 except Exception:
5432 self.logger.debug(
5433 "Failed parse respond for rest api call {}".format(
5434 vm_list_rest_call
5435 )
5436 )
5437 self.logger.debug("Respond body {}".format(response.text))
5438
5439 return None
5440
5441 response = self.get_provider_rest(vca=vca)
5442 try:
5443 vm_list_xmlroot = XmlElementTree.fromstring(response)
5444 for child in vm_list_xmlroot:
5445 if child.tag.split("}")[1] == "ProviderVdcReferences":
5446 for sub_child in child:
5447 provider_vdc_ref = sub_child.attrib.get("href")
5448 except Exception:
5449 self.logger.debug(
5450 "Failed parse respond for rest api call {}".format(
5451 vm_list_rest_call
5452 )
5453 )
5454 self.logger.debug("Respond body {}".format(response))
5455
5456 return None
5457
5458 if add_vdc_rest_url is not None and provider_vdc_ref is not None:
5459 data = """ <CreateVdcParams name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5"><Description>{1:s}</Description>
5460 <AllocationModel>ReservationPool</AllocationModel>
5461 <ComputeCapacity><Cpu><Units>MHz</Units><Allocated>2048</Allocated><Limit>2048</Limit></Cpu>
5462 <Memory><Units>MB</Units><Allocated>2048</Allocated><Limit>2048</Limit></Memory>
5463 </ComputeCapacity><NicQuota>0</NicQuota><NetworkQuota>100</NetworkQuota>
5464 <VdcStorageProfile><Enabled>true</Enabled><Units>MB</Units><Limit>20480</Limit><Default>true</Default></VdcStorageProfile>
5465 <ProviderVdcReference
5466 name="Main Provider"
5467 href="{2:s}" />
5468 <UsesFastProvisioning>true</UsesFastProvisioning></CreateVdcParams>""".format(
5469 escape(vdc_name), escape(vdc_name), provider_vdc_ref
5470 )
5471 headers[
5472 "Content-Type"
5473 ] = "application/vnd.vmware.admin.createVdcParams+xml"
5474 response = self.perform_request(
5475 req_type="POST",
5476 url=add_vdc_rest_url,
5477 headers=headers,
5478 data=data,
5479 )
5480
5481 # if we all ok we respond with content otherwise by default None
5482 if response.status_code == 201:
5483 return response.text
5484
5485 return None
5486
5487 def get_vapp_details_rest(self, vapp_uuid=None, need_admin_access=False):
5488 """
5489 Method retrieve vapp detail from vCloud director
5490
5491 Args:
5492 vapp_uuid - is vapp identifier.
5493
5494 Returns:
5495 The return network uuid or return None
5496 """
5497 parsed_respond = {}
5498 vca = None
5499
5500 if need_admin_access:
5501 vca = self.connect_as_admin()
5502 else:
5503 vca = self.client
5504
5505 if not vca:
5506 raise vimconn.VimConnConnectionException("Failed to connect vCD")
5507 if vapp_uuid is None:
5508 return None
5509
5510 url_list = [self.url, "/api/vApp/vapp-", vapp_uuid]
5511 get_vapp_restcall = "".join(url_list)
5512
5513 if vca._session:
5514 headers = {
5515 "Accept": "application/*+xml;version=" + API_VERSION,
5516 "x-vcloud-authorization": vca._session.headers[
5517 "x-vcloud-authorization"
5518 ],
5519 }
5520 response = self.perform_request(
5521 req_type="GET", url=get_vapp_restcall, headers=headers
5522 )
5523
5524 if response.status_code == 403:
5525 if need_admin_access is False:
5526 response = self.retry_rest("GET", get_vapp_restcall)
5527
5528 if response.status_code != requests.codes.ok:
5529 self.logger.debug(
5530 "REST API call {} failed. Return status code {}".format(
5531 get_vapp_restcall, response.status_code
5532 )
5533 )
5534
5535 return parsed_respond
5536
5537 try:
5538 xmlroot_respond = XmlElementTree.fromstring(response.text)
5539 parsed_respond["ovfDescriptorUploaded"] = xmlroot_respond.attrib[
5540 "ovfDescriptorUploaded"
5541 ]
5542 namespaces = {
5543 "vssd": "http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData",
5544 "ovf": "http://schemas.dmtf.org/ovf/envelope/1",
5545 "vmw": "http://www.vmware.com/schema/ovf",
5546 "vm": "http://www.vmware.com/vcloud/v1.5",
5547 "rasd": "http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData",
5548 "vmext": "http://www.vmware.com/vcloud/extension/v1.5",
5549 "xmlns": "http://www.vmware.com/vcloud/v1.5",
5550 }
5551
5552 created_section = xmlroot_respond.find("vm:DateCreated", namespaces)
5553 if created_section is not None:
5554 parsed_respond["created"] = created_section.text
5555
5556 network_section = xmlroot_respond.find(
5557 "vm:NetworkConfigSection/vm:NetworkConfig", namespaces
5558 )
5559 if (
5560 network_section is not None
5561 and "networkName" in network_section.attrib
5562 ):
5563 parsed_respond["networkname"] = network_section.attrib[
5564 "networkName"
5565 ]
5566
5567 ipscopes_section = xmlroot_respond.find(
5568 "vm:NetworkConfigSection/vm:NetworkConfig/vm:Configuration/vm:IpScopes",
5569 namespaces,
5570 )
5571 if ipscopes_section is not None:
5572 for ipscope in ipscopes_section:
5573 for scope in ipscope:
5574 tag_key = scope.tag.split("}")[1]
5575 if tag_key == "IpRanges":
5576 ip_ranges = scope.getchildren()
5577 for ipblock in ip_ranges:
5578 for block in ipblock:
5579 parsed_respond[
5580 block.tag.split("}")[1]
5581 ] = block.text
5582 else:
5583 parsed_respond[tag_key] = scope.text
5584
5585 # parse children section for other attrib
5586 children_section = xmlroot_respond.find("vm:Children/", namespaces)
5587 if children_section is not None:
5588 parsed_respond["name"] = children_section.attrib["name"]
5589 parsed_respond["nestedHypervisorEnabled"] = (
5590 children_section.attrib["nestedHypervisorEnabled"]
5591 if "nestedHypervisorEnabled" in children_section.attrib
5592 else None
5593 )
5594 parsed_respond["deployed"] = children_section.attrib["deployed"]
5595 parsed_respond["status"] = children_section.attrib["status"]
5596 parsed_respond["vmuuid"] = children_section.attrib["id"].split(":")[
5597 -1
5598 ]
5599 network_adapter = children_section.find(
5600 "vm:NetworkConnectionSection", namespaces
5601 )
5602 nic_list = []
5603 for adapters in network_adapter:
5604 adapter_key = adapters.tag.split("}")[1]
5605 if adapter_key == "PrimaryNetworkConnectionIndex":
5606 parsed_respond["primarynetwork"] = adapters.text
5607
5608 if adapter_key == "NetworkConnection":
5609 vnic = {}
5610 if "network" in adapters.attrib:
5611 vnic["network"] = adapters.attrib["network"]
5612 for adapter in adapters:
5613 setting_key = adapter.tag.split("}")[1]
5614 vnic[setting_key] = adapter.text
5615 nic_list.append(vnic)
5616
5617 for link in children_section:
5618 if link.tag.split("}")[1] == "Link" and "rel" in link.attrib:
5619 if link.attrib["rel"] == "screen:acquireTicket":
5620 parsed_respond["acquireTicket"] = link.attrib
5621
5622 if link.attrib["rel"] == "screen:acquireMksTicket":
5623 parsed_respond["acquireMksTicket"] = link.attrib
5624
5625 parsed_respond["interfaces"] = nic_list
5626 vCloud_extension_section = children_section.find(
5627 "xmlns:VCloudExtension", namespaces
5628 )
5629 if vCloud_extension_section is not None:
5630 vm_vcenter_info = {}
5631 vim_info = vCloud_extension_section.find(
5632 "vmext:VmVimInfo", namespaces
5633 )
5634 vmext = vim_info.find("vmext:VmVimObjectRef", namespaces)
5635
5636 if vmext is not None:
5637 vm_vcenter_info["vm_moref_id"] = vmext.find(
5638 "vmext:MoRef", namespaces
5639 ).text
5640
5641 parsed_respond["vm_vcenter_info"] = vm_vcenter_info
5642
5643 virtual_hardware_section = children_section.find(
5644 "ovf:VirtualHardwareSection", namespaces
5645 )
5646 vm_virtual_hardware_info = {}
5647 if virtual_hardware_section is not None:
5648 for item in virtual_hardware_section.iterfind(
5649 "ovf:Item", namespaces
5650 ):
5651 if (
5652 item.find("rasd:Description", namespaces).text
5653 == "Hard disk"
5654 ):
5655 disk_size = item.find(
5656 "rasd:HostResource", namespaces
5657 ).attrib["{" + namespaces["vm"] + "}capacity"]
5658 vm_virtual_hardware_info["disk_size"] = disk_size
5659 break
5660
5661 for link in virtual_hardware_section:
5662 if (
5663 link.tag.split("}")[1] == "Link"
5664 and "rel" in link.attrib
5665 ):
5666 if link.attrib["rel"] == "edit" and link.attrib[
5667 "href"
5668 ].endswith("/disks"):
5669 vm_virtual_hardware_info[
5670 "disk_edit_href"
5671 ] = link.attrib["href"]
5672 break
5673
5674 parsed_respond["vm_virtual_hardware"] = vm_virtual_hardware_info
5675 except Exception as exp:
5676 self.logger.info(
5677 "Error occurred calling rest api for getting vApp details {}".format(
5678 exp
5679 )
5680 )
5681
5682 return parsed_respond
5683
5684 def acquire_console(self, vm_uuid=None):
5685 if vm_uuid is None:
5686 return None
5687
5688 if self.client._session:
5689 headers = {
5690 "Accept": "application/*+xml;version=" + API_VERSION,
5691 "x-vcloud-authorization": self.client._session.headers[
5692 "x-vcloud-authorization"
5693 ],
5694 }
5695 vm_dict = self.get_vapp_details_rest(vapp_uuid=vm_uuid)
5696 console_dict = vm_dict["acquireTicket"]
5697 console_rest_call = console_dict["href"]
5698
5699 response = self.perform_request(
5700 req_type="POST", url=console_rest_call, headers=headers
5701 )
5702
5703 if response.status_code == 403:
5704 response = self.retry_rest("POST", console_rest_call)
5705
5706 if response.status_code == requests.codes.ok:
5707 return response.text
5708
5709 return None
5710
5711 def modify_vm_disk(self, vapp_uuid, flavor_disk):
5712 """
5713 Method retrieve vm disk details
5714
5715 Args:
5716 vapp_uuid - is vapp identifier.
5717 flavor_disk - disk size as specified in VNFD (flavor)
5718
5719 Returns:
5720 The return network uuid or return None
5721 """
5722 status = None
5723 try:
5724 # Flavor disk is in GB convert it into MB
5725 flavor_disk = int(flavor_disk) * 1024
5726 vm_details = self.get_vapp_details_rest(vapp_uuid)
5727
5728 if vm_details:
5729 vm_name = vm_details["name"]
5730 self.logger.info("VM: {} flavor_disk :{}".format(vm_name, flavor_disk))
5731
5732 if vm_details and "vm_virtual_hardware" in vm_details:
5733 vm_disk = int(vm_details["vm_virtual_hardware"]["disk_size"])
5734 disk_edit_href = vm_details["vm_virtual_hardware"]["disk_edit_href"]
5735 self.logger.info("VM: {} VM_disk :{}".format(vm_name, vm_disk))
5736
5737 if flavor_disk > vm_disk:
5738 status = self.modify_vm_disk_rest(disk_edit_href, flavor_disk)
5739 self.logger.info(
5740 "Modify disk of VM {} from {} to {} MB".format(
5741 vm_name, vm_disk, flavor_disk
5742 )
5743 )
5744 else:
5745 status = True
5746 self.logger.info("No need to modify disk of VM {}".format(vm_name))
5747
5748 return status
5749 except Exception as exp:
5750 self.logger.info("Error occurred while modifing disk size {}".format(exp))
5751
5752 def modify_vm_disk_rest(self, disk_href, disk_size):
5753 """
5754 Method retrieve modify vm disk size
5755
5756 Args:
5757 disk_href - vCD API URL to GET and PUT disk data
5758 disk_size - disk size as specified in VNFD (flavor)
5759
5760 Returns:
5761 The return network uuid or return None
5762 """
5763 if disk_href is None or disk_size is None:
5764 return None
5765
5766 if self.client._session:
5767 headers = {
5768 "Accept": "application/*+xml;version=" + API_VERSION,
5769 "x-vcloud-authorization": self.client._session.headers[
5770 "x-vcloud-authorization"
5771 ],
5772 }
5773 response = self.perform_request(
5774 req_type="GET", url=disk_href, headers=headers
5775 )
5776
5777 if response.status_code == 403:
5778 response = self.retry_rest("GET", disk_href)
5779
5780 if response.status_code != requests.codes.ok:
5781 self.logger.debug(
5782 "GET REST API call {} failed. Return status code {}".format(
5783 disk_href, response.status_code
5784 )
5785 )
5786
5787 return None
5788
5789 try:
5790 lxmlroot_respond = lxmlElementTree.fromstring(response.content)
5791 namespaces = {
5792 prefix: uri for prefix, uri in lxmlroot_respond.nsmap.items() if prefix
5793 }
5794 namespaces["xmlns"] = "http://www.vmware.com/vcloud/v1.5"
5795
5796 for item in lxmlroot_respond.iterfind("xmlns:Item", namespaces):
5797 if item.find("rasd:Description", namespaces).text == "Hard disk":
5798 disk_item = item.find("rasd:HostResource", namespaces)
5799 if disk_item is not None:
5800 disk_item.attrib["{" + namespaces["xmlns"] + "}capacity"] = str(
5801 disk_size
5802 )
5803 break
5804
5805 data = lxmlElementTree.tostring(
5806 lxmlroot_respond, encoding="utf8", method="xml", xml_declaration=True
5807 )
5808
5809 # Send PUT request to modify disk size
5810 headers[
5811 "Content-Type"
5812 ] = "application/vnd.vmware.vcloud.rasdItemsList+xml; charset=ISO-8859-1"
5813
5814 response = self.perform_request(
5815 req_type="PUT", url=disk_href, headers=headers, data=data
5816 )
5817 if response.status_code == 403:
5818 add_headers = {"Content-Type": headers["Content-Type"]}
5819 response = self.retry_rest("PUT", disk_href, add_headers, data)
5820
5821 if response.status_code != 202:
5822 self.logger.debug(
5823 "PUT REST API call {} failed. Return status code {}".format(
5824 disk_href, response.status_code
5825 )
5826 )
5827 else:
5828 modify_disk_task = self.get_task_from_response(response.text)
5829 result = self.client.get_task_monitor().wait_for_success(
5830 task=modify_disk_task
5831 )
5832 if result.get("status") == "success":
5833 return True
5834 else:
5835 return False
5836
5837 return None
5838 except Exception as exp:
5839 self.logger.info(
5840 "Error occurred calling rest api for modifing disk size {}".format(exp)
5841 )
5842
5843 return None
5844
5845 def add_serial_device(self, vapp_uuid):
5846 """
5847 Method to attach a serial device to a VM
5848
5849 Args:
5850 vapp_uuid - uuid of vApp/VM
5851
5852 Returns:
5853 """
5854 self.logger.info("Add serial devices into vApp {}".format(vapp_uuid))
5855 _, content = self.get_vcenter_content()
5856 vm_moref_id = self.get_vm_moref_id(vapp_uuid)
5857
5858 if vm_moref_id:
5859 try:
5860 host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
5861 self.logger.info(
5862 "VM {} is currently on host {}".format(vm_obj, host_obj)
5863 )
5864 if host_obj and vm_obj:
5865 spec = vim.vm.ConfigSpec()
5866 spec.deviceChange = []
5867 serial_spec = vim.vm.device.VirtualDeviceSpec()
5868 serial_spec.operation = "add"
5869 serial_port = vim.vm.device.VirtualSerialPort()
5870 serial_port.yieldOnPoll = True
5871 backing = serial_port.URIBackingInfo()
5872 backing.serviceURI = "tcp://:65500"
5873 backing.direction = "server"
5874 serial_port.backing = backing
5875 serial_spec.device = serial_port
5876 spec.deviceChange.append(serial_spec)
5877 vm_obj.ReconfigVM_Task(spec=spec)
5878 self.logger.info("Adding serial device to VM {}".format(vm_obj))
5879 except vmodl.MethodFault as error:
5880 self.logger.error("Error occurred while adding PCI devices {} ", error)
5881
5882 def add_pci_devices(self, vapp_uuid, pci_devices, vmname_andid):
5883 """
5884 Method to attach pci devices to VM
5885
5886 Args:
5887 vapp_uuid - uuid of vApp/VM
5888 pci_devices - pci devices infromation as specified in VNFD (flavor)
5889
5890 Returns:
5891 The status of add pci device task , vm object and
5892 vcenter_conect object
5893 """
5894 vm_obj = None
5895 self.logger.info(
5896 "Add pci devices {} into vApp {}".format(pci_devices, vapp_uuid)
5897 )
5898 vcenter_conect, content = self.get_vcenter_content()
5899 vm_moref_id = self.get_vm_moref_id(vapp_uuid)
5900
5901 if vm_moref_id:
5902 try:
5903 no_of_pci_devices = len(pci_devices)
5904 if no_of_pci_devices > 0:
5905 # Get VM and its host
5906 host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
5907 self.logger.info(
5908 "VM {} is currently on host {}".format(vm_obj, host_obj)
5909 )
5910
5911 if host_obj and vm_obj:
5912 # get PCI devies from host on which vapp is currently installed
5913 avilable_pci_devices = self.get_pci_devices(
5914 host_obj, no_of_pci_devices
5915 )
5916
5917 if avilable_pci_devices is None:
5918 # find other hosts with active pci devices
5919 (
5920 new_host_obj,
5921 avilable_pci_devices,
5922 ) = self.get_host_and_PCIdevices(content, no_of_pci_devices)
5923
5924 if (
5925 new_host_obj is not None
5926 and avilable_pci_devices is not None
5927 and len(avilable_pci_devices) > 0
5928 ):
5929 # Migrate vm to the host where PCI devices are availble
5930 self.logger.info(
5931 "Relocate VM {} on new host {}".format(
5932 vm_obj, new_host_obj
5933 )
5934 )
5935
5936 task = self.relocate_vm(new_host_obj, vm_obj)
5937 if task is not None:
5938 result = self.wait_for_vcenter_task(
5939 task, vcenter_conect
5940 )
5941 self.logger.info(
5942 "Migrate VM status: {}".format(result)
5943 )
5944 host_obj = new_host_obj
5945 else:
5946 self.logger.info(
5947 "Fail to migrate VM : {}".format(result)
5948 )
5949 raise vimconn.VimConnNotFoundException(
5950 "Fail to migrate VM : {} to host {}".format(
5951 vmname_andid, new_host_obj
5952 )
5953 )
5954
5955 if (
5956 host_obj is not None
5957 and avilable_pci_devices is not None
5958 and len(avilable_pci_devices) > 0
5959 ):
5960 # Add PCI devices one by one
5961 for pci_device in avilable_pci_devices:
5962 task = self.add_pci_to_vm(host_obj, vm_obj, pci_device)
5963 if task:
5964 status = self.wait_for_vcenter_task(
5965 task, vcenter_conect
5966 )
5967
5968 if status:
5969 self.logger.info(
5970 "Added PCI device {} to VM {}".format(
5971 pci_device, str(vm_obj)
5972 )
5973 )
5974 else:
5975 self.logger.error(
5976 "Fail to add PCI device {} to VM {}".format(
5977 pci_device, str(vm_obj)
5978 )
5979 )
5980
5981 return True, vm_obj, vcenter_conect
5982 else:
5983 self.logger.error(
5984 "Currently there is no host with"
5985 " {} number of avaialble PCI devices required for VM {}".format(
5986 no_of_pci_devices, vmname_andid
5987 )
5988 )
5989
5990 raise vimconn.VimConnNotFoundException(
5991 "Currently there is no host with {} "
5992 "number of avaialble PCI devices required for VM {}".format(
5993 no_of_pci_devices, vmname_andid
5994 )
5995 )
5996 else:
5997 self.logger.debug(
5998 "No infromation about PCI devices {} ", pci_devices
5999 )
6000 except vmodl.MethodFault as error:
6001 self.logger.error("Error occurred while adding PCI devices {} ", error)
6002
6003 return None, vm_obj, vcenter_conect
6004
6005 def get_vm_obj(self, content, mob_id):
6006 """
6007 Method to get the vsphere VM object associated with a given morf ID
6008 Args:
6009 vapp_uuid - uuid of vApp/VM
6010 content - vCenter content object
6011 mob_id - mob_id of VM
6012
6013 Returns:
6014 VM and host object
6015 """
6016 vm_obj = None
6017 host_obj = None
6018
6019 try:
6020 container = content.viewManager.CreateContainerView(
6021 content.rootFolder, [vim.VirtualMachine], True
6022 )
6023 for vm in container.view:
6024 mobID = vm._GetMoId()
6025
6026 if mobID == mob_id:
6027 vm_obj = vm
6028 host_obj = vm_obj.runtime.host
6029 break
6030 except Exception as exp:
6031 self.logger.error("Error occurred while finding VM object : {}".format(exp))
6032
6033 return host_obj, vm_obj
6034
6035 def get_pci_devices(self, host, need_devices):
6036 """
6037 Method to get the details of pci devices on given host
6038 Args:
6039 host - vSphere host object
6040 need_devices - number of pci devices needed on host
6041
6042 Returns:
6043 array of pci devices
6044 """
6045 all_devices = []
6046 all_device_ids = []
6047 used_devices_ids = []
6048
6049 try:
6050 if host:
6051 pciPassthruInfo = host.config.pciPassthruInfo
6052 pciDevies = host.hardware.pciDevice
6053
6054 for pci_status in pciPassthruInfo:
6055 if pci_status.passthruActive:
6056 for device in pciDevies:
6057 if device.id == pci_status.id:
6058 all_device_ids.append(device.id)
6059 all_devices.append(device)
6060
6061 # check if devices are in use
6062 avalible_devices = all_devices
6063 for vm in host.vm:
6064 if vm.runtime.powerState == vim.VirtualMachinePowerState.poweredOn:
6065 vm_devices = vm.config.hardware.device
6066 for device in vm_devices:
6067 if type(device) is vim.vm.device.VirtualPCIPassthrough:
6068 if device.backing.id in all_device_ids:
6069 for use_device in avalible_devices:
6070 if use_device.id == device.backing.id:
6071 avalible_devices.remove(use_device)
6072
6073 used_devices_ids.append(device.backing.id)
6074 self.logger.debug(
6075 "Device {} from devices {}"
6076 "is in use".format(device.backing.id, device)
6077 )
6078 if len(avalible_devices) < need_devices:
6079 self.logger.debug(
6080 "Host {} don't have {} number of active devices".format(
6081 host, need_devices
6082 )
6083 )
6084 self.logger.debug(
6085 "found only {} devices {}".format(
6086 len(avalible_devices), avalible_devices
6087 )
6088 )
6089
6090 return None
6091 else:
6092 required_devices = avalible_devices[:need_devices]
6093 self.logger.info(
6094 "Found {} PCI devices on host {} but required only {}".format(
6095 len(avalible_devices), host, need_devices
6096 )
6097 )
6098 self.logger.info(
6099 "Retruning {} devices as {}".format(need_devices, required_devices)
6100 )
6101
6102 return required_devices
6103 except Exception as exp:
6104 self.logger.error(
6105 "Error {} occurred while finding pci devices on host: {}".format(
6106 exp, host
6107 )
6108 )
6109
6110 return None
6111
6112 def get_host_and_PCIdevices(self, content, need_devices):
6113 """
6114 Method to get the details of pci devices infromation on all hosts
6115
6116 Args:
6117 content - vSphere host object
6118 need_devices - number of pci devices needed on host
6119
6120 Returns:
6121 array of pci devices and host object
6122 """
6123 host_obj = None
6124 pci_device_objs = None
6125
6126 try:
6127 if content:
6128 container = content.viewManager.CreateContainerView(
6129 content.rootFolder, [vim.HostSystem], True
6130 )
6131 for host in container.view:
6132 devices = self.get_pci_devices(host, need_devices)
6133
6134 if devices:
6135 host_obj = host
6136 pci_device_objs = devices
6137 break
6138 except Exception as exp:
6139 self.logger.error(
6140 "Error {} occurred while finding pci devices on host: {}".format(
6141 exp, host_obj
6142 )
6143 )
6144
6145 return host_obj, pci_device_objs
6146
6147 def relocate_vm(self, dest_host, vm):
6148 """
6149 Method to get the relocate VM to new host
6150
6151 Args:
6152 dest_host - vSphere host object
6153 vm - vSphere VM object
6154
6155 Returns:
6156 task object
6157 """
6158 task = None
6159
6160 try:
6161 relocate_spec = vim.vm.RelocateSpec(host=dest_host)
6162 task = vm.Relocate(relocate_spec)
6163 self.logger.info(
6164 "Migrating {} to destination host {}".format(vm, dest_host)
6165 )
6166 except Exception as exp:
6167 self.logger.error(
6168 "Error occurred while relocate VM {} to new host {}: {}".format(
6169 dest_host, vm, exp
6170 )
6171 )
6172
6173 return task
6174
6175 def wait_for_vcenter_task(self, task, actionName="job", hideResult=False):
6176 """
6177 Waits and provides updates on a vSphere task
6178 """
6179 while task.info.state == vim.TaskInfo.State.running:
6180 time.sleep(2)
6181
6182 if task.info.state == vim.TaskInfo.State.success:
6183 if task.info.result is not None and not hideResult:
6184 self.logger.info(
6185 "{} completed successfully, result: {}".format(
6186 actionName, task.info.result
6187 )
6188 )
6189 else:
6190 self.logger.info("Task {} completed successfully.".format(actionName))
6191 else:
6192 self.logger.error(
6193 "{} did not complete successfully: {} ".format(
6194 actionName, task.info.error
6195 )
6196 )
6197
6198 return task.info.result
6199
6200 def add_pci_to_vm(self, host_object, vm_object, host_pci_dev):
6201 """
6202 Method to add pci device in given VM
6203
6204 Args:
6205 host_object - vSphere host object
6206 vm_object - vSphere VM object
6207 host_pci_dev - host_pci_dev must be one of the devices from the
6208 host_object.hardware.pciDevice list
6209 which is configured as a PCI passthrough device
6210
6211 Returns:
6212 task object
6213 """
6214 task = None
6215
6216 if vm_object and host_object and host_pci_dev:
6217 try:
6218 # Add PCI device to VM
6219 pci_passthroughs = vm_object.environmentBrowser.QueryConfigTarget(
6220 host=None
6221 ).pciPassthrough
6222 systemid_by_pciid = {
6223 item.pciDevice.id: item.systemId for item in pci_passthroughs
6224 }
6225
6226 if host_pci_dev.id not in systemid_by_pciid:
6227 self.logger.error(
6228 "Device {} is not a passthrough device ".format(host_pci_dev)
6229 )
6230 return None
6231
6232 deviceId = hex(host_pci_dev.deviceId % 2 ** 16).lstrip("0x")
6233 backing = vim.VirtualPCIPassthroughDeviceBackingInfo(
6234 deviceId=deviceId,
6235 id=host_pci_dev.id,
6236 systemId=systemid_by_pciid[host_pci_dev.id],
6237 vendorId=host_pci_dev.vendorId,
6238 deviceName=host_pci_dev.deviceName,
6239 )
6240
6241 hba_object = vim.VirtualPCIPassthrough(key=-100, backing=backing)
6242 new_device_config = vim.VirtualDeviceConfigSpec(device=hba_object)
6243 new_device_config.operation = "add"
6244 vmConfigSpec = vim.vm.ConfigSpec()
6245 vmConfigSpec.deviceChange = [new_device_config]
6246 task = vm_object.ReconfigVM_Task(spec=vmConfigSpec)
6247 self.logger.info(
6248 "Adding PCI device {} into VM {} from host {} ".format(
6249 host_pci_dev, vm_object, host_object
6250 )
6251 )
6252 except Exception as exp:
6253 self.logger.error(
6254 "Error occurred while adding pci devive {} to VM {}: {}".format(
6255 host_pci_dev, vm_object, exp
6256 )
6257 )
6258
6259 return task
6260
6261 def get_vm_vcenter_info(self):
6262 """
6263 Method to get details of vCenter and vm
6264
6265 Args:
6266 vapp_uuid - uuid of vApp or VM
6267
6268 Returns:
6269 Moref Id of VM and deails of vCenter
6270 """
6271 vm_vcenter_info = {}
6272
6273 if self.vcenter_ip is not None:
6274 vm_vcenter_info["vm_vcenter_ip"] = self.vcenter_ip
6275 else:
6276 raise vimconn.VimConnException(
6277 message="vCenter IP is not provided."
6278 " Please provide vCenter IP while attaching datacenter "
6279 "to tenant in --config"
6280 )
6281
6282 if self.vcenter_port is not None:
6283 vm_vcenter_info["vm_vcenter_port"] = self.vcenter_port
6284 else:
6285 raise vimconn.VimConnException(
6286 message="vCenter port is not provided."
6287 " Please provide vCenter port while attaching datacenter "
6288 "to tenant in --config"
6289 )
6290
6291 if self.vcenter_user is not None:
6292 vm_vcenter_info["vm_vcenter_user"] = self.vcenter_user
6293 else:
6294 raise vimconn.VimConnException(
6295 message="vCenter user is not provided."
6296 " Please provide vCenter user while attaching datacenter "
6297 "to tenant in --config"
6298 )
6299
6300 if self.vcenter_password is not None:
6301 vm_vcenter_info["vm_vcenter_password"] = self.vcenter_password
6302 else:
6303 raise vimconn.VimConnException(
6304 message="vCenter user password is not provided."
6305 " Please provide vCenter user password while attaching datacenter "
6306 "to tenant in --config"
6307 )
6308
6309 return vm_vcenter_info
6310
6311 def get_vm_pci_details(self, vmuuid):
6312 """
6313 Method to get VM PCI device details from vCenter
6314
6315 Args:
6316 vm_obj - vSphere VM object
6317
6318 Returns:
6319 dict of PCI devives attached to VM
6320
6321 """
6322 vm_pci_devices_info = {}
6323
6324 try:
6325 _, content = self.get_vcenter_content()
6326 vm_moref_id = self.get_vm_moref_id(vmuuid)
6327 if vm_moref_id:
6328 # Get VM and its host
6329 if content:
6330 host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
6331 if host_obj and vm_obj:
6332 vm_pci_devices_info["host_name"] = host_obj.name
6333 vm_pci_devices_info["host_ip"] = host_obj.config.network.vnic[
6334 0
6335 ].spec.ip.ipAddress
6336
6337 for device in vm_obj.config.hardware.device:
6338 if type(device) == vim.vm.device.VirtualPCIPassthrough:
6339 device_details = {
6340 "devide_id": device.backing.id,
6341 "pciSlotNumber": device.slotInfo.pciSlotNumber,
6342 }
6343 vm_pci_devices_info[
6344 device.deviceInfo.label
6345 ] = device_details
6346 else:
6347 self.logger.error(
6348 "Can not connect to vCenter while getting "
6349 "PCI devices infromationn"
6350 )
6351
6352 return vm_pci_devices_info
6353 except Exception as exp:
6354 self.logger.error(
6355 "Error occurred while getting VM information" " for VM : {}".format(exp)
6356 )
6357
6358 raise vimconn.VimConnException(message=exp)
6359
6360 def reserve_memory_for_all_vms(self, vapp, memory_mb):
6361 """
6362 Method to reserve memory for all VMs
6363 Args :
6364 vapp - VApp
6365 memory_mb - Memory in MB
6366 Returns:
6367 None
6368 """
6369 self.logger.info("Reserve memory for all VMs")
6370
6371 for vms in vapp.get_all_vms():
6372 vm_id = vms.get("id").split(":")[-1]
6373 url_rest_call = "{}/api/vApp/vm-{}/virtualHardwareSection/memory".format(
6374 self.url, vm_id
6375 )
6376 headers = {
6377 "Accept": "application/*+xml;version=" + API_VERSION,
6378 "x-vcloud-authorization": self.client._session.headers[
6379 "x-vcloud-authorization"
6380 ],
6381 }
6382 headers["Content-Type"] = "application/vnd.vmware.vcloud.rasdItem+xml"
6383 response = self.perform_request(
6384 req_type="GET", url=url_rest_call, headers=headers
6385 )
6386
6387 if response.status_code == 403:
6388 response = self.retry_rest("GET", url_rest_call)
6389
6390 if response.status_code != 200:
6391 self.logger.error(
6392 "REST call {} failed reason : {}"
6393 "status code : {}".format(
6394 url_rest_call, response.text, response.status_code
6395 )
6396 )
6397 raise vimconn.VimConnException(
6398 "reserve_memory_for_all_vms : Failed to get " "memory"
6399 )
6400
6401 bytexml = bytes(bytearray(response.text, encoding="utf-8"))
6402 contentelem = lxmlElementTree.XML(bytexml)
6403 namespaces = {
6404 prefix: uri for prefix, uri in contentelem.nsmap.items() if prefix
6405 }
6406 namespaces["xmlns"] = "http://www.vmware.com/vcloud/v1.5"
6407
6408 # Find the reservation element in the response
6409 memelem_list = contentelem.findall(".//rasd:Reservation", namespaces)
6410 for memelem in memelem_list:
6411 memelem.text = str(memory_mb)
6412
6413 newdata = lxmlElementTree.tostring(contentelem, pretty_print=True)
6414
6415 response = self.perform_request(
6416 req_type="PUT", url=url_rest_call, headers=headers, data=newdata
6417 )
6418
6419 if response.status_code == 403:
6420 add_headers = {"Content-Type": headers["Content-Type"]}
6421 response = self.retry_rest("PUT", url_rest_call, add_headers, newdata)
6422
6423 if response.status_code != 202:
6424 self.logger.error(
6425 "REST call {} failed reason : {}"
6426 "status code : {} ".format(
6427 url_rest_call, response.text, response.status_code
6428 )
6429 )
6430 raise vimconn.VimConnException(
6431 "reserve_memory_for_all_vms : Failed to update "
6432 "virtual hardware memory section"
6433 )
6434 else:
6435 mem_task = self.get_task_from_response(response.text)
6436 result = self.client.get_task_monitor().wait_for_success(task=mem_task)
6437
6438 if result.get("status") == "success":
6439 self.logger.info(
6440 "reserve_memory_for_all_vms(): VM {} succeeded ".format(vm_id)
6441 )
6442 else:
6443 self.logger.error(
6444 "reserve_memory_for_all_vms(): VM {} failed ".format(vm_id)
6445 )
6446
6447 def connect_vapp_to_org_vdc_network(self, vapp_id, net_name):
6448 """
6449 Configure VApp network config with org vdc network
6450 Args :
6451 vapp - VApp
6452 Returns:
6453 None
6454 """
6455
6456 self.logger.info(
6457 "Connecting vapp {} to org vdc network {}".format(vapp_id, net_name)
6458 )
6459
6460 url_rest_call = "{}/api/vApp/vapp-{}/networkConfigSection/".format(
6461 self.url, vapp_id
6462 )
6463
6464 headers = {
6465 "Accept": "application/*+xml;version=" + API_VERSION,
6466 "x-vcloud-authorization": self.client._session.headers[
6467 "x-vcloud-authorization"
6468 ],
6469 }
6470 response = self.perform_request(
6471 req_type="GET", url=url_rest_call, headers=headers
6472 )
6473
6474 if response.status_code == 403:
6475 response = self.retry_rest("GET", url_rest_call)
6476
6477 if response.status_code != 200:
6478 self.logger.error(
6479 "REST call {} failed reason : {}"
6480 "status code : {}".format(
6481 url_rest_call, response.text, response.status_code
6482 )
6483 )
6484 raise vimconn.VimConnException(
6485 "connect_vapp_to_org_vdc_network : Failed to get "
6486 "network config section"
6487 )
6488
6489 data = response.text
6490 headers[
6491 "Content-Type"
6492 ] = "application/vnd.vmware.vcloud.networkConfigSection+xml"
6493 net_id = self.get_network_id_by_name(net_name)
6494 if not net_id:
6495 raise vimconn.VimConnException(
6496 "connect_vapp_to_org_vdc_network : Failed to find " "existing network"
6497 )
6498
6499 bytexml = bytes(bytearray(data, encoding="utf-8"))
6500 newelem = lxmlElementTree.XML(bytexml)
6501 namespaces = {prefix: uri for prefix, uri in newelem.nsmap.items() if prefix}
6502 namespaces["xmlns"] = "http://www.vmware.com/vcloud/v1.5"
6503 nwcfglist = newelem.findall(".//xmlns:NetworkConfig", namespaces)
6504
6505 # VCD 9.7 returns an incorrect parentnetwork element. Fix it before PUT operation
6506 parentnetworklist = newelem.findall(".//xmlns:ParentNetwork", namespaces)
6507 if parentnetworklist:
6508 for pn in parentnetworklist:
6509 if "href" not in pn.keys():
6510 id_val = pn.get("id")
6511 href_val = "{}/api/network/{}".format(self.url, id_val)
6512 pn.set("href", href_val)
6513
6514 newstr = """<NetworkConfig networkName="{}">
6515 <Configuration>
6516 <ParentNetwork href="{}/api/network/{}"/>
6517 <FenceMode>bridged</FenceMode>
6518 </Configuration>
6519 </NetworkConfig>
6520 """.format(
6521 net_name, self.url, net_id
6522 )
6523 newcfgelem = lxmlElementTree.fromstring(newstr)
6524 if nwcfglist:
6525 nwcfglist[0].addnext(newcfgelem)
6526
6527 newdata = lxmlElementTree.tostring(newelem, pretty_print=True)
6528
6529 response = self.perform_request(
6530 req_type="PUT", url=url_rest_call, headers=headers, data=newdata
6531 )
6532
6533 if response.status_code == 403:
6534 add_headers = {"Content-Type": headers["Content-Type"]}
6535 response = self.retry_rest("PUT", url_rest_call, add_headers, newdata)
6536
6537 if response.status_code != 202:
6538 self.logger.error(
6539 "REST call {} failed reason : {}"
6540 "status code : {} ".format(
6541 url_rest_call, response.text, response.status_code
6542 )
6543 )
6544 raise vimconn.VimConnException(
6545 "connect_vapp_to_org_vdc_network : Failed to update "
6546 "network config section"
6547 )
6548 else:
6549 vapp_task = self.get_task_from_response(response.text)
6550 result = self.client.get_task_monitor().wait_for_success(task=vapp_task)
6551 if result.get("status") == "success":
6552 self.logger.info(
6553 "connect_vapp_to_org_vdc_network(): Vapp {} connected to "
6554 "network {}".format(vapp_id, net_name)
6555 )
6556 else:
6557 self.logger.error(
6558 "connect_vapp_to_org_vdc_network(): Vapp {} failed to "
6559 "connect to network {}".format(vapp_id, net_name)
6560 )
6561
6562 def remove_primary_network_adapter_from_all_vms(self, vapp):
6563 """
6564 Method to remove network adapter type to vm
6565 Args :
6566 vapp - VApp
6567 Returns:
6568 None
6569 """
6570 self.logger.info("Removing network adapter from all VMs")
6571
6572 for vms in vapp.get_all_vms():
6573 vm_id = vms.get("id").split(":")[-1]
6574
6575 url_rest_call = "{}/api/vApp/vm-{}/networkConnectionSection/".format(
6576 self.url, vm_id
6577 )
6578
6579 headers = {
6580 "Accept": "application/*+xml;version=" + API_VERSION,
6581 "x-vcloud-authorization": self.client._session.headers[
6582 "x-vcloud-authorization"
6583 ],
6584 }
6585 response = self.perform_request(
6586 req_type="GET", url=url_rest_call, headers=headers
6587 )
6588
6589 if response.status_code == 403:
6590 response = self.retry_rest("GET", url_rest_call)
6591
6592 if response.status_code != 200:
6593 self.logger.error(
6594 "REST call {} failed reason : {}"
6595 "status code : {}".format(
6596 url_rest_call, response.text, response.status_code
6597 )
6598 )
6599 raise vimconn.VimConnException(
6600 "remove_primary_network_adapter : Failed to get "
6601 "network connection section"
6602 )
6603
6604 data = response.text
6605 data = data.split('<Link rel="edit"')[0]
6606
6607 headers[
6608 "Content-Type"
6609 ] = "application/vnd.vmware.vcloud.networkConnectionSection+xml"
6610
6611 newdata = """<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
6612 <NetworkConnectionSection xmlns="http://www.vmware.com/vcloud/v1.5"
6613 xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1"
6614 xmlns:vssd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData"
6615 xmlns:common="http://schemas.dmtf.org/wbem/wscim/1/common"
6616 xmlns:rasd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData"
6617 xmlns:vmw="http://www.vmware.com/schema/ovf"
6618 xmlns:ovfenv="http://schemas.dmtf.org/ovf/environment/1"
6619 xmlns:vmext="http://www.vmware.com/vcloud/extension/v1.5"
6620 xmlns:ns9="http://www.vmware.com/vcloud/versions"
6621 href="{url}" type="application/vnd.vmware.vcloud.networkConnectionSection+xml"
6622 ovf:required="false">
6623 <ovf:Info>Specifies the available VM network connections</ovf:Info>
6624 <PrimaryNetworkConnectionIndex>0</PrimaryNetworkConnectionIndex>
6625 <Link rel="edit" href="{url}"
6626 type="application/vnd.vmware.vcloud.networkConnectionSection+xml"/>
6627 </NetworkConnectionSection>""".format(
6628 url=url_rest_call
6629 )
6630 response = self.perform_request(
6631 req_type="PUT", url=url_rest_call, headers=headers, data=newdata
6632 )
6633
6634 if response.status_code == 403:
6635 add_headers = {"Content-Type": headers["Content-Type"]}
6636 response = self.retry_rest("PUT", url_rest_call, add_headers, newdata)
6637
6638 if response.status_code != 202:
6639 self.logger.error(
6640 "REST call {} failed reason : {}"
6641 "status code : {} ".format(
6642 url_rest_call, response.text, response.status_code
6643 )
6644 )
6645 raise vimconn.VimConnException(
6646 "remove_primary_network_adapter : Failed to update "
6647 "network connection section"
6648 )
6649 else:
6650 nic_task = self.get_task_from_response(response.text)
6651 result = self.client.get_task_monitor().wait_for_success(task=nic_task)
6652 if result.get("status") == "success":
6653 self.logger.info(
6654 "remove_primary_network_adapter(): VM {} conneced to "
6655 "default NIC type".format(vm_id)
6656 )
6657 else:
6658 self.logger.error(
6659 "remove_primary_network_adapter(): VM {} failed to "
6660 "connect NIC type".format(vm_id)
6661 )
6662
6663 def add_network_adapter_to_vms(
6664 self, vapp, network_name, primary_nic_index, nicIndex, net, nic_type=None
6665 ):
6666 """
6667 Method to add network adapter type to vm
6668 Args :
6669 network_name - name of network
6670 primary_nic_index - int value for primary nic index
6671 nicIndex - int value for nic index
6672 nic_type - specify model name to which add to vm
6673 Returns:
6674 None
6675 """
6676
6677 self.logger.info(
6678 "Add network adapter to VM: network_name {} nicIndex {} nic_type {}".format(
6679 network_name, nicIndex, nic_type
6680 )
6681 )
6682 try:
6683 ip_address = None
6684 floating_ip = False
6685 mac_address = None
6686 if "floating_ip" in net:
6687 floating_ip = net["floating_ip"]
6688
6689 # Stub for ip_address feature
6690 if "ip_address" in net:
6691 ip_address = net["ip_address"]
6692
6693 if "mac_address" in net:
6694 mac_address = net["mac_address"]
6695
6696 if floating_ip:
6697 allocation_mode = "POOL"
6698 elif ip_address:
6699 allocation_mode = "MANUAL"
6700 else:
6701 allocation_mode = "DHCP"
6702
6703 if not nic_type:
6704 for vms in vapp.get_all_vms():
6705 vm_id = vms.get("id").split(":")[-1]
6706
6707 url_rest_call = (
6708 "{}/api/vApp/vm-{}/networkConnectionSection/".format(
6709 self.url, vm_id
6710 )
6711 )
6712
6713 headers = {
6714 "Accept": "application/*+xml;version=" + API_VERSION,
6715 "x-vcloud-authorization": self.client._session.headers[
6716 "x-vcloud-authorization"
6717 ],
6718 }
6719 response = self.perform_request(
6720 req_type="GET", url=url_rest_call, headers=headers
6721 )
6722
6723 if response.status_code == 403:
6724 response = self.retry_rest("GET", url_rest_call)
6725
6726 if response.status_code != 200:
6727 self.logger.error(
6728 "REST call {} failed reason : {}"
6729 "status code : {}".format(
6730 url_rest_call, response.text, response.status_code
6731 )
6732 )
6733 raise vimconn.VimConnException(
6734 "add_network_adapter_to_vms : Failed to get "
6735 "network connection section"
6736 )
6737
6738 data = response.text
6739 data = data.split('<Link rel="edit"')[0]
6740 if "<PrimaryNetworkConnectionIndex>" not in data:
6741 self.logger.debug("add_network_adapter PrimaryNIC not in data")
6742 item = """<PrimaryNetworkConnectionIndex>{}</PrimaryNetworkConnectionIndex>
6743 <NetworkConnection network="{}">
6744 <NetworkConnectionIndex>{}</NetworkConnectionIndex>
6745 <IsConnected>true</IsConnected>
6746 <IpAddressAllocationMode>{}</IpAddressAllocationMode>
6747 </NetworkConnection>""".format(
6748 primary_nic_index, network_name, nicIndex, allocation_mode
6749 )
6750
6751 # Stub for ip_address feature
6752 if ip_address:
6753 ip_tag = "<IpAddress>{}</IpAddress>".format(ip_address)
6754 item = item.replace(
6755 "</NetworkConnectionIndex>\n",
6756 "</NetworkConnectionIndex>\n{}\n".format(ip_tag),
6757 )
6758
6759 if mac_address:
6760 mac_tag = "<MACAddress>{}</MACAddress>".format(mac_address)
6761 item = item.replace(
6762 "</IsConnected>\n",
6763 "</IsConnected>\n{}\n".format(mac_tag),
6764 )
6765
6766 data = data.replace(
6767 "</ovf:Info>\n",
6768 "</ovf:Info>\n{}\n</NetworkConnectionSection>".format(item),
6769 )
6770 else:
6771 self.logger.debug("add_network_adapter PrimaryNIC in data")
6772 new_item = """<NetworkConnection network="{}">
6773 <NetworkConnectionIndex>{}</NetworkConnectionIndex>
6774 <IsConnected>true</IsConnected>
6775 <IpAddressAllocationMode>{}</IpAddressAllocationMode>
6776 </NetworkConnection>""".format(
6777 network_name, nicIndex, allocation_mode
6778 )
6779
6780 # Stub for ip_address feature
6781 if ip_address:
6782 ip_tag = "<IpAddress>{}</IpAddress>".format(ip_address)
6783 new_item = new_item.replace(
6784 "</NetworkConnectionIndex>\n",
6785 "</NetworkConnectionIndex>\n{}\n".format(ip_tag),
6786 )
6787
6788 if mac_address:
6789 mac_tag = "<MACAddress>{}</MACAddress>".format(mac_address)
6790 new_item = new_item.replace(
6791 "</IsConnected>\n",
6792 "</IsConnected>\n{}\n".format(mac_tag),
6793 )
6794
6795 data = data + new_item + "</NetworkConnectionSection>"
6796
6797 headers[
6798 "Content-Type"
6799 ] = "application/vnd.vmware.vcloud.networkConnectionSection+xml"
6800
6801 response = self.perform_request(
6802 req_type="PUT", url=url_rest_call, headers=headers, data=data
6803 )
6804
6805 if response.status_code == 403:
6806 add_headers = {"Content-Type": headers["Content-Type"]}
6807 response = self.retry_rest(
6808 "PUT", url_rest_call, add_headers, data
6809 )
6810
6811 if response.status_code != 202:
6812 self.logger.error(
6813 "REST call {} failed reason : {}"
6814 "status code : {} ".format(
6815 url_rest_call, response.text, response.status_code
6816 )
6817 )
6818 raise vimconn.VimConnException(
6819 "add_network_adapter_to_vms : Failed to update "
6820 "network connection section"
6821 )
6822 else:
6823 nic_task = self.get_task_from_response(response.text)
6824 result = self.client.get_task_monitor().wait_for_success(
6825 task=nic_task
6826 )
6827
6828 if result.get("status") == "success":
6829 self.logger.info(
6830 "add_network_adapter_to_vms(): VM {} conneced to "
6831 "default NIC type".format(vm_id)
6832 )
6833 else:
6834 self.logger.error(
6835 "add_network_adapter_to_vms(): VM {} failed to "
6836 "connect NIC type".format(vm_id)
6837 )
6838 else:
6839 for vms in vapp.get_all_vms():
6840 vm_id = vms.get("id").split(":")[-1]
6841
6842 url_rest_call = (
6843 "{}/api/vApp/vm-{}/networkConnectionSection/".format(
6844 self.url, vm_id
6845 )
6846 )
6847
6848 headers = {
6849 "Accept": "application/*+xml;version=" + API_VERSION,
6850 "x-vcloud-authorization": self.client._session.headers[
6851 "x-vcloud-authorization"
6852 ],
6853 }
6854 response = self.perform_request(
6855 req_type="GET", url=url_rest_call, headers=headers
6856 )
6857
6858 if response.status_code == 403:
6859 response = self.retry_rest("GET", url_rest_call)
6860
6861 if response.status_code != 200:
6862 self.logger.error(
6863 "REST call {} failed reason : {}"
6864 "status code : {}".format(
6865 url_rest_call, response.text, response.status_code
6866 )
6867 )
6868 raise vimconn.VimConnException(
6869 "add_network_adapter_to_vms : Failed to get "
6870 "network connection section"
6871 )
6872 data = response.text
6873 data = data.split('<Link rel="edit"')[0]
6874 vcd_netadapter_type = nic_type
6875
6876 if nic_type in ["SR-IOV", "VF"]:
6877 vcd_netadapter_type = "SRIOVETHERNETCARD"
6878
6879 if "<PrimaryNetworkConnectionIndex>" not in data:
6880 self.logger.debug(
6881 "add_network_adapter PrimaryNIC not in data nic_type {}".format(
6882 nic_type
6883 )
6884 )
6885 item = """<PrimaryNetworkConnectionIndex>{}</PrimaryNetworkConnectionIndex>
6886 <NetworkConnection network="{}">
6887 <NetworkConnectionIndex>{}</NetworkConnectionIndex>
6888 <IsConnected>true</IsConnected>
6889 <IpAddressAllocationMode>{}</IpAddressAllocationMode>
6890 <NetworkAdapterType>{}</NetworkAdapterType>
6891 </NetworkConnection>""".format(
6892 primary_nic_index,
6893 network_name,
6894 nicIndex,
6895 allocation_mode,
6896 vcd_netadapter_type,
6897 )
6898
6899 # Stub for ip_address feature
6900 if ip_address:
6901 ip_tag = "<IpAddress>{}</IpAddress>".format(ip_address)
6902 item = item.replace(
6903 "</NetworkConnectionIndex>\n",
6904 "</NetworkConnectionIndex>\n{}\n".format(ip_tag),
6905 )
6906
6907 if mac_address:
6908 mac_tag = "<MACAddress>{}</MACAddress>".format(mac_address)
6909 item = item.replace(
6910 "</IsConnected>\n",
6911 "</IsConnected>\n{}\n".format(mac_tag),
6912 )
6913
6914 data = data.replace(
6915 "</ovf:Info>\n",
6916 "</ovf:Info>\n{}\n</NetworkConnectionSection>".format(item),
6917 )
6918 else:
6919 self.logger.debug(
6920 "add_network_adapter PrimaryNIC in data nic_type {}".format(
6921 nic_type
6922 )
6923 )
6924 new_item = """<NetworkConnection network="{}">
6925 <NetworkConnectionIndex>{}</NetworkConnectionIndex>
6926 <IsConnected>true</IsConnected>
6927 <IpAddressAllocationMode>{}</IpAddressAllocationMode>
6928 <NetworkAdapterType>{}</NetworkAdapterType>
6929 </NetworkConnection>""".format(
6930 network_name, nicIndex, allocation_mode, vcd_netadapter_type
6931 )
6932
6933 # Stub for ip_address feature
6934 if ip_address:
6935 ip_tag = "<IpAddress>{}</IpAddress>".format(ip_address)
6936 new_item = new_item.replace(
6937 "</NetworkConnectionIndex>\n",
6938 "</NetworkConnectionIndex>\n{}\n".format(ip_tag),
6939 )
6940
6941 if mac_address:
6942 mac_tag = "<MACAddress>{}</MACAddress>".format(mac_address)
6943 new_item = new_item.replace(
6944 "</IsConnected>\n",
6945 "</IsConnected>\n{}\n".format(mac_tag),
6946 )
6947
6948 data = data + new_item + "</NetworkConnectionSection>"
6949
6950 headers[
6951 "Content-Type"
6952 ] = "application/vnd.vmware.vcloud.networkConnectionSection+xml"
6953
6954 response = self.perform_request(
6955 req_type="PUT", url=url_rest_call, headers=headers, data=data
6956 )
6957
6958 if response.status_code == 403:
6959 add_headers = {"Content-Type": headers["Content-Type"]}
6960 response = self.retry_rest(
6961 "PUT", url_rest_call, add_headers, data
6962 )
6963
6964 if response.status_code != 202:
6965 self.logger.error(
6966 "REST call {} failed reason : {}"
6967 "status code : {}".format(
6968 url_rest_call, response.text, response.status_code
6969 )
6970 )
6971 raise vimconn.VimConnException(
6972 "add_network_adapter_to_vms : Failed to update "
6973 "network connection section"
6974 )
6975 else:
6976 nic_task = self.get_task_from_response(response.text)
6977 result = self.client.get_task_monitor().wait_for_success(
6978 task=nic_task
6979 )
6980
6981 if result.get("status") == "success":
6982 self.logger.info(
6983 "add_network_adapter_to_vms(): VM {} "
6984 "conneced to NIC type {}".format(vm_id, nic_type)
6985 )
6986 else:
6987 self.logger.error(
6988 "add_network_adapter_to_vms(): VM {} "
6989 "failed to connect NIC type {}".format(vm_id, nic_type)
6990 )
6991 except Exception as exp:
6992 self.logger.error(
6993 "add_network_adapter_to_vms() : exception occurred "
6994 "while adding Network adapter"
6995 )
6996
6997 raise vimconn.VimConnException(message=exp)
6998
6999 def set_numa_affinity(self, vmuuid, paired_threads_id):
7000 """
7001 Method to assign numa affinity in vm configuration parammeters
7002 Args :
7003 vmuuid - vm uuid
7004 paired_threads_id - one or more virtual processor
7005 numbers
7006 Returns:
7007 return if True
7008 """
7009 try:
7010 vcenter_conect, content = self.get_vcenter_content()
7011 vm_moref_id = self.get_vm_moref_id(vmuuid)
7012 _, vm_obj = self.get_vm_obj(content, vm_moref_id)
7013
7014 if vm_obj:
7015 config_spec = vim.vm.ConfigSpec()
7016 config_spec.extraConfig = []
7017 opt = vim.option.OptionValue()
7018 opt.key = "numa.nodeAffinity"
7019 opt.value = str(paired_threads_id)
7020 config_spec.extraConfig.append(opt)
7021 task = vm_obj.ReconfigVM_Task(config_spec)
7022
7023 if task:
7024 self.wait_for_vcenter_task(task, vcenter_conect)
7025 extra_config = vm_obj.config.extraConfig
7026 flag = False
7027
7028 for opts in extra_config:
7029 if "numa.nodeAffinity" in opts.key:
7030 flag = True
7031 self.logger.info(
7032 "set_numa_affinity: Sucessfully assign numa affinity "
7033 "value {} for vm {}".format(opt.value, vm_obj)
7034 )
7035
7036 if flag:
7037 return
7038 else:
7039 self.logger.error("set_numa_affinity: Failed to assign numa affinity")
7040 except Exception as exp:
7041 self.logger.error(
7042 "set_numa_affinity : exception occurred while setting numa affinity "
7043 "for VM {} : {}".format(vm_obj, vm_moref_id)
7044 )
7045
7046 raise vimconn.VimConnException(
7047 "set_numa_affinity : Error {} failed to assign numa "
7048 "affinity".format(exp)
7049 )
7050
7051 def cloud_init(self, vapp, cloud_config):
7052 """
7053 Method to inject ssh-key
7054 vapp - vapp object
7055 cloud_config a dictionary with:
7056 'key-pairs': (optional) list of strings with the public key to be inserted to the default user
7057 'users': (optional) list of users to be inserted, each item is a dict with:
7058 'name': (mandatory) user name,
7059 'key-pairs': (optional) list of strings with the public key to be inserted to the user
7060 'user-data': (optional) can be a string with the text script to be passed directly to cloud-init,
7061 or a list of strings, each one contains a script to be passed, usually with a MIMEmultipart file
7062 'config-files': (optional). List of files to be transferred. Each item is a dict with:
7063 'dest': (mandatory) string with the destination absolute path
7064 'encoding': (optional, by default text). Can be one of:
7065 'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
7066 'content' (mandatory): string with the content of the file
7067 'permissions': (optional) string with file permissions, typically octal notation '0644'
7068 'owner': (optional) file owner, string with the format 'owner:group'
7069 'boot-data-drive': boolean to indicate if user-data must be passed using a boot drive (hard disk
7070 """
7071 try:
7072 if not isinstance(cloud_config, dict):
7073 raise Exception(
7074 "cloud_init : parameter cloud_config is not a dictionary"
7075 )
7076 else:
7077 key_pairs = []
7078 userdata = []
7079
7080 if "key-pairs" in cloud_config:
7081 key_pairs = cloud_config["key-pairs"]
7082
7083 if "users" in cloud_config:
7084 userdata = cloud_config["users"]
7085
7086 self.logger.debug("cloud_init : Guest os customization started..")
7087 customize_script = self.format_script(
7088 key_pairs=key_pairs, users_list=userdata
7089 )
7090 customize_script = customize_script.replace("&", "&amp;")
7091 self.guest_customization(vapp, customize_script)
7092 except Exception as exp:
7093 self.logger.error(
7094 "cloud_init : exception occurred while injecting " "ssh-key"
7095 )
7096
7097 raise vimconn.VimConnException(
7098 "cloud_init : Error {} failed to inject " "ssh-key".format(exp)
7099 )
7100
7101 def format_script(self, key_pairs=[], users_list=[]):
7102 bash_script = """#!/bin/sh
7103 echo performing customization tasks with param $1 at `date "+DATE: %Y-%m-%d - TIME: %H:%M:%S"`>> /root/customization.log
7104 if [ "$1" = "precustomization" ];then
7105 echo performing precustomization tasks on `date "+DATE: %Y-%m-%d - TIME: %H:%M:%S"` >> /root/customization.log
7106 """
7107
7108 keys = "\n".join(key_pairs)
7109 if keys:
7110 keys_data = """
7111 if [ ! -d /root/.ssh ];then
7112 mkdir /root/.ssh
7113 chown root:root /root/.ssh
7114 chmod 700 /root/.ssh
7115 touch /root/.ssh/authorized_keys
7116 chown root:root /root/.ssh/authorized_keys
7117 chmod 600 /root/.ssh/authorized_keys
7118 # make centos with selinux happy
7119 which restorecon && restorecon -Rv /root/.ssh
7120 else
7121 touch /root/.ssh/authorized_keys
7122 chown root:root /root/.ssh/authorized_keys
7123 chmod 600 /root/.ssh/authorized_keys
7124 fi
7125 echo '{key}' >> /root/.ssh/authorized_keys
7126 """.format(
7127 key=keys
7128 )
7129
7130 bash_script += keys_data
7131
7132 for user in users_list:
7133 if "name" in user:
7134 user_name = user["name"]
7135
7136 if "key-pairs" in user:
7137 user_keys = "\n".join(user["key-pairs"])
7138 else:
7139 user_keys = None
7140
7141 add_user_name = """
7142 useradd -d /home/{user_name} -m -g users -s /bin/bash {user_name}
7143 """.format(
7144 user_name=user_name
7145 )
7146
7147 bash_script += add_user_name
7148
7149 if user_keys:
7150 user_keys_data = """
7151 mkdir /home/{user_name}/.ssh
7152 chown {user_name}:{user_name} /home/{user_name}/.ssh
7153 chmod 700 /home/{user_name}/.ssh
7154 touch /home/{user_name}/.ssh/authorized_keys
7155 chown {user_name}:{user_name} /home/{user_name}/.ssh/authorized_keys
7156 chmod 600 /home/{user_name}/.ssh/authorized_keys
7157 # make centos with selinux happy
7158 which restorecon && restorecon -Rv /home/{user_name}/.ssh
7159 echo '{user_key}' >> /home/{user_name}/.ssh/authorized_keys
7160 """.format(
7161 user_name=user_name, user_key=user_keys
7162 )
7163 bash_script += user_keys_data
7164
7165 return bash_script + "\n\tfi"
7166
7167 def guest_customization(self, vapp, customize_script):
7168 """
7169 Method to customize guest os
7170 vapp - Vapp object
7171 customize_script - Customize script to be run at first boot of VM.
7172 """
7173 for vm in vapp.get_all_vms():
7174 vm_id = vm.get("id").split(":")[-1]
7175 vm_name = vm.get("name")
7176 vm_name = vm_name.replace("_", "-")
7177
7178 vm_customization_url = (
7179 "{}/api/vApp/vm-{}/guestCustomizationSection/".format(self.url, vm_id)
7180 )
7181 headers = {
7182 "Accept": "application/*+xml;version=" + API_VERSION,
7183 "x-vcloud-authorization": self.client._session.headers[
7184 "x-vcloud-authorization"
7185 ],
7186 }
7187
7188 headers[
7189 "Content-Type"
7190 ] = "application/vnd.vmware.vcloud.guestCustomizationSection+xml"
7191
7192 data = """<GuestCustomizationSection
7193 xmlns="http://www.vmware.com/vcloud/v1.5"
7194 xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1"
7195 ovf:required="false" href="{}"
7196 type="application/vnd.vmware.vcloud.guestCustomizationSection+xml">
7197 <ovf:Info>Specifies Guest OS Customization Settings</ovf:Info>
7198 <Enabled>true</Enabled>
7199 <ChangeSid>false</ChangeSid>
7200 <VirtualMachineId>{}</VirtualMachineId>
7201 <JoinDomainEnabled>false</JoinDomainEnabled>
7202 <UseOrgSettings>false</UseOrgSettings>
7203 <AdminPasswordEnabled>false</AdminPasswordEnabled>
7204 <AdminPasswordAuto>true</AdminPasswordAuto>
7205 <AdminAutoLogonEnabled>false</AdminAutoLogonEnabled>
7206 <AdminAutoLogonCount>0</AdminAutoLogonCount>
7207 <ResetPasswordRequired>false</ResetPasswordRequired>
7208 <CustomizationScript>{}</CustomizationScript>
7209 <ComputerName>{}</ComputerName>
7210 <Link href="{}"
7211 type="application/vnd.vmware.vcloud.guestCustomizationSection+xml" rel="edit"/>
7212 </GuestCustomizationSection>
7213 """.format(
7214 vm_customization_url,
7215 vm_id,
7216 customize_script,
7217 vm_name,
7218 vm_customization_url,
7219 )
7220
7221 response = self.perform_request(
7222 req_type="PUT", url=vm_customization_url, headers=headers, data=data
7223 )
7224 if response.status_code == 202:
7225 guest_task = self.get_task_from_response(response.text)
7226 self.client.get_task_monitor().wait_for_success(task=guest_task)
7227 self.logger.info(
7228 "guest_customization : customized guest os task "
7229 "completed for VM {}".format(vm_name)
7230 )
7231 else:
7232 self.logger.error(
7233 "guest_customization : task for customized guest os"
7234 "failed for VM {}".format(vm_name)
7235 )
7236
7237 raise vimconn.VimConnException(
7238 "guest_customization : failed to perform"
7239 "guest os customization on VM {}".format(vm_name)
7240 )
7241
7242 def add_new_disk(self, vapp_uuid, disk_size):
7243 """
7244 Method to create an empty vm disk
7245
7246 Args:
7247 vapp_uuid - is vapp identifier.
7248 disk_size - size of disk to be created in GB
7249
7250 Returns:
7251 None
7252 """
7253 status = False
7254 vm_details = None
7255 try:
7256 # Disk size in GB, convert it into MB
7257 if disk_size is not None:
7258 disk_size_mb = int(disk_size) * 1024
7259 vm_details = self.get_vapp_details_rest(vapp_uuid)
7260
7261 if vm_details and "vm_virtual_hardware" in vm_details:
7262 self.logger.info(
7263 "Adding disk to VM: {} disk size:{}GB".format(
7264 vm_details["name"], disk_size
7265 )
7266 )
7267 disk_href = vm_details["vm_virtual_hardware"]["disk_edit_href"]
7268 status = self.add_new_disk_rest(disk_href, disk_size_mb)
7269 except Exception as exp:
7270 msg = "Error occurred while creating new disk {}.".format(exp)
7271 self.rollback_newvm(vapp_uuid, msg)
7272
7273 if status:
7274 self.logger.info(
7275 "Added new disk to VM: {} disk size:{}GB".format(
7276 vm_details["name"], disk_size
7277 )
7278 )
7279 else:
7280 # If failed to add disk, delete VM
7281 msg = "add_new_disk: Failed to add new disk to {}".format(
7282 vm_details["name"]
7283 )
7284 self.rollback_newvm(vapp_uuid, msg)
7285
7286 def add_new_disk_rest(self, disk_href, disk_size_mb):
7287 """
7288 Retrives vApp Disks section & add new empty disk
7289
7290 Args:
7291 disk_href: Disk section href to addd disk
7292 disk_size_mb: Disk size in MB
7293
7294 Returns: Status of add new disk task
7295 """
7296 status = False
7297 if self.client._session:
7298 headers = {
7299 "Accept": "application/*+xml;version=" + API_VERSION,
7300 "x-vcloud-authorization": self.client._session.headers[
7301 "x-vcloud-authorization"
7302 ],
7303 }
7304 response = self.perform_request(
7305 req_type="GET", url=disk_href, headers=headers
7306 )
7307
7308 if response.status_code == 403:
7309 response = self.retry_rest("GET", disk_href)
7310
7311 if response.status_code != requests.codes.ok:
7312 self.logger.error(
7313 "add_new_disk_rest: GET REST API call {} failed. Return status code {}".format(
7314 disk_href, response.status_code
7315 )
7316 )
7317
7318 return status
7319
7320 try:
7321 # Find but type & max of instance IDs assigned to disks
7322 lxmlroot_respond = lxmlElementTree.fromstring(response.content)
7323 namespaces = {
7324 prefix: uri for prefix, uri in lxmlroot_respond.nsmap.items() if prefix
7325 }
7326 namespaces["xmlns"] = "http://www.vmware.com/vcloud/v1.5"
7327 instance_id = 0
7328
7329 for item in lxmlroot_respond.iterfind("xmlns:Item", namespaces):
7330 if item.find("rasd:Description", namespaces).text == "Hard disk":
7331 inst_id = int(item.find("rasd:InstanceID", namespaces).text)
7332
7333 if inst_id > instance_id:
7334 instance_id = inst_id
7335 disk_item = item.find("rasd:HostResource", namespaces)
7336 bus_subtype = disk_item.attrib[
7337 "{" + namespaces["xmlns"] + "}busSubType"
7338 ]
7339 bus_type = disk_item.attrib[
7340 "{" + namespaces["xmlns"] + "}busType"
7341 ]
7342
7343 instance_id = instance_id + 1
7344 new_item = """<Item>
7345 <rasd:Description>Hard disk</rasd:Description>
7346 <rasd:ElementName>New disk</rasd:ElementName>
7347 <rasd:HostResource
7348 xmlns:vcloud="http://www.vmware.com/vcloud/v1.5"
7349 vcloud:capacity="{}"
7350 vcloud:busSubType="{}"
7351 vcloud:busType="{}"></rasd:HostResource>
7352 <rasd:InstanceID>{}</rasd:InstanceID>
7353 <rasd:ResourceType>17</rasd:ResourceType>
7354 </Item>""".format(
7355 disk_size_mb, bus_subtype, bus_type, instance_id
7356 )
7357
7358 new_data = response.text
7359 # Add new item at the bottom
7360 new_data = new_data.replace(
7361 "</Item>\n</RasdItemsList>",
7362 "</Item>\n{}\n</RasdItemsList>".format(new_item),
7363 )
7364
7365 # Send PUT request to modify virtual hardware section with new disk
7366 headers[
7367 "Content-Type"
7368 ] = "application/vnd.vmware.vcloud.rasdItemsList+xml; charset=ISO-8859-1"
7369
7370 response = self.perform_request(
7371 req_type="PUT", url=disk_href, data=new_data, headers=headers
7372 )
7373
7374 if response.status_code == 403:
7375 add_headers = {"Content-Type": headers["Content-Type"]}
7376 response = self.retry_rest("PUT", disk_href, add_headers, new_data)
7377
7378 if response.status_code != 202:
7379 self.logger.error(
7380 "PUT REST API call {} failed. Return status code {}. response.text:{}".format(
7381 disk_href, response.status_code, response.text
7382 )
7383 )
7384 else:
7385 add_disk_task = self.get_task_from_response(response.text)
7386 result = self.client.get_task_monitor().wait_for_success(
7387 task=add_disk_task
7388 )
7389
7390 if result.get("status") == "success":
7391 status = True
7392 else:
7393 self.logger.error(
7394 "Add new disk REST task failed to add {} MB disk".format(
7395 disk_size_mb
7396 )
7397 )
7398 except Exception as exp:
7399 self.logger.error(
7400 "Error occurred calling rest api for creating new disk {}".format(exp)
7401 )
7402
7403 return status
7404
7405 def add_existing_disk(
7406 self,
7407 catalogs=None,
7408 image_id=None,
7409 size=None,
7410 template_name=None,
7411 vapp_uuid=None,
7412 ):
7413 """
7414 Method to add existing disk to vm
7415 Args :
7416 catalogs - List of VDC catalogs
7417 image_id - Catalog ID
7418 template_name - Name of template in catalog
7419 vapp_uuid - UUID of vApp
7420 Returns:
7421 None
7422 """
7423 disk_info = None
7424 vcenter_conect, content = self.get_vcenter_content()
7425 # find moref-id of vm in image
7426 catalog_vm_info = self.get_vapp_template_details(
7427 catalogs=catalogs,
7428 image_id=image_id,
7429 )
7430
7431 if catalog_vm_info and "vm_vcenter_info" in catalog_vm_info:
7432 if "vm_moref_id" in catalog_vm_info["vm_vcenter_info"]:
7433 catalog_vm_moref_id = catalog_vm_info["vm_vcenter_info"].get(
7434 "vm_moref_id", None
7435 )
7436
7437 if catalog_vm_moref_id:
7438 self.logger.info(
7439 "Moref_id of VM in catalog : {}".format(catalog_vm_moref_id)
7440 )
7441 _, catalog_vm_obj = self.get_vm_obj(content, catalog_vm_moref_id)
7442
7443 if catalog_vm_obj:
7444 # find existing disk
7445 disk_info = self.find_disk(catalog_vm_obj)
7446 else:
7447 exp_msg = "No VM with image id {} found".format(image_id)
7448 self.rollback_newvm(vapp_uuid, exp_msg, exp_type="NotFound")
7449 else:
7450 exp_msg = "No Image found with image ID {} ".format(image_id)
7451 self.rollback_newvm(vapp_uuid, exp_msg, exp_type="NotFound")
7452
7453 if disk_info:
7454 self.logger.info("Existing disk_info : {}".format(disk_info))
7455 # get VM
7456 vm_moref_id = self.get_vm_moref_id(vapp_uuid)
7457 _, vm_obj = self.get_vm_obj(content, vm_moref_id)
7458
7459 if vm_obj:
7460 status = self.add_disk(
7461 vcenter_conect=vcenter_conect,
7462 vm=vm_obj,
7463 disk_info=disk_info,
7464 size=size,
7465 vapp_uuid=vapp_uuid,
7466 )
7467
7468 if status:
7469 self.logger.info(
7470 "Disk from image id {} added to {}".format(
7471 image_id, vm_obj.config.name
7472 )
7473 )
7474 else:
7475 msg = "No disk found with image id {} to add in VM {}".format(
7476 image_id, vm_obj.config.name
7477 )
7478 self.rollback_newvm(vapp_uuid, msg, exp_type="NotFound")
7479
7480 def find_disk(self, vm_obj):
7481 """
7482 Method to find details of existing disk in VM
7483 Args:
7484 vm_obj - vCenter object of VM
7485 Returns:
7486 disk_info : dict of disk details
7487 """
7488 disk_info = {}
7489 if vm_obj:
7490 try:
7491 devices = vm_obj.config.hardware.device
7492
7493 for device in devices:
7494 if type(device) is vim.vm.device.VirtualDisk:
7495 if (
7496 isinstance(
7497 device.backing,
7498 vim.vm.device.VirtualDisk.FlatVer2BackingInfo,
7499 )
7500 and hasattr(device.backing, "fileName")
7501 ):
7502 disk_info["full_path"] = device.backing.fileName
7503 disk_info["datastore"] = device.backing.datastore
7504 disk_info["capacityKB"] = device.capacityInKB
7505 break
7506 except Exception as exp:
7507 self.logger.error(
7508 "find_disk() : exception occurred while "
7509 "getting existing disk details :{}".format(exp)
7510 )
7511
7512 return disk_info
7513
7514 def add_disk(
7515 self, vcenter_conect=None, vm=None, size=None, vapp_uuid=None, disk_info={}
7516 ):
7517 """
7518 Method to add existing disk in VM
7519 Args :
7520 vcenter_conect - vCenter content object
7521 vm - vCenter vm object
7522 disk_info : dict of disk details
7523 Returns:
7524 status : status of add disk task
7525 """
7526 datastore = disk_info["datastore"] if "datastore" in disk_info else None
7527 fullpath = disk_info["full_path"] if "full_path" in disk_info else None
7528 capacityKB = disk_info["capacityKB"] if "capacityKB" in disk_info else None
7529 if size is not None:
7530 # Convert size from GB to KB
7531 sizeKB = int(size) * 1024 * 1024
7532 # compare size of existing disk and user given size.Assign whicherver is greater
7533 self.logger.info(
7534 "Add Existing disk : sizeKB {} , capacityKB {}".format(
7535 sizeKB, capacityKB
7536 )
7537 )
7538
7539 if sizeKB > capacityKB:
7540 capacityKB = sizeKB
7541
7542 if datastore and fullpath and capacityKB:
7543 try:
7544 spec = vim.vm.ConfigSpec()
7545 # get all disks on a VM, set unit_number to the next available
7546 unit_number = 0
7547 for dev in vm.config.hardware.device:
7548 if hasattr(dev.backing, "fileName"):
7549 unit_number = int(dev.unitNumber) + 1
7550 # unit_number 7 reserved for scsi controller
7551
7552 if unit_number == 7:
7553 unit_number += 1
7554
7555 if isinstance(dev, vim.vm.device.VirtualDisk):
7556 # vim.vm.device.VirtualSCSIController
7557 controller_key = dev.controllerKey
7558
7559 self.logger.info(
7560 "Add Existing disk : unit number {} , controller key {}".format(
7561 unit_number, controller_key
7562 )
7563 )
7564 # add disk here
7565 dev_changes = []
7566 disk_spec = vim.vm.device.VirtualDeviceSpec()
7567 disk_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
7568 disk_spec.device = vim.vm.device.VirtualDisk()
7569 disk_spec.device.backing = (
7570 vim.vm.device.VirtualDisk.FlatVer2BackingInfo()
7571 )
7572 disk_spec.device.backing.thinProvisioned = True
7573 disk_spec.device.backing.diskMode = "persistent"
7574 disk_spec.device.backing.datastore = datastore
7575 disk_spec.device.backing.fileName = fullpath
7576
7577 disk_spec.device.unitNumber = unit_number
7578 disk_spec.device.capacityInKB = capacityKB
7579 disk_spec.device.controllerKey = controller_key
7580 dev_changes.append(disk_spec)
7581 spec.deviceChange = dev_changes
7582 task = vm.ReconfigVM_Task(spec=spec)
7583 status = self.wait_for_vcenter_task(task, vcenter_conect)
7584
7585 return status
7586 except Exception as exp:
7587 exp_msg = (
7588 "add_disk() : exception {} occurred while adding disk "
7589 "{} to vm {}".format(exp, fullpath, vm.config.name)
7590 )
7591 self.rollback_newvm(vapp_uuid, exp_msg)
7592 else:
7593 msg = "add_disk() : Can not add disk to VM with disk info {} ".format(
7594 disk_info
7595 )
7596 self.rollback_newvm(vapp_uuid, msg)
7597
7598 def get_vcenter_content(self):
7599 """
7600 Get the vsphere content object
7601 """
7602 try:
7603 vm_vcenter_info = self.get_vm_vcenter_info()
7604 except Exception as exp:
7605 self.logger.error(
7606 "Error occurred while getting vCenter infromationn"
7607 " for VM : {}".format(exp)
7608 )
7609
7610 raise vimconn.VimConnException(message=exp)
7611
7612 context = None
7613 if hasattr(ssl, "_create_unverified_context"):
7614 context = ssl._create_unverified_context()
7615
7616 vcenter_conect = SmartConnect(
7617 host=vm_vcenter_info["vm_vcenter_ip"],
7618 user=vm_vcenter_info["vm_vcenter_user"],
7619 pwd=vm_vcenter_info["vm_vcenter_password"],
7620 port=int(vm_vcenter_info["vm_vcenter_port"]),
7621 sslContext=context,
7622 )
7623 atexit.register(Disconnect, vcenter_conect)
7624 content = vcenter_conect.RetrieveContent()
7625
7626 return vcenter_conect, content
7627
7628 def get_vm_moref_id(self, vapp_uuid):
7629 """
7630 Get the moref_id of given VM
7631 """
7632 try:
7633 if vapp_uuid:
7634 vm_details = self.get_vapp_details_rest(
7635 vapp_uuid, need_admin_access=True
7636 )
7637
7638 if vm_details and "vm_vcenter_info" in vm_details:
7639 vm_moref_id = vm_details["vm_vcenter_info"].get("vm_moref_id", None)
7640
7641 return vm_moref_id
7642 except Exception as exp:
7643 self.logger.error(
7644 "Error occurred while getting VM moref ID " " for VM : {}".format(exp)
7645 )
7646
7647 return None
7648
7649 def get_vapp_template_details(
7650 self, catalogs=None, image_id=None, template_name=None
7651 ):
7652 """
7653 Method to get vApp template details
7654 Args :
7655 catalogs - list of VDC catalogs
7656 image_id - Catalog ID to find
7657 template_name : template name in catalog
7658 Returns:
7659 parsed_respond : dict of vApp tempalte details
7660 """
7661 parsed_response = {}
7662
7663 vca = self.connect_as_admin()
7664 if not vca:
7665 raise vimconn.VimConnConnectionException("Failed to connect vCD")
7666
7667 try:
7668 org, _ = self.get_vdc_details()
7669 catalog = self.get_catalog_obj(image_id, catalogs)
7670 if catalog:
7671 items = org.get_catalog_item(catalog.get("name"), catalog.get("name"))
7672 catalog_items = [items.attrib]
7673
7674 if len(catalog_items) == 1:
7675 headers = {
7676 "Accept": "application/*+xml;version=" + API_VERSION,
7677 "x-vcloud-authorization": vca._session.headers[
7678 "x-vcloud-authorization"
7679 ],
7680 }
7681 response = self.perform_request(
7682 req_type="GET",
7683 url=catalog_items[0].get("href"),
7684 headers=headers,
7685 )
7686 catalogItem = XmlElementTree.fromstring(response.text)
7687 entity = [
7688 child
7689 for child in catalogItem
7690 if child.get("type")
7691 == "application/vnd.vmware.vcloud.vAppTemplate+xml"
7692 ][0]
7693 vapp_tempalte_href = entity.get("href")
7694 # get vapp details and parse moref id
7695
7696 namespaces = {
7697 "vssd": "http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData",
7698 "ovf": "http://schemas.dmtf.org/ovf/envelope/1",
7699 "vmw": "http://www.vmware.com/schema/ovf",
7700 "vm": "http://www.vmware.com/vcloud/v1.5",
7701 "rasd": "http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData",
7702 "vmext": "http://www.vmware.com/vcloud/extension/v1.5",
7703 "xmlns": "http://www.vmware.com/vcloud/v1.5",
7704 }
7705
7706 if vca._session:
7707 response = self.perform_request(
7708 req_type="GET", url=vapp_tempalte_href, headers=headers
7709 )
7710
7711 if response.status_code != requests.codes.ok:
7712 self.logger.debug(
7713 "REST API call {} failed. Return status code {}".format(
7714 vapp_tempalte_href, response.status_code
7715 )
7716 )
7717 else:
7718 xmlroot_respond = XmlElementTree.fromstring(response.text)
7719 children_section = xmlroot_respond.find(
7720 "vm:Children/", namespaces
7721 )
7722
7723 if children_section is not None:
7724 vCloud_extension_section = children_section.find(
7725 "xmlns:VCloudExtension", namespaces
7726 )
7727
7728 if vCloud_extension_section is not None:
7729 vm_vcenter_info = {}
7730 vim_info = vCloud_extension_section.find(
7731 "vmext:VmVimInfo", namespaces
7732 )
7733 vmext = vim_info.find(
7734 "vmext:VmVimObjectRef", namespaces
7735 )
7736
7737 if vmext is not None:
7738 vm_vcenter_info["vm_moref_id"] = vmext.find(
7739 "vmext:MoRef", namespaces
7740 ).text
7741
7742 parsed_response["vm_vcenter_info"] = vm_vcenter_info
7743 except Exception as exp:
7744 self.logger.info(
7745 "Error occurred calling rest api for getting vApp details {}".format(
7746 exp
7747 )
7748 )
7749
7750 return parsed_response
7751
7752 def rollback_newvm(self, vapp_uuid, msg, exp_type="Genric"):
7753 """
7754 Method to delete vApp
7755 Args :
7756 vapp_uuid - vApp UUID
7757 msg - Error message to be logged
7758 exp_type : Exception type
7759 Returns:
7760 None
7761 """
7762 if vapp_uuid:
7763 self.delete_vminstance(vapp_uuid)
7764 else:
7765 msg = "No vApp ID"
7766
7767 self.logger.error(msg)
7768
7769 if exp_type == "Genric":
7770 raise vimconn.VimConnException(msg)
7771 elif exp_type == "NotFound":
7772 raise vimconn.VimConnNotFoundException(message=msg)
7773
7774 def add_sriov(self, vapp_uuid, sriov_nets, vmname_andid):
7775 """
7776 Method to attach SRIOV adapters to VM
7777
7778 Args:
7779 vapp_uuid - uuid of vApp/VM
7780 sriov_nets - SRIOV devices infromation as specified in VNFD (flavor)
7781 vmname_andid - vmname
7782
7783 Returns:
7784 The status of add SRIOV adapter task , vm object and
7785 vcenter_conect object
7786 """
7787 vm_obj = None
7788 vcenter_conect, content = self.get_vcenter_content()
7789 vm_moref_id = self.get_vm_moref_id(vapp_uuid)
7790
7791 if vm_moref_id:
7792 try:
7793 no_of_sriov_devices = len(sriov_nets)
7794 if no_of_sriov_devices > 0:
7795 # Get VM and its host
7796 host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
7797 self.logger.info(
7798 "VM {} is currently on host {}".format(vm_obj, host_obj)
7799 )
7800
7801 if host_obj and vm_obj:
7802 # get SRIOV devies from host on which vapp is currently installed
7803 avilable_sriov_devices = self.get_sriov_devices(
7804 host_obj,
7805 no_of_sriov_devices,
7806 )
7807
7808 if len(avilable_sriov_devices) == 0:
7809 # find other hosts with active pci devices
7810 (
7811 new_host_obj,
7812 avilable_sriov_devices,
7813 ) = self.get_host_and_sriov_devices(
7814 content,
7815 no_of_sriov_devices,
7816 )
7817
7818 if (
7819 new_host_obj is not None
7820 and len(avilable_sriov_devices) > 0
7821 ):
7822 # Migrate vm to the host where SRIOV devices are available
7823 self.logger.info(
7824 "Relocate VM {} on new host {}".format(
7825 vm_obj, new_host_obj
7826 )
7827 )
7828 task = self.relocate_vm(new_host_obj, vm_obj)
7829
7830 if task is not None:
7831 result = self.wait_for_vcenter_task(
7832 task, vcenter_conect
7833 )
7834 self.logger.info(
7835 "Migrate VM status: {}".format(result)
7836 )
7837 host_obj = new_host_obj
7838 else:
7839 self.logger.info(
7840 "Fail to migrate VM : {}".format(result)
7841 )
7842
7843 raise vimconn.VimConnNotFoundException(
7844 "Fail to migrate VM : {} to host {}".format(
7845 vmname_andid, new_host_obj
7846 )
7847 )
7848
7849 if (
7850 host_obj is not None
7851 and avilable_sriov_devices is not None
7852 and len(avilable_sriov_devices) > 0
7853 ):
7854 # Add SRIOV devices one by one
7855 for sriov_net in sriov_nets:
7856 network_name = sriov_net.get("net_id")
7857 self.create_dvPort_group(network_name)
7858
7859 if (
7860 sriov_net.get("type") == "VF"
7861 or sriov_net.get("type") == "SR-IOV"
7862 ):
7863 # add vlan ID ,Modify portgroup for vlan ID
7864 self.configure_vlanID(
7865 content, vcenter_conect, network_name
7866 )
7867
7868 task = self.add_sriov_to_vm(
7869 content,
7870 vm_obj,
7871 host_obj,
7872 network_name,
7873 avilable_sriov_devices[0],
7874 )
7875
7876 if task:
7877 status = self.wait_for_vcenter_task(
7878 task, vcenter_conect
7879 )
7880
7881 if status:
7882 self.logger.info(
7883 "Added SRIOV {} to VM {}".format(
7884 no_of_sriov_devices, str(vm_obj)
7885 )
7886 )
7887 else:
7888 self.logger.error(
7889 "Fail to add SRIOV {} to VM {}".format(
7890 no_of_sriov_devices, str(vm_obj)
7891 )
7892 )
7893
7894 raise vimconn.VimConnUnexpectedResponse(
7895 "Fail to add SRIOV adapter in VM {}".format(
7896 str(vm_obj)
7897 )
7898 )
7899
7900 return True, vm_obj, vcenter_conect
7901 else:
7902 self.logger.error(
7903 "Currently there is no host with"
7904 " {} number of avaialble SRIOV "
7905 "VFs required for VM {}".format(
7906 no_of_sriov_devices, vmname_andid
7907 )
7908 )
7909
7910 raise vimconn.VimConnNotFoundException(
7911 "Currently there is no host with {} "
7912 "number of avaialble SRIOV devices required for VM {}".format(
7913 no_of_sriov_devices, vmname_andid
7914 )
7915 )
7916 else:
7917 self.logger.debug(
7918 "No infromation about SRIOV devices {} ", sriov_nets
7919 )
7920 except vmodl.MethodFault as error:
7921 self.logger.error("Error occurred while adding SRIOV {} ", error)
7922
7923 return None, vm_obj, vcenter_conect
7924
7925 def get_sriov_devices(self, host, no_of_vfs):
7926 """
7927 Method to get the details of SRIOV devices on given host
7928 Args:
7929 host - vSphere host object
7930 no_of_vfs - number of VFs needed on host
7931
7932 Returns:
7933 array of SRIOV devices
7934 """
7935 sriovInfo = []
7936
7937 if host:
7938 for device in host.config.pciPassthruInfo:
7939 if isinstance(device, vim.host.SriovInfo) and device.sriovActive:
7940 if device.numVirtualFunction >= no_of_vfs:
7941 sriovInfo.append(device)
7942 break
7943
7944 return sriovInfo
7945
7946 def get_host_and_sriov_devices(self, content, no_of_vfs):
7947 """
7948 Method to get the details of SRIOV devices infromation on all hosts
7949
7950 Args:
7951 content - vSphere host object
7952 no_of_vfs - number of pci VFs needed on host
7953
7954 Returns:
7955 array of SRIOV devices and host object
7956 """
7957 host_obj = None
7958 sriov_device_objs = None
7959
7960 try:
7961 if content:
7962 container = content.viewManager.CreateContainerView(
7963 content.rootFolder, [vim.HostSystem], True
7964 )
7965
7966 for host in container.view:
7967 devices = self.get_sriov_devices(host, no_of_vfs)
7968
7969 if devices:
7970 host_obj = host
7971 sriov_device_objs = devices
7972 break
7973 except Exception as exp:
7974 self.logger.error(
7975 "Error {} occurred while finding SRIOV devices on host: {}".format(
7976 exp, host_obj
7977 )
7978 )
7979
7980 return host_obj, sriov_device_objs
7981
7982 def add_sriov_to_vm(self, content, vm_obj, host_obj, network_name, sriov_device):
7983 """
7984 Method to add SRIOV adapter to vm
7985
7986 Args:
7987 host_obj - vSphere host object
7988 vm_obj - vSphere vm object
7989 content - vCenter content object
7990 network_name - name of distributed virtaul portgroup
7991 sriov_device - SRIOV device info
7992
7993 Returns:
7994 task object
7995 """
7996 devices = []
7997 vnic_label = "sriov nic"
7998
7999 try:
8000 dvs_portgr = self.get_dvport_group(network_name)
8001 network_name = dvs_portgr.name
8002 nic = vim.vm.device.VirtualDeviceSpec()
8003 # VM device
8004 nic.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
8005 nic.device = vim.vm.device.VirtualSriovEthernetCard()
8006 nic.device.addressType = "assigned"
8007 # nic.device.key = 13016
8008 nic.device.deviceInfo = vim.Description()
8009 nic.device.deviceInfo.label = vnic_label
8010 nic.device.deviceInfo.summary = network_name
8011 nic.device.backing = vim.vm.device.VirtualEthernetCard.NetworkBackingInfo()
8012
8013 nic.device.backing.network = self.get_obj(
8014 content, [vim.Network], network_name
8015 )
8016 nic.device.backing.deviceName = network_name
8017 nic.device.backing.useAutoDetect = False
8018 nic.device.connectable = vim.vm.device.VirtualDevice.ConnectInfo()
8019 nic.device.connectable.startConnected = True
8020 nic.device.connectable.allowGuestControl = True
8021
8022 nic.device.sriovBacking = (
8023 vim.vm.device.VirtualSriovEthernetCard.SriovBackingInfo()
8024 )
8025 nic.device.sriovBacking.physicalFunctionBacking = (
8026 vim.vm.device.VirtualPCIPassthrough.DeviceBackingInfo()
8027 )
8028 nic.device.sriovBacking.physicalFunctionBacking.id = sriov_device.id
8029
8030 devices.append(nic)
8031 vmconf = vim.vm.ConfigSpec(deviceChange=devices)
8032 task = vm_obj.ReconfigVM_Task(vmconf)
8033
8034 return task
8035 except Exception as exp:
8036 self.logger.error(
8037 "Error {} occurred while adding SRIOV adapter in VM: {}".format(
8038 exp, vm_obj
8039 )
8040 )
8041
8042 return None
8043
8044 def create_dvPort_group(self, network_name):
8045 """
8046 Method to create disributed virtual portgroup
8047
8048 Args:
8049 network_name - name of network/portgroup
8050
8051 Returns:
8052 portgroup key
8053 """
8054 try:
8055 new_network_name = [network_name, "-", str(uuid.uuid4())]
8056 network_name = "".join(new_network_name)
8057 vcenter_conect, content = self.get_vcenter_content()
8058
8059 dv_switch = self.get_obj(
8060 content, [vim.DistributedVirtualSwitch], self.dvs_name
8061 )
8062
8063 if dv_switch:
8064 dv_pg_spec = vim.dvs.DistributedVirtualPortgroup.ConfigSpec()
8065 dv_pg_spec.name = network_name
8066
8067 dv_pg_spec.type = (
8068 vim.dvs.DistributedVirtualPortgroup.PortgroupType.earlyBinding
8069 )
8070 dv_pg_spec.defaultPortConfig = (
8071 vim.dvs.VmwareDistributedVirtualSwitch.VmwarePortConfigPolicy()
8072 )
8073 dv_pg_spec.defaultPortConfig.securityPolicy = (
8074 vim.dvs.VmwareDistributedVirtualSwitch.SecurityPolicy()
8075 )
8076 dv_pg_spec.defaultPortConfig.securityPolicy.allowPromiscuous = (
8077 vim.BoolPolicy(value=False)
8078 )
8079 dv_pg_spec.defaultPortConfig.securityPolicy.forgedTransmits = (
8080 vim.BoolPolicy(value=False)
8081 )
8082 dv_pg_spec.defaultPortConfig.securityPolicy.macChanges = vim.BoolPolicy(
8083 value=False
8084 )
8085
8086 task = dv_switch.AddDVPortgroup_Task([dv_pg_spec])
8087 self.wait_for_vcenter_task(task, vcenter_conect)
8088
8089 dvPort_group = self.get_obj(
8090 content, [vim.dvs.DistributedVirtualPortgroup], network_name
8091 )
8092
8093 if dvPort_group:
8094 self.logger.info(
8095 "Created disributed virtaul port group: {}".format(dvPort_group)
8096 )
8097 return dvPort_group.key
8098 else:
8099 self.logger.debug(
8100 "No disributed virtual switch found with name {}".format(
8101 network_name
8102 )
8103 )
8104
8105 except Exception as exp:
8106 self.logger.error(
8107 "Error occurred while creating disributed virtaul port group {}"
8108 " : {}".format(network_name, exp)
8109 )
8110
8111 return None
8112
8113 def reconfig_portgroup(self, content, dvPort_group_name, config_info={}):
8114 """
8115 Method to reconfigure disributed virtual portgroup
8116
8117 Args:
8118 dvPort_group_name - name of disributed virtual portgroup
8119 content - vCenter content object
8120 config_info - disributed virtual portgroup configuration
8121
8122 Returns:
8123 task object
8124 """
8125 try:
8126 dvPort_group = self.get_dvport_group(dvPort_group_name)
8127
8128 if dvPort_group:
8129 dv_pg_spec = vim.dvs.DistributedVirtualPortgroup.ConfigSpec()
8130 dv_pg_spec.configVersion = dvPort_group.config.configVersion
8131 dv_pg_spec.defaultPortConfig = (
8132 vim.dvs.VmwareDistributedVirtualSwitch.VmwarePortConfigPolicy()
8133 )
8134
8135 if "vlanID" in config_info:
8136 dv_pg_spec.defaultPortConfig.vlan = (
8137 vim.dvs.VmwareDistributedVirtualSwitch.VlanIdSpec()
8138 )
8139 dv_pg_spec.defaultPortConfig.vlan.vlanId = config_info.get("vlanID")
8140
8141 task = dvPort_group.ReconfigureDVPortgroup_Task(spec=dv_pg_spec)
8142
8143 return task
8144 else:
8145 return None
8146 except Exception as exp:
8147 self.logger.error(
8148 "Error occurred while reconfiguraing disributed virtaul port group {}"
8149 " : {}".format(dvPort_group_name, exp)
8150 )
8151
8152 return None
8153
8154 def destroy_dvport_group(self, dvPort_group_name):
8155 """
8156 Method to destroy disributed virtual portgroup
8157
8158 Args:
8159 network_name - name of network/portgroup
8160
8161 Returns:
8162 True if portgroup successfully got deleted else false
8163 """
8164 vcenter_conect, _ = self.get_vcenter_content()
8165
8166 try:
8167 status = None
8168 dvPort_group = self.get_dvport_group(dvPort_group_name)
8169
8170 if dvPort_group:
8171 task = dvPort_group.Destroy_Task()
8172 status = self.wait_for_vcenter_task(task, vcenter_conect)
8173
8174 return status
8175 except vmodl.MethodFault as exp:
8176 self.logger.error(
8177 "Caught vmodl fault {} while deleting disributed virtaul port group {}".format(
8178 exp, dvPort_group_name
8179 )
8180 )
8181
8182 return None
8183
8184 def get_dvport_group(self, dvPort_group_name):
8185 """
8186 Method to get disributed virtual portgroup
8187
8188 Args:
8189 network_name - name of network/portgroup
8190
8191 Returns:
8192 portgroup object
8193 """
8194 _, content = self.get_vcenter_content()
8195 dvPort_group = None
8196
8197 try:
8198 container = content.viewManager.CreateContainerView(
8199 content.rootFolder, [vim.dvs.DistributedVirtualPortgroup], True
8200 )
8201
8202 for item in container.view:
8203 if item.key == dvPort_group_name:
8204 dvPort_group = item
8205 break
8206
8207 return dvPort_group
8208 except vmodl.MethodFault as exp:
8209 self.logger.error(
8210 "Caught vmodl fault {} for disributed virtual port group {}".format(
8211 exp, dvPort_group_name
8212 )
8213 )
8214
8215 return None
8216
8217 def get_vlanID_from_dvs_portgr(self, dvPort_group_name):
8218 """
8219 Method to get disributed virtual portgroup vlanID
8220
8221 Args:
8222 network_name - name of network/portgroup
8223
8224 Returns:
8225 vlan ID
8226 """
8227 vlanId = None
8228
8229 try:
8230 dvPort_group = self.get_dvport_group(dvPort_group_name)
8231
8232 if dvPort_group:
8233 vlanId = dvPort_group.config.defaultPortConfig.vlan.vlanId
8234 except vmodl.MethodFault as exp:
8235 self.logger.error(
8236 "Caught vmodl fault {} for disributed virtaul port group {}".format(
8237 exp, dvPort_group_name
8238 )
8239 )
8240
8241 return vlanId
8242
8243 def configure_vlanID(self, content, vcenter_conect, dvPort_group_name):
8244 """
8245 Method to configure vlanID in disributed virtual portgroup vlanID
8246
8247 Args:
8248 network_name - name of network/portgroup
8249
8250 Returns:
8251 None
8252 """
8253 vlanID = self.get_vlanID_from_dvs_portgr(dvPort_group_name)
8254
8255 if vlanID == 0:
8256 # configure vlanID
8257 vlanID = self.genrate_vlanID(dvPort_group_name)
8258 config = {"vlanID": vlanID}
8259 task = self.reconfig_portgroup(
8260 content, dvPort_group_name, config_info=config
8261 )
8262
8263 if task:
8264 status = self.wait_for_vcenter_task(task, vcenter_conect)
8265
8266 if status:
8267 self.logger.info(
8268 "Reconfigured Port group {} for vlan ID {}".format(
8269 dvPort_group_name, vlanID
8270 )
8271 )
8272 else:
8273 self.logger.error(
8274 "Fail reconfigure portgroup {} for vlanID{}".format(
8275 dvPort_group_name, vlanID
8276 )
8277 )
8278
8279 def genrate_vlanID(self, network_name):
8280 """
8281 Method to get unused vlanID
8282 Args:
8283 network_name - name of network/portgroup
8284 Returns:
8285 vlanID
8286 """
8287 vlan_id = None
8288 used_ids = []
8289
8290 if self.config.get("vlanID_range") is None:
8291 raise vimconn.VimConnConflictException(
8292 "You must provide a 'vlanID_range' "
8293 "at config value before creating sriov network with vlan tag"
8294 )
8295
8296 if "used_vlanIDs" not in self.persistent_info:
8297 self.persistent_info["used_vlanIDs"] = {}
8298 else:
8299 used_ids = list(self.persistent_info["used_vlanIDs"].values())
8300
8301 for vlanID_range in self.config.get("vlanID_range"):
8302 start_vlanid, end_vlanid = vlanID_range.split("-")
8303
8304 if start_vlanid > end_vlanid:
8305 raise vimconn.VimConnConflictException(
8306 "Invalid vlan ID range {}".format(vlanID_range)
8307 )
8308
8309 for vid in range(int(start_vlanid), int(end_vlanid) + 1):
8310 if vid not in used_ids:
8311 vlan_id = vid
8312 self.persistent_info["used_vlanIDs"][network_name] = vlan_id
8313 return vlan_id
8314
8315 if vlan_id is None:
8316 raise vimconn.VimConnConflictException("All Vlan IDs are in use")
8317
8318 def get_obj(self, content, vimtype, name):
8319 """
8320 Get the vsphere object associated with a given text name
8321 """
8322 obj = None
8323 container = content.viewManager.CreateContainerView(
8324 content.rootFolder, vimtype, True
8325 )
8326
8327 for item in container.view:
8328 if item.name == name:
8329 obj = item
8330 break
8331
8332 return obj
8333
8334 def insert_media_to_vm(self, vapp, image_id):
8335 """
8336 Method to insert media CD-ROM (ISO image) from catalog to vm.
8337 vapp - vapp object to get vm id
8338 Image_id - image id for cdrom to be inerted to vm
8339 """
8340 # create connection object
8341 vca = self.connect()
8342 try:
8343 # fetching catalog details
8344 rest_url = "{}/api/catalog/{}".format(self.url, image_id)
8345
8346 if vca._session:
8347 headers = {
8348 "Accept": "application/*+xml;version=" + API_VERSION,
8349 "x-vcloud-authorization": vca._session.headers[
8350 "x-vcloud-authorization"
8351 ],
8352 }
8353 response = self.perform_request(
8354 req_type="GET", url=rest_url, headers=headers
8355 )
8356
8357 if response.status_code != 200:
8358 self.logger.error(
8359 "REST call {} failed reason : {}"
8360 "status code : {}".format(
8361 rest_url, response.text, response.status_code
8362 )
8363 )
8364
8365 raise vimconn.VimConnException(
8366 "insert_media_to_vm(): Failed to get " "catalog details"
8367 )
8368
8369 # searching iso name and id
8370 iso_name, media_id = self.get_media_details(vca, response.text)
8371
8372 if iso_name and media_id:
8373 data = """<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
8374 <ns6:MediaInsertOrEjectParams
8375 xmlns="http://www.vmware.com/vcloud/versions" xmlns:ns2="http://schemas.dmtf.org/ovf/envelope/1"
8376 xmlns:ns3="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData"
8377 xmlns:ns4="http://schemas.dmtf.org/wbem/wscim/1/common"
8378 xmlns:ns5="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData"
8379 xmlns:ns6="http://www.vmware.com/vcloud/v1.5"
8380 xmlns:ns7="http://www.vmware.com/schema/ovf"
8381 xmlns:ns8="http://schemas.dmtf.org/ovf/environment/1"
8382 xmlns:ns9="http://www.vmware.com/vcloud/extension/v1.5">
8383 <ns6:Media
8384 type="application/vnd.vmware.vcloud.media+xml"
8385 name="{}"
8386 id="urn:vcloud:media:{}"
8387 href="https://{}/api/media/{}"/>
8388 </ns6:MediaInsertOrEjectParams>""".format(
8389 iso_name, media_id, self.url, media_id
8390 )
8391
8392 for vms in vapp.get_all_vms():
8393 vm_id = vms.get("id").split(":")[-1]
8394
8395 headers[
8396 "Content-Type"
8397 ] = "application/vnd.vmware.vcloud.mediaInsertOrEjectParams+xml"
8398 rest_url = "{}/api/vApp/vm-{}/media/action/insertMedia".format(
8399 self.url, vm_id
8400 )
8401
8402 response = self.perform_request(
8403 req_type="POST", url=rest_url, data=data, headers=headers
8404 )
8405
8406 if response.status_code != 202:
8407 error_msg = (
8408 "insert_media_to_vm() : Failed to insert CD-ROM to vm. Reason {}. "
8409 "Status code {}".format(response.text, response.status_code)
8410 )
8411 self.logger.error(error_msg)
8412
8413 raise vimconn.VimConnException(error_msg)
8414 else:
8415 task = self.get_task_from_response(response.text)
8416 result = self.client.get_task_monitor().wait_for_success(
8417 task=task
8418 )
8419
8420 if result.get("status") == "success":
8421 self.logger.info(
8422 "insert_media_to_vm(): Sucessfully inserted media ISO"
8423 " image to vm {}".format(vm_id)
8424 )
8425 except Exception as exp:
8426 self.logger.error(
8427 "insert_media_to_vm() : exception occurred "
8428 "while inserting media CD-ROM"
8429 )
8430
8431 raise vimconn.VimConnException(message=exp)
8432
8433 def get_media_details(self, vca, content):
8434 """
8435 Method to get catalog item details
8436 vca - connection object
8437 content - Catalog details
8438 Return - Media name, media id
8439 """
8440 cataloghref_list = []
8441 try:
8442 if content:
8443 vm_list_xmlroot = XmlElementTree.fromstring(content)
8444
8445 for child in vm_list_xmlroot.iter():
8446 if "CatalogItem" in child.tag:
8447 cataloghref_list.append(child.attrib.get("href"))
8448
8449 if cataloghref_list is not None:
8450 for href in cataloghref_list:
8451 if href:
8452 headers = {
8453 "Accept": "application/*+xml;version=" + API_VERSION,
8454 "x-vcloud-authorization": vca._session.headers[
8455 "x-vcloud-authorization"
8456 ],
8457 }
8458 response = self.perform_request(
8459 req_type="GET", url=href, headers=headers
8460 )
8461
8462 if response.status_code != 200:
8463 self.logger.error(
8464 "REST call {} failed reason : {}"
8465 "status code : {}".format(
8466 href, response.text, response.status_code
8467 )
8468 )
8469
8470 raise vimconn.VimConnException(
8471 "get_media_details : Failed to get "
8472 "catalogitem details"
8473 )
8474
8475 list_xmlroot = XmlElementTree.fromstring(response.text)
8476
8477 for child in list_xmlroot.iter():
8478 if "Entity" in child.tag:
8479 if "media" in child.attrib.get("href"):
8480 name = child.attrib.get("name")
8481 media_id = (
8482 child.attrib.get("href").split("/").pop()
8483 )
8484
8485 return name, media_id
8486 else:
8487 self.logger.debug("Media name and id not found")
8488
8489 return False, False
8490 except Exception as exp:
8491 self.logger.error(
8492 "get_media_details : exception occurred " "getting media details"
8493 )
8494
8495 raise vimconn.VimConnException(message=exp)
8496
8497 def retry_rest(self, method, url, add_headers=None, data=None):
8498 """Method to get Token & retry respective REST request
8499 Args:
8500 api - REST API - Can be one of 'GET' or 'PUT' or 'POST'
8501 url - request url to be used
8502 add_headers - Additional headers (optional)
8503 data - Request payload data to be passed in request
8504 Returns:
8505 response - Response of request
8506 """
8507 response = None
8508
8509 # Get token
8510 self.get_token()
8511
8512 if self.client._session:
8513 headers = {
8514 "Accept": "application/*+xml;version=" + API_VERSION,
8515 "x-vcloud-authorization": self.client._session.headers[
8516 "x-vcloud-authorization"
8517 ],
8518 }
8519
8520 if add_headers:
8521 headers.update(add_headers)
8522
8523 if method == "GET":
8524 response = self.perform_request(req_type="GET", url=url, headers=headers)
8525 elif method == "PUT":
8526 response = self.perform_request(
8527 req_type="PUT", url=url, headers=headers, data=data
8528 )
8529 elif method == "POST":
8530 response = self.perform_request(
8531 req_type="POST", url=url, headers=headers, data=data
8532 )
8533 elif method == "DELETE":
8534 response = self.perform_request(req_type="DELETE", url=url, headers=headers)
8535
8536 return response
8537
8538 def get_token(self):
8539 """Generate a new token if expired
8540
8541 Returns:
8542 The return client object that letter can be used to connect to vCloud director as admin for VDC
8543 """
8544 self.client = self.connect()
8545
8546 def get_vdc_details(self):
8547 """Get VDC details using pyVcloud Lib
8548
8549 Returns org and vdc object
8550 """
8551 vdc = None
8552
8553 try:
8554 org = Org(self.client, resource=self.client.get_org())
8555 vdc = org.get_vdc(self.tenant_name)
8556 except Exception as e:
8557 # pyvcloud not giving a specific exception, Refresh nevertheless
8558 self.logger.debug("Received exception {}, refreshing token ".format(str(e)))
8559
8560 # Retry once, if failed by refreshing token
8561 if vdc is None:
8562 self.get_token()
8563 org = Org(self.client, resource=self.client.get_org())
8564 vdc = org.get_vdc(self.tenant_name)
8565
8566 return org, vdc
8567
8568 def perform_request(self, req_type, url, headers=None, data=None):
8569 """Perform the POST/PUT/GET/DELETE request."""
8570 # Log REST request details
8571 self.log_request(req_type, url=url, headers=headers, data=data)
8572 # perform request and return its result
8573
8574 if req_type == "GET":
8575 response = requests.get(url=url, headers=headers, verify=False)
8576 elif req_type == "PUT":
8577 response = requests.put(url=url, headers=headers, data=data, verify=False)
8578 elif req_type == "POST":
8579 response = requests.post(url=url, headers=headers, data=data, verify=False)
8580 elif req_type == "DELETE":
8581 response = requests.delete(url=url, headers=headers, verify=False)
8582
8583 # Log the REST response
8584 self.log_response(response)
8585
8586 return response
8587
8588 def log_request(self, req_type, url=None, headers=None, data=None):
8589 """Logs REST request details"""
8590
8591 if req_type is not None:
8592 self.logger.debug("Request type: {}".format(req_type))
8593
8594 if url is not None:
8595 self.logger.debug("Request url: {}".format(url))
8596
8597 if headers is not None:
8598 for header in headers:
8599 self.logger.debug(
8600 "Request header: {}: {}".format(header, headers[header])
8601 )
8602
8603 if data is not None:
8604 self.logger.debug("Request data: {}".format(data))
8605
8606 def log_response(self, response):
8607 """Logs REST response details"""
8608
8609 self.logger.debug("Response status code: {} ".format(response.status_code))
8610
8611 def get_task_from_response(self, content):
8612 """
8613 content - API response.text(response.text)
8614 return task object
8615 """
8616 xmlroot = XmlElementTree.fromstring(content)
8617
8618 if xmlroot.tag.split("}")[1] == "Task":
8619 return xmlroot
8620 else:
8621 for ele in xmlroot:
8622 if ele.tag.split("}")[1] == "Tasks":
8623 task = ele[0]
8624 break
8625
8626 return task
8627
8628 def power_on_vapp(self, vapp_id, vapp_name):
8629 """
8630 vapp_id - vApp uuid
8631 vapp_name - vAapp name
8632 return - Task object
8633 """
8634 headers = {
8635 "Accept": "application/*+xml;version=" + API_VERSION,
8636 "x-vcloud-authorization": self.client._session.headers[
8637 "x-vcloud-authorization"
8638 ],
8639 }
8640
8641 poweron_href = "{}/api/vApp/vapp-{}/power/action/powerOn".format(
8642 self.url, vapp_id
8643 )
8644 response = self.perform_request(
8645 req_type="POST", url=poweron_href, headers=headers
8646 )
8647
8648 if response.status_code != 202:
8649 self.logger.error(
8650 "REST call {} failed reason : {}"
8651 "status code : {} ".format(
8652 poweron_href, response.text, response.status_code
8653 )
8654 )
8655
8656 raise vimconn.VimConnException(
8657 "power_on_vapp() : Failed to power on " "vApp {}".format(vapp_name)
8658 )
8659 else:
8660 poweron_task = self.get_task_from_response(response.text)
8661
8662 return poweron_task