Fix Bug 1910 Enabling stage-test failing by tox results
osm/RO.git: RO-VIM-vmware/osm_rovim_vmware/vimconn_vmware.py
1 # -*- coding: utf-8 -*-
2
3 # #
4 # Copyright 2016-2019 VMware Inc.
5 # This file is part of ETSI OSM
6 # All Rights Reserved.
7 #
8 # Licensed under the Apache License, Version 2.0 (the "License"); you may
9 # not use this file except in compliance with the License. You may obtain
10 # a copy of the License at
11 #
12 # http://www.apache.org/licenses/LICENSE-2.0
13 #
14 # Unless required by applicable law or agreed to in writing, software
15 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
16 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
17 # License for the specific language governing permissions and limitations
18 # under the License.
19 #
20 # For those usages not covered by the Apache License, Version 2.0 please
21 # contact: osslegalrouting@vmware.com
22 # #
23
24 """
25 vimconn_vmware: implementation of the vimconn abstract class to interact with VMware vCloud Director.
26 """
27
28 import atexit
29 import hashlib
30 import json
31 import logging
32 import os
33 import random
34 import re
35 import shutil
36 import socket
37 import ssl
38 import struct
39 import subprocess
40 import tempfile
41 import time
42 import traceback
43 import uuid
44 from xml.etree import ElementTree as XmlElementTree
45 from xml.sax.saxutils import escape
46
47 from lxml import etree as lxmlElementTree
48 import netaddr
49 from osm_ro_plugin import vimconn
50 from progressbar import Bar, ETA, FileTransferSpeed, Percentage, ProgressBar
51 from pyvcloud.vcd.client import BasicLoginCredentials, Client
52 from pyvcloud.vcd.org import Org
53 from pyvcloud.vcd.vapp import VApp
54 from pyvcloud.vcd.vdc import VDC
55 from pyVim.connect import Disconnect, SmartConnect
56 from pyVmomi import vim, vmodl # @UnresolvedImport
57 import requests
58 import yaml
59
60 # global variable for vcd connector type
61 STANDALONE = "standalone"
62
63 # key for flavor dicts
64 FLAVOR_RAM_KEY = "ram"
65 FLAVOR_VCPUS_KEY = "vcpus"
66 FLAVOR_DISK_KEY = "disk"
67 DEFAULT_IP_PROFILE = {"dhcp_count": 50, "dhcp_enabled": True, "ip_version": "IPv4"}
68 # global variable for wait time
69 INTERVAL_TIME = 5
70 MAX_WAIT_TIME = 1800
71
72 API_VERSION = "27.0"
73
74 # -1: "Could not be created",
75 # 0: "Unresolved",
76 # 1: "Resolved",
77 # 2: "Deployed",
78 # 3: "Suspended",
79 # 4: "Powered on",
80 # 5: "Waiting for user input",
81 # 6: "Unknown state",
82 # 7: "Unrecognized state",
83 # 8: "Powered off",
84 # 9: "Inconsistent state",
85 # 10: "Children do not all have the same status",
86 # 11: "Upload initiated, OVF descriptor pending",
87 # 12: "Upload initiated, copying contents",
88 # 13: "Upload initiated , disk contents pending",
89 # 14: "Upload has been quarantined",
90 # 15: "Upload quarantine period has expired"
91
92 # mapping vCD status to MANO
93 vcdStatusCode2manoFormat = {
94 4: "ACTIVE",
95 7: "PAUSED",
96 3: "SUSPENDED",
97 8: "INACTIVE",
98 12: "BUILD",
99 -1: "ERROR",
100 14: "DELETED",
101 }
102
103 #
104 netStatus2manoFormat = {
105 "ACTIVE": "ACTIVE",
106 "PAUSED": "PAUSED",
107 "INACTIVE": "INACTIVE",
108 "BUILD": "BUILD",
109 "ERROR": "ERROR",
110 "DELETED": "DELETED",
111 }
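# Illustrative note: a vApp reported by vCD with status code 4 ("Powered on") maps to the
# MANO state "ACTIVE"; codes not listed in this table have no MANO equivalent in this mapping.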
112
113
114 class vimconnector(vimconn.VimConnector):
115 # dict used to store flavor in memory
116 flavorlist = {}
117
118 def __init__(
119 self,
120 uuid=None,
121 name=None,
122 tenant_id=None,
123 tenant_name=None,
124 url=None,
125 url_admin=None,
126 user=None,
127 passwd=None,
128 log_level=None,
129 config={},
130 persistent_info={},
131 ):
132 """
133         Constructor creates a VMware connector to vCloud Director.
134
135         By default the constructor does not validate the connection state, so the client can create the object with None arguments.
136         If the client specifies username, password, host and VDC name, the connector initializes the other missing attributes:
137
138         a) it initializes the organization UUID
139         b) it initializes tenant_id/VDC ID (this information is derived from the tenant name)
140
141         Args:
142             uuid - organization uuid.
143             name - organization name; it must be present in vCloud Director.
144             tenant_id - VDC uuid; it must be present in vCloud Director
145             tenant_name - VDC name.
146             url - hostname or IP address of vCloud Director
147             url_admin - same as above.
148             user - user that is an administrator for the organization. The caller must make sure that
149                    the username has the right privileges.
150
151             passwd - password for the user.
152
153             The VMware connector also requires PVDC administrative privileges and a separate account.
154             These credentials must be passed via the config argument dict, which contains the keys
155
156                 config['admin_username']
157                 config['admin_password']
158             config - provides NSX and vCenter information
159
160         Returns:
161             Nothing.
162 """
163
164 vimconn.VimConnector.__init__(
165 self,
166 uuid,
167 name,
168 tenant_id,
169 tenant_name,
170 url,
171 url_admin,
172 user,
173 passwd,
174 log_level,
175 config,
176 )
177
178 self.logger = logging.getLogger("ro.vim.vmware")
179 self.logger.setLevel(10)
180 self.persistent_info = persistent_info
181
182 self.name = name
183 self.id = uuid
184 self.url = url
185 self.url_admin = url_admin
186 self.tenant_id = tenant_id
187 self.tenant_name = tenant_name
188 self.user = user
189 self.passwd = passwd
190 self.config = config
191 self.admin_password = None
192 self.admin_user = None
193 self.org_name = ""
194 self.nsx_manager = None
195 self.nsx_user = None
196 self.nsx_password = None
197 self.availability_zone = None
198
199 # Disable warnings from self-signed certificates.
200 requests.packages.urllib3.disable_warnings()
201
202 if tenant_name is not None:
203 orgnameandtenant = tenant_name.split(":")
204
205 if len(orgnameandtenant) == 2:
206 self.tenant_name = orgnameandtenant[1]
207 self.org_name = orgnameandtenant[0]
208 else:
209 self.tenant_name = tenant_name
210
211 if "orgname" in config:
212 self.org_name = config["orgname"]
213
214 if log_level:
215 self.logger.setLevel(getattr(logging, log_level))
216
217 try:
218 self.admin_user = config["admin_username"]
219 self.admin_password = config["admin_password"]
220 except KeyError:
221 raise vimconn.VimConnException(
222 message="Error admin username or admin password is empty."
223 )
224
225 try:
226 self.nsx_manager = config["nsx_manager"]
227 self.nsx_user = config["nsx_user"]
228 self.nsx_password = config["nsx_password"]
229 except KeyError:
230 raise vimconn.VimConnException(
231 message="Error: nsx manager or nsx user or nsx password is empty in Config"
232 )
233
234 self.vcenter_ip = config.get("vcenter_ip", None)
235 self.vcenter_port = config.get("vcenter_port", None)
236 self.vcenter_user = config.get("vcenter_user", None)
237 self.vcenter_password = config.get("vcenter_password", None)
238
239 # Set availability zone for Affinity rules
240 self.availability_zone = self.set_availability_zones()
241
242 # ############# Stub code for SRIOV #################
243 # try:
244 # self.dvs_name = config['dv_switch_name']
245 # except KeyError:
246 # raise vimconn.VimConnException(message="Error:
247 # distributed virtaul switch name is empty in Config")
248 #
249 # self.vlanID_range = config.get("vlanID_range", None)
250
251 self.org_uuid = None
252 self.client = None
253
254 if not url:
255 raise vimconn.VimConnException("url param can not be NoneType")
256
257 if not self.url_admin: # try to use normal url
258 self.url_admin = self.url
259
260 logging.debug(
261 "UUID: {} name: {} tenant_id: {} tenant name {}".format(
262 self.id, self.org_name, self.tenant_id, self.tenant_name
263 )
264 )
265 logging.debug(
266 "vcd url {} vcd username: {} vcd password: {}".format(
267 self.url, self.user, self.passwd
268 )
269 )
270 logging.debug(
271 "vcd admin username {} vcd admin passowrd {}".format(
272 self.admin_user, self.admin_password
273 )
274 )
275
276 # initialize organization
277 if self.user is not None and self.passwd is not None and self.url:
278 self.init_organization()
279
280 def __getitem__(self, index):
281 if index == "name":
282 return self.name
283
284 if index == "tenant_id":
285 return self.tenant_id
286
287 if index == "tenant_name":
288 return self.tenant_name
289 elif index == "id":
290 return self.id
291 elif index == "org_name":
292 return self.org_name
293 elif index == "org_uuid":
294 return self.org_uuid
295 elif index == "user":
296 return self.user
297 elif index == "passwd":
298 return self.passwd
299 elif index == "url":
300 return self.url
301 elif index == "url_admin":
302 return self.url_admin
303 elif index == "config":
304 return self.config
305 else:
306 raise KeyError("Invalid key '{}'".format(index))
307
308 def __setitem__(self, index, value):
309 if index == "name":
310 self.name = value
311
312 if index == "tenant_id":
313 self.tenant_id = value
314
315 if index == "tenant_name":
316 self.tenant_name = value
317 elif index == "id":
318 self.id = value
319 elif index == "org_name":
320 self.org_name = value
321 elif index == "org_uuid":
322 self.org_uuid = value
323 elif index == "user":
324 self.user = value
325 elif index == "passwd":
326 self.passwd = value
327 elif index == "url":
328 self.url = value
329 elif index == "url_admin":
330 self.url_admin = value
331 else:
332 raise KeyError("Invalid key '{}'".format(index))
333
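    # Illustrative note: __getitem__/__setitem__ above give the connector dict-like access, e.g.
    # (hypothetical values)  vim["tenant_name"]  or  vim["url_admin"] = "https://vcd.example.com".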
334 def connect_as_admin(self):
335 """Method connect as pvdc admin user to vCloud director.
336 There are certain action that can be done only by provider vdc admin user.
337 Organization creation / provider network creation etc.
338
339 Returns:
340 The return client object that latter can be used to connect to vcloud director as admin for provider vdc
341 """
342 self.logger.debug("Logging into vCD {} as admin.".format(self.org_name))
343
344 try:
345 host = self.url
346 org = "System"
347 client_as_admin = Client(
348 host, verify_ssl_certs=False, api_version=API_VERSION
349 )
350 client_as_admin.set_credentials(
351 BasicLoginCredentials(self.admin_user, org, self.admin_password)
352 )
353 except Exception as e:
354 raise vimconn.VimConnException(
355 "Can't connect to vCloud director as: {} with exception {}".format(
356 self.admin_user, e
357 )
358 )
359
360 return client_as_admin
361
362 def connect(self):
363 """Method connect as normal user to vCloud director.
364
365 Returns:
366 The return client object that latter can be used to connect to vCloud director as admin for VDC
367 """
368 try:
369 self.logger.debug(
370 "Logging into vCD {} as {} to datacenter {}.".format(
371 self.org_name, self.user, self.org_name
372 )
373 )
374 host = self.url
375 client = Client(host, verify_ssl_certs=False, api_version=API_VERSION)
376 client.set_credentials(
377 BasicLoginCredentials(self.user, self.org_name, self.passwd)
378 )
379 except Exception as e:
380 raise vimconn.VimConnConnectionException(
381 "Can't connect to vCloud director org: "
382 "{} as user {} with exception: {}".format(self.org_name, self.user, e)
383 )
384
385 return client
386
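    # Sketch (assuming standard pyvcloud usage) of how the client returned by connect() is
    # typically consumed to obtain org and VDC handles; the names below are illustrative only:
    #
    #   client = self.connect()
    #   org = Org(client, resource=client.get_org())
    #   vdc = VDC(client, resource=org.get_vdc(self.tenant_name))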
387 def init_organization(self):
388 """Method initialize organization UUID and VDC parameters.
389
390 At bare minimum client must provide organization name that present in vCloud director and VDC.
391
392 The VDC - UUID ( tenant_id) will be initialized at the run time if client didn't call constructor.
393 The Org - UUID will be initialized at the run time if data center present in vCloud director.
394
395 Returns:
396 The return vca object that letter can be used to connect to vcloud direct as admin
397 """
398 client = self.connect()
399
400 if not client:
401 raise vimconn.VimConnConnectionException("Failed to connect vCD.")
402
403 self.client = client
404 try:
405 if self.org_uuid is None:
406 org_list = client.get_org_list()
407 for org in org_list.Org:
408 # we set org UUID at the init phase but we can do it only when we have valid credential.
409 if org.get("name") == self.org_name:
410 self.org_uuid = org.get("href").split("/")[-1]
411 self.logger.debug(
412 "Setting organization UUID {}".format(self.org_uuid)
413 )
414 break
415 else:
416 raise vimconn.VimConnException(
417 "Vcloud director organization {} not found".format(
418 self.org_name
419 )
420 )
421
422             # if all went well, request the org details
423 org_details_dict = self.get_org(org_uuid=self.org_uuid)
424
425             # there are two cases when initializing the VDC ID or VDC name at run time
426 # tenant_name provided but no tenant id
427 if (
428 self.tenant_id is None
429 and self.tenant_name is not None
430 and "vdcs" in org_details_dict
431 ):
432 vdcs_dict = org_details_dict["vdcs"]
433 for vdc in vdcs_dict:
434 if vdcs_dict[vdc] == self.tenant_name:
435 self.tenant_id = vdc
436 self.logger.debug(
437 "Setting vdc uuid {} for organization UUID {}".format(
438 self.tenant_id, self.org_name
439 )
440 )
441 break
442 else:
443 raise vimconn.VimConnException(
444 "Tenant name indicated but not present in vcloud director."
445 )
446
447             # case two: we have tenant_id but not tenant_name, so we find and set it.
448 if (
449 self.tenant_id is not None
450 and self.tenant_name is None
451 and "vdcs" in org_details_dict
452 ):
453 vdcs_dict = org_details_dict["vdcs"]
454 for vdc in vdcs_dict:
455 if vdc == self.tenant_id:
456 self.tenant_name = vdcs_dict[vdc]
457 self.logger.debug(
458 "Setting vdc uuid {} for organization UUID {}".format(
459 self.tenant_id, self.org_name
460 )
461 )
462 break
463 else:
464 raise vimconn.VimConnException(
465 "Tenant id indicated but not present in vcloud director"
466 )
467
468 self.logger.debug("Setting organization uuid {}".format(self.org_uuid))
469 except Exception as e:
470 self.logger.debug(
471 "Failed initialize organization UUID for org {}: {}".format(
472 self.org_name, e
473 ),
474 )
475 self.logger.debug(traceback.format_exc())
476 self.org_uuid = None
477
478 def new_tenant(self, tenant_name=None, tenant_description=None):
479 """Method adds a new tenant to VIM with this name.
480 This action requires access to create VDC action in vCloud director.
481
482 Args:
483 tenant_name is tenant_name to be created.
484 tenant_description not used for this call
485
486 Return:
487 returns the tenant identifier in UUID format.
488 If action is failed method will throw vimconn.VimConnException method
489 """
490 vdc_task = self.create_vdc(vdc_name=tenant_name)
491 if vdc_task is not None:
492 vdc_uuid, _ = vdc_task.popitem()
493 self.logger.info(
494 "Created new vdc {} and uuid: {}".format(tenant_name, vdc_uuid)
495 )
496
497 return vdc_uuid
498 else:
499 raise vimconn.VimConnException(
500 "Failed create tenant {}".format(tenant_name)
501 )
502
503 def delete_tenant(self, tenant_id=None):
504 """Delete a tenant from VIM
505 Args:
506 tenant_id is tenant_id to be deleted.
507
508 Return:
509 returns the tenant identifier in UUID format.
510             If the action fails the method raises an exception
511 """
512 vca = self.connect_as_admin()
513 if not vca:
514 raise vimconn.VimConnConnectionException("Failed to connect vCD")
515
516 if tenant_id is not None:
517 if vca._session:
518 # Get OrgVDC
519 url_list = [self.url, "/api/vdc/", tenant_id]
520 orgvdc_herf = "".join(url_list)
521
522 headers = {
523 "Accept": "application/*+xml;version=" + API_VERSION,
524 "x-vcloud-authorization": vca._session.headers[
525 "x-vcloud-authorization"
526 ],
527 }
528 response = self.perform_request(
529 req_type="GET", url=orgvdc_herf, headers=headers
530 )
531
532 if response.status_code != requests.codes.ok:
533 self.logger.debug(
534 "delete_tenant():GET REST API call {} failed. "
535 "Return status code {}".format(
536 orgvdc_herf, response.status_code
537 )
538 )
539
540 raise vimconn.VimConnNotFoundException(
541 "Fail to get tenant {}".format(tenant_id)
542 )
543
544 lxmlroot_respond = lxmlElementTree.fromstring(response.content)
545 namespaces = {
546 prefix: uri
547 for prefix, uri in lxmlroot_respond.nsmap.items()
548 if prefix
549 }
550 namespaces["xmlns"] = "http://www.vmware.com/vcloud/v1.5"
551 vdc_remove_href = lxmlroot_respond.find(
552 "xmlns:Link[@rel='remove']", namespaces
553 ).attrib["href"]
554 vdc_remove_href = vdc_remove_href + "?recursive=true&force=true"
555
556 response = self.perform_request(
557 req_type="DELETE", url=vdc_remove_href, headers=headers
558 )
559
560 if response.status_code == 202:
561 time.sleep(5)
562
563 return tenant_id
564 else:
565 self.logger.debug(
566 "delete_tenant(): DELETE REST API call {} failed. "
567 "Return status code {}".format(
568 vdc_remove_href, response.status_code
569 )
570 )
571
572 raise vimconn.VimConnException(
573 "Fail to delete tenant with ID {}".format(tenant_id)
574 )
575 else:
576 self.logger.debug(
577 "delete_tenant():Incorrect tenant ID {}".format(tenant_id)
578 )
579
580 raise vimconn.VimConnNotFoundException(
581 "Fail to get tenant {}".format(tenant_id)
582 )
583
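    # Sketch of the vCD REST call pattern used throughout this class (self.perform_request is a
    # helper method of this class; the URL value below is a placeholder):
    #
    #   headers = {
    #       "Accept": "application/*+xml;version=" + API_VERSION,
    #       "x-vcloud-authorization": vca._session.headers["x-vcloud-authorization"],
    #   }
    #   response = self.perform_request(req_type="GET", url="<href>", headers=headers)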
584 def get_tenant_list(self, filter_dict={}):
585 """Obtain tenants of VIM
586 filter_dict can contain the following keys:
587 name: filter by tenant name
588 id: filter by tenant uuid/id
589 <other VIM specific>
590 Returns the tenant list of dictionaries:
591             [{'name': '<name>', 'id': '<id>', ...}, ...]
592
593 """
594 org_dict = self.get_org(self.org_uuid)
595 vdcs_dict = org_dict["vdcs"]
596
597 vdclist = []
598 try:
599 for k in vdcs_dict:
600 entry = {"name": vdcs_dict[k], "id": k}
601 # if caller didn't specify dictionary we return all tenants.
602
603 if filter_dict is not None and filter_dict:
604 filtered_entry = entry.copy()
605 filtered_dict = set(entry.keys()) - set(filter_dict)
606
607 for unwanted_key in filtered_dict:
608 del entry[unwanted_key]
609
610 if filter_dict == entry:
611 vdclist.append(filtered_entry)
612 else:
613 vdclist.append(entry)
614         except Exception as exp:
615             self.logger.debug("Error in get_tenant_list()")
616             self.logger.debug(traceback.format_exc())
617
618             raise vimconn.VimConnException("Incorrect state. {}".format(exp))
619
620 return vdclist
621
622 def new_network(
623 self,
624 net_name,
625 net_type,
626 ip_profile=None,
627 shared=False,
628 provider_network_profile=None,
629 ):
630 """Adds a tenant network to VIM
631 Params:
632 'net_name': name of the network
633 'net_type': one of:
634 'bridge': overlay isolated network
635 'data': underlay E-LAN network for Passthrough and SRIOV interfaces
636 'ptp': underlay E-LINE network for Passthrough and SRIOV interfaces.
637 'ip_profile': is a dict containing the IP parameters of the network
638 'ip_version': can be "IPv4" or "IPv6" (Currently only IPv4 is implemented)
639 'subnet_address': ip_prefix_schema, that is X.X.X.X/Y
640 'gateway_address': (Optional) ip_schema, that is X.X.X.X
641 'dns_address': (Optional) comma separated list of ip_schema, e.g. X.X.X.X[,X,X,X,X]
642 'dhcp_enabled': True or False
643 'dhcp_start_address': ip_schema, first IP to grant
644 'dhcp_count': number of IPs to grant.
645 'shared': if this network can be seen/use by other tenants/organization
646             'provider_network_profile': (optional) contains {segmentation-id: vlan, provider-network: vim_network}
647 Returns a tuple with the network identifier and created_items, or raises an exception on error
648 created_items can be None or a dictionary where this method can include key-values that will be passed to
649 the method delete_network. Can be used to store created segments, created l2gw connections, etc.
650 Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
651 as not present.
652 """
653
654 self.logger.debug(
655 "new_network tenant {} net_type {} ip_profile {} shared {} provider_network_profile {}".format(
656 net_name, net_type, ip_profile, shared, provider_network_profile
657 )
658 )
659 # vlan = None
660 # if provider_network_profile:
661 # vlan = provider_network_profile.get("segmentation-id")
662
663 created_items = {}
664 isshared = "false"
665
666 if shared:
667 isshared = "true"
668
669 # ############# Stub code for SRIOV #################
670 # if net_type == "data" or net_type == "ptp":
671 # if self.config.get('dv_switch_name') == None:
672 # raise vimconn.VimConnConflictException("You must provide 'dv_switch_name' at config value")
673 # network_uuid = self.create_dvPort_group(net_name)
674 parent_network_uuid = None
675
676 if provider_network_profile is not None:
677 for k, v in provider_network_profile.items():
678 if k == "physical_network":
679 parent_network_uuid = self.get_physical_network_by_name(v)
680
681 network_uuid = self.create_network(
682 network_name=net_name,
683 net_type=net_type,
684 ip_profile=ip_profile,
685 isshared=isshared,
686 parent_network_uuid=parent_network_uuid,
687 )
688
689 if network_uuid is not None:
690 return network_uuid, created_items
691 else:
692 raise vimconn.VimConnUnexpectedResponse(
693 "Failed create a new network {}".format(net_name)
694 )
695
696 def get_vcd_network_list(self):
697 """Method available organization for a logged in tenant
698
699 Returns:
700 The return vca object that letter can be used to connect to vcloud direct as admin
701 """
702
703 self.logger.debug(
704 "get_vcd_network_list(): retrieving network list for vcd {}".format(
705 self.tenant_name
706 )
707 )
708
709 if not self.tenant_name:
710 raise vimconn.VimConnConnectionException("Tenant name is empty.")
711
712 _, vdc = self.get_vdc_details()
713 if vdc is None:
714 raise vimconn.VimConnConnectionException(
715 "Can't retrieve information for a VDC {}".format(self.tenant_name)
716 )
717
718 vdc_uuid = vdc.get("id").split(":")[3]
719 if self.client._session:
720 headers = {
721 "Accept": "application/*+xml;version=" + API_VERSION,
722 "x-vcloud-authorization": self.client._session.headers[
723 "x-vcloud-authorization"
724 ],
725 }
726 response = self.perform_request(
727 req_type="GET", url=vdc.get("href"), headers=headers
728 )
729
730 if response.status_code != 200:
731 self.logger.error("Failed to get vdc content")
732 raise vimconn.VimConnNotFoundException("Failed to get vdc content")
733 else:
734 content = XmlElementTree.fromstring(response.text)
735
736 network_list = []
737 try:
738 for item in content:
739 if item.tag.split("}")[-1] == "AvailableNetworks":
740 for net in item:
741 response = self.perform_request(
742 req_type="GET", url=net.get("href"), headers=headers
743 )
744
745 if response.status_code != 200:
746 self.logger.error("Failed to get network content")
747 raise vimconn.VimConnNotFoundException(
748 "Failed to get network content"
749 )
750 else:
751 net_details = XmlElementTree.fromstring(response.text)
752
753 filter_dict = {}
754 net_uuid = net_details.get("id").split(":")
755
756 if len(net_uuid) != 4:
757 continue
758 else:
759 net_uuid = net_uuid[3]
760 # create dict entry
761 self.logger.debug(
762 "get_vcd_network_list(): Adding network {} "
763 "to a list vcd id {} network {}".format(
764 net_uuid, vdc_uuid, net_details.get("name")
765 )
766 )
767 filter_dict["name"] = net_details.get("name")
768 filter_dict["id"] = net_uuid
769
770 if [
771 i.text
772 for i in net_details
773 if i.tag.split("}")[-1] == "IsShared"
774 ][0] == "true":
775 shared = True
776 else:
777 shared = False
778
779 filter_dict["shared"] = shared
780 filter_dict["tenant_id"] = vdc_uuid
781
782 if int(net_details.get("status")) == 1:
783 filter_dict["admin_state_up"] = True
784 else:
785 filter_dict["admin_state_up"] = False
786
787 filter_dict["status"] = "ACTIVE"
788 filter_dict["type"] = "bridge"
789 network_list.append(filter_dict)
790 self.logger.debug(
791 "get_vcd_network_list adding entry {}".format(
792 filter_dict
793 )
794 )
795 except Exception:
796 self.logger.debug("Error in get_vcd_network_list", exc_info=True)
797 pass
798
799 self.logger.debug("get_vcd_network_list returning {}".format(network_list))
800
801 return network_list
802
803 def get_network_list(self, filter_dict={}):
804 """Obtain tenant networks of VIM
805 Filter_dict can be:
806 name: network name OR/AND
807 id: network uuid OR/AND
808 shared: boolean OR/AND
809 tenant_id: tenant OR/AND
810 admin_state_up: boolean
811 status: 'ACTIVE'
812
813 [{key : value , key : value}]
814
815 Returns the network list of dictionaries:
816 [{<the fields at Filter_dict plus some VIM specific>}, ...]
817 List can be empty
818 """
819
820 self.logger.debug(
821 "get_network_list(): retrieving network list for vcd {}".format(
822 self.tenant_name
823 )
824 )
825
826 if not self.tenant_name:
827 raise vimconn.VimConnConnectionException("Tenant name is empty.")
828
829 _, vdc = self.get_vdc_details()
830 if vdc is None:
831 raise vimconn.VimConnConnectionException(
832 "Can't retrieve information for a VDC {}.".format(self.tenant_name)
833 )
834
835 try:
836 vdcid = vdc.get("id").split(":")[3]
837
838 if self.client._session:
839 headers = {
840 "Accept": "application/*+xml;version=" + API_VERSION,
841 "x-vcloud-authorization": self.client._session.headers[
842 "x-vcloud-authorization"
843 ],
844 }
845 response = self.perform_request(
846 req_type="GET", url=vdc.get("href"), headers=headers
847 )
848
849 if response.status_code != 200:
850 self.logger.error("Failed to get vdc content")
851 raise vimconn.VimConnNotFoundException("Failed to get vdc content")
852 else:
853 content = XmlElementTree.fromstring(response.text)
854
855 network_list = []
856 for item in content:
857 if item.tag.split("}")[-1] == "AvailableNetworks":
858 for net in item:
859 response = self.perform_request(
860 req_type="GET", url=net.get("href"), headers=headers
861 )
862
863 if response.status_code != 200:
864 self.logger.error("Failed to get network content")
865 raise vimconn.VimConnNotFoundException(
866 "Failed to get network content"
867 )
868 else:
869 net_details = XmlElementTree.fromstring(response.text)
870
871 filter_entry = {}
872 net_uuid = net_details.get("id").split(":")
873
874 if len(net_uuid) != 4:
875 continue
876 else:
877 net_uuid = net_uuid[3]
878 # create dict entry
879 self.logger.debug(
880 "get_network_list(): Adding net {}"
881 " to a list vcd id {} network {}".format(
882 net_uuid, vdcid, net_details.get("name")
883 )
884 )
885 filter_entry["name"] = net_details.get("name")
886 filter_entry["id"] = net_uuid
887
888 if [
889 i.text
890 for i in net_details
891 if i.tag.split("}")[-1] == "IsShared"
892 ][0] == "true":
893 shared = True
894 else:
895 shared = False
896
897 filter_entry["shared"] = shared
898 filter_entry["tenant_id"] = vdcid
899
900 if int(net_details.get("status")) == 1:
901 filter_entry["admin_state_up"] = True
902 else:
903 filter_entry["admin_state_up"] = False
904
905 filter_entry["status"] = "ACTIVE"
906 filter_entry["type"] = "bridge"
907 filtered_entry = filter_entry.copy()
908
909 if filter_dict is not None and filter_dict:
910 # we remove all the key : value we don't care and match only
911 # respected field
912 filtered_dict = set(filter_entry.keys()) - set(
913 filter_dict
914 )
915
916 for unwanted_key in filtered_dict:
917 del filter_entry[unwanted_key]
918
919 if filter_dict == filter_entry:
920 network_list.append(filtered_entry)
921 else:
922 network_list.append(filtered_entry)
923 except Exception as e:
924 self.logger.debug("Error in get_network_list", exc_info=True)
925
926 if isinstance(e, vimconn.VimConnException):
927 raise
928 else:
929 raise vimconn.VimConnNotFoundException(
930 "Failed : Networks list not found {} ".format(e)
931 )
932
933 self.logger.debug("Returning {}".format(network_list))
934
935 return network_list
936
937 def get_network(self, net_id):
938 """Method obtains network details of net_id VIM network
939         Return a dict with the fields at filter_dict (see get_network_list) plus some VIM specific fields"""
940 try:
941 _, vdc = self.get_vdc_details()
942 vdc_id = vdc.get("id").split(":")[3]
943
944 if self.client._session:
945 headers = {
946 "Accept": "application/*+xml;version=" + API_VERSION,
947 "x-vcloud-authorization": self.client._session.headers[
948 "x-vcloud-authorization"
949 ],
950 }
951 response = self.perform_request(
952 req_type="GET", url=vdc.get("href"), headers=headers
953 )
954
955 if response.status_code != 200:
956 self.logger.error("Failed to get vdc content")
957 raise vimconn.VimConnNotFoundException("Failed to get vdc content")
958 else:
959 content = XmlElementTree.fromstring(response.text)
960
961 filter_dict = {}
962
963 for item in content:
964 if item.tag.split("}")[-1] == "AvailableNetworks":
965 for net in item:
966 response = self.perform_request(
967 req_type="GET", url=net.get("href"), headers=headers
968 )
969
970 if response.status_code != 200:
971 self.logger.error("Failed to get network content")
972 raise vimconn.VimConnNotFoundException(
973 "Failed to get network content"
974 )
975 else:
976 net_details = XmlElementTree.fromstring(response.text)
977
978 vdc_network_id = net_details.get("id").split(":")
979 if len(vdc_network_id) == 4 and vdc_network_id[3] == net_id:
980 filter_dict["name"] = net_details.get("name")
981 filter_dict["id"] = vdc_network_id[3]
982
983 if [
984 i.text
985 for i in net_details
986 if i.tag.split("}")[-1] == "IsShared"
987 ][0] == "true":
988 shared = True
989 else:
990 shared = False
991
992 filter_dict["shared"] = shared
993 filter_dict["tenant_id"] = vdc_id
994
995 if int(net_details.get("status")) == 1:
996 filter_dict["admin_state_up"] = True
997 else:
998 filter_dict["admin_state_up"] = False
999
1000 filter_dict["status"] = "ACTIVE"
1001 filter_dict["type"] = "bridge"
1002 self.logger.debug("Returning {}".format(filter_dict))
1003
1004 return filter_dict
1005 else:
1006 raise vimconn.VimConnNotFoundException(
1007 "Network {} not found".format(net_id)
1008 )
1009 except Exception as e:
1010 self.logger.debug("Error in get_network")
1011 self.logger.debug(traceback.format_exc())
1012
1013 if isinstance(e, vimconn.VimConnException):
1014 raise
1015 else:
1016 raise vimconn.VimConnNotFoundException(
1017 "Failed : Network not found {} ".format(e)
1018 )
1019
1020 return filter_dict
1021
1022 def delete_network(self, net_id, created_items=None):
1023 """
1024 Removes a tenant network from VIM and its associated elements
1025 :param net_id: VIM identifier of the network, provided by method new_network
1026 :param created_items: dictionary with extra items to be deleted. provided by method new_network
1027 Returns the network identifier or raises an exception upon error or when network is not found
1028 """
1029
1030 # ############# Stub code for SRIOV #################
1031 # dvport_group = self.get_dvport_group(net_id)
1032 # if dvport_group:
1033 # #delete portgroup
1034 # status = self.destroy_dvport_group(net_id)
1035 # if status:
1036 # # Remove vlanID from persistent info
1037 # if net_id in self.persistent_info["used_vlanIDs"]:
1038 # del self.persistent_info["used_vlanIDs"][net_id]
1039 #
1040 # return net_id
1041
1042 vcd_network = self.get_vcd_network(network_uuid=net_id)
1043 if vcd_network is not None and vcd_network:
1044 if self.delete_network_action(network_uuid=net_id):
1045 return net_id
1046 else:
1047 raise vimconn.VimConnNotFoundException(
1048 "Network {} not found".format(net_id)
1049 )
1050
1051 def refresh_nets_status(self, net_list):
1052 """Get the status of the networks
1053 Params: the list of network identifiers
1054 Returns a dictionary with:
1055 net_id: #VIM id of this network
1056 status: #Mandatory. Text with one of:
1057 # DELETED (not found at vim)
1058 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
1059 # OTHER (Vim reported other status not understood)
1060 # ERROR (VIM indicates an ERROR status)
1061 # ACTIVE, INACTIVE, DOWN (admin down),
1062 # BUILD (on building process)
1063 #
1064 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
1065 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
1066
1067 """
1068 dict_entry = {}
1069 try:
1070 for net in net_list:
1071 errormsg = ""
1072 vcd_network = self.get_vcd_network(network_uuid=net)
1073 if vcd_network is not None and vcd_network:
1074 if vcd_network["status"] == "1":
1075 status = "ACTIVE"
1076 else:
1077 status = "DOWN"
1078 else:
1079 status = "DELETED"
1080 errormsg = "Network not found."
1081
1082 dict_entry[net] = {
1083 "status": status,
1084 "error_msg": errormsg,
1085 "vim_info": yaml.safe_dump(vcd_network),
1086 }
1087 except Exception:
1088 self.logger.debug("Error in refresh_nets_status")
1089 self.logger.debug(traceback.format_exc())
1090
1091 return dict_entry
1092
1093 def get_flavor(self, flavor_id):
1094 """Obtain flavor details from the VIM
1095 Returns the flavor dict details {'id':<>, 'name':<>, other vim specific } #TODO to concrete
1096 """
1097 if flavor_id not in vimconnector.flavorlist:
1098 raise vimconn.VimConnNotFoundException("Flavor not found.")
1099
1100 return vimconnector.flavorlist[flavor_id]
1101
1102 def new_flavor(self, flavor_data):
1103 """Adds a tenant flavor to VIM
1104 flavor_data contains a dictionary with information, keys:
1105 name: flavor name
1106 ram: memory (cloud type) in MBytes
1107             vcpus: cpus (cloud type)
1108 extended: EPA parameters
1109 - numas: #items requested in same NUMA
1110 memory: number of 1G huge pages memory
1111 paired-threads|cores|threads: number of paired hyperthreads, complete cores OR individual
1112 threads
1113 interfaces: # passthrough(PT) or SRIOV interfaces attached to this numa
1114 - name: interface name
1115 dedicated: yes|no|yes:sriov; for PT, SRIOV or only one SRIOV for the physical NIC
1116 bandwidth: X Gbps; requested guarantee bandwidth
1117 vpci: requested virtual PCI address
1118 disk: disk size
1119 is_public:
1120 #TODO to concrete
1121 Returns the flavor identifier"""
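        # Illustrative flavor_data with EPA extensions (values are placeholders); note that a
        # "numas" definition overrides the top-level ram/vcpus as implemented below:
        #
        #   flavor_data = {
        #       "name": "medium", "ram": 4096, "vcpus": 2, "disk": 20,
        #       "extended": {"numas": [{"memory": 4, "paired-threads": 2}]},
        #   }
        #   flavor_id = vim.new_flavor(flavor_data)  # stored with ram=4096, vcpus=4, disk=20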
1122
1123 # generate a new uuid put to internal dict and return it.
1124 self.logger.debug("Creating new flavor - flavor_data: {}".format(flavor_data))
1125 new_flavor = flavor_data
1126 ram = flavor_data.get(FLAVOR_RAM_KEY, 1024)
1127 cpu = flavor_data.get(FLAVOR_VCPUS_KEY, 1)
1128 disk = flavor_data.get(FLAVOR_DISK_KEY, 0)
1129
1130 if not isinstance(ram, int):
1131 raise vimconn.VimConnException("Non-integer value for ram")
1132 elif not isinstance(cpu, int):
1133 raise vimconn.VimConnException("Non-integer value for cpu")
1134 elif not isinstance(disk, int):
1135 raise vimconn.VimConnException("Non-integer value for disk")
1136
1137 extended_flv = flavor_data.get("extended")
1138 if extended_flv:
1139 numas = extended_flv.get("numas")
1140 if numas:
1141 for numa in numas:
1142 # overwrite ram and vcpus
1143 if "memory" in numa:
1144 ram = numa["memory"] * 1024
1145
1146 if "paired-threads" in numa:
1147 cpu = numa["paired-threads"] * 2
1148 elif "cores" in numa:
1149 cpu = numa["cores"]
1150 elif "threads" in numa:
1151 cpu = numa["threads"]
1152
1153 new_flavor[FLAVOR_RAM_KEY] = ram
1154 new_flavor[FLAVOR_VCPUS_KEY] = cpu
1155 new_flavor[FLAVOR_DISK_KEY] = disk
1156 # generate a new uuid put to internal dict and return it.
1157 flavor_id = uuid.uuid4()
1158 vimconnector.flavorlist[str(flavor_id)] = new_flavor
1159 self.logger.debug("Created flavor - {} : {}".format(flavor_id, new_flavor))
1160
1161 return str(flavor_id)
1162
1163 def delete_flavor(self, flavor_id):
1164 """Deletes a tenant flavor from VIM identify by its id
1165
1166 Returns the used id or raise an exception
1167 """
1168 if flavor_id not in vimconnector.flavorlist:
1169 raise vimconn.VimConnNotFoundException("Flavor not found.")
1170
1171 vimconnector.flavorlist.pop(flavor_id, None)
1172
1173 return flavor_id
1174
1175 def new_image(self, image_dict):
1176 """
1177 Adds a tenant image to VIM
1178 Returns:
1179             the image (catalog) identifier in UUID format if the image is created
1180             otherwise an exception is raised
1181 """
1182 return self.get_image_id_from_path(image_dict["location"])
1183
1184 def delete_image(self, image_id):
1185 """
1186 Deletes a tenant image from VIM
1187 Args:
1188 image_id is ID of Image to be deleted
1189 Return:
1190 returns the image identifier in UUID format or raises an exception on error
1191 """
1192 conn = self.connect_as_admin()
1193
1194 if not conn:
1195 raise vimconn.VimConnConnectionException("Failed to connect vCD")
1196
1197 # Get Catalog details
1198 url_list = [self.url, "/api/catalog/", image_id]
1199 catalog_herf = "".join(url_list)
1200
1201 headers = {
1202 "Accept": "application/*+xml;version=" + API_VERSION,
1203 "x-vcloud-authorization": conn._session.headers["x-vcloud-authorization"],
1204 }
1205
1206 response = self.perform_request(
1207 req_type="GET", url=catalog_herf, headers=headers
1208 )
1209
1210 if response.status_code != requests.codes.ok:
1211 self.logger.debug(
1212 "delete_image():GET REST API call {} failed. "
1213 "Return status code {}".format(catalog_herf, response.status_code)
1214 )
1215
1216 raise vimconn.VimConnNotFoundException(
1217 "Fail to get image {}".format(image_id)
1218 )
1219
1220 lxmlroot_respond = lxmlElementTree.fromstring(response.content)
1221 namespaces = {
1222 prefix: uri for prefix, uri in lxmlroot_respond.nsmap.items() if prefix
1223 }
1224 namespaces["xmlns"] = "http://www.vmware.com/vcloud/v1.5"
1225
1226 catalogItems_section = lxmlroot_respond.find("xmlns:CatalogItems", namespaces)
1227 catalogItems = catalogItems_section.iterfind("xmlns:CatalogItem", namespaces)
1228
1229 for catalogItem in catalogItems:
1230 catalogItem_href = catalogItem.attrib["href"]
1231
1232 response = self.perform_request(
1233 req_type="GET", url=catalogItem_href, headers=headers
1234 )
1235
1236 if response.status_code != requests.codes.ok:
1237 self.logger.debug(
1238 "delete_image():GET REST API call {} failed. "
1239 "Return status code {}".format(catalog_herf, response.status_code)
1240 )
1241 raise vimconn.VimConnNotFoundException(
1242 "Fail to get catalogItem {} for catalog {}".format(
1243 catalogItem, image_id
1244 )
1245 )
1246
1247 lxmlroot_respond = lxmlElementTree.fromstring(response.content)
1248 namespaces = {
1249 prefix: uri for prefix, uri in lxmlroot_respond.nsmap.items() if prefix
1250 }
1251 namespaces["xmlns"] = "http://www.vmware.com/vcloud/v1.5"
1252 catalogitem_remove_href = lxmlroot_respond.find(
1253 "xmlns:Link[@rel='remove']", namespaces
1254 ).attrib["href"]
1255
1256 # Remove catalogItem
1257 response = self.perform_request(
1258 req_type="DELETE", url=catalogitem_remove_href, headers=headers
1259 )
1260
1261 if response.status_code == requests.codes.no_content:
1262 self.logger.debug("Deleted Catalog item {}".format(catalogItem))
1263 else:
1264 raise vimconn.VimConnException(
1265 "Fail to delete Catalog Item {}".format(catalogItem)
1266 )
1267
1268 # Remove catalog
1269 url_list = [self.url, "/api/admin/catalog/", image_id]
1270 catalog_remove_herf = "".join(url_list)
1271 response = self.perform_request(
1272 req_type="DELETE", url=catalog_remove_herf, headers=headers
1273 )
1274
1275 if response.status_code == requests.codes.no_content:
1276 self.logger.debug("Deleted Catalog {}".format(image_id))
1277
1278 return image_id
1279 else:
1280 raise vimconn.VimConnException("Fail to delete Catalog {}".format(image_id))
1281
1282 def catalog_exists(self, catalog_name, catalogs):
1283 """
1284
1285         :param catalog_name: name of the catalog to look for
1286         :param catalogs: list of catalog dicts (each with 'name' and 'id' keys)
1287         :return: the catalog id if a catalog with that name exists, else None
1288 """
1289 for catalog in catalogs:
1290 if catalog["name"] == catalog_name:
1291 return catalog["id"]
1292
1293 def create_vimcatalog(self, vca=None, catalog_name=None):
1294 """Create new catalog entry in vCloud director.
1295
1296 Args
1297 vca: vCloud director.
1298             catalog_name: catalog that the client wishes to create. Note that no validation is done on the name;
1299                           the client must make sure to provide a valid string representation.
1300
1301 Returns catalog id if catalog created else None.
1302
1303 """
1304 try:
1305 lxml_catalog_element = vca.create_catalog(catalog_name, catalog_name)
1306
1307 if lxml_catalog_element:
1308 id_attr_value = lxml_catalog_element.get("id")
1309 return id_attr_value.split(":")[-1]
1310
1311 catalogs = vca.list_catalogs()
1312 except Exception as ex:
1313 self.logger.error(
1314 'create_vimcatalog(): Creation of catalog "{}" failed with error: {}'.format(
1315 catalog_name, ex
1316 )
1317 )
1318 raise
1319 return self.catalog_exists(catalog_name, catalogs)
1320
1321 # noinspection PyIncorrectDocstring
1322 def upload_ovf(
1323 self,
1324 vca=None,
1325 catalog_name=None,
1326 image_name=None,
1327 media_file_name=None,
1328 description="",
1329 progress=False,
1330 chunk_bytes=128 * 1024,
1331 ):
1332 """
1333         Uploads an OVF file to a vCloud catalog
1334
1335 :param chunk_bytes:
1336 :param progress:
1337 :param description:
1338 :param image_name:
1339 :param vca:
1340 :param catalog_name: (str): The name of the catalog to upload the media.
1341 :param media_file_name: (str): The name of the local media file to upload.
1342 :return: (bool) True if the media file was successfully uploaded, false otherwise.
1343 """
1344 os.path.isfile(media_file_name)
1345 statinfo = os.stat(media_file_name)
1346
1347 # find a catalog entry where we upload OVF.
1348         # create the vApp template and check the status; if vCD is able to read the OVF it will respond with the
1349         # appropriate status change.
1350         # if vCD can parse the OVF we upload the VMDK file
1351 try:
1352 for catalog in vca.list_catalogs():
1353 if catalog_name != catalog["name"]:
1354 continue
1355 catalog_href = "{}/api/catalog/{}/action/upload".format(
1356 self.url, catalog["id"]
1357 )
1358 data = """
1359 <UploadVAppTemplateParams name="{}"
1360 xmlns="http://www.vmware.com/vcloud/v1.5"
1361 xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1">
1362 <Description>{} vApp Template</Description>
1363 </UploadVAppTemplateParams>
1364 """.format(
1365 catalog_name, description
1366 )
1367
1368 if self.client:
1369 headers = {
1370 "Accept": "application/*+xml;version=" + API_VERSION,
1371 "x-vcloud-authorization": self.client._session.headers[
1372 "x-vcloud-authorization"
1373 ],
1374 }
1375 headers[
1376 "Content-Type"
1377 ] = "application/vnd.vmware.vcloud.uploadVAppTemplateParams+xml"
1378
1379 response = self.perform_request(
1380 req_type="POST", url=catalog_href, headers=headers, data=data
1381 )
1382
1383 if response.status_code == requests.codes.created:
1384 catalogItem = XmlElementTree.fromstring(response.text)
1385 entity = [
1386 child
1387 for child in catalogItem
1388 if child.get("type")
1389 == "application/vnd.vmware.vcloud.vAppTemplate+xml"
1390 ][0]
1391 href = entity.get("href")
1392 template = href
1393
1394 response = self.perform_request(
1395 req_type="GET", url=href, headers=headers
1396 )
1397
1398 if response.status_code == requests.codes.ok:
1399 headers["Content-Type"] = "Content-Type text/xml"
1400 result = re.search(
1401 'rel="upload:default"\shref="(.*?\/descriptor.ovf)"',
1402 response.text,
1403 )
1404
1405 if result:
1406 transfer_href = result.group(1)
1407
1408 response = self.perform_request(
1409 req_type="PUT",
1410 url=transfer_href,
1411 headers=headers,
1412 data=open(media_file_name, "rb"),
1413 )
1414
1415 if response.status_code != requests.codes.ok:
1416 self.logger.debug(
1417 "Failed create vApp template for catalog name {} and image {}".format(
1418 catalog_name, media_file_name
1419 )
1420 )
1421 return False
1422
1423                             # TODO: fix this with an async block
1424 time.sleep(5)
1425
1426 self.logger.debug(
1427 "vApp template for catalog name {} and image {}".format(
1428 catalog_name, media_file_name
1429 )
1430 )
1431
1432 # uploading VMDK file
1433 # check status of OVF upload and upload remaining files.
1434 response = self.perform_request(
1435 req_type="GET", url=template, headers=headers
1436 )
1437
1438 if response.status_code == requests.codes.ok:
1439 result = re.search(
1440 'rel="upload:default"\s*href="(.*?vmdk)"', response.text
1441 )
1442
1443 if result:
1444 link_href = result.group(1)
1445
1446 # we skip ovf since it already uploaded.
1447 if "ovf" in link_href:
1448 continue
1449
1450 # The OVF file and VMDK must be in a same directory
1451 head, _ = os.path.split(media_file_name)
1452 file_vmdk = head + "/" + link_href.split("/")[-1]
1453
1454 if not os.path.isfile(file_vmdk):
1455 return False
1456
1457 statinfo = os.stat(file_vmdk)
1458 if statinfo.st_size == 0:
1459 return False
1460
1461 hrefvmdk = link_href
1462
1463 if progress:
1464 widgets = [
1465 "Uploading file: ",
1466 Percentage(),
1467 " ",
1468 Bar(),
1469 " ",
1470 ETA(),
1471 " ",
1472 FileTransferSpeed(),
1473 ]
1474 progress_bar = ProgressBar(
1475 widgets=widgets, maxval=statinfo.st_size
1476 ).start()
1477
1478 bytes_transferred = 0
1479 f = open(file_vmdk, "rb")
1480
1481 while bytes_transferred < statinfo.st_size:
1482 my_bytes = f.read(chunk_bytes)
1483 if len(my_bytes) <= chunk_bytes:
1484 headers["Content-Range"] = "bytes {}-{}/{}".format(
1485 bytes_transferred,
1486 len(my_bytes) - 1,
1487 statinfo.st_size,
1488 )
1489 headers["Content-Length"] = str(len(my_bytes))
1490 response = requests.put(
1491 url=hrefvmdk,
1492 headers=headers,
1493 data=my_bytes,
1494 verify=False,
1495 )
1496
1497 if response.status_code == requests.codes.ok:
1498 bytes_transferred += len(my_bytes)
1499 if progress:
1500 progress_bar.update(bytes_transferred)
1501 else:
1502 self.logger.debug(
1503 "file upload failed with error: [{}] {}".format(
1504 response.status_code, response.text
1505 )
1506 )
1507
1508 f.close()
1509
1510 return False
1511
1512 f.close()
1513 if progress:
1514 progress_bar.finish()
1515 time.sleep(10)
1516
1517 return True
1518 else:
1519 self.logger.debug(
1520 "Failed retrieve vApp template for catalog name {} for OVF {}".format(
1521 catalog_name, media_file_name
1522 )
1523 )
1524 return False
1525 except Exception as exp:
1526 self.logger.debug(
1527 "Failed while uploading OVF to catalog {} for OVF file {} with Exception {}".format(
1528 catalog_name, media_file_name, exp
1529 )
1530 )
1531
1532 raise vimconn.VimConnException(
1533 "Failed while uploading OVF to catalog {} for OVF file {} with Exception {}".format(
1534 catalog_name, media_file_name, exp
1535 )
1536 )
1537
1538 self.logger.debug(
1539 "Failed retrieve catalog name {} for OVF file {}".format(
1540 catalog_name, media_file_name
1541 )
1542 )
1543
1544 return False
1545
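    # Sketch of the chunked VMDK upload performed above (sizes are illustrative): for a 10 MiB
    # file sent in 128 KiB chunks, the first PUT carries
    #   Content-Range: bytes 0-131071/10485760   and   Content-Length: 131072
    # and bytes_transferred then advances by len(my_bytes) until it reaches statinfo.st_size.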
1546 def upload_vimimage(
1547 self,
1548 vca=None,
1549 catalog_name=None,
1550 media_name=None,
1551 medial_file_name=None,
1552 progress=False,
1553 ):
1554 """Upload media file"""
1555 # TODO add named parameters for readability
1556 return self.upload_ovf(
1557 vca=vca,
1558 catalog_name=catalog_name,
1559 image_name=media_name.split(".")[0],
1560 media_file_name=medial_file_name,
1561 description="medial_file_name",
1562 progress=progress,
1563 )
1564
1565 def validate_uuid4(self, uuid_string=None):
1566 """Method validate correct format of UUID.
1567
1568         Return: True if the string represents a valid UUID
1569 """
1570 try:
1571 uuid.UUID(uuid_string, version=4)
1572 except ValueError:
1573 return False
1574
1575 return True
1576
1577 def get_catalogid(self, catalog_name=None, catalogs=None):
1578 """Method check catalog and return catalog ID in UUID format.
1579
1580 Args
1581 catalog_name: catalog name as string
1582 catalogs: list of catalogs.
1583
1584 Return: catalogs uuid
1585 """
1586 for catalog in catalogs:
1587 if catalog["name"] == catalog_name:
1588 catalog_id = catalog["id"]
1589 return catalog_id
1590
1591 return None
1592
1593 def get_catalogbyid(self, catalog_uuid=None, catalogs=None):
1594 """Method check catalog and return catalog name lookup done by catalog UUID.
1595
1596 Args
1597 catalog_name: catalog name as string
1598 catalogs: list of catalogs.
1599
1600 Return: catalogs name or None
1601 """
1602 if not self.validate_uuid4(uuid_string=catalog_uuid):
1603 return None
1604
1605 for catalog in catalogs:
1606 catalog_id = catalog.get("id")
1607
1608 if catalog_id == catalog_uuid:
1609 return catalog.get("name")
1610
1611 return None
1612
1613 def get_catalog_obj(self, catalog_uuid=None, catalogs=None):
1614 """Method check catalog and return catalog name lookup done by catalog UUID.
1615
1616 Args
1617 catalog_name: catalog name as string
1618 catalogs: list of catalogs.
1619
1620 Return: catalogs name or None
1621 """
1622 if not self.validate_uuid4(uuid_string=catalog_uuid):
1623 return None
1624
1625 for catalog in catalogs:
1626 catalog_id = catalog.get("id")
1627
1628 if catalog_id == catalog_uuid:
1629 return catalog
1630
1631 return None
1632
1633 def get_image_id_from_path(self, path=None, progress=False):
1634 """Method upload OVF image to vCloud director.
1635
1636 Each OVF image represented as single catalog entry in vcloud director.
1637 The method check for existing catalog entry. The check done by file name without file extension.
1638
1639 if given catalog name already present method will respond with existing catalog uuid otherwise
1640 it will create new catalog entry and upload OVF file to newly created catalog.
1641
1642 If method can't create catalog entry or upload a file it will throw exception.
1643
1644 Method accept boolean flag progress that will output progress bar. It useful method
1645 for standalone upload use case. In case to test large file upload.
1646
1647 Args
1648 path: - valid path to OVF file.
1649 progress - boolean progress bar show progress bar.
1650
1651 Return: if image uploaded correct method will provide image catalog UUID.
1652 """
1653 if not path:
1654 raise vimconn.VimConnException("Image path can't be None.")
1655
1656 if not os.path.isfile(path):
1657 raise vimconn.VimConnException("Can't read file. File not found.")
1658
1659 if not os.access(path, os.R_OK):
1660 raise vimconn.VimConnException(
1661 "Can't read file. Check file permission to read."
1662 )
1663
1664 self.logger.debug("get_image_id_from_path() client requesting {} ".format(path))
1665
1666 _, filename = os.path.split(path)
1667 _, file_extension = os.path.splitext(path)
1668 if file_extension != ".ovf":
1669 self.logger.debug(
1670 "Wrong file extension {} connector support only OVF container.".format(
1671 file_extension
1672 )
1673 )
1674
1675 raise vimconn.VimConnException(
1676 "Wrong container. vCloud director supports only OVF."
1677 )
1678
1679 catalog_name = os.path.splitext(filename)[0]
1680 catalog_md5_name = hashlib.md5(path.encode("utf-8")).hexdigest()
1681 self.logger.debug(
1682 "File name {} Catalog Name {} file path {} "
1683 "vdc catalog name {}".format(filename, catalog_name, path, catalog_md5_name)
1684 )
1685
1686 try:
1687 org, _ = self.get_vdc_details()
1688 catalogs = org.list_catalogs()
1689 except Exception as exp:
1690 self.logger.debug("Failed get catalogs() with Exception {} ".format(exp))
1691
1692 raise vimconn.VimConnException(
1693 "Failed get catalogs() with Exception {} ".format(exp)
1694 )
1695
1696 if len(catalogs) == 0:
1697 self.logger.info(
1698 "Creating a new catalog entry {} in vcloud director".format(
1699 catalog_name
1700 )
1701 )
1702
1703 if self.create_vimcatalog(org, catalog_md5_name) is None:
1704 raise vimconn.VimConnException(
1705 "Failed create new catalog {} ".format(catalog_md5_name)
1706 )
1707
1708 result = self.upload_vimimage(
1709 vca=org,
1710 catalog_name=catalog_md5_name,
1711 media_name=filename,
1712 medial_file_name=path,
1713 progress=progress,
1714 )
1715
1716 if not result:
1717 raise vimconn.VimConnException(
1718 "Failed create vApp template for catalog {} ".format(catalog_name)
1719 )
1720
1721 return self.get_catalogid(catalog_name, catalogs)
1722 else:
1723 for catalog in catalogs:
1724 # search for existing catalog if we find same name we return ID
1725 # TODO optimize this
1726 if catalog["name"] == catalog_md5_name:
1727 self.logger.debug(
1728 "Found existing catalog entry for {} "
1729 "catalog id {}".format(
1730 catalog_name, self.get_catalogid(catalog_md5_name, catalogs)
1731 )
1732 )
1733
1734 return self.get_catalogid(catalog_md5_name, catalogs)
1735
1736 # if we didn't find existing catalog we create a new one and upload image.
1737 self.logger.debug(
1738 "Creating new catalog entry {} - {}".format(catalog_name, catalog_md5_name)
1739 )
1740 if self.create_vimcatalog(org, catalog_md5_name) is None:
1741 raise vimconn.VimConnException(
1742 "Failed create new catalog {} ".format(catalog_md5_name)
1743 )
1744
1745 result = self.upload_vimimage(
1746 vca=org,
1747 catalog_name=catalog_md5_name,
1748 media_name=filename,
1749 medial_file_name=path,
1750 progress=progress,
1751 )
1752 if not result:
1753 raise vimconn.VimConnException(
1754 "Failed create vApp template for catalog {} ".format(catalog_md5_name)
1755 )
1756
1757 return self.get_catalogid(catalog_md5_name, org.list_catalogs())
1758
1759 def get_image_list(self, filter_dict={}):
1760 """Obtain tenant images from VIM
1761 Filter_dict can be:
1762 name: image name
1763 id: image uuid
1764 checksum: image checksum
1765 location: image path
1766 Returns the image list of dictionaries:
1767 [{<the fields at Filter_dict plus some VIM specific>}, ...]
1768 List can be empty
1769 """
1770 try:
1771 org, _ = self.get_vdc_details()
1772 image_list = []
1773 catalogs = org.list_catalogs()
1774
1775 if len(catalogs) == 0:
1776 return image_list
1777 else:
1778 for catalog in catalogs:
1779 catalog_uuid = catalog.get("id")
1780 name = catalog.get("name")
1781 filtered_dict = {}
1782
1783 if filter_dict.get("name") and filter_dict["name"] != name:
1784 continue
1785
1786 if filter_dict.get("id") and filter_dict["id"] != catalog_uuid:
1787 continue
1788
1789 filtered_dict["name"] = name
1790 filtered_dict["id"] = catalog_uuid
1791 image_list.append(filtered_dict)
1792
1793 self.logger.debug(
1794 "List of already created catalog items: {}".format(image_list)
1795 )
1796
1797 return image_list
1798 except Exception as exp:
1799 raise vimconn.VimConnException(
1800 "Exception occured while retriving catalog items {}".format(exp)
1801 )
1802
1803 def get_vappid(self, vdc=None, vapp_name=None):
1804 """Method takes vdc object and vApp name and returns vapp uuid or None
1805
1806 Args:
1807 vdc: The VDC object.
1808             vapp_name: is the vApp name identifier
1809
1810         Returns:
1811             The vApp UUID if found, otherwise None
1812 """
1813 if vdc is None or vapp_name is None:
1814 return None
1815
1816 # UUID has following format https://host/api/vApp/vapp-30da58a3-e7c7-4d09-8f68-d4c8201169cf
1817 try:
1818 refs = [
1819 ref
1820 for ref in vdc.ResourceEntities.ResourceEntity
1821 if ref.name == vapp_name
1822 and ref.type_ == "application/vnd.vmware.vcloud.vApp+xml"
1823 ]
1824
1825 if len(refs) == 1:
1826 return refs[0].href.split("vapp")[1][1:]
1827 except Exception as e:
1828 self.logger.exception(e)
1829 return False
1830
1831 return None
1832
1833 def check_vapp(self, vdc=None, vapp_uuid=None):
1834 """Method Method returns True or False if vapp deployed in vCloud director
1835
1836 Args:
1837 vca: Connector to VCA
1838 vdc: The VDC object.
1839 vappid: vappid is application identifier
1840
1841 Returns:
1842 The return True if vApp deployed
1843 :param vdc:
1844 :param vapp_uuid:
1845 """
1846 try:
1847 refs = [
1848 ref
1849 for ref in vdc.ResourceEntities.ResourceEntity
1850 if ref.type_ == "application/vnd.vmware.vcloud.vApp+xml"
1851 ]
1852
1853 for ref in refs:
1854 vappid = ref.href.split("vapp")[1][1:]
1855 # find vapp with respected vapp uuid
1856
1857 if vappid == vapp_uuid:
1858 return True
1859 except Exception as e:
1860 self.logger.exception(e)
1861
1862 return False
1863
1864 return False
1865
1866 def get_namebyvappid(self, vapp_uuid=None):
1867 """Method returns vApp name from vCD and lookup done by vapp_id.
1868
1869 Args:
1870             vapp_uuid: the vApp identifier
1871
1872         Returns:
1873             The vApp name, otherwise None
1874 """
1875 try:
1876 if self.client and vapp_uuid:
1877 vapp_call = "{}/api/vApp/vapp-{}".format(self.url, vapp_uuid)
1878 headers = {
1879 "Accept": "application/*+xml;version=" + API_VERSION,
1880 "x-vcloud-authorization": self.client._session.headers[
1881 "x-vcloud-authorization"
1882 ],
1883 }
1884
1885 response = self.perform_request(
1886 req_type="GET", url=vapp_call, headers=headers
1887 )
1888
1889 # Retry login if session expired & retry sending request
1890 if response.status_code == 403:
1891 response = self.retry_rest("GET", vapp_call)
1892
1893 tree = XmlElementTree.fromstring(response.text)
1894
1895 return tree.attrib["name"] if "name" in tree.attrib else None
1896 except Exception as e:
1897 self.logger.exception(e)
1898
1899 return None
1900
1901 return None
1902
1903 def new_vminstance(
1904 self,
1905 name=None,
1906 description="",
1907 start=False,
1908 image_id=None,
1909 flavor_id=None,
1910 net_list=[],
1911 cloud_config=None,
1912 disk_list=None,
1913 availability_zone_index=None,
1914 availability_zone_list=None,
1915 ):
1916 """Adds a VM instance to VIM
1917 Params:
1918 'start': (boolean) indicates if VM must start or created in pause mode.
1919 'image_id','flavor_id': image and flavor VIM id to use for the VM
1920 'net_list': list of interfaces, each one is a dictionary with:
1921 'name': (optional) name for the interface.
1922                 'net_id': VIM network id where this interface must be connected to. Mandatory for type==virtual
1923 'vpci': (optional) virtual vPCI address to assign at the VM. Can be ignored depending on VIM
1924 capabilities
1925 'model': (optional and only have sense for type==virtual) interface model: virtio, e1000, ...
1926 'mac_address': (optional) mac address to assign to this interface
1927                 #TODO: CHECK if an optional 'vlan' parameter is needed for VIMs when type is VF and net_id is not
1928 provided, the VLAN tag to be used. In case net_id is provided, the internal network vlan is used
1929 for tagging VF
1930 'type': (mandatory) can be one of:
1931 'virtual', in this case always connected to a network of type 'net_type=bridge'
1932 'PCI-PASSTHROUGH' or 'PF' (passthrough): depending on VIM capabilities it can be connected to a
1933                     data/ptp network or it can be created unconnected
1934 'SR-IOV' or 'VF' (SRIOV with VLAN tag): same as PF for network connectivity.
1935 'VFnotShared'(SRIOV without VLAN tag) same as PF for network connectivity. VF where no other VFs
1936 are allocated on the same physical NIC
1937 'bw': (optional) only for PF/VF/VFnotShared. Minimal Bandwidth required for the interface in GBPS
1938 'port_security': (optional) If False it must avoid any traffic filtering at this interface. If missing
1939 or True, it must apply the default VIM behaviour
1940 After execution the method will add the key:
1941 'vim_id': must be filled/added by this method with the VIM identifier generated by the VIM for this
1942 interface. 'net_list' is modified
1943 'cloud_config': (optional) dictionary with:
1944 'key-pairs': (optional) list of strings with the public key to be inserted to the default user
1945 'users': (optional) list of users to be inserted, each item is a dict with:
1946 'name': (mandatory) user name,
1947 'key-pairs': (optional) list of strings with the public key to be inserted to the user
1948 'user-data': (optional) can be a string with the text script to be passed directly to cloud-init,
1949 or a list of strings, each one contains a script to be passed, usually with a MIMEmultipart file
1950 'config-files': (optional). List of files to be transferred. Each item is a dict with:
1951 'dest': (mandatory) string with the destination absolute path
1952 'encoding': (optional, by default text). Can be one of:
1953 'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
1954 'content' (mandatory): string with the content of the file
1955 'permissions': (optional) string with file permissions, typically octal notation '0644'
1956 'owner': (optional) file owner, string with the format 'owner:group'
1957 'boot-data-drive': boolean to indicate if user-data must be passed using a boot drive (hard disk)
1958 'disk_list': (optional) list with additional disks to the VM. Each item is a dict with:
1959 'image_id': (optional). VIM id of an existing image. If not provided an empty disk must be mounted
1960 'size': (mandatory) string with the size of the disk in GB
1961         availability_zone_index: Index of availability_zone_list to use for this VM. None if no availability zone is required
1962 availability_zone_list: list of availability zones given by user in the VNFD descriptor. Ignore if
1963 availability_zone_index is None
1964 Returns a tuple with the instance identifier and created_items or raises an exception on error
1965 created_items can be None or a dictionary where this method can include key-values that will be passed to
1966 the method delete_vminstance and action_vminstance. Can be used to store created ports, volumes, etc.
1967 Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
1968 as not present.
1969 """
1970 self.logger.info("Creating new instance for entry {}".format(name))
1971 self.logger.debug(
1972 "desc {} boot {} image_id: {} flavor_id: {} net_list: {} cloud_config {} disk_list {} "
1973 "availability_zone_index {} availability_zone_list {}".format(
1974 description,
1975 start,
1976 image_id,
1977 flavor_id,
1978 net_list,
1979 cloud_config,
1980 disk_list,
1981 availability_zone_index,
1982 availability_zone_list,
1983 )
1984 )
1985
1986         # new vm name = vm name + "-" + uuid
1987 new_vm_name = [name, "-", str(uuid.uuid4())]
1988 vmname_andid = "".join(new_vm_name)
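        # e.g. for name "myvm" this yields something like
        # "myvm-550e8400-e29b-41d4-a716-446655440000" (illustrative UUID)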
1989
1990 for net in net_list:
1991 if net["type"] == "PCI-PASSTHROUGH":
1992 raise vimconn.VimConnNotSupportedException(
1993 "Current vCD version does not support type : {}".format(net["type"])
1994 )
1995
1996 if len(net_list) > 10:
1997 raise vimconn.VimConnNotSupportedException(
1998 "The VM hardware versions 7 and above support upto 10 NICs only"
1999 )
2000
2001 # if vm already deployed we return existing uuid
2002 # we check for presence of VDC, Catalog entry and Flavor.
2003 org, vdc = self.get_vdc_details()
2004 if vdc is None:
2005 raise vimconn.VimConnNotFoundException(
2006 "new_vminstance(): Failed create vApp {}: (Failed retrieve VDC information)".format(
2007 name
2008 )
2009 )
2010
2011 catalogs = org.list_catalogs()
2012 if catalogs is None:
2013 # Retry once, if failed by refreshing token
2014 self.get_token()
2015 org = Org(self.client, resource=self.client.get_org())
2016 catalogs = org.list_catalogs()
2017
2018 if catalogs is None:
2019 raise vimconn.VimConnNotFoundException(
2020 "new_vminstance(): Failed create vApp {}: (Failed retrieve catalogs list)".format(
2021 name
2022 )
2023 )
2024
2025 catalog_hash_name = self.get_catalogbyid(
2026 catalog_uuid=image_id, catalogs=catalogs
2027 )
2028 if catalog_hash_name:
2029 self.logger.info(
2030 "Found catalog entry {} for image id {}".format(
2031 catalog_hash_name, image_id
2032 )
2033 )
2034 else:
2035 raise vimconn.VimConnNotFoundException(
2036 "new_vminstance(): Failed create vApp {}: "
2037 "(Failed retrieve catalog information {})".format(name, image_id)
2038 )
2039
2040 # Set vCPU and Memory based on flavor.
2041 vm_cpus = None
2042 vm_memory = None
2043 vm_disk = None
2044 numas = None
2045
2046 if flavor_id is not None:
2047 if flavor_id not in vimconnector.flavorlist:
2048 raise vimconn.VimConnNotFoundException(
2049 "new_vminstance(): Failed create vApp {}: "
2050 "Failed retrieve flavor information "
2051 "flavor id {}".format(name, flavor_id)
2052 )
2053 else:
2054 try:
2055 flavor = vimconnector.flavorlist[flavor_id]
2056 vm_cpus = flavor[FLAVOR_VCPUS_KEY]
2057 vm_memory = flavor[FLAVOR_RAM_KEY]
2058 vm_disk = flavor[FLAVOR_DISK_KEY]
2059 extended = flavor.get("extended", None)
2060
2061 if extended:
2062 numas = extended.get("numas", None)
2063 except Exception as exp:
2064 raise vimconn.VimConnException(
2065 "Corrupted flavor. {}.Exception: {}".format(flavor_id, exp)
2066 )
2067
2068         # image upload creates the template name as "<catalog name> Template" (catalog name + space + "Template").
2069 templateName = self.get_catalogbyid(catalog_uuid=image_id, catalogs=catalogs)
2070 # power_on = 'false'
2071 # if start:
2072 # power_on = 'true'
2073
2074         # client must provide at least one entry in net_list; if not, we report an error
2075 # If net type is mgmt, then configure it as primary net & use its NIC index as primary NIC
2076         # If there is no mgmt net, then the first net in net_list is considered the primary net.
2077 primary_net = None
2078 primary_netname = None
2079 primary_net_href = None
2080 # network_mode = 'bridged'
2081 if net_list is not None and len(net_list) > 0:
2082 for net in net_list:
2083 if "use" in net and net["use"] == "mgmt" and not primary_net:
2084 primary_net = net
2085
2086 if primary_net is None:
2087 primary_net = net_list[0]
2088
2089 try:
2090 primary_net_id = primary_net["net_id"]
2091 url_list = [self.url, "/api/network/", primary_net_id]
2092 primary_net_href = "".join(url_list)
2093 network_dict = self.get_vcd_network(network_uuid=primary_net_id)
2094
2095 if "name" in network_dict:
2096 primary_netname = network_dict["name"]
2097 except KeyError:
2098 raise vimconn.VimConnException(
2099 "Corrupted flavor. {}".format(primary_net)
2100 )
2101 else:
2102 raise vimconn.VimConnUnexpectedResponse(
2103 "new_vminstance(): Failed network list is empty."
2104 )
2105
2106 # use: 'data', 'bridge', 'mgmt'
2107 # create vApp. Set vcpu and ram based on flavor id.
2108 try:
2109 vdc_obj = VDC(self.client, resource=org.get_vdc(self.tenant_name))
2110 if not vdc_obj:
2111 raise vimconn.VimConnNotFoundException(
2112 "new_vminstance(): Failed to get VDC object"
2113 )
2114
2115 for retry in (1, 2):
2116 items = org.get_catalog_item(catalog_hash_name, catalog_hash_name)
2117 catalog_items = [items.attrib]
2118
2119 if len(catalog_items) == 1:
2120 if self.client:
2121 headers = {
2122 "Accept": "application/*+xml;version=" + API_VERSION,
2123 "x-vcloud-authorization": self.client._session.headers[
2124 "x-vcloud-authorization"
2125 ],
2126 }
2127
2128 response = self.perform_request(
2129 req_type="GET",
2130 url=catalog_items[0].get("href"),
2131 headers=headers,
2132 )
2133 catalogItem = XmlElementTree.fromstring(response.text)
2134 entity = [
2135 child
2136 for child in catalogItem
2137 if child.get("type")
2138 == "application/vnd.vmware.vcloud.vAppTemplate+xml"
2139 ][0]
2140                     vapp_template_href = entity.get("href")
2141
2142 response = self.perform_request(
2143 req_type="GET", url=vapp_tempalte_href, headers=headers
2144 )
2145
2146 if response.status_code != requests.codes.ok:
2147 self.logger.debug(
2148 "REST API call {} failed. Return status code {}".format(
2149                                 vapp_template_href, response.status_code
2150 )
2151 )
2152 else:
2153 result = (response.text).replace("\n", " ")
2154
2155 vapp_template_tree = XmlElementTree.fromstring(response.text)
2156 children_element = [
2157 child for child in vapp_template_tree if "Children" in child.tag
2158 ][0]
2159 vm_element = [child for child in children_element if "Vm" in child.tag][
2160 0
2161 ]
2162 vm_name = vm_element.get("name")
2163 vm_id = vm_element.get("id")
2164 vm_href = vm_element.get("href")
2165
2166 # cpus = re.search('<rasd:Description>Number of Virtual CPUs</.*?>(\d+)</rasd:VirtualQuantity>',
2167 # result).group(1)
2168 memory_mb = re.search(
2169 "<rasd:Description>Memory Size</.*?>(\d+)</rasd:VirtualQuantity>",
2170 result,
2171 ).group(1)
2172 # cores = re.search('<vmw:CoresPerSocket ovf:required.*?>(\d+)</vmw:CoresPerSocket>', result).group(1)
2173
2174 headers[
2175 "Content-Type"
2176 ] = "application/vnd.vmware.vcloud.instantiateVAppTemplateParams+xml"
2177 vdc_id = vdc.get("id").split(":")[-1]
2178 instantiate_vapp_href = (
2179 "{}/api/vdc/{}/action/instantiateVAppTemplate".format(
2180 self.url, vdc_id
2181 )
2182 )
2183
2184 with open(
2185 os.path.join(
2186 os.path.dirname(__file__), "InstantiateVAppTemplateParams.xml"
2187 ),
2188 "r",
2189 ) as f:
2190 template = f.read()
2191
2192 data = template.format(
2193 vmname_andid,
2194 primary_netname,
2195 primary_net_href,
2196                     vapp_template_href,
2197 vm_href,
2198 vm_id,
2199 vm_name,
2200 primary_netname,
2201 cpu=vm_cpus,
2202 core=1,
2203 memory=vm_memory,
2204 )
2205
2206 response = self.perform_request(
2207 req_type="POST",
2208 url=instantiate_vapp_href,
2209 headers=headers,
2210 data=data,
2211 )
2212
2213 if response.status_code != 201:
2214 self.logger.error(
2215 "REST call {} failed reason : {}"
2216 "status code : {}".format(
2217 instantiate_vapp_href, response.text, response.status_code
2218 )
2219 )
2220 raise vimconn.VimConnException(
2221 "new_vminstance(): Failed to create"
2222 "vAapp {}".format(vmname_andid)
2223 )
2224 else:
2225 vapptask = self.get_task_from_response(response.text)
2226
2227 if vapptask is None and retry == 1:
2228 self.get_token() # Retry getting token
2229 continue
2230 else:
2231 break
2232
2233 if vapptask is None or vapptask is False:
2234 raise vimconn.VimConnUnexpectedResponse(
2235 "new_vminstance(): failed to create vApp {}".format(vmname_andid)
2236 )
2237
2238 # wait for task to complete
2239 result = self.client.get_task_monitor().wait_for_success(task=vapptask)
2240
2241 if result.get("status") == "success":
2242 self.logger.debug(
2243 "new_vminstance(): Sucessfully created Vapp {}".format(vmname_andid)
2244 )
2245 else:
2246 raise vimconn.VimConnUnexpectedResponse(
2247 "new_vminstance(): failed to create vApp {}".format(vmname_andid)
2248 )
2249 except Exception as exp:
2250 raise vimconn.VimConnUnexpectedResponse(
2251 "new_vminstance(): failed to create vApp {} with Exception:{}".format(
2252 vmname_andid, exp
2253 )
2254 )
2255
2256         # we should now have the vApp in undeployed state.
2257 try:
2258 vdc_obj = VDC(self.client, href=vdc.get("href"))
2259 vapp_resource = vdc_obj.get_vapp(vmname_andid)
2260 vapp_uuid = vapp_resource.get("id").split(":")[-1]
2261 vapp = VApp(self.client, resource=vapp_resource)
2262 except Exception as exp:
2263 raise vimconn.VimConnUnexpectedResponse(
2264 "new_vminstance(): Failed to retrieve vApp {} after creation: Exception:{}".format(
2265 vmname_andid, exp
2266 )
2267 )
2268
2269 if vapp_uuid is None:
2270 raise vimconn.VimConnUnexpectedResponse(
2271 "new_vminstance(): Failed to retrieve vApp {} after creation".format(
2272 vmname_andid
2273 )
2274 )
2275
2276         # Add PCI passthrough/SRIOV configurations
2277 pci_devices_info = []
2278 reserve_memory = False
2279
2280 for net in net_list:
2281 if net["type"] == "PF" or net["type"] == "PCI-PASSTHROUGH":
2282 pci_devices_info.append(net)
2283 elif (
2284 net["type"] == "VF"
2285 or net["type"] == "SR-IOV"
2286 or net["type"] == "VFnotShared"
2287 ) and "net_id" in net:
2288 reserve_memory = True
2289
2290 # Add PCI
2291 if len(pci_devices_info) > 0:
2292 self.logger.info(
2293 "Need to add PCI devices {} into VM {}".format(
2294 pci_devices_info, vmname_andid
2295 )
2296 )
2297 PCI_devices_status, _, _ = self.add_pci_devices(
2298 vapp_uuid, pci_devices_info, vmname_andid
2299 )
2300
2301 if PCI_devices_status:
2302 self.logger.info(
2303 "Added PCI devives {} to VM {}".format(
2304 pci_devices_info, vmname_andid
2305 )
2306 )
2307 reserve_memory = True
2308 else:
2309 self.logger.info(
2310 "Fail to add PCI devives {} to VM {}".format(
2311 pci_devices_info, vmname_andid
2312 )
2313 )
2314
2315 # Add serial console - this allows cloud images to boot as if we are running under OpenStack
2316 self.add_serial_device(vapp_uuid)
2317
2318 if vm_disk:
2319 # Assuming there is only one disk in ovf and fast provisioning in organization vDC is disabled
2320 result = self.modify_vm_disk(vapp_uuid, vm_disk)
2321 if result:
2322 self.logger.debug("Modified Disk size of VM {} ".format(vmname_andid))
2323
2324 # Add new or existing disks to vApp
2325 if disk_list:
2326 added_existing_disk = False
2327 for disk in disk_list:
2328 if "device_type" in disk and disk["device_type"] == "cdrom":
2329 image_id = disk["image_id"]
2330 # Adding CD-ROM to VM
2331 # will revisit code once specification ready to support this feature
2332 self.insert_media_to_vm(vapp, image_id)
2333 elif "image_id" in disk and disk["image_id"] is not None:
2334 self.logger.debug(
2335 "Adding existing disk from image {} to vm {} ".format(
2336 disk["image_id"], vapp_uuid
2337 )
2338 )
2339 self.add_existing_disk(
2340 catalogs=catalogs,
2341 image_id=disk["image_id"],
2342 size=disk["size"],
2343 template_name=templateName,
2344 vapp_uuid=vapp_uuid,
2345 )
2346 added_existing_disk = True
2347 else:
2348 # Wait till added existing disk gets reflected into vCD database/API
2349 if added_existing_disk:
2350 time.sleep(5)
2351 added_existing_disk = False
2352 self.add_new_disk(vapp_uuid, disk["size"])
2353
2354 if numas:
2355 # Assigning numa affinity setting
2356 for numa in numas:
2357 if "paired-threads-id" in numa:
2358 paired_threads_id = numa["paired-threads-id"]
2359 self.set_numa_affinity(vapp_uuid, paired_threads_id)
2360
2361 # add NICs & connect to networks in netlist
2362 try:
2363 vdc_obj = VDC(self.client, href=vdc.get("href"))
2364 vapp_resource = vdc_obj.get_vapp(vmname_andid)
2365 vapp = VApp(self.client, resource=vapp_resource)
2366 vapp_id = vapp_resource.get("id").split(":")[-1]
2367
2368 self.logger.info("Removing primary NIC: ")
2369 # First remove all NICs so that NIC properties can be adjusted as needed
2370 self.remove_primary_network_adapter_from_all_vms(vapp)
2371
2372 self.logger.info("Request to connect VM to a network: {}".format(net_list))
2373 primary_nic_index = 0
2374 nicIndex = 0
2375 for net in net_list:
2376 # openmano uses network id in UUID format.
2377                 # vCloud Director needs a name, so we do the reverse operation: from the provided UUID we look up a name
2378 # [{'use': 'bridge', 'net_id': '527d4bf7-566a-41e7-a9e7-ca3cdd9cef4f', 'type': 'virtual',
2379 # 'vpci': '0000:00:11.0', 'name': 'eth0'}]
2380
2381 if "net_id" not in net:
2382 continue
2383
2384                 # Using net_id as the vim_id i.e. vim interface id, as we do not have a separate vim interface id
2385 # Same will be returned in refresh_vms_status() as vim_interface_id
2386 net["vim_id"] = net[
2387 "net_id"
2388 ] # Provide the same VIM identifier as the VIM network
2389
2390 interface_net_id = net["net_id"]
2391 interface_net_name = self.get_network_name_by_id(
2392 network_uuid=interface_net_id
2393 )
2394 interface_network_mode = net["use"]
2395
2396 if interface_network_mode == "mgmt":
2397 primary_nic_index = nicIndex
2398
2399 """- POOL (A static IP address is allocated automatically from a pool of addresses.)
2400 - DHCP (The IP address is obtained from a DHCP service.)
2401 - MANUAL (The IP address is assigned manually in the IpAddress element.)
2402 - NONE (No IP addressing mode specified.)"""
2403
2404 if primary_netname is not None:
2405 self.logger.debug(
2406 "new_vminstance(): Filtering by net name {}".format(
2407 interface_net_name
2408 )
2409 )
2410 nets = [
2411 n
2412 for n in self.get_network_list()
2413 if n.get("name") == interface_net_name
2414 ]
2415
2416 if len(nets) == 1:
2417 self.logger.info(
2418 "new_vminstance(): Found requested network: {}".format(
2419 nets[0].get("name")
2420 )
2421 )
2422
2423 if interface_net_name != primary_netname:
2424 # connect network to VM - with all DHCP by default
2425 self.logger.info(
2426 "new_vminstance(): Attaching net {} to vapp".format(
2427 interface_net_name
2428 )
2429 )
2430 self.connect_vapp_to_org_vdc_network(
2431 vapp_id, nets[0].get("name")
2432 )
2433
2434 type_list = ("PF", "PCI-PASSTHROUGH", "VFnotShared")
2435 nic_type = "VMXNET3"
2436 if "type" in net and net["type"] not in type_list:
2437 # fetching nic type from vnf
2438 if "model" in net:
2439 if net["model"] is not None:
2440 if (
2441 net["model"].lower() == "paravirt"
2442 or net["model"].lower() == "virtio"
2443 ):
2444 nic_type = "VMXNET3"
2445 else:
2446 nic_type = net["model"]
2447
2448 self.logger.info(
2449 "new_vminstance(): adding network adapter "
2450 "to a network {}".format(nets[0].get("name"))
2451 )
2452 self.add_network_adapter_to_vms(
2453 vapp,
2454 nets[0].get("name"),
2455 primary_nic_index,
2456 nicIndex,
2457 net,
2458 nic_type=nic_type,
2459 )
2460 else:
2461 self.logger.info(
2462 "new_vminstance(): adding network adapter "
2463 "to a network {}".format(nets[0].get("name"))
2464 )
2465
2466 if net["type"] in ["SR-IOV", "VF"]:
2467 nic_type = net["type"]
2468 self.add_network_adapter_to_vms(
2469 vapp,
2470 nets[0].get("name"),
2471 primary_nic_index,
2472 nicIndex,
2473 net,
2474 nic_type=nic_type,
2475 )
2476 nicIndex += 1
2477
2478 # cloud-init for ssh-key injection
2479 if cloud_config:
2480 # Create a catalog which will be carrying the config drive ISO
2481 # This catalog is deleted during vApp deletion. The catalog name carries
2482                 # vApp UUID and that's how it gets identified during its deletion.
2483 config_drive_catalog_name = "cfg_drv-" + vapp_uuid
2484 self.logger.info(
2485 'new_vminstance(): Creating catalog "{}" to carry config drive ISO'.format(
2486 config_drive_catalog_name
2487 )
2488 )
2489 config_drive_catalog_id = self.create_vimcatalog(
2490 org, config_drive_catalog_name
2491 )
2492
2493 if config_drive_catalog_id is None:
2494 error_msg = (
2495 "new_vminstance(): Failed to create new catalog '{}' to carry the config drive "
2496 "ISO".format(config_drive_catalog_name)
2497 )
2498 raise Exception(error_msg)
2499
2500 # Create config-drive ISO
2501 _, userdata = self._create_user_data(cloud_config)
2502 # self.logger.debug('new_vminstance(): The userdata for cloud-init: {}'.format(userdata))
2503 iso_path = self.create_config_drive_iso(userdata)
2504 self.logger.debug(
2505 "new_vminstance(): The ISO is successfully created. Path: {}".format(
2506 iso_path
2507 )
2508 )
2509
2510 self.logger.info(
2511 "new_vminstance(): uploading iso to catalog {}".format(
2512 config_drive_catalog_name
2513 )
2514 )
2515 self.upload_iso_to_catalog(config_drive_catalog_id, iso_path)
2516 # Attach the config-drive ISO to the VM
2517 self.logger.info(
2518 "new_vminstance(): Attaching the config-drive ISO to the VM"
2519 )
2520 self.insert_media_to_vm(vapp, config_drive_catalog_id)
2521 shutil.rmtree(os.path.dirname(iso_path), ignore_errors=True)
2522
2523             # If the VM has PCI devices or SR-IOV, reserve memory for the VM
2524 if reserve_memory:
2525 self.reserve_memory_for_all_vms(vapp, memory_mb)
2526
2527 self.logger.debug(
2528 "new_vminstance(): starting power on vApp {} ".format(vmname_andid)
2529 )
2530
2531 poweron_task = self.power_on_vapp(vapp_id, vmname_andid)
2532 result = self.client.get_task_monitor().wait_for_success(task=poweron_task)
2533 if result.get("status") == "success":
2534 self.logger.info(
2535 "new_vminstance(): Successfully power on "
2536 "vApp {}".format(vmname_andid)
2537 )
2538 else:
2539 self.logger.error(
2540 "new_vminstance(): failed to power on vApp "
2541 "{}".format(vmname_andid)
2542 )
2543
2544 except Exception as exp:
2545 try:
2546 self.delete_vminstance(vapp_uuid)
2547 except Exception as exp2:
2548 self.logger.error("new_vminstance rollback fail {}".format(exp2))
2549             # it might be the case that a specific mandatory entry in the dict is empty, or some other pyvcloud exception
2550 self.logger.error(
2551 "new_vminstance(): Failed create new vm instance {} with exception {}".format(
2552 name, exp
2553 )
2554 )
2555 raise vimconn.VimConnException(
2556 "new_vminstance(): Failed create new vm instance {} with exception {}".format(
2557 name, exp
2558 )
2559 )
2560         # check if the vApp is deployed and, if that is the case, return the vApp UUID; otherwise raise an exception
2561 wait_time = 0
2562 vapp_uuid = None
2563 while wait_time <= MAX_WAIT_TIME:
2564 try:
2565 vapp_resource = vdc_obj.get_vapp(vmname_andid)
2566 vapp = VApp(self.client, resource=vapp_resource)
2567 except Exception as exp:
2568 raise vimconn.VimConnUnexpectedResponse(
2569 "new_vminstance(): Failed to retrieve vApp {} after creation: Exception:{}".format(
2570 vmname_andid, exp
2571 )
2572 )
2573
2574 # if vapp and vapp.me.deployed:
2575 if vapp and vapp_resource.get("deployed") == "true":
2576 vapp_uuid = vapp_resource.get("id").split(":")[-1]
2577 break
2578 else:
2579 self.logger.debug(
2580 "new_vminstance(): Wait for vApp {} to deploy".format(name)
2581 )
2582 time.sleep(INTERVAL_TIME)
2583
2584 wait_time += INTERVAL_TIME
2585
2586 # SET Affinity Rule for VM
2587         # Pre-requisites: User has created Host Groups in vCenter with respective Hosts to be used
2588 # While creating VIM account user has to pass the Host Group names in availability_zone list
2589 # "availability_zone" is a part of VIM "config" parameters
2590 # For example, in VIM config: "availability_zone":["HG_170","HG_174","HG_175"]
2591 # Host groups are referred as availability zones
2592 # With following procedure, deployed VM will be added into a VM group.
2593         # Then a VM to Host affinity rule will be created using the VM group & Host group.
2594 if availability_zone_list:
2595 self.logger.debug(
2596 "Existing Host Groups in VIM {}".format(
2597 self.config.get("availability_zone")
2598 )
2599 )
2600 # Admin access required for creating Affinity rules
2601 client = self.connect_as_admin()
2602
2603 if not client:
2604 raise vimconn.VimConnConnectionException(
2605 "Failed to connect vCD as admin"
2606 )
2607 else:
2608 self.client = client
2609
2610 if self.client:
2611 headers = {
2612 "Accept": "application/*+xml;version=27.0",
2613 "x-vcloud-authorization": self.client._session.headers[
2614 "x-vcloud-authorization"
2615 ],
2616 }
2617
2618 # Step1: Get provider vdc details from organization
2619 pvdc_href = self.get_pvdc_for_org(self.tenant_name, headers)
2620 if pvdc_href is not None:
2621 # Step2: Found required pvdc, now get resource pool information
2622 respool_href = self.get_resource_pool_details(pvdc_href, headers)
2623 if respool_href is None:
2624 # Raise error if respool_href not found
2625 msg = "new_vminstance():Error in finding resource pool details in pvdc {}".format(
2626 pvdc_href
2627 )
2628 self.log_message(msg)
2629
2630 # Step3: Verify requested availability zone(hostGroup) is present in vCD
2631 # get availability Zone
2632 vm_az = self.get_vm_availability_zone(
2633 availability_zone_index, availability_zone_list
2634 )
2635
2636 # check if provided av zone(hostGroup) is present in vCD VIM
2637 status = self.check_availibility_zone(vm_az, respool_href, headers)
2638 if status is False:
2639 msg = (
2640 "new_vminstance(): Error in finding availability zone(Host Group): {} in "
2641 "resource pool {} status: {}"
2642 ).format(vm_az, respool_href, status)
2643 self.log_message(msg)
2644 else:
2645 self.logger.debug(
2646 "new_vminstance(): Availability zone {} found in VIM".format(vm_az)
2647 )
2648
2649 # Step4: Find VM group references to create vm group
2650 vmgrp_href = self.find_vmgroup_reference(respool_href, headers)
2651 if vmgrp_href is None:
2652 msg = "new_vminstance(): No reference to VmGroup found in resource pool"
2653 self.log_message(msg)
2654
2655 # Step5: Create a VmGroup with name az_VmGroup
2656 vmgrp_name = (
2657 vm_az + "_" + name
2658 ) # Formed VM Group name = Host Group name + VM name
2659 status = self.create_vmgroup(vmgrp_name, vmgrp_href, headers)
2660 if status is not True:
2661 msg = "new_vminstance(): Error in creating VM group {}".format(
2662 vmgrp_name
2663 )
2664 self.log_message(msg)
2665
2666 # VM Group url to add vms to vm group
2667 vmgrpname_url = self.url + "/api/admin/extension/vmGroup/name/" + vmgrp_name
2668
2669 # Step6: Add VM to VM Group
2670 # Find VM uuid from vapp_uuid
2671 vm_details = self.get_vapp_details_rest(vapp_uuid)
2672 vm_uuid = vm_details["vmuuid"]
2673
2674 status = self.add_vm_to_vmgroup(vm_uuid, vmgrpname_url, vmgrp_name, headers)
2675 if status is not True:
2676 msg = "new_vminstance(): Error in adding VM to VM group {}".format(
2677 vmgrp_name
2678 )
2679 self.log_message(msg)
2680
2681 # Step7: Create VM to Host affinity rule
2682 addrule_href = self.get_add_rule_reference(respool_href, headers)
2683 if addrule_href is None:
2684 msg = "new_vminstance(): Error in finding href to add rule in resource pool: {}".format(
2685 respool_href
2686 )
2687 self.log_message(msg)
2688
2689 status = self.create_vm_to_host_affinity_rule(
2690 addrule_href, vmgrp_name, vm_az, "Affinity", headers
2691 )
2692 if status is False:
2693 msg = "new_vminstance(): Error in creating affinity rule for VM {} in Host group {}".format(
2694 name, vm_az
2695 )
2696 self.log_message(msg)
2697 else:
2698 self.logger.debug(
2699 "new_vminstance(): Affinity rule created successfully. Added {} in Host group {}".format(
2700 name, vm_az
2701 )
2702 )
2703 # Reset token to a normal user to perform other operations
2704 self.get_token()
2705
2706 if vapp_uuid is not None:
2707 return vapp_uuid, None
2708 else:
2709 raise vimconn.VimConnUnexpectedResponse(
2710 "new_vminstance(): Failed create new vm instance {}".format(name)
2711 )
2712
2713 def create_config_drive_iso(self, user_data):
2714 tmpdir = tempfile.mkdtemp()
2715 iso_path = os.path.join(tmpdir, "ConfigDrive.iso")
2716 latest_dir = os.path.join(tmpdir, "openstack", "latest")
2717 os.makedirs(latest_dir)
2718 with open(
2719 os.path.join(latest_dir, "meta_data.json"), "w"
2720 ) as meta_file_obj, open(
2721 os.path.join(latest_dir, "user_data"), "w"
2722 ) as userdata_file_obj:
2723 userdata_file_obj.write(user_data)
2724 meta_file_obj.write(
2725 json.dumps(
2726 {
2727 "availability_zone": "nova",
2728 "launch_index": 0,
2729 "name": "ConfigDrive",
2730 "uuid": str(uuid.uuid4()),
2731 }
2732 )
2733 )
2734 genisoimage_cmd = (
2735 "genisoimage -J -r -V config-2 -o {iso_path} {source_dir_path}".format(
2736 iso_path=iso_path, source_dir_path=tmpdir
2737 )
2738 )
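        # Illustrative layout of the generated config-drive ISO (built from the files written above):
        #   /openstack/latest/meta_data.json -> {"availability_zone": "nova", "launch_index": 0,
        #                                        "name": "ConfigDrive", "uuid": "<random uuid>"}
        #   /openstack/latest/user_data      -> the cloud-init user data passed in `user_data`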
2739 self.logger.info(
2740 'create_config_drive_iso(): Creating ISO by running command "{}"'.format(
2741 genisoimage_cmd
2742 )
2743 )
2744
2745 try:
2746 FNULL = open(os.devnull, "w")
2747 subprocess.check_call(genisoimage_cmd, shell=True, stdout=FNULL)
2748 except subprocess.CalledProcessError as e:
2749 shutil.rmtree(tmpdir, ignore_errors=True)
2750 error_msg = "create_config_drive_iso(): Exception while running genisoimage command: {}".format(
2751 e
2752 )
2753 self.logger.error(error_msg)
2754 raise Exception(error_msg)
2755
2756 return iso_path
2757
2758 def upload_iso_to_catalog(self, catalog_id, iso_file_path):
2759 if not os.path.isfile(iso_file_path):
2760 error_msg = "upload_iso_to_catalog(): Given iso file is not present. Given path: {}".format(
2761 iso_file_path
2762 )
2763 self.logger.error(error_msg)
2764 raise Exception(error_msg)
2765
2766 iso_file_stat = os.stat(iso_file_path)
2767 xml_media_elem = """<?xml version="1.0" encoding="UTF-8"?>
2768 <Media
2769 xmlns="http://www.vmware.com/vcloud/v1.5"
2770 name="{iso_name}"
2771 size="{iso_size}"
2772 imageType="iso">
2773 <Description>ISO image for config-drive</Description>
2774 </Media>""".format(
2775 iso_name=os.path.basename(iso_file_path), iso_size=iso_file_stat.st_size
2776 )
2777 headers = {
2778 "Accept": "application/*+xml;version=" + API_VERSION,
2779 "x-vcloud-authorization": self.client._session.headers[
2780 "x-vcloud-authorization"
2781 ],
2782 }
2783 headers["Content-Type"] = "application/vnd.vmware.vcloud.media+xml"
2784 catalog_href = self.url + "/api/catalog/" + catalog_id + "/action/upload"
2785 response = self.perform_request(
2786 req_type="POST", url=catalog_href, headers=headers, data=xml_media_elem
2787 )
2788
2789 if response.status_code != 201:
2790 error_msg = "upload_iso_to_catalog(): Failed to POST an action/upload request to {}".format(
2791 catalog_href
2792 )
2793 self.logger.error(error_msg)
2794 raise Exception(error_msg)
2795
2796 catalogItem = XmlElementTree.fromstring(response.text)
2797 entity = [
2798 child
2799 for child in catalogItem
2800 if child.get("type") == "application/vnd.vmware.vcloud.media+xml"
2801 ][0]
2802 entity_href = entity.get("href")
2803
2804 response = self.perform_request(
2805 req_type="GET", url=entity_href, headers=headers
2806 )
2807 if response.status_code != 200:
2808 raise Exception(
2809 "upload_iso_to_catalog(): Failed to GET entity href {}".format(
2810 entity_href
2811 )
2812 )
2813
2814 match = re.search(
2815 r'<Files>\s+?<File.+?href="(.+?)"/>\s+?</File>\s+?</Files>',
2816 response.text,
2817 re.DOTALL,
2818 )
2819 if match:
2820 media_upload_href = match.group(1)
2821 else:
2822 raise Exception(
2823 "Could not parse the upload URL for the media file from the last response"
2824 )
2825 upload_iso_task = self.get_task_from_response(response.text)
2826 headers["Content-Type"] = "application/octet-stream"
2827 response = self.perform_request(
2828 req_type="PUT",
2829 url=media_upload_href,
2830 headers=headers,
2831 data=open(iso_file_path, "rb"),
2832 )
2833
2834 if response.status_code != 200:
2835 raise Exception('PUT request to "{}" failed'.format(media_upload_href))
2836
2837 result = self.client.get_task_monitor().wait_for_success(task=upload_iso_task)
2838 if result.get("status") != "success":
2839 raise Exception(
2840 "The upload iso task failed with status {}".format(result.get("status"))
2841 )
2842
2843 def get_vcd_availibility_zones(self, respool_href, headers):
2844 """Method to find presence of av zone is VIM resource pool
2845
2846 Args:
2847 respool_href - resource pool href
2848 headers - header information
2849
2850 Returns:
2851             vcd_az - list of availability zones present in vCD
2852 """
2853 vcd_az = []
2854 url = respool_href
2855 resp = self.perform_request(req_type="GET", url=respool_href, headers=headers)
2856
2857 if resp.status_code != requests.codes.ok:
2858 self.logger.debug(
2859 "REST API call {} failed. Return status code {}".format(
2860 url, resp.status_code
2861 )
2862 )
2863 else:
2864 # Get the href to hostGroups and find provided hostGroup is present in it
2865 resp_xml = XmlElementTree.fromstring(resp.content)
2866 for child in resp_xml:
2867 if "VMWProviderVdcResourcePool" in child.tag:
2868 for schild in child:
2869 if "Link" in schild.tag:
2870 if (
2871 schild.attrib.get("type")
2872 == "application/vnd.vmware.admin.vmwHostGroupsType+xml"
2873 ):
2874 hostGroup = schild.attrib.get("href")
2875 hg_resp = self.perform_request(
2876 req_type="GET", url=hostGroup, headers=headers
2877 )
2878
2879 if hg_resp.status_code != requests.codes.ok:
2880 self.logger.debug(
2881 "REST API call {} failed. Return status code {}".format(
2882 hostGroup, hg_resp.status_code
2883 )
2884 )
2885 else:
2886 hg_resp_xml = XmlElementTree.fromstring(
2887 hg_resp.content
2888 )
2889 for hostGroup in hg_resp_xml:
2890 if "HostGroup" in hostGroup.tag:
2891 # append host group name to the list
2892 vcd_az.append(hostGroup.attrib.get("name"))
2893
2894 return vcd_az
2895
2896 def set_availability_zones(self):
2897 """
2898 Set vim availability zone
2899 """
2900 vim_availability_zones = None
2901 availability_zone = None
2902
2903 if "availability_zone" in self.config:
2904 vim_availability_zones = self.config.get("availability_zone")
2905
2906 if isinstance(vim_availability_zones, str):
2907 availability_zone = [vim_availability_zones]
2908 elif isinstance(vim_availability_zones, list):
2909 availability_zone = vim_availability_zones
2910 else:
2911 return availability_zone
2912
2913 return availability_zone
2914
2915 def get_vm_availability_zone(self, availability_zone_index, availability_zone_list):
2916 """
2917 Return the availability zone to be used by the created VM.
2918 returns: The VIM availability zone to be used or None
2919 """
2920 if availability_zone_index is None:
2921 if not self.config.get("availability_zone"):
2922 return None
2923 elif isinstance(self.config.get("availability_zone"), str):
2924 return self.config["availability_zone"]
2925 else:
2926 return self.config["availability_zone"][0]
2927
2928 vim_availability_zones = self.availability_zone
2929
2930         # check if the VIM offers enough availability zones as described in the VNFD
2931 if vim_availability_zones and len(availability_zone_list) <= len(
2932 vim_availability_zones
2933 ):
2934 # check if all the names of NFV AV match VIM AV names
2935 match_by_index = False
2936 for av in availability_zone_list:
2937 if av not in vim_availability_zones:
2938 match_by_index = True
2939 break
2940
2941 if match_by_index:
2942 self.logger.debug(
2943 "Required Availability zone or Host Group not found in VIM config"
2944 )
2945 self.logger.debug(
2946 "Input Availability zone list: {}".format(availability_zone_list)
2947 )
2948 self.logger.debug(
2949 "VIM configured Availability zones: {}".format(
2950 vim_availability_zones
2951 )
2952 )
2953 self.logger.debug("VIM Availability zones will be used by index")
2954 return vim_availability_zones[availability_zone_index]
2955 else:
2956 return availability_zone_list[availability_zone_index]
2957 else:
2958 raise vimconn.VimConnConflictException(
2959 "No enough availability zones at VIM for this deployment"
2960 )
2961
2962 def create_vm_to_host_affinity_rule(
2963 self, addrule_href, vmgrpname, hostgrpname, polarity, headers
2964 ):
2965 """Method to create VM to Host Affinity rule in vCD
2966
2967 Args:
2968 addrule_href - href to make a POST request
2969 vmgrpname - name of the VM group created
2970             hostgrpname - name of the host group created earlier
2971 polarity - Affinity or Anti-affinity (default: Affinity)
2972 headers - headers to make REST call
2973
2974 Returns:
2975 True- if rule is created
2976 False- Failed to create rule due to some error
2977
2978 """
2979 task_status = False
2980 rule_name = polarity + "_" + vmgrpname
2981 payload = """<?xml version="1.0" encoding="UTF-8"?>
2982 <vmext:VMWVmHostAffinityRule
2983 xmlns:vmext="http://www.vmware.com/vcloud/extension/v1.5"
2984 xmlns:vcloud="http://www.vmware.com/vcloud/v1.5"
2985 type="application/vnd.vmware.admin.vmwVmHostAffinityRule+xml">
2986 <vcloud:Name>{}</vcloud:Name>
2987 <vcloud:IsEnabled>true</vcloud:IsEnabled>
2988 <vcloud:IsMandatory>true</vcloud:IsMandatory>
2989 <vcloud:Polarity>{}</vcloud:Polarity>
2990 <vmext:HostGroupName>{}</vmext:HostGroupName>
2991 <vmext:VmGroupName>{}</vmext:VmGroupName>
2992 </vmext:VMWVmHostAffinityRule>""".format(
2993 rule_name, polarity, hostgrpname, vmgrpname
2994 )
2995
2996 resp = self.perform_request(
2997 req_type="POST", url=addrule_href, headers=headers, data=payload
2998 )
2999
3000 if resp.status_code != requests.codes.accepted:
3001 self.logger.debug(
3002 "REST API call {} failed. Return status code {}".format(
3003 addrule_href, resp.status_code
3004 )
3005 )
3006 task_status = False
3007
3008 return task_status
3009 else:
3010 affinity_task = self.get_task_from_response(resp.content)
3011 self.logger.debug("affinity_task: {}".format(affinity_task))
3012
3013 if affinity_task is None or affinity_task is False:
3014 raise vimconn.VimConnUnexpectedResponse("failed to find affinity task")
3015 # wait for task to complete
3016 result = self.client.get_task_monitor().wait_for_success(task=affinity_task)
3017
3018 if result.get("status") == "success":
3019 self.logger.debug(
3020 "Successfully created affinity rule {}".format(rule_name)
3021 )
3022 return True
3023 else:
3024 raise vimconn.VimConnUnexpectedResponse(
3025 "failed to create affinity rule {}".format(rule_name)
3026 )
3027
3028 def get_add_rule_reference(self, respool_href, headers):
3029 """This method finds href to add vm to host affinity rule to vCD
3030
3031 Args:
3032 respool_href- href to resource pool
3033 headers- header information to make REST call
3034
3035 Returns:
3036 None - if no valid href to add rule found or
3037 addrule_href - href to add vm to host affinity rule of resource pool
3038 """
3039 addrule_href = None
3040 resp = self.perform_request(req_type="GET", url=respool_href, headers=headers)
3041
3042 if resp.status_code != requests.codes.ok:
3043 self.logger.debug(
3044 "REST API call {} failed. Return status code {}".format(
3045 respool_href, resp.status_code
3046 )
3047 )
3048 else:
3049 resp_xml = XmlElementTree.fromstring(resp.content)
3050 for child in resp_xml:
3051 if "VMWProviderVdcResourcePool" in child.tag:
3052 for schild in child:
3053 if "Link" in schild.tag:
3054 if (
3055 schild.attrib.get("type")
3056 == "application/vnd.vmware.admin.vmwVmHostAffinityRule+xml"
3057 and schild.attrib.get("rel") == "add"
3058 ):
3059 addrule_href = schild.attrib.get("href")
3060 break
3061
3062 return addrule_href
3063
3064 def add_vm_to_vmgroup(self, vm_uuid, vmGroupNameURL, vmGroup_name, headers):
3065 """Method to add deployed VM to newly created VM Group.
3066 This is required to create VM to Host affinity in vCD
3067
3068 Args:
3069 vm_uuid- newly created vm uuid
3070 vmGroupNameURL- URL to VM Group name
3071 vmGroup_name- Name of VM group created
3072 headers- Headers for REST request
3073
3074 Returns:
3075 True- if VM added to VM group successfully
3076             False- if any error is encountered
3077 """
3078 addvm_resp = self.perform_request(
3079 req_type="GET", url=vmGroupNameURL, headers=headers
3080 ) # , data=payload)
3081
3082 if addvm_resp.status_code != requests.codes.ok:
3083 self.logger.debug(
3084 "REST API call to get VM Group Name url {} failed. Return status code {}".format(
3085 vmGroupNameURL, addvm_resp.status_code
3086 )
3087 )
3088 return False
3089 else:
3090 resp_xml = XmlElementTree.fromstring(addvm_resp.content)
3091 for child in resp_xml:
3092 if child.tag.split("}")[1] == "Link":
3093 if child.attrib.get("rel") == "addVms":
3094 addvmtogrpURL = child.attrib.get("href")
3095
3096 # Get vm details
3097 url_list = [self.url, "/api/vApp/vm-", vm_uuid]
3098 vmdetailsURL = "".join(url_list)
3099
3100 resp = self.perform_request(req_type="GET", url=vmdetailsURL, headers=headers)
3101
3102 if resp.status_code != requests.codes.ok:
3103 self.logger.debug(
3104 "REST API call {} failed. Return status code {}".format(
3105 vmdetailsURL, resp.status_code
3106 )
3107 )
3108 return False
3109
3110 # Parse VM details
3111 resp_xml = XmlElementTree.fromstring(resp.content)
3112 if resp_xml.tag.split("}")[1] == "Vm":
3113 vm_id = resp_xml.attrib.get("id")
3114 vm_name = resp_xml.attrib.get("name")
3115 vm_href = resp_xml.attrib.get("href")
3116 # print vm_id, vm_name, vm_href
3117
3118 # Add VM into VMgroup
3119 payload = """<?xml version="1.0" encoding="UTF-8"?>\
3120 <ns2:Vms xmlns:ns2="http://www.vmware.com/vcloud/v1.5" \
3121 xmlns="http://www.vmware.com/vcloud/versions" \
3122 xmlns:ns3="http://schemas.dmtf.org/ovf/envelope/1" \
3123 xmlns:ns4="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData" \
3124 xmlns:ns5="http://schemas.dmtf.org/wbem/wscim/1/common" \
3125 xmlns:ns6="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData" \
3126 xmlns:ns7="http://www.vmware.com/schema/ovf" \
3127 xmlns:ns8="http://schemas.dmtf.org/ovf/environment/1" \
3128 xmlns:ns9="http://www.vmware.com/vcloud/extension/v1.5">\
3129 <ns2:VmReference href="{}" id="{}" name="{}" \
3130 type="application/vnd.vmware.vcloud.vm+xml" />\
3131 </ns2:Vms>""".format(
3132 vm_href, vm_id, vm_name
3133 )
3134
3135 addvmtogrp_resp = self.perform_request(
3136 req_type="POST", url=addvmtogrpURL, headers=headers, data=payload
3137 )
3138
3139 if addvmtogrp_resp.status_code != requests.codes.accepted:
3140 self.logger.debug(
3141 "REST API call {} failed. Return status code {}".format(
3142 addvmtogrpURL, addvmtogrp_resp.status_code
3143 )
3144 )
3145
3146 return False
3147 else:
3148 self.logger.debug(
3149 "Done adding VM {} to VMgroup {}".format(vm_name, vmGroup_name)
3150 )
3151
3152 return True
3153
3154 def create_vmgroup(self, vmgroup_name, vmgroup_href, headers):
3155 """Method to create a VM group in vCD
3156
3157 Args:
3158 vmgroup_name : Name of VM group to be created
3159 vmgroup_href : href for vmgroup
3160 headers- Headers for REST request
3161 """
3162 # POST to add URL with required data
3163 vmgroup_status = False
3164 payload = """<VMWVmGroup xmlns="http://www.vmware.com/vcloud/extension/v1.5" \
3165 xmlns:vcloud_v1.5="http://www.vmware.com/vcloud/v1.5" name="{}">\
3166 <vmCount>1</vmCount>\
3167 </VMWVmGroup>""".format(
3168 vmgroup_name
3169 )
3170 resp = self.perform_request(
3171 req_type="POST", url=vmgroup_href, headers=headers, data=payload
3172 )
3173
3174 if resp.status_code != requests.codes.accepted:
3175 self.logger.debug(
3176 "REST API call {} failed. Return status code {}".format(
3177 vmgroup_href, resp.status_code
3178 )
3179 )
3180
3181 return vmgroup_status
3182 else:
3183 vmgroup_task = self.get_task_from_response(resp.content)
3184 if vmgroup_task is None or vmgroup_task is False:
3185 raise vimconn.VimConnUnexpectedResponse(
3186 "create_vmgroup(): failed to create VM group {}".format(
3187 vmgroup_name
3188 )
3189 )
3190
3191 # wait for task to complete
3192 result = self.client.get_task_monitor().wait_for_success(task=vmgroup_task)
3193
3194 if result.get("status") == "success":
3195 self.logger.debug(
3196 "create_vmgroup(): Successfully created VM group {}".format(
3197 vmgroup_name
3198 )
3199 )
3200 # time.sleep(10)
3201 vmgroup_status = True
3202
3203 return vmgroup_status
3204 else:
3205 raise vimconn.VimConnUnexpectedResponse(
3206 "create_vmgroup(): failed to create VM group {}".format(
3207 vmgroup_name
3208 )
3209 )
3210
3211 def find_vmgroup_reference(self, url, headers):
3212 """Method to create a new VMGroup which is required to add created VM
3213 Args:
3214 url- resource pool href
3215 headers- header information
3216
3217 Returns:
3218             vmgrp_href - href used to create a VM group, or None if not found
3219 """
3220 # Perform GET on resource pool to find 'add' link to create VMGroup
3221 # https://vcd-ip/api/admin/extension/providervdc/<providervdc id>/resourcePools
3222 vmgrp_href = None
3223 resp = self.perform_request(req_type="GET", url=url, headers=headers)
3224
3225 if resp.status_code != requests.codes.ok:
3226 self.logger.debug(
3227 "REST API call {} failed. Return status code {}".format(
3228 url, resp.status_code
3229 )
3230 )
3231 else:
3232 # Get the href to add vmGroup to vCD
3233 resp_xml = XmlElementTree.fromstring(resp.content)
3234 for child in resp_xml:
3235 if "VMWProviderVdcResourcePool" in child.tag:
3236 for schild in child:
3237 if "Link" in schild.tag:
3238 # Find href with type VMGroup and rel with add
3239 if (
3240 schild.attrib.get("type")
3241 == "application/vnd.vmware.admin.vmwVmGroupType+xml"
3242 and schild.attrib.get("rel") == "add"
3243 ):
3244 vmgrp_href = schild.attrib.get("href")
3245
3246 return vmgrp_href
3247
3248 def check_availibility_zone(self, az, respool_href, headers):
3249 """Method to verify requested av zone is present or not in provided
3250 resource pool
3251
3252 Args:
3253             az - name of hostgroup (availability zone)
3254 respool_href - Resource Pool href
3255 headers - Headers to make REST call
3256 Returns:
3257             az_found - True if the availability zone is found, else False
3258 """
3259 az_found = False
3260 headers["Accept"] = "application/*+xml;version=27.0"
3261 resp = self.perform_request(req_type="GET", url=respool_href, headers=headers)
3262
3263 if resp.status_code != requests.codes.ok:
3264 self.logger.debug(
3265 "REST API call {} failed. Return status code {}".format(
3266 respool_href, resp.status_code
3267 )
3268 )
3269 else:
3270 # Get the href to hostGroups and find provided hostGroup is present in it
3271 resp_xml = XmlElementTree.fromstring(resp.content)
3272
3273 for child in resp_xml:
3274 if "VMWProviderVdcResourcePool" in child.tag:
3275 for schild in child:
3276 if "Link" in schild.tag:
3277 if (
3278 schild.attrib.get("type")
3279 == "application/vnd.vmware.admin.vmwHostGroupsType+xml"
3280 ):
3281 hostGroup_href = schild.attrib.get("href")
3282 hg_resp = self.perform_request(
3283 req_type="GET", url=hostGroup_href, headers=headers
3284 )
3285
3286 if hg_resp.status_code != requests.codes.ok:
3287 self.logger.debug(
3288 "REST API call {} failed. Return status code {}".format(
3289 hostGroup_href, hg_resp.status_code
3290 )
3291 )
3292 else:
3293 hg_resp_xml = XmlElementTree.fromstring(
3294 hg_resp.content
3295 )
3296 for hostGroup in hg_resp_xml:
3297 if "HostGroup" in hostGroup.tag:
3298 if hostGroup.attrib.get("name") == az:
3299 az_found = True
3300 break
3301
3302 return az_found
3303
3304 def get_pvdc_for_org(self, org_vdc, headers):
3305 """This method gets provider vdc references from organisation
3306
3307 Args:
3308 org_vdc - name of the organisation VDC to find pvdc
3309 headers - headers to make REST call
3310
3311 Returns:
3312 None - if no pvdc href found else
3313 pvdc_href - href to pvdc
3314 """
3315 # Get provider VDC references from vCD
3316 pvdc_href = None
3317 # url = '<vcd url>/api/admin/extension/providerVdcReferences'
3318 url_list = [self.url, "/api/admin/extension/providerVdcReferences"]
3319 url = "".join(url_list)
3320
3321 response = self.perform_request(req_type="GET", url=url, headers=headers)
3322 if response.status_code != requests.codes.ok:
3323 self.logger.debug(
3324 "REST API call {} failed. Return status code {}".format(
3325 url, response.status_code
3326 )
3327 )
3328 else:
3329 xmlroot_response = XmlElementTree.fromstring(response.text)
3330 for child in xmlroot_response:
3331 if "ProviderVdcReference" in child.tag:
3332 pvdc_href = child.attrib.get("href")
3333 # Get vdcReferences to find org
3334 pvdc_resp = self.perform_request(
3335 req_type="GET", url=pvdc_href, headers=headers
3336 )
3337
3338 if pvdc_resp.status_code != requests.codes.ok:
3339 raise vimconn.VimConnException(
3340 "REST API call {} failed. "
3341 "Return status code {}".format(url, pvdc_resp.status_code)
3342 )
3343
3344 pvdc_resp_xml = XmlElementTree.fromstring(pvdc_resp.content)
3345 for child in pvdc_resp_xml:
3346 if "Link" in child.tag:
3347 if (
3348 child.attrib.get("type")
3349 == "application/vnd.vmware.admin.vdcReferences+xml"
3350 ):
3351 vdc_href = child.attrib.get("href")
3352
3353 # Check if provided org is present in vdc
3354 vdc_resp = self.perform_request(
3355 req_type="GET", url=vdc_href, headers=headers
3356 )
3357
3358 if vdc_resp.status_code != requests.codes.ok:
3359 raise vimconn.VimConnException(
3360 "REST API call {} failed. "
3361 "Return status code {}".format(
3362 url, vdc_resp.status_code
3363 )
3364 )
3365 vdc_resp_xml = XmlElementTree.fromstring(
3366 vdc_resp.content
3367 )
3368
3369 for child in vdc_resp_xml:
3370 if "VdcReference" in child.tag:
3371 if child.attrib.get("name") == org_vdc:
3372 return pvdc_href
3373
3374 def get_resource_pool_details(self, pvdc_href, headers):
3375 """Method to get resource pool information.
3376         Host groups are a property of the resource pool.
3377 To get host groups, we need to GET details of resource pool.
3378
3379 Args:
3380 pvdc_href: href to pvdc details
3381 headers: headers
3382
3383 Returns:
3384 respool_href - Returns href link reference to resource pool
3385 """
3386 respool_href = None
3387 resp = self.perform_request(req_type="GET", url=pvdc_href, headers=headers)
3388
3389 if resp.status_code != requests.codes.ok:
3390 self.logger.debug(
3391 "REST API call {} failed. Return status code {}".format(
3392 pvdc_href, resp.status_code
3393 )
3394 )
3395 else:
3396 respool_resp_xml = XmlElementTree.fromstring(resp.content)
3397 for child in respool_resp_xml:
3398 if "Link" in child.tag:
3399 if (
3400 child.attrib.get("type")
3401 == "application/vnd.vmware.admin.vmwProviderVdcResourcePoolSet+xml"
3402 ):
3403 respool_href = child.attrib.get("href")
3404 break
3405
3406 return respool_href
3407
3408 def log_message(self, msg):
3409 """
3410 Method to log error messages related to Affinity rule creation
3411 in new_vminstance & raise Exception
3412 Args :
3413 msg - Error message to be logged
3414
3415 """
3416 # get token to connect vCD as a normal user
3417 self.get_token()
3418 self.logger.debug(msg)
3419
3420 raise vimconn.VimConnException(msg)
3421
3422 # #
3423 # #
3424 # # based on current discussion
3425 # #
3426 # #
3427 # # server:
3428 # created: '2016-09-08T11:51:58'
3429 # description: simple-instance.linux1.1
3430 # flavor: ddc6776e-75a9-11e6-ad5f-0800273e724c
3431 # hostId: e836c036-74e7-11e6-b249-0800273e724c
3432 # image: dde30fe6-75a9-11e6-ad5f-0800273e724c
3433 # status: ACTIVE
3434 # error_msg:
3435 # interfaces: …
3436 #
3437 def get_vminstance(self, vim_vm_uuid=None):
3438 """Returns the VM instance information from VIM"""
3439 self.logger.debug("Client requesting vm instance {} ".format(vim_vm_uuid))
3440
3441 _, vdc = self.get_vdc_details()
3442 if vdc is None:
3443 raise vimconn.VimConnConnectionException(
3444 "Failed to get a reference of VDC for a tenant {}".format(
3445 self.tenant_name
3446 )
3447 )
3448
3449 vm_info_dict = self.get_vapp_details_rest(vapp_uuid=vim_vm_uuid)
3450 if not vm_info_dict:
3451 self.logger.debug(
3452 "get_vminstance(): Failed to get vApp name by UUID {}".format(
3453 vim_vm_uuid
3454 )
3455 )
3456 raise vimconn.VimConnNotFoundException(
3457 "Failed to get vApp name by UUID {}".format(vim_vm_uuid)
3458 )
3459
3460 status_key = vm_info_dict["status"]
3461 error = ""
3462 try:
3463 vm_dict = {
3464 "created": vm_info_dict["created"],
3465 "description": vm_info_dict["name"],
3466 "status": vcdStatusCode2manoFormat[int(status_key)],
3467 "hostId": vm_info_dict["vmuuid"],
3468 "error_msg": error,
3469 "vim_info": yaml.safe_dump(vm_info_dict),
3470 "interfaces": [],
3471 }
3472
3473 if "interfaces" in vm_info_dict:
3474 vm_dict["interfaces"] = vm_info_dict["interfaces"]
3475 else:
3476 vm_dict["interfaces"] = []
3477 except KeyError:
3478 vm_dict = {
3479 "created": "",
3480 "description": "",
3481 "status": vcdStatusCode2manoFormat[int(-1)],
3482 "hostId": vm_info_dict["vmuuid"],
3483 "error_msg": "Inconsistency state",
3484 "vim_info": yaml.safe_dump(vm_info_dict),
3485 "interfaces": [],
3486 }
3487
3488 return vm_dict
3489
3490 def delete_vminstance(self, vm__vim_uuid, created_items=None):
3491 """Method poweroff and remove VM instance from vcloud director network.
3492
3493 Args:
3494 vm__vim_uuid: VM UUID
3495
3496 Returns:
3497 Returns the instance identifier
3498 """
3499 self.logger.debug(
3500 "Client requesting delete vm instance {} ".format(vm__vim_uuid)
3501 )
3502
3503 _, vdc = self.get_vdc_details()
3504 vdc_obj = VDC(self.client, href=vdc.get("href"))
3505 if vdc_obj is None:
3506 self.logger.debug(
3507 "delete_vminstance(): Failed to get a reference of VDC for a tenant {}".format(
3508 self.tenant_name
3509 )
3510 )
3511 raise vimconn.VimConnException(
3512 "delete_vminstance(): Failed to get a reference of VDC for a tenant {}".format(
3513 self.tenant_name
3514 )
3515 )
3516
3517 try:
3518 vapp_name = self.get_namebyvappid(vm__vim_uuid)
3519 if vapp_name is None:
3520 self.logger.debug(
3521 "delete_vminstance(): Failed to get vm by given {} vm uuid".format(
3522 vm__vim_uuid
3523 )
3524 )
3525
3526 return (
3527 -1,
3528 "delete_vminstance(): Failed to get vm by given {} vm uuid".format(
3529 vm__vim_uuid
3530 ),
3531 )
3532
3533 self.logger.info(
3534 "Deleting vApp {} and UUID {}".format(vapp_name, vm__vim_uuid)
3535 )
3536 vapp_resource = vdc_obj.get_vapp(vapp_name)
3537 vapp = VApp(self.client, resource=vapp_resource)
3538
3539 # Delete vApp and wait for status change if task executed and vApp is None.
3540 if vapp:
3541 if vapp_resource.get("deployed") == "true":
3542 self.logger.info("Powering off vApp {}".format(vapp_name))
3543 # Power off vApp
3544 powered_off = False
3545 wait_time = 0
3546
3547 while wait_time <= MAX_WAIT_TIME:
3548 power_off_task = vapp.power_off()
3549 result = self.client.get_task_monitor().wait_for_success(
3550 task=power_off_task
3551 )
3552
3553 if result.get("status") == "success":
3554 powered_off = True
3555 break
3556 else:
3557 self.logger.info(
3558 "Wait for vApp {} to power off".format(vapp_name)
3559 )
3560 time.sleep(INTERVAL_TIME)
3561
3562 wait_time += INTERVAL_TIME
3563
3564 if not powered_off:
3565 self.logger.debug(
3566 "delete_vminstance(): Failed to power off VM instance {} ".format(
3567 vm__vim_uuid
3568 )
3569 )
3570 else:
3571 self.logger.info(
3572 "delete_vminstance(): Powered off VM instance {} ".format(
3573 vm__vim_uuid
3574 )
3575 )
3576
3577 # Undeploy vApp
3578 self.logger.info("Undeploy vApp {}".format(vapp_name))
3579 wait_time = 0
3580 undeployed = False
3581 while wait_time <= MAX_WAIT_TIME:
3582 vapp = VApp(self.client, resource=vapp_resource)
3583 if not vapp:
3584 self.logger.debug(
3585 "delete_vminstance(): Failed to get vm by given {} vm uuid".format(
3586 vm__vim_uuid
3587 )
3588 )
3589
3590 return (
3591 -1,
3592 "delete_vminstance(): Failed to get vm by given {} vm uuid".format(
3593 vm__vim_uuid
3594 ),
3595 )
3596
3597 undeploy_task = vapp.undeploy()
3598 result = self.client.get_task_monitor().wait_for_success(
3599 task=undeploy_task
3600 )
3601
3602 if result.get("status") == "success":
3603 undeployed = True
3604 break
3605 else:
3606 self.logger.debug(
3607 "Wait for vApp {} to undeploy".format(vapp_name)
3608 )
3609 time.sleep(INTERVAL_TIME)
3610
3611 wait_time += INTERVAL_TIME
3612
3613 if not undeployed:
3614 self.logger.debug(
3615 "delete_vminstance(): Failed to undeploy vApp {} ".format(
3616 vm__vim_uuid
3617 )
3618 )
3619
3620 # delete vapp
3621 self.logger.info("Start deletion of vApp {} ".format(vapp_name))
3622 if vapp is not None:
3623 wait_time = 0
3624 result = False
3625
3626 while wait_time <= MAX_WAIT_TIME:
3627 vapp = VApp(self.client, resource=vapp_resource)
3628 if not vapp:
3629 self.logger.debug(
3630 "delete_vminstance(): Failed to get vm by given {} vm uuid".format(
3631 vm__vim_uuid
3632 )
3633 )
3634
3635 return (
3636 -1,
3637 "delete_vminstance(): Failed to get vm by given {} vm uuid".format(
3638 vm__vim_uuid
3639 ),
3640 )
3641
3642 delete_task = vdc_obj.delete_vapp(vapp.name, force=True)
3643 result = self.client.get_task_monitor().wait_for_success(
3644 task=delete_task
3645 )
3646 if result.get("status") == "success":
3647 break
3648 else:
3649 self.logger.debug(
3650 "Wait for vApp {} to delete".format(vapp_name)
3651 )
3652 time.sleep(INTERVAL_TIME)
3653
3654 wait_time += INTERVAL_TIME
3655
3656 if result is None:
3657 self.logger.debug(
3658                         "delete_vminstance(): Failed to delete uuid {} ".format(
3659 vm__vim_uuid
3660 )
3661 )
3662 else:
3663 self.logger.info(
3664                             "Deleted vm instance {} successfully".format(vm__vim_uuid)
3665 )
3666 config_drive_catalog_name, config_drive_catalog_id = (
3667 "cfg_drv-" + vm__vim_uuid,
3668 None,
3669 )
3670 catalog_list = self.get_image_list()
3671
3672 try:
3673 config_drive_catalog_id = [
3674 catalog_["id"]
3675 for catalog_ in catalog_list
3676 if catalog_["name"] == config_drive_catalog_name
3677 ][0]
3678 except IndexError:
3679 pass
3680
3681 if config_drive_catalog_id:
3682 self.logger.debug(
3683 "delete_vminstance(): Found a config drive catalog {} matching "
3684 'vapp_name"{}". Deleting it.'.format(
3685 config_drive_catalog_id, vapp_name
3686 )
3687 )
3688 self.delete_image(config_drive_catalog_id)
3689
3690 return vm__vim_uuid
3691 except Exception:
3692 self.logger.debug(traceback.format_exc())
3693
3694 raise vimconn.VimConnException(
3695                 "delete_vminstance(): Failed to delete vm instance {}".format(vm__vim_uuid)
3696 )
3697
3698 def refresh_vms_status(self, vm_list):
3699 """Get the status of the virtual machines and their interfaces/ports
3700 Params: the list of VM identifiers
3701 Returns a dictionary with:
3702 vm_id: #VIM id of this Virtual Machine
3703 status: #Mandatory. Text with one of:
3704 # DELETED (not found at vim)
3705 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
3706 # OTHER (Vim reported other status not understood)
3707 # ERROR (VIM indicates an ERROR status)
3708 # ACTIVE, PAUSED, SUSPENDED, INACTIVE (not running),
3709 # CREATING (on building process), ERROR
3710                     # ACTIVE:NoMgmtIP (Active but none of its interfaces has an IP address)
3711 #
3712 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
3713 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
3714 interfaces:
3715 - vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
3716 mac_address: #Text format XX:XX:XX:XX:XX:XX
3717 vim_net_id: #network id where this interface is connected
3718 vim_interface_id: #interface/port VIM id
3719 ip_address: #null, or text with IPv4, IPv6 address
3720 """
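        # Illustrative sketch of the structure returned below (values are invented
        # for documentation purposes only; keys follow the docstring above):
        #
        #   {
        #       "<vm-uuid>": {
        #           "status": "ACTIVE",
        #           "error_msg": "ACTIVE",
        #           "vim_info": "<yaml dump of the vm details>",
        #           "interfaces": [
        #               {
        #                   "mac_address": "00:50:56:aa:bb:cc",
        #                   "vim_net_id": "<network-uuid>",
        #                   "vim_interface_id": "<network-uuid>",
        #                   "ip_address": "10.0.0.10",
        #               }
        #           ],
        #       }
        #   }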
3721 self.logger.debug("Client requesting refresh vm status for {} ".format(vm_list))
3722
3723 _, vdc = self.get_vdc_details()
3724 if vdc is None:
3725 raise vimconn.VimConnException(
3726 "Failed to get a reference of VDC for a tenant {}".format(
3727 self.tenant_name
3728 )
3729 )
3730
3731 vms_dict = {}
3732 nsx_edge_list = []
3733 for vmuuid in vm_list:
3734 vapp_name = self.get_namebyvappid(vmuuid)
3735 if vapp_name is not None:
3736 try:
3737 vm_pci_details = self.get_vm_pci_details(vmuuid)
3738 vdc_obj = VDC(self.client, href=vdc.get("href"))
3739 vapp_resource = vdc_obj.get_vapp(vapp_name)
3740 the_vapp = VApp(self.client, resource=vapp_resource)
3741
3742 vm_details = {}
3743 for vm in the_vapp.get_all_vms():
3744 headers = {
3745 "Accept": "application/*+xml;version=" + API_VERSION,
3746 "x-vcloud-authorization": self.client._session.headers[
3747 "x-vcloud-authorization"
3748 ],
3749 }
3750 response = self.perform_request(
3751 req_type="GET", url=vm.get("href"), headers=headers
3752 )
3753
3754 if response.status_code != 200:
3755 self.logger.error(
3756                                 "refresh_vms_status : REST call {} failed reason : {} "
3757 "status code : {}".format(
3758 vm.get("href"), response.text, response.status_code
3759 )
3760 )
3761 raise vimconn.VimConnException(
3762 "refresh_vms_status : Failed to get VM details"
3763 )
3764
3765 xmlroot = XmlElementTree.fromstring(response.text)
3766 result = response.text.replace("\n", " ")
3767 hdd_match = re.search(
3768                             r'vcloud:capacity="(\d+)"\svcloud:storageProfileOverrideVmDefault=',
3769 result,
3770 )
3771
3772 if hdd_match:
3773 hdd_mb = hdd_match.group(1)
3774 vm_details["hdd_mb"] = int(hdd_mb) if hdd_mb else None
3775
3776 cpus_match = re.search(
3777                             r"<rasd:Description>Number of Virtual CPUs</.*?>(\d+)</rasd:VirtualQuantity>",
3778 result,
3779 )
3780
3781 if cpus_match:
3782 cpus = cpus_match.group(1)
3783 vm_details["cpus"] = int(cpus) if cpus else None
3784
3785 memory_mb = re.search(
3786                             r"<rasd:Description>Memory Size</.*?>(\d+)</rasd:VirtualQuantity>",
3787 result,
3788 ).group(1)
3789 vm_details["memory_mb"] = int(memory_mb) if memory_mb else None
3790 vm_details["status"] = vcdStatusCode2manoFormat[
3791 int(xmlroot.get("status"))
3792 ]
3793 vm_details["id"] = xmlroot.get("id")
3794 vm_details["name"] = xmlroot.get("name")
3795 vm_info = [vm_details]
3796
3797 if vm_pci_details:
3798 vm_info[0].update(vm_pci_details)
3799
3800 vm_dict = {
3801 "status": vcdStatusCode2manoFormat[
3802 int(vapp_resource.get("status"))
3803 ],
3804 "error_msg": vcdStatusCode2manoFormat[
3805 int(vapp_resource.get("status"))
3806 ],
3807 "vim_info": yaml.safe_dump(vm_info),
3808 "interfaces": [],
3809 }
3810
3811 # get networks
3812 vm_ip = None
3813 vm_mac = None
3814 networks = re.findall(
3815 "<NetworkConnection needsCustomization=.*?</NetworkConnection>",
3816 result,
3817 )
3818
3819 for network in networks:
3820 mac_s = re.search("<MACAddress>(.*?)</MACAddress>", network)
3821 vm_mac = mac_s.group(1) if mac_s else None
3822 ip_s = re.search("<IpAddress>(.*?)</IpAddress>", network)
3823 vm_ip = ip_s.group(1) if ip_s else None
3824
3825 if vm_ip is None:
3826 if not nsx_edge_list:
3827 nsx_edge_list = self.get_edge_details()
3828 if nsx_edge_list is None:
3829 raise vimconn.VimConnException(
3830 "refresh_vms_status:"
3831 "Failed to get edge details from NSX Manager"
3832 )
3833
3834 if vm_mac is not None:
3835 vm_ip = self.get_ipaddr_from_NSXedge(
3836 nsx_edge_list, vm_mac
3837 )
3838
3839 net_s = re.search('network="(.*?)"', network)
3840 network_name = net_s.group(1) if net_s else None
3841 vm_net_id = self.get_network_id_by_name(network_name)
3842 interface = {
3843 "mac_address": vm_mac,
3844 "vim_net_id": vm_net_id,
3845 "vim_interface_id": vm_net_id,
3846 "ip_address": vm_ip,
3847 }
3848 vm_dict["interfaces"].append(interface)
3849
3850 # add a vm to vm dict
3851 vms_dict.setdefault(vmuuid, vm_dict)
3852 self.logger.debug("refresh_vms_status : vm info {}".format(vm_dict))
3853 except Exception as exp:
3854 self.logger.debug("Error in response {}".format(exp))
3855 self.logger.debug(traceback.format_exc())
3856
3857 return vms_dict
3858
3859 def get_edge_details(self):
3860 """Get the NSX edge list from NSX Manager
3861 Returns list of NSX edges
3862 """
3863 edge_list = []
3864 rheaders = {"Content-Type": "application/xml"}
3865 nsx_api_url = "/api/4.0/edges"
3866
3867 self.logger.debug(
3868 "Get edge details from NSX Manager {} {}".format(
3869 self.nsx_manager, nsx_api_url
3870 )
3871 )
3872
3873 try:
3874 resp = requests.get(
3875 self.nsx_manager + nsx_api_url,
3876 auth=(self.nsx_user, self.nsx_password),
3877 verify=False,
3878 headers=rheaders,
3879 )
3880 if resp.status_code == requests.codes.ok:
3881 paged_Edge_List = XmlElementTree.fromstring(resp.text)
3882 for edge_pages in paged_Edge_List:
3883 if edge_pages.tag == "edgePage":
3884 for edge_summary in edge_pages:
3885 if edge_summary.tag == "pagingInfo":
3886 for element in edge_summary:
3887 if (
3888 element.tag == "totalCount"
3889 and element.text == "0"
3890 ):
3891 raise vimconn.VimConnException(
3892 "get_edge_details: No NSX edges details found: {}".format(
3893 self.nsx_manager
3894 )
3895 )
3896
3897 if edge_summary.tag == "edgeSummary":
3898 for element in edge_summary:
3899 if element.tag == "id":
3900 edge_list.append(element.text)
3901 else:
3902 raise vimconn.VimConnException(
3903 "get_edge_details: No NSX edge details found: {}".format(
3904 self.nsx_manager
3905 )
3906 )
3907
3908 if not edge_list:
3909 raise vimconn.VimConnException(
3910 "get_edge_details: "
3911 "No NSX edge details found: {}".format(self.nsx_manager)
3912 )
3913 else:
3914 self.logger.debug(
3915 "get_edge_details: Found NSX edges {}".format(edge_list)
3916 )
3917
3918 return edge_list
3919 else:
3920 self.logger.debug(
3921 "get_edge_details: "
3922 "Failed to get NSX edge details from NSX Manager: {}".format(
3923 resp.content
3924 )
3925 )
3926
3927 return None
3928
3929 except Exception as exp:
3930 self.logger.debug(
3931 "get_edge_details: "
3932 "Failed to get NSX edge details from NSX Manager: {}".format(exp)
3933 )
3934 raise vimconn.VimConnException(
3935 "get_edge_details: "
3936 "Failed to get NSX edge details from NSX Manager: {}".format(exp)
3937 )
3938
3939 def get_ipaddr_from_NSXedge(self, nsx_edges, mac_address):
3940 """Get IP address details from NSX edges, using the MAC address
3941 PARAMS: nsx_edges : List of NSX edges
3942 mac_address : Find IP address corresponding to this MAC address
3943         Returns: IP address corresponding to the provided MAC address
3944 """
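        # Minimal usage sketch (edge ids and the MAC address are placeholders):
        #
        #   edges = self.get_edge_details()            # e.g. ["edge-1", "edge-2"]
        #   ip = self.get_ipaddr_from_NSXedge(edges, "00:50:56:aa:bb:cc")
        #   # ip is the leased IPv4 address string, or None if no lease matches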
3945 ip_addr = None
3946 rheaders = {"Content-Type": "application/xml"}
3947
3948 self.logger.debug("get_ipaddr_from_NSXedge: Finding IP addr from NSX edge")
3949
3950 try:
3951 for edge in nsx_edges:
3952 nsx_api_url = "/api/4.0/edges/" + edge + "/dhcp/leaseInfo"
3953
3954 resp = requests.get(
3955 self.nsx_manager + nsx_api_url,
3956 auth=(self.nsx_user, self.nsx_password),
3957 verify=False,
3958 headers=rheaders,
3959 )
3960
3961 if resp.status_code == requests.codes.ok:
3962 dhcp_leases = XmlElementTree.fromstring(resp.text)
3963 for child in dhcp_leases:
3964 if child.tag == "dhcpLeaseInfo":
3965 dhcpLeaseInfo = child
3966 for leaseInfo in dhcpLeaseInfo:
3967 for elem in leaseInfo:
3968 if (elem.tag) == "macAddress":
3969 edge_mac_addr = elem.text
3970
3971 if (elem.tag) == "ipAddress":
3972 ip_addr = elem.text
3973
3974 if edge_mac_addr is not None:
3975 if edge_mac_addr == mac_address:
3976 self.logger.debug(
3977 "Found ip addr {} for mac {} at NSX edge {}".format(
3978 ip_addr, mac_address, edge
3979 )
3980 )
3981
3982 return ip_addr
3983 else:
3984 self.logger.debug(
3985 "get_ipaddr_from_NSXedge: "
3986 "Error occurred while getting DHCP lease info from NSX Manager: {}".format(
3987 resp.content
3988 )
3989 )
3990
3991 self.logger.debug(
3992 "get_ipaddr_from_NSXedge: No IP addr found in any NSX edge"
3993 )
3994
3995 return None
3996
3997 except XmlElementTree.ParseError as Err:
3998 self.logger.debug(
3999                 "ParseError in response from NSX Manager {}".format(Err),
4000 exc_info=True,
4001 )
4002
4003 def action_vminstance(self, vm__vim_uuid=None, action_dict=None, created_items={}):
4004         """Send an action to a VM instance in the VIM
4005 Returns the vm_id if the action was successfully sent to the VIM"""
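        # Supported action_dict keys handled below: "start", "rebuild", "pause",
        # "resume", "shutoff"/"shutdown", "forceOff" and "reboot".
        # Illustrative call (only key membership is inspected, not the value):
        #
        #   self.action_vminstance(vm__vim_uuid="<vm-uuid>", action_dict={"start": None})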
4006
4007 self.logger.debug(
4008 "Received action for vm {} and action dict {}".format(
4009 vm__vim_uuid, action_dict
4010 )
4011 )
4012
4013 if vm__vim_uuid is None or action_dict is None:
4014 raise vimconn.VimConnException("Invalid request. VM id or action is None.")
4015
4016 _, vdc = self.get_vdc_details()
4017 if vdc is None:
4018 raise vimconn.VimConnException(
4019 "Failed to get a reference of VDC for a tenant {}".format(
4020 self.tenant_name
4021 )
4022 )
4023
4024 vapp_name = self.get_namebyvappid(vm__vim_uuid)
4025 if vapp_name is None:
4026 self.logger.debug(
4027 "action_vminstance(): Failed to get vm by given {} vm uuid".format(
4028 vm__vim_uuid
4029 )
4030 )
4031
4032 raise vimconn.VimConnException(
4033 "Failed to get vm by given {} vm uuid".format(vm__vim_uuid)
4034 )
4035 else:
4036 self.logger.info(
4037 "Action_vminstance vApp {} and UUID {}".format(vapp_name, vm__vim_uuid)
4038 )
4039
4040 try:
4041 vdc_obj = VDC(self.client, href=vdc.get("href"))
4042 vapp_resource = vdc_obj.get_vapp(vapp_name)
4043 vapp = VApp(self.client, resource=vapp_resource)
4044
4045 if "start" in action_dict:
4046 self.logger.info(
4047 "action_vminstance: Power on vApp: {}".format(vapp_name)
4048 )
4049 poweron_task = self.power_on_vapp(vm__vim_uuid, vapp_name)
4050 result = self.client.get_task_monitor().wait_for_success(
4051 task=poweron_task
4052 )
4053 self.instance_actions_result("start", result, vapp_name)
4054 elif "rebuild" in action_dict:
4055 self.logger.info(
4056 "action_vminstance: Rebuild vApp: {}".format(vapp_name)
4057 )
4058 rebuild_task = vapp.deploy(power_on=True)
4059 result = self.client.get_task_monitor().wait_for_success(
4060 task=rebuild_task
4061 )
4062 self.instance_actions_result("rebuild", result, vapp_name)
4063 elif "pause" in action_dict:
4064 self.logger.info("action_vminstance: pause vApp: {}".format(vapp_name))
4065 pause_task = vapp.undeploy(action="suspend")
4066 result = self.client.get_task_monitor().wait_for_success(
4067 task=pause_task
4068 )
4069 self.instance_actions_result("pause", result, vapp_name)
4070 elif "resume" in action_dict:
4071 self.logger.info("action_vminstance: resume vApp: {}".format(vapp_name))
4072 poweron_task = self.power_on_vapp(vm__vim_uuid, vapp_name)
4073 result = self.client.get_task_monitor().wait_for_success(
4074 task=poweron_task
4075 )
4076 self.instance_actions_result("resume", result, vapp_name)
4077 elif "shutoff" in action_dict or "shutdown" in action_dict:
4078 action_name, _ = list(action_dict.items())[0]
4079 self.logger.info(
4080 "action_vminstance: {} vApp: {}".format(action_name, vapp_name)
4081 )
4082 shutdown_task = vapp.shutdown()
4083 result = self.client.get_task_monitor().wait_for_success(
4084 task=shutdown_task
4085 )
4086 if action_name == "shutdown":
4087 self.instance_actions_result("shutdown", result, vapp_name)
4088 else:
4089 self.instance_actions_result("shutoff", result, vapp_name)
4090 elif "forceOff" in action_dict:
4091 result = vapp.undeploy(action="powerOff")
4092 self.instance_actions_result("forceOff", result, vapp_name)
4093 elif "reboot" in action_dict:
4094 self.logger.info("action_vminstance: reboot vApp: {}".format(vapp_name))
4095 reboot_task = vapp.reboot()
4096 self.client.get_task_monitor().wait_for_success(task=reboot_task)
4097 else:
4098 raise vimconn.VimConnException(
4099 "action_vminstance: Invalid action {} or action is None.".format(
4100 action_dict
4101 )
4102 )
4103
4104 return vm__vim_uuid
4105 except Exception as exp:
4106 self.logger.debug("action_vminstance: Failed with Exception {}".format(exp))
4107
4108 raise vimconn.VimConnException(
4109 "action_vminstance: Failed with Exception {}".format(exp)
4110 )
4111
4112 def instance_actions_result(self, action, result, vapp_name):
4113 if result.get("status") == "success":
4114 self.logger.info(
4115                 "action_vminstance: Successfully {} the vApp: {}".format(
4116 action, vapp_name
4117 )
4118 )
4119 else:
4120 self.logger.error(
4121 "action_vminstance: Failed to {} vApp: {}".format(action, vapp_name)
4122 )
4123
4124 def get_vminstance_console(self, vm_id, console_type="novnc"):
4125 """
4126 Get a console for the virtual machine
4127 Params:
4128 vm_id: uuid of the VM
4129 console_type, can be:
4130 "novnc" (by default), "xvpvnc" for VNC types,
4131 "rdp-html5" for RDP types, "spice-html5" for SPICE types
4132 Returns dict with the console parameters:
4133 protocol: ssh, ftp, http, https, ...
4134 server: usually ip address
4135 port: the http, ssh, ... port
4136 suffix: extra text, e.g. the http path and query string
4137 """
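        # Example of the dictionary built below for the default "novnc" type
        # (host, port and ticket values are placeholders):
        #
        #   {
        #       "protocol": "https",
        #       "server": "esxi-host.example.com",
        #       "port": "902",
        #       "suffix": "vm-1234/<mks-ticket>",
        #   }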
4138 console_dict = {}
4139
4140 if console_type is None or console_type == "novnc":
4141 url_rest_call = "{}/api/vApp/vm-{}/screen/action/acquireMksTicket".format(
4142 self.url, vm_id
4143 )
4144 headers = {
4145 "Accept": "application/*+xml;version=" + API_VERSION,
4146 "x-vcloud-authorization": self.client._session.headers[
4147 "x-vcloud-authorization"
4148 ],
4149 }
4150 response = self.perform_request(
4151 req_type="POST", url=url_rest_call, headers=headers
4152 )
4153
4154 if response.status_code == 403:
4155 response = self.retry_rest("GET", url_rest_call)
4156
4157 if response.status_code != 200:
4158 self.logger.error(
4159                     "REST call {} failed reason : {} "
4160 "status code : {}".format(
4161 url_rest_call, response.text, response.status_code
4162 )
4163 )
4164 raise vimconn.VimConnException(
4165 "get_vminstance_console : Failed to get " "VM Mks ticket details"
4166 )
4167
4168 s = re.search("<Host>(.*?)</Host>", response.text)
4169 console_dict["server"] = s.group(1) if s else None
4170             s1 = re.search(r"<Port>(\d+)</Port>", response.text)
4171 console_dict["port"] = s1.group(1) if s1 else None
4172 url_rest_call = "{}/api/vApp/vm-{}/screen/action/acquireTicket".format(
4173 self.url, vm_id
4174 )
4175 headers = {
4176 "Accept": "application/*+xml;version=" + API_VERSION,
4177 "x-vcloud-authorization": self.client._session.headers[
4178 "x-vcloud-authorization"
4179 ],
4180 }
4181 response = self.perform_request(
4182 req_type="POST", url=url_rest_call, headers=headers
4183 )
4184
4185 if response.status_code == 403:
4186 response = self.retry_rest("GET", url_rest_call)
4187
4188 if response.status_code != 200:
4189 self.logger.error(
4190                     "REST call {} failed reason : {} "
4191 "status code : {}".format(
4192 url_rest_call, response.text, response.status_code
4193 )
4194 )
4195 raise vimconn.VimConnException(
4196 "get_vminstance_console : Failed to get " "VM console details"
4197 )
4198
4199             s = re.search(r">.*?/(vm-\d+.*)</", response.text)
4200 console_dict["suffix"] = s.group(1) if s else None
4201 console_dict["protocol"] = "https"
4202
4203 return console_dict
4204
4205 # NOT USED METHODS in current version
4206
4207 def host_vim2gui(self, host, server_dict):
4208 """Transform host dictionary from VIM format to GUI format,
4209 and append to the server_dict
4210 """
4211 raise vimconn.VimConnNotImplemented("Should have implemented this")
4212
4213 def get_hosts_info(self):
4214 """Get the information of deployed hosts
4215 Returns the hosts content"""
4216 raise vimconn.VimConnNotImplemented("Should have implemented this")
4217
4218 def get_hosts(self, vim_tenant):
4219 """Get the hosts and deployed instances
4220 Returns the hosts content"""
4221 raise vimconn.VimConnNotImplemented("Should have implemented this")
4222
4223 def get_processor_rankings(self):
4224 """Get the processor rankings in the VIM database"""
4225 raise vimconn.VimConnNotImplemented("Should have implemented this")
4226
4227 def new_host(self, host_data):
4228 """Adds a new host to VIM"""
4229 """Returns status code of the VIM response"""
4230 raise vimconn.VimConnNotImplemented("Should have implemented this")
4231
4232 def new_external_port(self, port_data):
4233         """Adds an external port to VIM"""
4234 """Returns the port identifier"""
4235 raise vimconn.VimConnNotImplemented("Should have implemented this")
4236
4237 def new_external_network(self, net_name, net_type):
4238         """Adds an external network to VIM (shared)"""
4239 """Returns the network identifier"""
4240 raise vimconn.VimConnNotImplemented("Should have implemented this")
4241
4242 def connect_port_network(self, port_id, network_id, admin=False):
4243         """Connects an external port to a network"""
4244 """Returns status code of the VIM response"""
4245 raise vimconn.VimConnNotImplemented("Should have implemented this")
4246
4247 def new_vminstancefromJSON(self, vm_data):
4248 """Adds a VM instance to VIM"""
4249 """Returns the instance identifier"""
4250 raise vimconn.VimConnNotImplemented("Should have implemented this")
4251
4252 def get_network_name_by_id(self, network_uuid=None):
4253         """Method gets the vCloud Director network name for the supplied uuid.
4254
4255 Args:
4256 network_uuid: network_id
4257
4258 Returns:
4259             The network name, or None if not found.
4260 """
4261
4262 if not network_uuid:
4263 return None
4264
4265 try:
4266 org_dict = self.get_org(self.org_uuid)
4267 if "networks" in org_dict:
4268 org_network_dict = org_dict["networks"]
4269
4270 for net_uuid in org_network_dict:
4271 if net_uuid == network_uuid:
4272 return org_network_dict[net_uuid]
4273 except Exception:
4274 self.logger.debug("Exception in get_network_name_by_id")
4275 self.logger.debug(traceback.format_exc())
4276
4277 return None
4278
4279 def get_network_id_by_name(self, network_name=None):
4280         """Method gets the vCloud Director network uuid for the supplied name.
4281
4282 Args:
4283 network_name: network_name
4284 Returns:
4285             The network uuid (network_id), or None if not found.
4287 """
4288 if not network_name:
4289 self.logger.debug("get_network_id_by_name() : Network name is empty")
4290 return None
4291
4292 try:
4293 org_dict = self.get_org(self.org_uuid)
4294 if org_dict and "networks" in org_dict:
4295 org_network_dict = org_dict["networks"]
4296
4297 for net_uuid, net_name in org_network_dict.items():
4298 if net_name == network_name:
4299 return net_uuid
4300
4301 except KeyError as exp:
4302 self.logger.debug("get_network_id_by_name() : KeyError- {} ".format(exp))
4303
4304 return None
4305
4306 def get_physical_network_by_name(self, physical_network_name):
4307 """
4308         Method returns the uuid of the physical network whose name is passed
4309 Args:
4310 physical_network_name: physical network name
4311 Returns:
4312 UUID of physical_network_name
4313 """
4314 try:
4315 client_as_admin = self.connect_as_admin()
4316
4317 if not client_as_admin:
4318 raise vimconn.VimConnConnectionException("Failed to connect vCD.")
4319
4320 url_list = [self.url, "/api/admin/vdc/", self.tenant_id]
4321 vm_list_rest_call = "".join(url_list)
4322
4323 if client_as_admin._session:
4324 headers = {
4325 "Accept": "application/*+xml;version=" + API_VERSION,
4326 "x-vcloud-authorization": client_as_admin._session.headers[
4327 "x-vcloud-authorization"
4328 ],
4329 }
4330 response = self.perform_request(
4331 req_type="GET", url=vm_list_rest_call, headers=headers
4332 )
4333 provider_network = None
4334 available_network = None
4335 # add_vdc_rest_url = None
4336
4337 if response.status_code != requests.codes.ok:
4338 self.logger.debug(
4339 "REST API call {} failed. Return status code {}".format(
4340 vm_list_rest_call, response.status_code
4341 )
4342 )
4343 return None
4344 else:
4345 try:
4346 vm_list_xmlroot = XmlElementTree.fromstring(response.text)
4347 for child in vm_list_xmlroot:
4348 if child.tag.split("}")[1] == "ProviderVdcReference":
4349 provider_network = child.attrib.get("href")
4350 # application/vnd.vmware.admin.providervdc+xml
4351
4352 if child.tag.split("}")[1] == "Link":
4353 if (
4354 child.attrib.get("type")
4355 == "application/vnd.vmware.vcloud.orgVdcNetwork+xml"
4356 and child.attrib.get("rel") == "add"
4357 ):
4358 child.attrib.get("href")
4359 except Exception:
4360 self.logger.debug(
4361                             "Failed to parse response for rest api call {}".format(
4362 vm_list_rest_call
4363 )
4364 )
4365                         self.logger.debug("Response body {}".format(response.text))
4366
4367 return None
4368
4369 # find pvdc provided available network
4370 response = self.perform_request(
4371 req_type="GET", url=provider_network, headers=headers
4372 )
4373
4374 if response.status_code != requests.codes.ok:
4375 self.logger.debug(
4376 "REST API call {} failed. Return status code {}".format(
4377 vm_list_rest_call, response.status_code
4378 )
4379 )
4380
4381 return None
4382
4383 try:
4384 vm_list_xmlroot = XmlElementTree.fromstring(response.text)
4385 for child in vm_list_xmlroot.iter():
4386 if child.tag.split("}")[1] == "AvailableNetworks":
4387 for networks in child.iter():
4388 if (
4389 networks.attrib.get("href") is not None
4390 and networks.attrib.get("name") is not None
4391 ):
4392 if (
4393 networks.attrib.get("name")
4394 == physical_network_name
4395 ):
4396 network_url = networks.attrib.get("href")
4397 available_network = network_url[
4398 network_url.rindex("/") + 1 :
4399 ]
4400 break
4401 except Exception:
4402 return None
4403
4404 return available_network
4405 except Exception as e:
4406 self.logger.error("Error while getting physical network: {}".format(e))
4407
4408 def list_org_action(self):
4409 """
4410         Method leverages vCloud Director to query the organizations available to the current user
4411
4412 Args:
4413             self.client - active vCD client connection (no explicit arguments are used).
4415
4416 Returns:
4417             The XML response text, or None
4418 """
4419 url_list = [self.url, "/api/org"]
4420 vm_list_rest_call = "".join(url_list)
4421
4422 if self.client._session:
4423 headers = {
4424 "Accept": "application/*+xml;version=" + API_VERSION,
4425 "x-vcloud-authorization": self.client._session.headers[
4426 "x-vcloud-authorization"
4427 ],
4428 }
4429
4430 response = self.perform_request(
4431 req_type="GET", url=vm_list_rest_call, headers=headers
4432 )
4433
4434 if response.status_code == 403:
4435 response = self.retry_rest("GET", vm_list_rest_call)
4436
4437 if response.status_code == requests.codes.ok:
4438 return response.text
4439
4440 return None
4441
4442 def get_org_action(self, org_uuid=None):
4443 """
4444         Method leverages vCloud Director to retrieve the available objects for an organization.
4445
4446 Args:
4447 org_uuid - vCD organization uuid
4448 self.client - is active connection.
4449
4450 Returns:
4451             The XML response text, or None
4452 """
4453
4454 if org_uuid is None:
4455 return None
4456
4457 url_list = [self.url, "/api/org/", org_uuid]
4458 vm_list_rest_call = "".join(url_list)
4459
4460 if self.client._session:
4461 headers = {
4462 "Accept": "application/*+xml;version=" + API_VERSION,
4463 "x-vcloud-authorization": self.client._session.headers[
4464 "x-vcloud-authorization"
4465 ],
4466 }
4467
4468 # response = requests.get(vm_list_rest_call, headers=headers, verify=False)
4469 response = self.perform_request(
4470 req_type="GET", url=vm_list_rest_call, headers=headers
4471 )
4472
4473 if response.status_code == 403:
4474 response = self.retry_rest("GET", vm_list_rest_call)
4475
4476 if response.status_code == requests.codes.ok:
4477 return response.text
4478
4479 return None
4480
4481 def get_org(self, org_uuid=None):
4482 """
4483         Method retrieves the details of an organization in vCloud Director
4484
4485 Args:
4486             org_uuid - is an organization uuid.
4487
4488 Returns:
4489             The return dictionary with the following keys
4490                 "networks" - for the network list under the org
4491                 "catalogs" - for the catalog list under the org
4492                 "vdcs" - for the vdc list under the org
4493 """
4494
4495 org_dict = {}
4496
4497 if org_uuid is None:
4498 return org_dict
4499
4500 content = self.get_org_action(org_uuid=org_uuid)
4501 try:
4502 vdc_list = {}
4503 network_list = {}
4504 catalog_list = {}
4505 vm_list_xmlroot = XmlElementTree.fromstring(content)
4506 for child in vm_list_xmlroot:
4507 if child.attrib["type"] == "application/vnd.vmware.vcloud.vdc+xml":
4508 vdc_list[child.attrib["href"].split("/")[-1:][0]] = child.attrib[
4509 "name"
4510 ]
4511 org_dict["vdcs"] = vdc_list
4512
4513 if (
4514 child.attrib["type"]
4515 == "application/vnd.vmware.vcloud.orgNetwork+xml"
4516 ):
4517 network_list[
4518 child.attrib["href"].split("/")[-1:][0]
4519 ] = child.attrib["name"]
4520 org_dict["networks"] = network_list
4521
4522 if child.attrib["type"] == "application/vnd.vmware.vcloud.catalog+xml":
4523 catalog_list[
4524 child.attrib["href"].split("/")[-1:][0]
4525 ] = child.attrib["name"]
4526 org_dict["catalogs"] = catalog_list
4527 except Exception:
4528 pass
4529
4530 return org_dict
4531
4532 def get_org_list(self):
4533 """
4534         Method retrieves the available organizations in vCloud Director
4535
4536 Args:
4537 vca - is active VCA connection.
4538
4539 Returns:
4540             The return dictionary, keyed by organization UUID
4541 """
4542 org_dict = {}
4543
4544 content = self.list_org_action()
4545 try:
4546 vm_list_xmlroot = XmlElementTree.fromstring(content)
4547
4548 for vm_xml in vm_list_xmlroot:
4549 if vm_xml.tag.split("}")[1] == "Org":
4550 org_uuid = vm_xml.attrib["href"].split("/")[-1:]
4551 org_dict[org_uuid[0]] = vm_xml.attrib["name"]
4552 except Exception:
4553 pass
4554
4555 return org_dict
4556
4557 def vms_view_action(self, vdc_name=None):
4558         """Method leverages the vCloud Director VMs query call
4559
4560 Args:
4561 vca - is active VCA connection.
4562 vdc_name - is a vdc name that will be used to query vms action
4563
4564 Returns:
4565             The XML response text, or None
4566 """
4567 vca = self.connect()
4568 if vdc_name is None:
4569 return None
4570
4571 url_list = [vca.host, "/api/vms/query"]
4572 vm_list_rest_call = "".join(url_list)
4573
4574 if not (not vca.vcloud_session or not vca.vcloud_session.organization):
4575 refs = [
4576 ref
4577 for ref in vca.vcloud_session.organization.Link
4578 if ref.name == vdc_name
4579 and ref.type_ == "application/vnd.vmware.vcloud.vdc+xml"
4580 ]
4581
4582 if len(refs) == 1:
4583 response = self.perform_request(
4584 req_type="GET",
4585 url=vm_list_rest_call,
4586 headers=vca.vcloud_session.get_vcloud_headers(),
4587 verify=vca.verify,
4588 logger=vca.logger,
4589 )
4590
4591 if response.status_code == requests.codes.ok:
4592 return response.text
4593
4594 return None
4595
4596 def get_vapp_list(self, vdc_name=None):
4597 """
4598         Method retrieves the list of vApps deployed in vCloud Director and returns a dictionary
4599         containing all vApps deployed for the queried VDC.
4600         The key of the dictionary is the vApp UUID
4601
4602
4603 Args:
4604 vca - is active VCA connection.
4605 vdc_name - is a vdc name that will be used to query vms action
4606
4607 Returns:
4608             The return dictionary, keyed by vApp UUID
4609 """
4610 vapp_dict = {}
4611
4612 if vdc_name is None:
4613 return vapp_dict
4614
4615 content = self.vms_view_action(vdc_name=vdc_name)
4616 try:
4617 vm_list_xmlroot = XmlElementTree.fromstring(content)
4618 for vm_xml in vm_list_xmlroot:
4619 if vm_xml.tag.split("}")[1] == "VMRecord":
4620 if vm_xml.attrib["isVAppTemplate"] == "true":
4621 rawuuid = vm_xml.attrib["container"].split("/")[-1:]
4622 if "vappTemplate-" in rawuuid[0]:
4623 # vm in format vappTemplate-e63d40e7-4ff5-4c6d-851f-96c1e4da86a5 we remove
4624 # vm and use raw UUID as key
4625 vapp_dict[rawuuid[0][13:]] = vm_xml.attrib
4626 except Exception:
4627 pass
4628
4629 return vapp_dict
4630
4631 def get_vm_list(self, vdc_name=None):
4632 """
4633         Method retrieves the list of VMs deployed in vCloud Director. It returns a dictionary
4634         containing all VMs deployed for the queried VDC.
4635         The key of the dictionary is the VM UUID
4636
4637
4638 Args:
4639 vca - is active VCA connection.
4640 vdc_name - is a vdc name that will be used to query vms action
4641
4642 Returns:
4643             The return dictionary, keyed by VM UUID
4644 """
4645 vm_dict = {}
4646
4647 if vdc_name is None:
4648 return vm_dict
4649
4650 content = self.vms_view_action(vdc_name=vdc_name)
4651 try:
4652 vm_list_xmlroot = XmlElementTree.fromstring(content)
4653 for vm_xml in vm_list_xmlroot:
4654 if vm_xml.tag.split("}")[1] == "VMRecord":
4655 if vm_xml.attrib["isVAppTemplate"] == "false":
4656 rawuuid = vm_xml.attrib["href"].split("/")[-1:]
4657 if "vm-" in rawuuid[0]:
4658 # vm in format vm-e63d40e7-4ff5-4c6d-851f-96c1e4da86a5 we remove
4659 # vm and use raw UUID as key
4660 vm_dict[rawuuid[0][3:]] = vm_xml.attrib
4661 except Exception:
4662 pass
4663
4664 return vm_dict
4665
4666 def get_vapp(self, vdc_name=None, vapp_name=None, isuuid=False):
4667 """
4668         Method retrieves a VM deployed in vCloud Director. It returns the VM attributes as a
4669         dictionary, looked up either by UUID or by name within the queried VDC.
4670         The key of the dictionary is the VM UUID
4671
4672
4673 Args:
4674 vca - is active VCA connection.
4675 vdc_name - is a vdc name that will be used to query vms action
4676
4677 Returns:
4678             The return dictionary, keyed by VM UUID
4679 """
4680 vm_dict = {}
4681 vca = self.connect()
4682
4683 if not vca:
4684 raise vimconn.VimConnConnectionException("self.connect() is failed")
4685
4686 if vdc_name is None:
4687 return vm_dict
4688
4689 content = self.vms_view_action(vdc_name=vdc_name)
4690 try:
4691 vm_list_xmlroot = XmlElementTree.fromstring(content)
4692 for vm_xml in vm_list_xmlroot:
4693 if (
4694 vm_xml.tag.split("}")[1] == "VMRecord"
4695 and vm_xml.attrib["isVAppTemplate"] == "false"
4696 ):
4697 # lookup done by UUID
4698 if isuuid:
4699 if vapp_name in vm_xml.attrib["container"]:
4700 rawuuid = vm_xml.attrib["href"].split("/")[-1:]
4701 if "vm-" in rawuuid[0]:
4702 vm_dict[rawuuid[0][3:]] = vm_xml.attrib
4703 break
4704 # lookup done by Name
4705 else:
4706 if vapp_name in vm_xml.attrib["name"]:
4707 rawuuid = vm_xml.attrib["href"].split("/")[-1:]
4708 if "vm-" in rawuuid[0]:
4709 vm_dict[rawuuid[0][3:]] = vm_xml.attrib
4710 break
4711 except Exception:
4712 pass
4713
4714 return vm_dict
4715
4716 def get_network_action(self, network_uuid=None):
4717 """
4718         Method leverages vCloud Director to query a network based on its network uuid
4719
4720 Args:
4721 vca - is active VCA connection.
4722 network_uuid - is a network uuid
4723
4724 Returns:
4725             The XML response text, or None
4726 """
4727 if network_uuid is None:
4728 return None
4729
4730 url_list = [self.url, "/api/network/", network_uuid]
4731 vm_list_rest_call = "".join(url_list)
4732
4733 if self.client._session:
4734 headers = {
4735 "Accept": "application/*+xml;version=" + API_VERSION,
4736 "x-vcloud-authorization": self.client._session.headers[
4737 "x-vcloud-authorization"
4738 ],
4739 }
4740 response = self.perform_request(
4741 req_type="GET", url=vm_list_rest_call, headers=headers
4742 )
4743
4744 # Retry login if session expired & retry sending request
4745 if response.status_code == 403:
4746 response = self.retry_rest("GET", vm_list_rest_call)
4747
4748 if response.status_code == requests.codes.ok:
4749 return response.text
4750
4751 return None
4752
4753 def get_vcd_network(self, network_uuid=None):
4754 """
4755         Method retrieves the configuration of a network from vCloud Director
4756
4757 Args:
4758 network_uuid - is VCD network UUID
4759
4760 Each element serialized as key : value pair
4761
4762         The following keys are available for access, e.g. network_configuration['Gateway']
4763 <Configuration>
4764 <IpScopes>
4765 <IpScope>
4766 <IsInherited>true</IsInherited>
4767 <Gateway>172.16.252.100</Gateway>
4768 <Netmask>255.255.255.0</Netmask>
4769 <Dns1>172.16.254.201</Dns1>
4770 <Dns2>172.16.254.202</Dns2>
4771 <DnsSuffix>vmwarelab.edu</DnsSuffix>
4772 <IsEnabled>true</IsEnabled>
4773 <IpRanges>
4774 <IpRange>
4775 <StartAddress>172.16.252.1</StartAddress>
4776 <EndAddress>172.16.252.99</EndAddress>
4777 </IpRange>
4778 </IpRanges>
4779 </IpScope>
4780 </IpScopes>
4781 <FenceMode>bridged</FenceMode>
4782
4783 Returns:
4784             The network configuration dictionary (empty if the network cannot be read)
4785 """
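        # Sketch of a typical returned dictionary (values taken from the sample XML
        # in the docstring above; the actual keys depend on the network configuration):
        #
        #   {
        #       "status": "1",
        #       "name": "net-name",
        #       "uuid": "<network-uuid>",
        #       "isShared": "true",
        #       "Gateway": "172.16.252.100",
        #       "Netmask": "255.255.255.0",
        #       "Dns1": "172.16.254.201",
        #       "IsEnabled": "true",
        #       "StartAddress": "172.16.252.1",
        #       "EndAddress": "172.16.252.99",
        #       "FenceMode": "bridged",
        #   }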
4786 network_configuration = {}
4787
4788 if network_uuid is None:
4789 return network_uuid
4790
4791 try:
4792 content = self.get_network_action(network_uuid=network_uuid)
4793 if content is not None:
4794 vm_list_xmlroot = XmlElementTree.fromstring(content)
4795 network_configuration["status"] = vm_list_xmlroot.get("status")
4796 network_configuration["name"] = vm_list_xmlroot.get("name")
4797 network_configuration["uuid"] = vm_list_xmlroot.get("id").split(":")[3]
4798
4799 for child in vm_list_xmlroot:
4800 if child.tag.split("}")[1] == "IsShared":
4801 network_configuration["isShared"] = child.text.strip()
4802
4803 if child.tag.split("}")[1] == "Configuration":
4804 for configuration in child.iter():
4805 tagKey = configuration.tag.split("}")[1].strip()
4806 if tagKey != "":
4807 network_configuration[
4808 tagKey
4809 ] = configuration.text.strip()
4810 except Exception as exp:
4811 self.logger.debug("get_vcd_network: Failed with Exception {}".format(exp))
4812
4813 raise vimconn.VimConnException(
4814 "get_vcd_network: Failed with Exception {}".format(exp)
4815 )
4816
4817 return network_configuration
4818
4819 def delete_network_action(self, network_uuid=None):
4820 """
4821         Method deletes the given network from vCloud Director
4822
4823 Args:
4824             network_uuid - is the network uuid that the client wishes to delete
4825
4826 Returns:
4827             True if the delete request was accepted (HTTP 202), otherwise False
4828 """
4829 client = self.connect_as_admin()
4830
4831 if not client:
4832 raise vimconn.VimConnConnectionException("Failed to connect vCD as admin")
4833
4834 if network_uuid is None:
4835 return False
4836
4837 url_list = [self.url, "/api/admin/network/", network_uuid]
4838 vm_list_rest_call = "".join(url_list)
4839
4840 if client._session:
4841 headers = {
4842 "Accept": "application/*+xml;version=" + API_VERSION,
4843 "x-vcloud-authorization": client._session.headers[
4844 "x-vcloud-authorization"
4845 ],
4846 }
4847 response = self.perform_request(
4848 req_type="DELETE", url=vm_list_rest_call, headers=headers
4849 )
4850
4851 if response.status_code == 202:
4852 return True
4853
4854 return False
4855
4856 def create_network(
4857 self,
4858 network_name=None,
4859 net_type="bridge",
4860 parent_network_uuid=None,
4861 ip_profile=None,
4862 isshared="true",
4863 ):
4864 """
4865         Method creates a network in vCloud Director
4866
4867 Args:
4868 network_name - is network name to be created.
4869 net_type - can be 'bridge','data','ptp','mgmt'.
4870 ip_profile is a dict containing the IP parameters of the network
4871 isshared - is a boolean
4872 parent_network_uuid - is parent provider vdc network that will be used for mapping.
4873             It is an optional attribute; by default, if no parent network is indicated, the first available one is used.
4874
4875 Returns:
4876             The new network uuid, or None on failure
4877 """
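        # Illustrative ip_profile accepted by this method (all keys are optional;
        # missing ones are filled in from DEFAULT_IP_PROFILE or derived in
        # create_network_rest; values below are examples only):
        #
        #   {
        #       "subnet_address": "192.168.10.0/24",
        #       "gateway_address": "192.168.10.1",
        #       "dns_address": "192.168.10.2;8.8.8.8",
        #       "dhcp_enabled": True,
        #       "dhcp_start_address": "192.168.10.3",
        #       "dhcp_count": 50,
        #       "ip_version": "IPv4",
        #   }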
4878 new_network_name = [network_name, "-", str(uuid.uuid4())]
4879 content = self.create_network_rest(
4880 network_name="".join(new_network_name),
4881 ip_profile=ip_profile,
4882 net_type=net_type,
4883 parent_network_uuid=parent_network_uuid,
4884 isshared=isshared,
4885 )
4886
4887 if content is None:
4888 self.logger.debug("Failed create network {}.".format(network_name))
4889             self.logger.debug("Failed to create network {}.".format(network_name))
4890 return None
4891
4892 try:
4893 vm_list_xmlroot = XmlElementTree.fromstring(content)
4894 vcd_uuid = vm_list_xmlroot.get("id").split(":")
4895 if len(vcd_uuid) == 4:
4896 self.logger.info(
4897 "Created new network name: {} uuid: {}".format(
4898 network_name, vcd_uuid[3]
4899 )
4900 )
4901
4902 return vcd_uuid[3]
4903 except Exception:
4904 self.logger.debug("Failed create network {}".format(network_name))
4905             self.logger.debug("Failed to create network {}".format(network_name))
4906 return None
4907
4908 def create_network_rest(
4909 self,
4910 network_name=None,
4911 net_type="bridge",
4912 parent_network_uuid=None,
4913 ip_profile=None,
4914 isshared="true",
4915 ):
4916 """
4917         Method creates a network in vCloud Director via the REST API
4918
4919 Args:
4920 network_name - is network name to be created.
4921 net_type - can be 'bridge','data','ptp','mgmt'.
4922 ip_profile is a dict containing the IP parameters of the network
4923 isshared - is a boolean
4924 parent_network_uuid - is parent provider vdc network that will be used for mapping.
4925             It is an optional attribute; by default, if no parent network is indicated, the first available one is used.
4926
4927 Returns:
4928             The XML response text of the created network, or None on failure
4929 """
4930 client_as_admin = self.connect_as_admin()
4931
4932 if not client_as_admin:
4933 raise vimconn.VimConnConnectionException("Failed to connect vCD.")
4934
4935 if network_name is None:
4936 return None
4937
4938 url_list = [self.url, "/api/admin/vdc/", self.tenant_id]
4939 vm_list_rest_call = "".join(url_list)
4940
4941 if client_as_admin._session:
4942 headers = {
4943 "Accept": "application/*+xml;version=" + API_VERSION,
4944 "x-vcloud-authorization": client_as_admin._session.headers[
4945 "x-vcloud-authorization"
4946 ],
4947 }
4948 response = self.perform_request(
4949 req_type="GET", url=vm_list_rest_call, headers=headers
4950 )
4951 provider_network = None
4952 available_networks = None
4953 add_vdc_rest_url = None
4954
4955 if response.status_code != requests.codes.ok:
4956 self.logger.debug(
4957 "REST API call {} failed. Return status code {}".format(
4958 vm_list_rest_call, response.status_code
4959 )
4960 )
4961
4962 return None
4963 else:
4964 try:
4965 vm_list_xmlroot = XmlElementTree.fromstring(response.text)
4966 for child in vm_list_xmlroot:
4967 if child.tag.split("}")[1] == "ProviderVdcReference":
4968 provider_network = child.attrib.get("href")
4969 # application/vnd.vmware.admin.providervdc+xml
4970
4971 if child.tag.split("}")[1] == "Link":
4972 if (
4973 child.attrib.get("type")
4974 == "application/vnd.vmware.vcloud.orgVdcNetwork+xml"
4975 and child.attrib.get("rel") == "add"
4976 ):
4977 add_vdc_rest_url = child.attrib.get("href")
4978 except Exception:
4979 self.logger.debug(
4980                         "Failed to parse response for rest api call {}".format(
4981 vm_list_rest_call
4982 )
4983 )
4984                     self.logger.debug("Response body {}".format(response.text))
4985
4986 return None
4987
4988 # find pvdc provided available network
4989 response = self.perform_request(
4990 req_type="GET", url=provider_network, headers=headers
4991 )
4992
4993 if response.status_code != requests.codes.ok:
4994 self.logger.debug(
4995 "REST API call {} failed. Return status code {}".format(
4996 vm_list_rest_call, response.status_code
4997 )
4998 )
4999
5000 return None
5001
5002 if parent_network_uuid is None:
5003 try:
5004 vm_list_xmlroot = XmlElementTree.fromstring(response.text)
5005 for child in vm_list_xmlroot.iter():
5006 if child.tag.split("}")[1] == "AvailableNetworks":
5007 for networks in child.iter():
5008 # application/vnd.vmware.admin.network+xml
5009 if networks.attrib.get("href") is not None:
5010 available_networks = networks.attrib.get("href")
5011 break
5012 except Exception:
5013 return None
5014
5015 try:
5016 # Configure IP profile of the network
5017 ip_profile = (
5018 ip_profile if ip_profile is not None else DEFAULT_IP_PROFILE
5019 )
5020
5021 if (
5022 "subnet_address" not in ip_profile
5023 or ip_profile["subnet_address"] is None
5024 ):
5025 subnet_rand = random.randint(0, 255)
5026 ip_base = "192.168.{}.".format(subnet_rand)
5027 ip_profile["subnet_address"] = ip_base + "0/24"
5028 else:
5029 ip_base = ip_profile["subnet_address"].rsplit(".", 1)[0] + "."
5030
5031 if (
5032 "gateway_address" not in ip_profile
5033 or ip_profile["gateway_address"] is None
5034 ):
5035 ip_profile["gateway_address"] = ip_base + "1"
5036
5037 if "dhcp_count" not in ip_profile or ip_profile["dhcp_count"] is None:
5038 ip_profile["dhcp_count"] = DEFAULT_IP_PROFILE["dhcp_count"]
5039
5040 if (
5041 "dhcp_enabled" not in ip_profile
5042 or ip_profile["dhcp_enabled"] is None
5043 ):
5044 ip_profile["dhcp_enabled"] = DEFAULT_IP_PROFILE["dhcp_enabled"]
5045
5046 if (
5047 "dhcp_start_address" not in ip_profile
5048 or ip_profile["dhcp_start_address"] is None
5049 ):
5050 ip_profile["dhcp_start_address"] = ip_base + "3"
5051
5052 if "ip_version" not in ip_profile or ip_profile["ip_version"] is None:
5053 ip_profile["ip_version"] = DEFAULT_IP_PROFILE["ip_version"]
5054
5055 if "dns_address" not in ip_profile or ip_profile["dns_address"] is None:
5056 ip_profile["dns_address"] = ip_base + "2"
5057
5058 gateway_address = ip_profile["gateway_address"]
5059 dhcp_count = int(ip_profile["dhcp_count"])
5060 subnet_address = self.convert_cidr_to_netmask(
5061 ip_profile["subnet_address"]
5062 )
5063
5064 if ip_profile["dhcp_enabled"] is True:
5065 dhcp_enabled = "true"
5066 else:
5067 dhcp_enabled = "false"
5068
5069 dhcp_start_address = ip_profile["dhcp_start_address"]
5070
5071 # derive dhcp_end_address from dhcp_start_address & dhcp_count
5072 end_ip_int = int(netaddr.IPAddress(dhcp_start_address))
5073 end_ip_int += dhcp_count - 1
5074 dhcp_end_address = str(netaddr.IPAddress(end_ip_int))
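                # Worked example (illustrative values): with dhcp_start_address
                # "192.168.5.3" and dhcp_count 50, the range covers 50 addresses
                # inclusive, so dhcp_end_address becomes "192.168.5.52".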
5075
5076 # ip_version = ip_profile['ip_version']
5077 dns_address = ip_profile["dns_address"]
5078 except KeyError as exp:
5079 self.logger.debug("Create Network REST: Key error {}".format(exp))
5080
5081 raise vimconn.VimConnException(
5082                     "Create Network REST: Key error {}".format(exp)
5083 )
5084
5085 # either use client provided UUID or search for a first available
5086 # if both are not defined we return none
5087 if parent_network_uuid is not None:
5088 provider_network = None
5089 available_networks = None
5090 add_vdc_rest_url = None
5091 url_list = [self.url, "/api/admin/vdc/", self.tenant_id, "/networks"]
5092 add_vdc_rest_url = "".join(url_list)
5093 url_list = [self.url, "/api/admin/network/", parent_network_uuid]
5094 available_networks = "".join(url_list)
5095
5096 # Creating all networks as Direct Org VDC type networks.
5097 # Unused in case of Underlay (data/ptp) network interface.
5098 fence_mode = "isolated"
5099 is_inherited = "false"
5100 dns_list = dns_address.split(";")
5101 dns1 = dns_list[0]
5102 dns2_text = ""
5103
5104 if len(dns_list) >= 2:
5105 dns2_text = "\n <Dns2>{}</Dns2>\n".format(
5106 dns_list[1]
5107 )
5108
5109 if net_type == "isolated":
5110 fence_mode = "isolated"
5111 data = """ <OrgVdcNetwork name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5">
5112 <Description>Openmano created</Description>
5113 <Configuration>
5114 <IpScopes>
5115 <IpScope>
5116 <IsInherited>{1:s}</IsInherited>
5117 <Gateway>{2:s}</Gateway>
5118 <Netmask>{3:s}</Netmask>
5119 <Dns1>{4:s}</Dns1>{5:s}
5120 <IsEnabled>{6:s}</IsEnabled>
5121 <IpRanges>
5122 <IpRange>
5123 <StartAddress>{7:s}</StartAddress>
5124 <EndAddress>{8:s}</EndAddress>
5125 </IpRange>
5126 </IpRanges>
5127 </IpScope>
5128 </IpScopes>
5129 <FenceMode>{9:s}</FenceMode>
5130 </Configuration>
5131 <IsShared>{10:s}</IsShared>
5132 </OrgVdcNetwork> """.format(
5133 escape(network_name),
5134 is_inherited,
5135 gateway_address,
5136 subnet_address,
5137 dns1,
5138 dns2_text,
5139 dhcp_enabled,
5140 dhcp_start_address,
5141 dhcp_end_address,
5142 fence_mode,
5143 isshared,
5144 )
5145 else:
5146 fence_mode = "bridged"
5147 data = """ <OrgVdcNetwork name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5">
5148 <Description>Openmano created</Description>
5149 <Configuration>
5150 <IpScopes>
5151 <IpScope>
5152 <IsInherited>{1:s}</IsInherited>
5153 <Gateway>{2:s}</Gateway>
5154 <Netmask>{3:s}</Netmask>
5155 <Dns1>{4:s}</Dns1>{5:s}
5156 <IsEnabled>{6:s}</IsEnabled>
5157 <IpRanges>
5158 <IpRange>
5159 <StartAddress>{7:s}</StartAddress>
5160 <EndAddress>{8:s}</EndAddress>
5161 </IpRange>
5162 </IpRanges>
5163 </IpScope>
5164 </IpScopes>
5165 <ParentNetwork href="{9:s}"/>
5166 <FenceMode>{10:s}</FenceMode>
5167 </Configuration>
5168 <IsShared>{11:s}</IsShared>
5169 </OrgVdcNetwork> """.format(
5170 escape(network_name),
5171 is_inherited,
5172 gateway_address,
5173 subnet_address,
5174 dns1,
5175 dns2_text,
5176 dhcp_enabled,
5177 dhcp_start_address,
5178 dhcp_end_address,
5179 available_networks,
5180 fence_mode,
5181 isshared,
5182 )
5183
5184 headers["Content-Type"] = "application/vnd.vmware.vcloud.orgVdcNetwork+xml"
5185 try:
5186 response = self.perform_request(
5187 req_type="POST", url=add_vdc_rest_url, headers=headers, data=data
5188 )
5189
5190 if response.status_code != 201:
5191 self.logger.debug(
5192 "Create Network POST REST API call failed. "
5193 "Return status code {}, response.text: {}".format(
5194 response.status_code, response.text
5195 )
5196 )
5197 else:
5198 network_task = self.get_task_from_response(response.text)
5199 self.logger.debug(
5200 "Create Network REST : Waiting for Network creation complete"
5201 )
5202 time.sleep(5)
5203 result = self.client.get_task_monitor().wait_for_success(
5204 task=network_task
5205 )
5206
5207 if result.get("status") == "success":
5208 return response.text
5209 else:
5210 self.logger.debug(
5211 "create_network_rest task failed. Network Create response : {}".format(
5212 response.text
5213 )
5214 )
5215 except Exception as exp:
5216 self.logger.debug("create_network_rest : Exception : {} ".format(exp))
5217
5218 return None
5219
5220 def convert_cidr_to_netmask(self, cidr_ip=None):
5221 """
5222         Method converts a CIDR prefix length to a dotted-decimal netmask
5223 Args:
5224 cidr_ip : CIDR IP address
5225 Returns:
5226 netmask : Converted netmask
5227 """
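        # Doctest-style sketch of the conversion done below (not executed here):
        #
        #   >>> self.convert_cidr_to_netmask("10.10.10.0/24")
        #   '255.255.255.0'
        #   >>> self.convert_cidr_to_netmask("255.255.255.0")   # already a netmask
        #   '255.255.255.0'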
5228 if cidr_ip is not None:
5229 if "/" in cidr_ip:
5230 _, net_bits = cidr_ip.split("/")
5231 netmask = socket.inet_ntoa(
5232 struct.pack(">I", (0xFFFFFFFF << (32 - int(net_bits))) & 0xFFFFFFFF)
5233 )
5234 else:
5235 netmask = cidr_ip
5236
5237 return netmask
5238
5239 return None
5240
5241 def get_provider_rest(self, vca=None):
5242 """
5243         Method gets the provider VDC view from vCloud Director
5244
5245 Args:
5246             vca - active vCloud Director client connection.
5249
5250 Returns:
5251             The XML content of the response, or None
5252 """
5253 url_list = [self.url, "/api/admin"]
5254
5255 if vca:
5256 headers = {
5257 "Accept": "application/*+xml;version=" + API_VERSION,
5258 "x-vcloud-authorization": self.client._session.headers[
5259 "x-vcloud-authorization"
5260 ],
5261 }
5262 response = self.perform_request(
5263 req_type="GET", url="".join(url_list), headers=headers
5264 )
5265
5266 if response.status_code == requests.codes.ok:
5267 return response.text
5268
5269 return None
5270
5271 def create_vdc(self, vdc_name=None):
5272 vdc_dict = {}
5273 xml_content = self.create_vdc_from_tmpl_rest(vdc_name=vdc_name)
5274
5275 if xml_content is not None:
5276 try:
5277 task_resp_xmlroot = XmlElementTree.fromstring(xml_content)
5278 for child in task_resp_xmlroot:
5279 if child.tag.split("}")[1] == "Owner":
5280 vdc_id = child.attrib.get("href").split("/")[-1]
5281 vdc_dict[vdc_id] = task_resp_xmlroot.get("href")
5282
5283 return vdc_dict
5284 except Exception:
5285                 self.logger.debug("Response body {}".format(xml_content))
5286
5287 return None
5288
5289 def create_vdc_from_tmpl_rest(self, vdc_name=None):
5290 """
5291         Method creates a VDC in vCloud Director based on a VDC template.
5292         It uses a pre-defined template.
5293
5294 Args:
5295 vdc_name - name of a new vdc.
5296
5297 Returns:
5298             The XML content of the response, or None
5299 """
5300         # prerequisite: at least one vdc template should be available in vCD
5301 self.logger.info("Creating new vdc {}".format(vdc_name))
5302 vca = self.connect_as_admin()
5303
5304 if not vca:
5305 raise vimconn.VimConnConnectionException("Failed to connect vCD")
5306
5307 if vdc_name is None:
5308 return None
5309
5310 url_list = [self.url, "/api/vdcTemplates"]
5311 vm_list_rest_call = "".join(url_list)
5312 headers = {
5313 "Accept": "application/*+xml;version=" + API_VERSION,
5314 "x-vcloud-authorization": vca._session.headers["x-vcloud-authorization"],
5315 }
5316 response = self.perform_request(
5317 req_type="GET", url=vm_list_rest_call, headers=headers
5318 )
5319
5320 # container url to a template
5321 vdc_template_ref = None
5322 try:
5323 vm_list_xmlroot = XmlElementTree.fromstring(response.text)
5324 for child in vm_list_xmlroot:
5325 # application/vnd.vmware.admin.providervdc+xml
5326                 # we need to find a template from which we instantiate the VDC
5327 if child.tag.split("}")[1] == "VdcTemplate":
5328 if (
5329 child.attrib.get("type")
5330 == "application/vnd.vmware.admin.vdcTemplate+xml"
5331 ):
5332 vdc_template_ref = child.attrib.get("href")
5333 except Exception:
5334 self.logger.debug(
5335                 "Failed to parse response for rest api call {}".format(vm_list_rest_call)
5336 )
5337             self.logger.debug("Response body {}".format(response.text))
5338
5339 return None
5340
5341         # if we didn't find the required pre-defined template we return None
5342 if vdc_template_ref is None:
5343 return None
5344
5345 try:
5346 # instantiate vdc
5347 url_list = [self.url, "/api/org/", self.org_uuid, "/action/instantiate"]
5348 vm_list_rest_call = "".join(url_list)
5349 data = """<InstantiateVdcTemplateParams name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5">
5350 <Source href="{1:s}"></Source>
5351                              <Description>openmano</Description>
5352 </InstantiateVdcTemplateParams>""".format(
5353 vdc_name, vdc_template_ref
5354 )
5355 headers[
5356 "Content-Type"
5357 ] = "application/vnd.vmware.vcloud.instantiateVdcTemplateParams+xml"
5358 response = self.perform_request(
5359 req_type="POST", url=vm_list_rest_call, headers=headers, data=data
5360 )
5361 vdc_task = self.get_task_from_response(response.text)
5362 self.client.get_task_monitor().wait_for_success(task=vdc_task)
5363
5364             # if all is ok we respond with the content, otherwise by default None
5365 if response.status_code >= 200 and response.status_code < 300:
5366 return response.text
5367
5368 return None
5369 except Exception:
5370 self.logger.debug(
5371                 "Failed to parse response for rest api call {}".format(vm_list_rest_call)
5372 )
5373             self.logger.debug("Response body {}".format(response.text))
5374
5375 return None
5376
5377 def create_vdc_rest(self, vdc_name=None):
5378 """
5379         Method creates a VDC in vCloud Director
5380
5381 Args:
5382 vdc_name - vdc name to be created
5383 Returns:
5384             The XML response text, or None
5385 """
5386 self.logger.info("Creating new vdc {}".format(vdc_name))
5387 vca = self.connect_as_admin()
5388
5389 if not vca:
5390 raise vimconn.VimConnConnectionException("Failed to connect vCD")
5391
5392 if vdc_name is None:
5393 return None
5394
5395 url_list = [self.url, "/api/admin/org/", self.org_uuid]
5396 vm_list_rest_call = "".join(url_list)
5397
5398 if vca._session:
5399 headers = {
5400 "Accept": "application/*+xml;version=" + API_VERSION,
5401 "x-vcloud-authorization": self.client._session.headers[
5402 "x-vcloud-authorization"
5403 ],
5404 }
5405 response = self.perform_request(
5406 req_type="GET", url=vm_list_rest_call, headers=headers
5407 )
5408 provider_vdc_ref = None
5409 add_vdc_rest_url = None
5410 # available_networks = None
5411
5412 if response.status_code != requests.codes.ok:
5413 self.logger.debug(
5414 "REST API call {} failed. Return status code {}".format(
5415 vm_list_rest_call, response.status_code
5416 )
5417 )
5418
5419 return None
5420 else:
5421 try:
5422 vm_list_xmlroot = XmlElementTree.fromstring(response.text)
5423 for child in vm_list_xmlroot:
5424 # application/vnd.vmware.admin.providervdc+xml
5425 if child.tag.split("}")[1] == "Link":
5426 if (
5427 child.attrib.get("type")
5428 == "application/vnd.vmware.admin.createVdcParams+xml"
5429 and child.attrib.get("rel") == "add"
5430 ):
5431 add_vdc_rest_url = child.attrib.get("href")
5432 except Exception:
5433 self.logger.debug(
5434                         "Failed to parse response for rest api call {}".format(
5435 vm_list_rest_call
5436 )
5437 )
5438                     self.logger.debug("Response body {}".format(response.text))
5439
5440 return None
5441
5442 response = self.get_provider_rest(vca=vca)
5443 try:
5444 vm_list_xmlroot = XmlElementTree.fromstring(response)
5445 for child in vm_list_xmlroot:
5446 if child.tag.split("}")[1] == "ProviderVdcReferences":
5447 for sub_child in child:
5448 provider_vdc_ref = sub_child.attrib.get("href")
5449 except Exception:
5450 self.logger.debug(
5451                     "Failed to parse response for rest api call {}".format(
5452 vm_list_rest_call
5453 )
5454 )
5455                 self.logger.debug("Response body {}".format(response))
5456
5457 return None
5458
5459 if add_vdc_rest_url is not None and provider_vdc_ref is not None:
5460 data = """ <CreateVdcParams name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5"><Description>{1:s}</Description>
5461 <AllocationModel>ReservationPool</AllocationModel>
5462 <ComputeCapacity><Cpu><Units>MHz</Units><Allocated>2048</Allocated><Limit>2048</Limit></Cpu>
5463 <Memory><Units>MB</Units><Allocated>2048</Allocated><Limit>2048</Limit></Memory>
5464 </ComputeCapacity><NicQuota>0</NicQuota><NetworkQuota>100</NetworkQuota>
5465 <VdcStorageProfile><Enabled>true</Enabled><Units>MB</Units><Limit>20480</Limit><Default>true</Default></VdcStorageProfile>
5466 <ProviderVdcReference
5467 name="Main Provider"
5468 href="{2:s}" />
5469 <UsesFastProvisioning>true</UsesFastProvisioning></CreateVdcParams>""".format(
5470 escape(vdc_name), escape(vdc_name), provider_vdc_ref
5471 )
5472 headers[
5473 "Content-Type"
5474 ] = "application/vnd.vmware.admin.createVdcParams+xml"
5475 response = self.perform_request(
5476 req_type="POST",
5477 url=add_vdc_rest_url,
5478 headers=headers,
5479 data=data,
5480 )
5481
5482                 # if all is ok we respond with the content, otherwise by default None
5483 if response.status_code == 201:
5484 return response.text
5485
5486 return None
5487
5488 def get_vapp_details_rest(self, vapp_uuid=None, need_admin_access=False):
5489 """
5490         Method retrieves vApp details from vCloud Director
5491
5492 Args:
5493 vapp_uuid - is vapp identifier.
5494
5495 Returns:
5496             A dictionary with the parsed vApp details, or an empty dictionary / None on failure
5497 """
5498 parsed_respond = {}
5499 vca = None
5500
5501 if need_admin_access:
5502 vca = self.connect_as_admin()
5503 else:
5504 vca = self.client
5505
5506 if not vca:
5507 raise vimconn.VimConnConnectionException("Failed to connect vCD")
5508 if vapp_uuid is None:
5509 return None
5510
5511 url_list = [self.url, "/api/vApp/vapp-", vapp_uuid]
5512 get_vapp_restcall = "".join(url_list)
5513
5514 if vca._session:
5515 headers = {
5516 "Accept": "application/*+xml;version=" + API_VERSION,
5517 "x-vcloud-authorization": vca._session.headers[
5518 "x-vcloud-authorization"
5519 ],
5520 }
5521 response = self.perform_request(
5522 req_type="GET", url=get_vapp_restcall, headers=headers
5523 )
5524
5525 if response.status_code == 403:
5526 if need_admin_access is False:
5527 response = self.retry_rest("GET", get_vapp_restcall)
5528
5529 if response.status_code != requests.codes.ok:
5530 self.logger.debug(
5531 "REST API call {} failed. Return status code {}".format(
5532 get_vapp_restcall, response.status_code
5533 )
5534 )
5535
5536 return parsed_respond
5537
5538 try:
5539 xmlroot_respond = XmlElementTree.fromstring(response.text)
5540 parsed_respond["ovfDescriptorUploaded"] = xmlroot_respond.attrib[
5541 "ovfDescriptorUploaded"
5542 ]
5543 namespaces = {
5544 "vssd": "http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData",
5545 "ovf": "http://schemas.dmtf.org/ovf/envelope/1",
5546 "vmw": "http://www.vmware.com/schema/ovf",
5547 "vm": "http://www.vmware.com/vcloud/v1.5",
5548 "rasd": "http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData",
5549 "vmext": "http://www.vmware.com/vcloud/extension/v1.5",
5550 "xmlns": "http://www.vmware.com/vcloud/v1.5",
5551 }
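# These prefixes are used by the namespace-aware find()/iterfind() calls below
# (e.g. "vm:DateCreated", "ovf:VirtualHardwareSection").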
5552
5553 created_section = xmlroot_respond.find("vm:DateCreated", namespaces)
5554 if created_section is not None:
5555 parsed_respond["created"] = created_section.text
5556
5557 network_section = xmlroot_respond.find(
5558 "vm:NetworkConfigSection/vm:NetworkConfig", namespaces
5559 )
5560 if (
5561 network_section is not None
5562 and "networkName" in network_section.attrib
5563 ):
5564 parsed_respond["networkname"] = network_section.attrib[
5565 "networkName"
5566 ]
5567
5568 ipscopes_section = xmlroot_respond.find(
5569 "vm:NetworkConfigSection/vm:NetworkConfig/vm:Configuration/vm:IpScopes",
5570 namespaces,
5571 )
5572 if ipscopes_section is not None:
5573 for ipscope in ipscopes_section:
5574 for scope in ipscope:
5575 tag_key = scope.tag.split("}")[1]
5576 if tag_key == "IpRanges":
5577 ip_ranges = scope.getchildren()
5578 for ipblock in ip_ranges:
5579 for block in ipblock:
5580 parsed_respond[
5581 block.tag.split("}")[1]
5582 ] = block.text
5583 else:
5584 parsed_respond[tag_key] = scope.text
5585
5586 # parse children section for other attrib
5587 children_section = xmlroot_respond.find("vm:Children/", namespaces)
5588 if children_section is not None:
5589 parsed_respond["name"] = children_section.attrib["name"]
5590 parsed_respond["nestedHypervisorEnabled"] = (
5591 children_section.attrib["nestedHypervisorEnabled"]
5592 if "nestedHypervisorEnabled" in children_section.attrib
5593 else None
5594 )
5595 parsed_respond["deployed"] = children_section.attrib["deployed"]
5596 parsed_respond["status"] = children_section.attrib["status"]
5597 parsed_respond["vmuuid"] = children_section.attrib["id"].split(":")[
5598 -1
5599 ]
5600 network_adapter = children_section.find(
5601 "vm:NetworkConnectionSection", namespaces
5602 )
5603 nic_list = []
5604 for adapters in network_adapter:
5605 adapter_key = adapters.tag.split("}")[1]
5606 if adapter_key == "PrimaryNetworkConnectionIndex":
5607 parsed_respond["primarynetwork"] = adapters.text
5608
5609 if adapter_key == "NetworkConnection":
5610 vnic = {}
5611 if "network" in adapters.attrib:
5612 vnic["network"] = adapters.attrib["network"]
5613 for adapter in adapters:
5614 setting_key = adapter.tag.split("}")[1]
5615 vnic[setting_key] = adapter.text
5616 nic_list.append(vnic)
5617
5618 for link in children_section:
5619 if link.tag.split("}")[1] == "Link" and "rel" in link.attrib:
5620 if link.attrib["rel"] == "screen:acquireTicket":
5621 parsed_respond["acquireTicket"] = link.attrib
5622
5623 if link.attrib["rel"] == "screen:acquireMksTicket":
5624 parsed_respond["acquireMksTicket"] = link.attrib
5625
5626 parsed_respond["interfaces"] = nic_list
5627 vCloud_extension_section = children_section.find(
5628 "xmlns:VCloudExtension", namespaces
5629 )
5630 if vCloud_extension_section is not None:
5631 vm_vcenter_info = {}
5632 vim_info = vCloud_extension_section.find(
5633 "vmext:VmVimInfo", namespaces
5634 )
5635 vmext = vim_info.find("vmext:VmVimObjectRef", namespaces)
5636
5637 if vmext is not None:
5638 vm_vcenter_info["vm_moref_id"] = vmext.find(
5639 "vmext:MoRef", namespaces
5640 ).text
5641
5642 parsed_respond["vm_vcenter_info"] = vm_vcenter_info
5643
5644 virtual_hardware_section = children_section.find(
5645 "ovf:VirtualHardwareSection", namespaces
5646 )
5647 vm_virtual_hardware_info = {}
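# The disk capacity is a namespace-qualified attribute on HostResource, so it is
# read using Clark notation: "{<vcloud-namespace>}capacity".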
5648 if virtual_hardware_section is not None:
5649 for item in virtual_hardware_section.iterfind(
5650 "ovf:Item", namespaces
5651 ):
5652 if (
5653 item.find("rasd:Description", namespaces).text
5654 == "Hard disk"
5655 ):
5656 disk_size = item.find(
5657 "rasd:HostResource", namespaces
5658 ).attrib["{" + namespaces["vm"] + "}capacity"]
5659 vm_virtual_hardware_info["disk_size"] = disk_size
5660 break
5661
5662 for link in virtual_hardware_section:
5663 if (
5664 link.tag.split("}")[1] == "Link"
5665 and "rel" in link.attrib
5666 ):
5667 if link.attrib["rel"] == "edit" and link.attrib[
5668 "href"
5669 ].endswith("/disks"):
5670 vm_virtual_hardware_info[
5671 "disk_edit_href"
5672 ] = link.attrib["href"]
5673 break
5674
5675 parsed_respond["vm_virtual_hardware"] = vm_virtual_hardware_info
5676 except Exception as exp:
5677 self.logger.info(
5678 "Error occurred calling REST API for getting vApp details: {}".format(
5679 exp
5680 )
5681 )
5682
5683 return parsed_respond
5684
5685 def acquire_console(self, vm_uuid=None):
5686 if vm_uuid is None:
5687 return None
5688
5689 if self.client._session:
5690 headers = {
5691 "Accept": "application/*+xml;version=" + API_VERSION,
5692 "x-vcloud-authorization": self.client._session.headers[
5693 "x-vcloud-authorization"
5694 ],
5695 }
5696 vm_dict = self.get_vapp_details_rest(vapp_uuid=vm_uuid)
5697 console_dict = vm_dict["acquireTicket"]
5698 console_rest_call = console_dict["href"]
5699
5700 response = self.perform_request(
5701 req_type="POST", url=console_rest_call, headers=headers
5702 )
5703
5704 if response.status_code == 403:
5705 response = self.retry_rest("POST", console_rest_call)
5706
5707 if response.status_code == requests.codes.ok:
5708 return response.text
5709
5710 return None
5711
5712 def modify_vm_disk(self, vapp_uuid, flavor_disk):
5713 """
5714 Method to resize the VM disk to match the flavor disk size.
5715
5716 Args:
5717 vapp_uuid - is vapp identifier.
5718 flavor_disk - disk size as specified in VNFD (flavor)
5719
5720 Returns:
5721 Status of the disk modification (True/False), or None on error
5722 """
5723 status = None
5724 try:
5725 # Flavor disk is in GB convert it into MB
5726 flavor_disk = int(flavor_disk) * 1024
5727 vm_details = self.get_vapp_details_rest(vapp_uuid)
5728
5729 if vm_details:
5730 vm_name = vm_details["name"]
5731 self.logger.info("VM: {} flavor_disk :{}".format(vm_name, flavor_disk))
5732
5733 if vm_details and "vm_virtual_hardware" in vm_details:
5734 vm_disk = int(vm_details["vm_virtual_hardware"]["disk_size"])
5735 disk_edit_href = vm_details["vm_virtual_hardware"]["disk_edit_href"]
5736 self.logger.info("VM: {} VM_disk :{}".format(vm_name, vm_disk))
5737
5738 if flavor_disk > vm_disk:
5739 status = self.modify_vm_disk_rest(disk_edit_href, flavor_disk)
5740 self.logger.info(
5741 "Modify disk of VM {} from {} to {} MB".format(
5742 vm_name, vm_disk, flavor_disk
5743 )
5744 )
5745 else:
5746 status = True
5747 self.logger.info("No need to modify disk of VM {}".format(vm_name))
5748
5749 return status
5750 except Exception as exp:
5751 self.logger.info("Error occurred while modifying disk size {}".format(exp))
5752
5753 def modify_vm_disk_rest(self, disk_href, disk_size):
5754 """
5755 Method to modify the VM disk size through the vCD REST API.
5756
5757 Args:
5758 disk_href - vCD API URL to GET and PUT disk data
5759 disk_size - disk size as specified in VNFD (flavor)
5760
5761 Returns:
5762 True if the disk was resized, False if the task failed, None otherwise
5763 """
5764 if disk_href is None or disk_size is None:
5765 return None
5766
5767 if self.client._session:
5768 headers = {
5769 "Accept": "application/*+xml;version=" + API_VERSION,
5770 "x-vcloud-authorization": self.client._session.headers[
5771 "x-vcloud-authorization"
5772 ],
5773 }
5774 response = self.perform_request(
5775 req_type="GET", url=disk_href, headers=headers
5776 )
5777
5778 if response.status_code == 403:
5779 response = self.retry_rest("GET", disk_href)
5780
5781 if response.status_code != requests.codes.ok:
5782 self.logger.debug(
5783 "GET REST API call {} failed. Return status code {}".format(
5784 disk_href, response.status_code
5785 )
5786 )
5787
5788 return None
5789
5790 try:
5791 lxmlroot_respond = lxmlElementTree.fromstring(response.content)
5792 namespaces = {
5793 prefix: uri for prefix, uri in lxmlroot_respond.nsmap.items() if prefix
5794 }
5795 namespaces["xmlns"] = "http://www.vmware.com/vcloud/v1.5"
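# lxml maps the default namespace to the None prefix, which find() cannot use;
# the filter above drops it, and it is re-registered here under an explicit
# "xmlns" prefix so paths such as "xmlns:Item" resolve.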
5796
5797 for item in lxmlroot_respond.iterfind("xmlns:Item", namespaces):
5798 if item.find("rasd:Description", namespaces).text == "Hard disk":
5799 disk_item = item.find("rasd:HostResource", namespaces)
5800 if disk_item is not None:
5801 disk_item.attrib["{" + namespaces["xmlns"] + "}capacity"] = str(
5802 disk_size
5803 )
5804 break
5805
5806 data = lxmlElementTree.tostring(
5807 lxmlroot_respond, encoding="utf8", method="xml", xml_declaration=True
5808 )
5809
5810 # Send PUT request to modify disk size
5811 headers[
5812 "Content-Type"
5813 ] = "application/vnd.vmware.vcloud.rasdItemsList+xml; charset=ISO-8859-1"
5814
5815 response = self.perform_request(
5816 req_type="PUT", url=disk_href, headers=headers, data=data
5817 )
5818 if response.status_code == 403:
5819 add_headers = {"Content-Type": headers["Content-Type"]}
5820 response = self.retry_rest("PUT", disk_href, add_headers, data)
5821
5822 if response.status_code != 202:
5823 self.logger.debug(
5824 "PUT REST API call {} failed. Return status code {}".format(
5825 disk_href, response.status_code
5826 )
5827 )
5828 else:
5829 modify_disk_task = self.get_task_from_response(response.text)
5830 result = self.client.get_task_monitor().wait_for_success(
5831 task=modify_disk_task
5832 )
5833 if result.get("status") == "success":
5834 return True
5835 else:
5836 return False
5837
5838 return None
5839 except Exception as exp:
5840 self.logger.info(
5841 "Error occurred calling REST API for modifying disk size {}".format(exp)
5842 )
5843
5844 return None
5845
5846 def add_serial_device(self, vapp_uuid):
5847 """
5848 Method to attach a serial device to a VM
5849
5850 Args:
5851 vapp_uuid - uuid of vApp/VM
5852
5853 Returns:
5854 """
5855 self.logger.info("Add serial devices into vApp {}".format(vapp_uuid))
5856 _, content = self.get_vcenter_content()
5857 vm_moref_id = self.get_vm_moref_id(vapp_uuid)
5858
5859 if vm_moref_id:
5860 try:
5861 host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
5862 self.logger.info(
5863 "VM {} is currently on host {}".format(vm_obj, host_obj)
5864 )
5865 if host_obj and vm_obj:
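# Build a reconfigure spec that adds a virtual serial port backed by a network
# URI (the VM listens as a TCP server on port 65500).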
5866 spec = vim.vm.ConfigSpec()
5867 spec.deviceChange = []
5868 serial_spec = vim.vm.device.VirtualDeviceSpec()
5869 serial_spec.operation = "add"
5870 serial_port = vim.vm.device.VirtualSerialPort()
5871 serial_port.yieldOnPoll = True
5872 backing = serial_port.URIBackingInfo()
5873 backing.serviceURI = "tcp://:65500"
5874 backing.direction = "server"
5875 serial_port.backing = backing
5876 serial_spec.device = serial_port
5877 spec.deviceChange.append(serial_spec)
5878 vm_obj.ReconfigVM_Task(spec=spec)
5879 self.logger.info("Adding serial device to VM {}".format(vm_obj))
5880 except vmodl.MethodFault as error:
5881 self.logger.error("Error occurred while adding serial device: {}".format(error))
5882
5883 def add_pci_devices(self, vapp_uuid, pci_devices, vmname_andid):
5884 """
5885 Method to attach pci devices to VM
5886
5887 Args:
5888 vapp_uuid - uuid of vApp/VM
5889 pci_devices - PCI devices information as specified in VNFD (flavor)
5890
5891 Returns:
5892 The status of the add PCI device task, VM object and
5893 vcenter_conect object
5894 """
5895 vm_obj = None
5896 self.logger.info(
5897 "Add pci devices {} into vApp {}".format(pci_devices, vapp_uuid)
5898 )
5899 vcenter_conect, content = self.get_vcenter_content()
5900 vm_moref_id = self.get_vm_moref_id(vapp_uuid)
5901
5902 if vm_moref_id:
5903 try:
5904 no_of_pci_devices = len(pci_devices)
5905 if no_of_pci_devices > 0:
5906 # Get VM and its host
5907 host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
5908 self.logger.info(
5909 "VM {} is currently on host {}".format(vm_obj, host_obj)
5910 )
5911
5912 if host_obj and vm_obj:
5913 # get PCI devices from the host on which the vApp is currently installed
5914 avilable_pci_devices = self.get_pci_devices(
5915 host_obj, no_of_pci_devices
5916 )
5917
5918 if avilable_pci_devices is None:
5919 # find other hosts with active pci devices
5920 (
5921 new_host_obj,
5922 avilable_pci_devices,
5923 ) = self.get_host_and_PCIdevices(content, no_of_pci_devices)
5924
5925 if (
5926 new_host_obj is not None
5927 and avilable_pci_devices is not None
5928 and len(avilable_pci_devices) > 0
5929 ):
5930 # Migrate VM to the host where PCI devices are available
5931 self.logger.info(
5932 "Relocate VM {} on new host {}".format(
5933 vm_obj, new_host_obj
5934 )
5935 )
5936
5937 task = self.relocate_vm(new_host_obj, vm_obj)
5938 if task is not None:
5939 result = self.wait_for_vcenter_task(
5940 task, vcenter_conect
5941 )
5942 self.logger.info(
5943 "Migrate VM status: {}".format(result)
5944 )
5945 host_obj = new_host_obj
5946 else:
5947 self.logger.info(
5948 "Failed to migrate VM {}: relocate task was not created".format(vmname_andid)
5949 )
5950 raise vimconn.VimConnNotFoundException(
5951 "Failed to migrate VM {} to host {}".format(
5952 vmname_andid, new_host_obj
5953 )
5954 )
5955
5956 if (
5957 host_obj is not None
5958 and avilable_pci_devices is not None
5959 and len(avilable_pci_devices) > 0
5960 ):
5961 # Add PCI devices one by one
5962 for pci_device in avilable_pci_devices:
5963 task = self.add_pci_to_vm(host_obj, vm_obj, pci_device)
5964 if task:
5965 status = self.wait_for_vcenter_task(
5966 task, vcenter_conect
5967 )
5968
5969 if status:
5970 self.logger.info(
5971 "Added PCI device {} to VM {}".format(
5972 pci_device, str(vm_obj)
5973 )
5974 )
5975 else:
5976 self.logger.error(
5977 "Fail to add PCI device {} to VM {}".format(
5978 pci_device, str(vm_obj)
5979 )
5980 )
5981
5982 return True, vm_obj, vcenter_conect
5983 else:
5984 self.logger.error(
5985 "Currently there is no host with"
5986 " {} available PCI devices required for VM {}".format(
5987 no_of_pci_devices, vmname_andid
5988 )
5989 )
5990
5991 raise vimconn.VimConnNotFoundException(
5992 "Currently there is no host with {} "
5993 "available PCI devices required for VM {}".format(
5994 no_of_pci_devices, vmname_andid
5995 )
5996 )
5997 else:
5998 self.logger.debug(
5999 "No information about PCI devices {}".format(pci_devices)
6000 )
6001 except vmodl.MethodFault as error:
6002 self.logger.error("Error occurred while adding PCI devices: {}".format(error))
6003
6004 return None, vm_obj, vcenter_conect
6005
6006 def get_vm_obj(self, content, mob_id):
6007 """
6008 Method to get the vSphere VM object associated with a given moref ID
6009 Args:
6010 vapp_uuid - uuid of vApp/VM
6011 content - vCenter content object
6012 mob_id - mob_id of VM
6013
6014 Returns:
6015 VM and host object
6016 """
6017 vm_obj = None
6018 host_obj = None
6019
6020 try:
6021 container = content.viewManager.CreateContainerView(
6022 content.rootFolder, [vim.VirtualMachine], True
6023 )
6024 for vm in container.view:
6025 mobID = vm._GetMoId()
6026
6027 if mobID == mob_id:
6028 vm_obj = vm
6029 host_obj = vm_obj.runtime.host
6030 break
6031 except Exception as exp:
6032 self.logger.error("Error occurred while finding VM object : {}".format(exp))
6033
6034 return host_obj, vm_obj
6035
6036 def get_pci_devices(self, host, need_devices):
6037 """
6038 Method to get the details of pci devices on given host
6039 Args:
6040 host - vSphere host object
6041 need_devices - number of pci devices needed on host
6042
6043 Returns:
6044 array of pci devices
6045 """
6046 all_devices = []
6047 all_device_ids = []
6048 used_devices_ids = []
6049
6050 try:
6051 if host:
6052 pciPassthruInfo = host.config.pciPassthruInfo
6053 pciDevices = host.hardware.pciDevice
6054
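# Collect only the PCI devices that have passthrough enabled on this host.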
6055 for pci_status in pciPassthruInfo:
6056 if pci_status.passthruActive:
6057 for device in pciDevices:
6058 if device.id == pci_status.id:
6059 all_device_ids.append(device.id)
6060 all_devices.append(device)
6061
6062 # check if devices are in use
6063 avalible_devices = all_devices
6064 for vm in host.vm:
6065 if vm.runtime.powerState == vim.VirtualMachinePowerState.poweredOn:
6066 vm_devices = vm.config.hardware.device
6067 for device in vm_devices:
6068 if type(device) is vim.vm.device.VirtualPCIPassthrough:
6069 if device.backing.id in all_device_ids:
6070 for use_device in avalible_devices:
6071 if use_device.id == device.backing.id:
6072 avalible_devices.remove(use_device)
6073
6074 used_devices_ids.append(device.backing.id)
6075 self.logger.debug(
6076 "Device {} from devices {} "
6077 "is in use".format(device.backing.id, device)
6078 )
6079 if len(avalible_devices) < need_devices:
6080 self.logger.debug(
6081 "Host {} does not have {} active PCI devices".format(
6082 host, need_devices
6083 )
6084 )
6085 self.logger.debug(
6086 "Found only {} devices: {}".format(
6087 len(avalible_devices), avalible_devices
6088 )
6089 )
6090
6091 return None
6092 else:
6093 required_devices = avalible_devices[:need_devices]
6094 self.logger.info(
6095 "Found {} PCI devices on host {} but required only {}".format(
6096 len(avalible_devices), host, need_devices
6097 )
6098 )
6099 self.logger.info(
6100 "Returning {} devices: {}".format(need_devices, required_devices)
6101 )
6102
6103 return required_devices
6104 except Exception as exp:
6105 self.logger.error(
6106 "Error {} occurred while finding pci devices on host: {}".format(
6107 exp, host
6108 )
6109 )
6110
6111 return None
6112
6113 def get_host_and_PCIdevices(self, content, need_devices):
6114 """
6115 Method to get PCI device information across all hosts
6116
6117 Args:
6118 content - vCenter content object
6119 need_devices - number of pci devices needed on host
6120
6121 Returns:
6122 array of pci devices and host object
6123 """
6124 host_obj = None
6125 pci_device_objs = None
6126
6127 try:
6128 if content:
6129 container = content.viewManager.CreateContainerView(
6130 content.rootFolder, [vim.HostSystem], True
6131 )
6132 for host in container.view:
6133 devices = self.get_pci_devices(host, need_devices)
6134
6135 if devices:
6136 host_obj = host
6137 pci_device_objs = devices
6138 break
6139 except Exception as exp:
6140 self.logger.error(
6141 "Error {} occurred while finding pci devices on host: {}".format(
6142 exp, host_obj
6143 )
6144 )
6145
6146 return host_obj, pci_device_objs
6147
6148 def relocate_vm(self, dest_host, vm):
6149 """
6150 Method to relocate a VM to a new host
6151
6152 Args:
6153 dest_host - vSphere host object
6154 vm - vSphere VM object
6155
6156 Returns:
6157 task object
6158 """
6159 task = None
6160
6161 try:
6162 relocate_spec = vim.vm.RelocateSpec(host=dest_host)
6163 task = vm.Relocate(relocate_spec)
6164 self.logger.info(
6165 "Migrating {} to destination host {}".format(vm, dest_host)
6166 )
6167 except Exception as exp:
6168 self.logger.error(
6169 "Error occurred while relocating VM {} to new host {}: {}".format(
6170 vm, dest_host, exp
6171 )
6172 )
6173
6174 return task
6175
6176 def wait_for_vcenter_task(self, task, actionName="job", hideResult=False):
6177 """
6178 Waits and provides updates on a vSphere task
6179 """
6180 while task.info.state == vim.TaskInfo.State.running:
6181 time.sleep(2)
6182
6183 if task.info.state == vim.TaskInfo.State.success:
6184 if task.info.result is not None and not hideResult:
6185 self.logger.info(
6186 "{} completed successfully, result: {}".format(
6187 actionName, task.info.result
6188 )
6189 )
6190 else:
6191 self.logger.info("Task {} completed successfully.".format(actionName))
6192 else:
6193 self.logger.error(
6194 "{} did not complete successfully: {} ".format(
6195 actionName, task.info.error
6196 )
6197 )
6198
6199 return task.info.result
6200
6201 def add_pci_to_vm(self, host_object, vm_object, host_pci_dev):
6202 """
6203 Method to add pci device in given VM
6204
6205 Args:
6206 host_object - vSphere host object
6207 vm_object - vSphere VM object
6208 host_pci_dev - host_pci_dev must be one of the devices from the
6209 host_object.hardware.pciDevice list
6210 which is configured as a PCI passthrough device
6211
6212 Returns:
6213 task object
6214 """
6215 task = None
6216
6217 if vm_object and host_object and host_pci_dev:
6218 try:
6219 # Add PCI device to VM
6220 pci_passthroughs = vm_object.environmentBrowser.QueryConfigTarget(
6221 host=None
6222 ).pciPassthrough
6223 systemid_by_pciid = {
6224 item.pciDevice.id: item.systemId for item in pci_passthroughs
6225 }
6226
6227 if host_pci_dev.id not in systemid_by_pciid:
6228 self.logger.error(
6229 "Device {} is not a passthrough device ".format(host_pci_dev)
6230 )
6231 return None
6232
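# The backing's deviceId is passed as a 16-bit hex string, together with the
# systemId that the host reports for that passthrough device.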
6233 deviceId = hex(host_pci_dev.deviceId % 2**16).lstrip("0x")
6234 backing = vim.VirtualPCIPassthroughDeviceBackingInfo(
6235 deviceId=deviceId,
6236 id=host_pci_dev.id,
6237 systemId=systemid_by_pciid[host_pci_dev.id],
6238 vendorId=host_pci_dev.vendorId,
6239 deviceName=host_pci_dev.deviceName,
6240 )
6241
6242 hba_object = vim.VirtualPCIPassthrough(key=-100, backing=backing)
6243 new_device_config = vim.VirtualDeviceConfigSpec(device=hba_object)
6244 new_device_config.operation = "add"
6245 vmConfigSpec = vim.vm.ConfigSpec()
6246 vmConfigSpec.deviceChange = [new_device_config]
6247 task = vm_object.ReconfigVM_Task(spec=vmConfigSpec)
6248 self.logger.info(
6249 "Adding PCI device {} into VM {} from host {} ".format(
6250 host_pci_dev, vm_object, host_object
6251 )
6252 )
6253 except Exception as exp:
6254 self.logger.error(
6255 "Error occurred while adding PCI device {} to VM {}: {}".format(
6256 host_pci_dev, vm_object, exp
6257 )
6258 )
6259
6260 return task
6261
6262 def get_vm_vcenter_info(self):
6263 """
6264 Method to get vCenter connection details
6265
6266 Args:
6267 None
6268
6269 Returns:
6270 Dict with vCenter connection details (IP, port, user, password)
6271 """
6272 vm_vcenter_info = {}
6273
6274 if self.vcenter_ip is not None:
6275 vm_vcenter_info["vm_vcenter_ip"] = self.vcenter_ip
6276 else:
6277 raise vimconn.VimConnException(
6278 message="vCenter IP is not provided."
6279 " Please provide vCenter IP while attaching datacenter "
6280 "to tenant in --config"
6281 )
6282
6283 if self.vcenter_port is not None:
6284 vm_vcenter_info["vm_vcenter_port"] = self.vcenter_port
6285 else:
6286 raise vimconn.VimConnException(
6287 message="vCenter port is not provided."
6288 " Please provide vCenter port while attaching datacenter "
6289 "to tenant in --config"
6290 )
6291
6292 if self.vcenter_user is not None:
6293 vm_vcenter_info["vm_vcenter_user"] = self.vcenter_user
6294 else:
6295 raise vimconn.VimConnException(
6296 message="vCenter user is not provided."
6297 " Please provide vCenter user while attaching datacenter "
6298 "to tenant in --config"
6299 )
6300
6301 if self.vcenter_password is not None:
6302 vm_vcenter_info["vm_vcenter_password"] = self.vcenter_password
6303 else:
6304 raise vimconn.VimConnException(
6305 message="vCenter user password is not provided."
6306 " Please provide vCenter user password while attaching datacenter "
6307 "to tenant in --config"
6308 )
6309
6310 return vm_vcenter_info
6311
6312 def get_vm_pci_details(self, vmuuid):
6313 """
6314 Method to get VM PCI device details from vCenter
6315
6316 Args:
6317 vmuuid - UUID of the VM
6318
6319 Returns:
6320 dict of PCI devices attached to the VM
6321
6322 """
6323 vm_pci_devices_info = {}
6324
6325 try:
6326 _, content = self.get_vcenter_content()
6327 vm_moref_id = self.get_vm_moref_id(vmuuid)
6328 if vm_moref_id:
6329 # Get VM and its host
6330 if content:
6331 host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
6332 if host_obj and vm_obj:
6333 vm_pci_devices_info["host_name"] = host_obj.name
6334 vm_pci_devices_info["host_ip"] = host_obj.config.network.vnic[
6335 0
6336 ].spec.ip.ipAddress
6337
6338 for device in vm_obj.config.hardware.device:
6339 if type(device) == vim.vm.device.VirtualPCIPassthrough:
6340 device_details = {
6341 "devide_id": device.backing.id,
6342 "pciSlotNumber": device.slotInfo.pciSlotNumber,
6343 }
6344 vm_pci_devices_info[
6345 device.deviceInfo.label
6346 ] = device_details
6347 else:
6348 self.logger.error(
6349 "Cannot connect to vCenter while getting "
6350 "PCI device information"
6351 )
6352
6353 return vm_pci_devices_info
6354 except Exception as exp:
6355 self.logger.error(
6356 "Error occurred while getting VM information" " for VM : {}".format(exp)
6357 )
6358
6359 raise vimconn.VimConnException(message=exp)
6360
6361 def reserve_memory_for_all_vms(self, vapp, memory_mb):
6362 """
6363 Method to reserve memory for all VMs
6364 Args :
6365 vapp - VApp
6366 memory_mb - Memory in MB
6367 Returns:
6368 None
6369 """
6370 self.logger.info("Reserve memory for all VMs")
6371
6372 for vms in vapp.get_all_vms():
6373 vm_id = vms.get("id").split(":")[-1]
6374 url_rest_call = "{}/api/vApp/vm-{}/virtualHardwareSection/memory".format(
6375 self.url, vm_id
6376 )
6377 headers = {
6378 "Accept": "application/*+xml;version=" + API_VERSION,
6379 "x-vcloud-authorization": self.client._session.headers[
6380 "x-vcloud-authorization"
6381 ],
6382 }
6383 headers["Content-Type"] = "application/vnd.vmware.vcloud.rasdItem+xml"
6384 response = self.perform_request(
6385 req_type="GET", url=url_rest_call, headers=headers
6386 )
6387
6388 if response.status_code == 403:
6389 response = self.retry_rest("GET", url_rest_call)
6390
6391 if response.status_code != 200:
6392 self.logger.error(
6393 "REST call {} failed reason : {} "
6394 "status code : {}".format(
6395 url_rest_call, response.text, response.status_code
6396 )
6397 )
6398 raise vimconn.VimConnException(
6399 "reserve_memory_for_all_vms : Failed to get " "memory"
6400 )
6401
6402 bytexml = bytes(bytearray(response.text, encoding="utf-8"))
6403 contentelem = lxmlElementTree.XML(bytexml)
6404 namespaces = {
6405 prefix: uri for prefix, uri in contentelem.nsmap.items() if prefix
6406 }
6407 namespaces["xmlns"] = "http://www.vmware.com/vcloud/v1.5"
6408
6409 # Find the reservation element in the response
6410 memelem_list = contentelem.findall(".//rasd:Reservation", namespaces)
6411 for memelem in memelem_list:
6412 memelem.text = str(memory_mb)
6413
6414 newdata = lxmlElementTree.tostring(contentelem, pretty_print=True)
6415
6416 response = self.perform_request(
6417 req_type="PUT", url=url_rest_call, headers=headers, data=newdata
6418 )
6419
6420 if response.status_code == 403:
6421 add_headers = {"Content-Type": headers["Content-Type"]}
6422 response = self.retry_rest("PUT", url_rest_call, add_headers, newdata)
6423
6424 if response.status_code != 202:
6425 self.logger.error(
6426 "REST call {} failed reason : {} "
6427 "status code : {} ".format(
6428 url_rest_call, response.text, response.status_code
6429 )
6430 )
6431 raise vimconn.VimConnException(
6432 "reserve_memory_for_all_vms : Failed to update "
6433 "virtual hardware memory section"
6434 )
6435 else:
6436 mem_task = self.get_task_from_response(response.text)
6437 result = self.client.get_task_monitor().wait_for_success(task=mem_task)
6438
6439 if result.get("status") == "success":
6440 self.logger.info(
6441 "reserve_memory_for_all_vms(): VM {} succeeded ".format(vm_id)
6442 )
6443 else:
6444 self.logger.error(
6445 "reserve_memory_for_all_vms(): VM {} failed ".format(vm_id)
6446 )
6447
6448 def connect_vapp_to_org_vdc_network(self, vapp_id, net_name):
6449 """
6450 Configure VApp network config with org vdc network
6451 Args :
6452 vapp_id - vApp identifier, net_name - org VDC network name
6453 Returns:
6454 None
6455 """
6456
6457 self.logger.info(
6458 "Connecting vapp {} to org vdc network {}".format(vapp_id, net_name)
6459 )
6460
6461 url_rest_call = "{}/api/vApp/vapp-{}/networkConfigSection/".format(
6462 self.url, vapp_id
6463 )
6464
6465 headers = {
6466 "Accept": "application/*+xml;version=" + API_VERSION,
6467 "x-vcloud-authorization": self.client._session.headers[
6468 "x-vcloud-authorization"
6469 ],
6470 }
6471 response = self.perform_request(
6472 req_type="GET", url=url_rest_call, headers=headers
6473 )
6474
6475 if response.status_code == 403:
6476 response = self.retry_rest("GET", url_rest_call)
6477
6478 if response.status_code != 200:
6479 self.logger.error(
6480 "REST call {} failed reason : {} "
6481 "status code : {}".format(
6482 url_rest_call, response.text, response.status_code
6483 )
6484 )
6485 raise vimconn.VimConnException(
6486 "connect_vapp_to_org_vdc_network : Failed to get "
6487 "network config section"
6488 )
6489
6490 data = response.text
6491 headers[
6492 "Content-Type"
6493 ] = "application/vnd.vmware.vcloud.networkConfigSection+xml"
6494 net_id = self.get_network_id_by_name(net_name)
6495 if not net_id:
6496 raise vimconn.VimConnException(
6497 "connect_vapp_to_org_vdc_network : Failed to find " "existing network"
6498 )
6499
6500 bytexml = bytes(bytearray(data, encoding="utf-8"))
6501 newelem = lxmlElementTree.XML(bytexml)
6502 namespaces = {prefix: uri for prefix, uri in newelem.nsmap.items() if prefix}
6503 namespaces["xmlns"] = "http://www.vmware.com/vcloud/v1.5"
6504 nwcfglist = newelem.findall(".//xmlns:NetworkConfig", namespaces)
6505
6506 # VCD 9.7 returns an incorrect parentnetwork element. Fix it before PUT operation
6507 parentnetworklist = newelem.findall(".//xmlns:ParentNetwork", namespaces)
6508 if parentnetworklist:
6509 for pn in parentnetworklist:
6510 if "href" not in pn.keys():
6511 id_val = pn.get("id")
6512 href_val = "{}/api/network/{}".format(self.url, id_val)
6513 pn.set("href", href_val)
6514
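# Append a new NetworkConfig entry that bridges the vApp network to the
# selected org VDC network.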
6515 newstr = """<NetworkConfig networkName="{}">
6516 <Configuration>
6517 <ParentNetwork href="{}/api/network/{}"/>
6518 <FenceMode>bridged</FenceMode>
6519 </Configuration>
6520 </NetworkConfig>
6521 """.format(
6522 net_name, self.url, net_id
6523 )
6524 newcfgelem = lxmlElementTree.fromstring(newstr)
6525 if nwcfglist:
6526 nwcfglist[0].addnext(newcfgelem)
6527
6528 newdata = lxmlElementTree.tostring(newelem, pretty_print=True)
6529
6530 response = self.perform_request(
6531 req_type="PUT", url=url_rest_call, headers=headers, data=newdata
6532 )
6533
6534 if response.status_code == 403:
6535 add_headers = {"Content-Type": headers["Content-Type"]}
6536 response = self.retry_rest("PUT", url_rest_call, add_headers, newdata)
6537
6538 if response.status_code != 202:
6539 self.logger.error(
6540 "REST call {} failed reason : {} "
6541 "status code : {} ".format(
6542 url_rest_call, response.text, response.status_code
6543 )
6544 )
6545 raise vimconn.VimConnException(
6546 "connect_vapp_to_org_vdc_network : Failed to update "
6547 "network config section"
6548 )
6549 else:
6550 vapp_task = self.get_task_from_response(response.text)
6551 result = self.client.get_task_monitor().wait_for_success(task=vapp_task)
6552 if result.get("status") == "success":
6553 self.logger.info(
6554 "connect_vapp_to_org_vdc_network(): Vapp {} connected to "
6555 "network {}".format(vapp_id, net_name)
6556 )
6557 else:
6558 self.logger.error(
6559 "connect_vapp_to_org_vdc_network(): Vapp {} failed to "
6560 "connect to network {}".format(vapp_id, net_name)
6561 )
6562
6563 def remove_primary_network_adapter_from_all_vms(self, vapp):
6564 """
6565 Method to remove the primary network adapter from all VMs in the vApp
6566 Args :
6567 vapp - VApp
6568 Returns:
6569 None
6570 """
6571 self.logger.info("Removing network adapter from all VMs")
6572
6573 for vms in vapp.get_all_vms():
6574 vm_id = vms.get("id").split(":")[-1]
6575
6576 url_rest_call = "{}/api/vApp/vm-{}/networkConnectionSection/".format(
6577 self.url, vm_id
6578 )
6579
6580 headers = {
6581 "Accept": "application/*+xml;version=" + API_VERSION,
6582 "x-vcloud-authorization": self.client._session.headers[
6583 "x-vcloud-authorization"
6584 ],
6585 }
6586 response = self.perform_request(
6587 req_type="GET", url=url_rest_call, headers=headers
6588 )
6589
6590 if response.status_code == 403:
6591 response = self.retry_rest("GET", url_rest_call)
6592
6593 if response.status_code != 200:
6594 self.logger.error(
6595 "REST call {} failed reason : {} "
6596 "status code : {}".format(
6597 url_rest_call, response.text, response.status_code
6598 )
6599 )
6600 raise vimconn.VimConnException(
6601 "remove_primary_network_adapter : Failed to get "
6602 "network connection section"
6603 )
6604
6605 data = response.text
6606 data = data.split('<Link rel="edit"')[0]
6607
6608 headers[
6609 "Content-Type"
6610 ] = "application/vnd.vmware.vcloud.networkConnectionSection+xml"
6611
6612 newdata = """<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
6613 <NetworkConnectionSection xmlns="http://www.vmware.com/vcloud/v1.5"
6614 xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1"
6615 xmlns:vssd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData"
6616 xmlns:common="http://schemas.dmtf.org/wbem/wscim/1/common"
6617 xmlns:rasd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData"
6618 xmlns:vmw="http://www.vmware.com/schema/ovf"
6619 xmlns:ovfenv="http://schemas.dmtf.org/ovf/environment/1"
6620 xmlns:vmext="http://www.vmware.com/vcloud/extension/v1.5"
6621 xmlns:ns9="http://www.vmware.com/vcloud/versions"
6622 href="{url}" type="application/vnd.vmware.vcloud.networkConnectionSection+xml"
6623 ovf:required="false">
6624 <ovf:Info>Specifies the available VM network connections</ovf:Info>
6625 <PrimaryNetworkConnectionIndex>0</PrimaryNetworkConnectionIndex>
6626 <Link rel="edit" href="{url}"
6627 type="application/vnd.vmware.vcloud.networkConnectionSection+xml"/>
6628 </NetworkConnectionSection>""".format(
6629 url=url_rest_call
6630 )
6631 response = self.perform_request(
6632 req_type="PUT", url=url_rest_call, headers=headers, data=newdata
6633 )
6634
6635 if response.status_code == 403:
6636 add_headers = {"Content-Type": headers["Content-Type"]}
6637 response = self.retry_rest("PUT", url_rest_call, add_headers, newdata)
6638
6639 if response.status_code != 202:
6640 self.logger.error(
6641 "REST call {} failed reason : {} "
6642 "status code : {} ".format(
6643 url_rest_call, response.text, response.status_code
6644 )
6645 )
6646 raise vimconn.VimConnException(
6647 "remove_primary_network_adapter : Failed to update "
6648 "network connection section"
6649 )
6650 else:
6651 nic_task = self.get_task_from_response(response.text)
6652 result = self.client.get_task_monitor().wait_for_success(task=nic_task)
6653 if result.get("status") == "success":
6654 self.logger.info(
6655 "remove_primary_network_adapter(): removed primary "
6656 "network adapter from VM {}".format(vm_id)
6657 )
6658 else:
6659 self.logger.error(
6660 "remove_primary_network_adapter(): failed to remove "
6661 "primary network adapter from VM {}".format(vm_id)
6662 )
6663
6664 def add_network_adapter_to_vms(
6665 self, vapp, network_name, primary_nic_index, nicIndex, net, nic_type=None
6666 ):
6667 """
6668 Method to add network adapter type to vm
6669 Args :
6670 network_name - name of network
6671 primary_nic_index - int value for primary nic index
6672 nicIndex - int value for nic index
6673 nic_type - network adapter model to attach to the VM
6674 Returns:
6675 None
6676 """
6677
6678 self.logger.info(
6679 "Add network adapter to VM: network_name {} nicIndex {} nic_type {}".format(
6680 network_name, nicIndex, nic_type
6681 )
6682 )
6683 try:
6684 ip_address = None
6685 floating_ip = False
6686 mac_address = None
6687 if "floating_ip" in net:
6688 floating_ip = net["floating_ip"]
6689
6690 # Stub for ip_address feature
6691 if "ip_address" in net:
6692 ip_address = net["ip_address"]
6693
6694 if "mac_address" in net:
6695 mac_address = net["mac_address"]
6696
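# Select the vCD IP allocation mode: POOL when a floating IP was requested,
# MANUAL when an explicit IP address was given, DHCP otherwise.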
6697 if floating_ip:
6698 allocation_mode = "POOL"
6699 elif ip_address:
6700 allocation_mode = "MANUAL"
6701 else:
6702 allocation_mode = "DHCP"
6703
6704 if not nic_type:
6705 for vms in vapp.get_all_vms():
6706 vm_id = vms.get("id").split(":")[-1]
6707
6708 url_rest_call = (
6709 "{}/api/vApp/vm-{}/networkConnectionSection/".format(
6710 self.url, vm_id
6711 )
6712 )
6713
6714 headers = {
6715 "Accept": "application/*+xml;version=" + API_VERSION,
6716 "x-vcloud-authorization": self.client._session.headers[
6717 "x-vcloud-authorization"
6718 ],
6719 }
6720 response = self.perform_request(
6721 req_type="GET", url=url_rest_call, headers=headers
6722 )
6723
6724 if response.status_code == 403:
6725 response = self.retry_rest("GET", url_rest_call)
6726
6727 if response.status_code != 200:
6728 self.logger.error(
6729 "REST call {} failed reason : {} "
6730 "status code : {}".format(
6731 url_rest_call, response.text, response.status_code
6732 )
6733 )
6734 raise vimconn.VimConnException(
6735 "add_network_adapter_to_vms : Failed to get "
6736 "network connection section"
6737 )
6738
6739 data = response.text
6740 data = data.split('<Link rel="edit"')[0]
6741 if "<PrimaryNetworkConnectionIndex>" not in data:
6742 self.logger.debug("add_network_adapter PrimaryNIC not in data")
6743 item = """<PrimaryNetworkConnectionIndex>{}</PrimaryNetworkConnectionIndex>
6744 <NetworkConnection network="{}">
6745 <NetworkConnectionIndex>{}</NetworkConnectionIndex>
6746 <IsConnected>true</IsConnected>
6747 <IpAddressAllocationMode>{}</IpAddressAllocationMode>
6748 </NetworkConnection>""".format(
6749 primary_nic_index, network_name, nicIndex, allocation_mode
6750 )
6751
6752 # Stub for ip_address feature
6753 if ip_address:
6754 ip_tag = "<IpAddress>{}</IpAddress>".format(ip_address)
6755 item = item.replace(
6756 "</NetworkConnectionIndex>\n",
6757 "</NetworkConnectionIndex>\n{}\n".format(ip_tag),
6758 )
6759
6760 if mac_address:
6761 mac_tag = "<MACAddress>{}</MACAddress>".format(mac_address)
6762 item = item.replace(
6763 "</IsConnected>\n",
6764 "</IsConnected>\n{}\n".format(mac_tag),
6765 )
6766
6767 data = data.replace(
6768 "</ovf:Info>\n",
6769 "</ovf:Info>\n{}\n</NetworkConnectionSection>".format(item),
6770 )
6771 else:
6772 self.logger.debug("add_network_adapter PrimaryNIC in data")
6773 new_item = """<NetworkConnection network="{}">
6774 <NetworkConnectionIndex>{}</NetworkConnectionIndex>
6775 <IsConnected>true</IsConnected>
6776 <IpAddressAllocationMode>{}</IpAddressAllocationMode>
6777 </NetworkConnection>""".format(
6778 network_name, nicIndex, allocation_mode
6779 )
6780
6781 # Stub for ip_address feature
6782 if ip_address:
6783 ip_tag = "<IpAddress>{}</IpAddress>".format(ip_address)
6784 new_item = new_item.replace(
6785 "</NetworkConnectionIndex>\n",
6786 "</NetworkConnectionIndex>\n{}\n".format(ip_tag),
6787 )
6788
6789 if mac_address:
6790 mac_tag = "<MACAddress>{}</MACAddress>".format(mac_address)
6791 new_item = new_item.replace(
6792 "</IsConnected>\n",
6793 "</IsConnected>\n{}\n".format(mac_tag),
6794 )
6795
6796 data = data + new_item + "</NetworkConnectionSection>"
6797
6798 headers[
6799 "Content-Type"
6800 ] = "application/vnd.vmware.vcloud.networkConnectionSection+xml"
6801
6802 response = self.perform_request(
6803 req_type="PUT", url=url_rest_call, headers=headers, data=data
6804 )
6805
6806 if response.status_code == 403:
6807 add_headers = {"Content-Type": headers["Content-Type"]}
6808 response = self.retry_rest(
6809 "PUT", url_rest_call, add_headers, data
6810 )
6811
6812 if response.status_code != 202:
6813 self.logger.error(
6814 "REST call {} failed reason : {} "
6815 "status code : {} ".format(
6816 url_rest_call, response.text, response.status_code
6817 )
6818 )
6819 raise vimconn.VimConnException(
6820 "add_network_adapter_to_vms : Failed to update "
6821 "network connection section"
6822 )
6823 else:
6824 nic_task = self.get_task_from_response(response.text)
6825 result = self.client.get_task_monitor().wait_for_success(
6826 task=nic_task
6827 )
6828
6829 if result.get("status") == "success":
6830 self.logger.info(
6831 "add_network_adapter_to_vms(): VM {} connected to "
6832 "default NIC type".format(vm_id)
6833 )
6834 else:
6835 self.logger.error(
6836 "add_network_adapter_to_vms(): VM {} failed to "
6837 "connect NIC type".format(vm_id)
6838 )
6839 else:
6840 for vms in vapp.get_all_vms():
6841 vm_id = vms.get("id").split(":")[-1]
6842
6843 url_rest_call = (
6844 "{}/api/vApp/vm-{}/networkConnectionSection/".format(
6845 self.url, vm_id
6846 )
6847 )
6848
6849 headers = {
6850 "Accept": "application/*+xml;version=" + API_VERSION,
6851 "x-vcloud-authorization": self.client._session.headers[
6852 "x-vcloud-authorization"
6853 ],
6854 }
6855 response = self.perform_request(
6856 req_type="GET", url=url_rest_call, headers=headers
6857 )
6858
6859 if response.status_code == 403:
6860 response = self.retry_rest("GET", url_rest_call)
6861
6862 if response.status_code != 200:
6863 self.logger.error(
6864 "REST call {} failed reason : {} "
6865 "status code : {}".format(
6866 url_rest_call, response.text, response.status_code
6867 )
6868 )
6869 raise vimconn.VimConnException(
6870 "add_network_adapter_to_vms : Failed to get "
6871 "network connection section"
6872 )
6873 data = response.text
6874 data = data.split('<Link rel="edit"')[0]
6875 vcd_netadapter_type = nic_type
6876
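# Both SR-IOV and VF requests map to vCD's SRIOVETHERNETCARD adapter type.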
6877 if nic_type in ["SR-IOV", "VF"]:
6878 vcd_netadapter_type = "SRIOVETHERNETCARD"
6879
6880 if "<PrimaryNetworkConnectionIndex>" not in data:
6881 self.logger.debug(
6882 "add_network_adapter PrimaryNIC not in data nic_type {}".format(
6883 nic_type
6884 )
6885 )
6886 item = """<PrimaryNetworkConnectionIndex>{}</PrimaryNetworkConnectionIndex>
6887 <NetworkConnection network="{}">
6888 <NetworkConnectionIndex>{}</NetworkConnectionIndex>
6889 <IsConnected>true</IsConnected>
6890 <IpAddressAllocationMode>{}</IpAddressAllocationMode>
6891 <NetworkAdapterType>{}</NetworkAdapterType>
6892 </NetworkConnection>""".format(
6893 primary_nic_index,
6894 network_name,
6895 nicIndex,
6896 allocation_mode,
6897 vcd_netadapter_type,
6898 )
6899
6900 # Stub for ip_address feature
6901 if ip_address:
6902 ip_tag = "<IpAddress>{}</IpAddress>".format(ip_address)
6903 item = item.replace(
6904 "</NetworkConnectionIndex>\n",
6905 "</NetworkConnectionIndex>\n{}\n".format(ip_tag),
6906 )
6907
6908 if mac_address:
6909 mac_tag = "<MACAddress>{}</MACAddress>".format(mac_address)
6910 item = item.replace(
6911 "</IsConnected>\n",
6912 "</IsConnected>\n{}\n".format(mac_tag),
6913 )
6914
6915 data = data.replace(
6916 "</ovf:Info>\n",
6917 "</ovf:Info>\n{}\n</NetworkConnectionSection>".format(item),
6918 )
6919 else:
6920 self.logger.debug(
6921 "add_network_adapter PrimaryNIC in data nic_type {}".format(
6922 nic_type
6923 )
6924 )
6925 new_item = """<NetworkConnection network="{}">
6926 <NetworkConnectionIndex>{}</NetworkConnectionIndex>
6927 <IsConnected>true</IsConnected>
6928 <IpAddressAllocationMode>{}</IpAddressAllocationMode>
6929 <NetworkAdapterType>{}</NetworkAdapterType>
6930 </NetworkConnection>""".format(
6931 network_name, nicIndex, allocation_mode, vcd_netadapter_type
6932 )
6933
6934 # Stub for ip_address feature
6935 if ip_address:
6936 ip_tag = "<IpAddress>{}</IpAddress>".format(ip_address)
6937 new_item = new_item.replace(
6938 "</NetworkConnectionIndex>\n",
6939 "</NetworkConnectionIndex>\n{}\n".format(ip_tag),
6940 )
6941
6942 if mac_address:
6943 mac_tag = "<MACAddress>{}</MACAddress>".format(mac_address)
6944 new_item = new_item.replace(
6945 "</IsConnected>\n",
6946 "</IsConnected>\n{}\n".format(mac_tag),
6947 )
6948
6949 data = data + new_item + "</NetworkConnectionSection>"
6950
6951 headers[
6952 "Content-Type"
6953 ] = "application/vnd.vmware.vcloud.networkConnectionSection+xml"
6954
6955 response = self.perform_request(
6956 req_type="PUT", url=url_rest_call, headers=headers, data=data
6957 )
6958
6959 if response.status_code == 403:
6960 add_headers = {"Content-Type": headers["Content-Type"]}
6961 response = self.retry_rest(
6962 "PUT", url_rest_call, add_headers, data
6963 )
6964
6965 if response.status_code != 202:
6966 self.logger.error(
6967 "REST call {} failed reason : {} "
6968 "status code : {}".format(
6969 url_rest_call, response.text, response.status_code
6970 )
6971 )
6972 raise vimconn.VimConnException(
6973 "add_network_adapter_to_vms : Failed to update "
6974 "network connection section"
6975 )
6976 else:
6977 nic_task = self.get_task_from_response(response.text)
6978 result = self.client.get_task_monitor().wait_for_success(
6979 task=nic_task
6980 )
6981
6982 if result.get("status") == "success":
6983 self.logger.info(
6984 "add_network_adapter_to_vms(): VM {} "
6985 "connected to NIC type {}".format(vm_id, nic_type)
6986 )
6987 else:
6988 self.logger.error(
6989 "add_network_adapter_to_vms(): VM {} "
6990 "failed to connect NIC type {}".format(vm_id, nic_type)
6991 )
6992 except Exception as exp:
6993 self.logger.error(
6994 "add_network_adapter_to_vms() : exception occurred "
6995 "while adding Network adapter"
6996 )
6997
6998 raise vimconn.VimConnException(message=exp)
6999
7000 def set_numa_affinity(self, vmuuid, paired_threads_id):
7001 """
7002 Method to assign NUMA affinity in VM configuration parameters
7003 Args :
7004 vmuuid - vm uuid
7005 paired_threads_id - one or more virtual processor
7006 numbers
7007 Returns:
7008 None
7009 """
7010 try:
7011 vcenter_conect, content = self.get_vcenter_content()
7012 vm_moref_id = self.get_vm_moref_id(vmuuid)
7013 _, vm_obj = self.get_vm_obj(content, vm_moref_id)
7014
7015 if vm_obj:
7016 config_spec = vim.vm.ConfigSpec()
7017 config_spec.extraConfig = []
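# NUMA affinity is applied as the "numa.nodeAffinity" VMX extra-config option
# through a VM reconfigure task.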
7018 opt = vim.option.OptionValue()
7019 opt.key = "numa.nodeAffinity"
7020 opt.value = str(paired_threads_id)
7021 config_spec.extraConfig.append(opt)
7022 task = vm_obj.ReconfigVM_Task(config_spec)
7023
7024 if task:
7025 self.wait_for_vcenter_task(task, vcenter_conect)
7026 extra_config = vm_obj.config.extraConfig
7027 flag = False
7028
7029 for opts in extra_config:
7030 if "numa.nodeAffinity" in opts.key:
7031 flag = True
7032 self.logger.info(
7033 "set_numa_affinity: Successfully assigned numa affinity "
7034 "value {} for vm {}".format(opt.value, vm_obj)
7035 )
7036
7037 if flag:
7038 return
7039 else:
7040 self.logger.error("set_numa_affinity: Failed to assign numa affinity")
7041 except Exception as exp:
7042 self.logger.error(
7043 "set_numa_affinity : exception occurred while setting numa affinity "
7044 "for VM {} : {}".format(vm_obj, vm_moref_id)
7045 )
7046
7047 raise vimconn.VimConnException(
7048 "set_numa_affinity : Error {} failed to assign numa "
7049 "affinity".format(exp)
7050 )
7051
7052 def cloud_init(self, vapp, cloud_config):
7053 """
7054 Method to inject ssh-key
7055 vapp - vapp object
7056 cloud_config a dictionary with:
7057 'key-pairs': (optional) list of strings with the public key to be inserted to the default user
7058 'users': (optional) list of users to be inserted, each item is a dict with:
7059 'name': (mandatory) user name,
7060 'key-pairs': (optional) list of strings with the public key to be inserted to the user
7061 'user-data': (optional) can be a string with the text script to be passed directly to cloud-init,
7062 or a list of strings, each one contains a script to be passed, usually with a MIMEmultipart file
7063 'config-files': (optional). List of files to be transferred. Each item is a dict with:
7064 'dest': (mandatory) string with the destination absolute path
7065 'encoding': (optional, by default text). Can be one of:
7066 'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
7067 'content' (mandatory): string with the content of the file
7068 'permissions': (optional) string with file permissions, typically octal notation '0644'
7069 'owner': (optional) file owner, string with the format 'owner:group'
7070 'boot-data-drive': boolean to indicate if user-data must be passed using a boot drive (hard disk)
7071 """
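# Illustrative example of the expected cloud_config structure (values are placeholders):
#   cloud_config = {
#       "key-pairs": ["ssh-rsa AAAA... user@host"],
#       "users": [{"name": "osm", "key-pairs": ["ssh-rsa AAAA... osm@host"]}],
#   }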
7072 try:
7073 if not isinstance(cloud_config, dict):
7074 raise Exception(
7075 "cloud_init : parameter cloud_config is not a dictionary"
7076 )
7077 else:
7078 key_pairs = []
7079 userdata = []
7080
7081 if "key-pairs" in cloud_config:
7082 key_pairs = cloud_config["key-pairs"]
7083
7084 if "users" in cloud_config:
7085 userdata = cloud_config["users"]
7086
7087 self.logger.debug("cloud_init : Guest OS customization started...")
7088 customize_script = self.format_script(
7089 key_pairs=key_pairs, users_list=userdata
7090 )
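# The script is embedded in the XML <CustomizationScript> element, so bare
# ampersands must be escaped.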
7091 customize_script = customize_script.replace("&", "&amp;")
7092 self.guest_customization(vapp, customize_script)
7093 except Exception as exp:
7094 self.logger.error(
7095 "cloud_init : exception occurred while injecting " "ssh-key"
7096 )
7097
7098 raise vimconn.VimConnException(
7099 "cloud_init : Error {} failed to inject " "ssh-key".format(exp)
7100 )
7101
7102 def format_script(self, key_pairs=[], users_list=[]):
7103 bash_script = """#!/bin/sh
7104 echo performing customization tasks with param $1 at `date "+DATE: %Y-%m-%d - TIME: %H:%M:%S"`>> /root/customization.log
7105 if [ "$1" = "precustomization" ];then
7106 echo performing precustomization tasks on `date "+DATE: %Y-%m-%d - TIME: %H:%M:%S"` >> /root/customization.log
7107 """
7108
7109 keys = "\n".join(key_pairs)
7110 if keys:
7111 keys_data = """
7112 if [ ! -d /root/.ssh ];then
7113 mkdir /root/.ssh
7114 chown root:root /root/.ssh
7115 chmod 700 /root/.ssh
7116 touch /root/.ssh/authorized_keys
7117 chown root:root /root/.ssh/authorized_keys
7118 chmod 600 /root/.ssh/authorized_keys
7119 # make centos with selinux happy
7120 which restorecon && restorecon -Rv /root/.ssh
7121 else
7122 touch /root/.ssh/authorized_keys
7123 chown root:root /root/.ssh/authorized_keys
7124 chmod 600 /root/.ssh/authorized_keys
7125 fi
7126 echo '{key}' >> /root/.ssh/authorized_keys
7127 """.format(
7128 key=keys
7129 )
7130
7131 bash_script += keys_data
7132
7133 for user in users_list:
7134 if "name" in user:
7135 user_name = user["name"]
7136
7137 if "key-pairs" in user:
7138 user_keys = "\n".join(user["key-pairs"])
7139 else:
7140 user_keys = None
7141
7142 add_user_name = """
7143 useradd -d /home/{user_name} -m -g users -s /bin/bash {user_name}
7144 """.format(
7145 user_name=user_name
7146 )
7147
7148 bash_script += add_user_name
7149
7150 if user_keys:
7151 user_keys_data = """
7152 mkdir /home/{user_name}/.ssh
7153 chown {user_name}:{user_name} /home/{user_name}/.ssh
7154 chmod 700 /home/{user_name}/.ssh
7155 touch /home/{user_name}/.ssh/authorized_keys
7156 chown {user_name}:{user_name} /home/{user_name}/.ssh/authorized_keys
7157 chmod 600 /home/{user_name}/.ssh/authorized_keys
7158 # make centos with selinux happy
7159 which restorecon && restorecon -Rv /home/{user_name}/.ssh
7160 echo '{user_key}' >> /home/{user_name}/.ssh/authorized_keys
7161 """.format(
7162 user_name=user_name, user_key=user_keys
7163 )
7164 bash_script += user_keys_data
7165
7166 return bash_script + "\n\tfi"
7167
7168 def guest_customization(self, vapp, customize_script):
7169 """
7170 Method to customize guest os
7171 vapp - Vapp object
7172 customize_script - Customize script to be run at first boot of VM.
7173 """
7174 for vm in vapp.get_all_vms():
7175 vm_id = vm.get("id").split(":")[-1]
7176 vm_name = vm.get("name")
7177 vm_name = vm_name.replace("_", "-")
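# Underscores are not valid in host names, so they are replaced before the
# name is used as the guest ComputerName.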
7178
7179 vm_customization_url = (
7180 "{}/api/vApp/vm-{}/guestCustomizationSection/".format(self.url, vm_id)
7181 )
7182 headers = {
7183 "Accept": "application/*+xml;version=" + API_VERSION,
7184 "x-vcloud-authorization": self.client._session.headers[
7185 "x-vcloud-authorization"
7186 ],
7187 }
7188
7189 headers[
7190 "Content-Type"
7191 ] = "application/vnd.vmware.vcloud.guestCustomizationSection+xml"
7192
7193 data = """<GuestCustomizationSection
7194 xmlns="http://www.vmware.com/vcloud/v1.5"
7195 xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1"
7196 ovf:required="false" href="{}"
7197 type="application/vnd.vmware.vcloud.guestCustomizationSection+xml">
7198 <ovf:Info>Specifies Guest OS Customization Settings</ovf:Info>
7199 <Enabled>true</Enabled>
7200 <ChangeSid>false</ChangeSid>
7201 <VirtualMachineId>{}</VirtualMachineId>
7202 <JoinDomainEnabled>false</JoinDomainEnabled>
7203 <UseOrgSettings>false</UseOrgSettings>
7204 <AdminPasswordEnabled>false</AdminPasswordEnabled>
7205 <AdminPasswordAuto>true</AdminPasswordAuto>
7206 <AdminAutoLogonEnabled>false</AdminAutoLogonEnabled>
7207 <AdminAutoLogonCount>0</AdminAutoLogonCount>
7208 <ResetPasswordRequired>false</ResetPasswordRequired>
7209 <CustomizationScript>{}</CustomizationScript>
7210 <ComputerName>{}</ComputerName>
7211 <Link href="{}"
7212 type="application/vnd.vmware.vcloud.guestCustomizationSection+xml" rel="edit"/>
7213 </GuestCustomizationSection>
7214 """.format(
7215 vm_customization_url,
7216 vm_id,
7217 customize_script,
7218 vm_name,
7219 vm_customization_url,
7220 )
7221
7222 response = self.perform_request(
7223 req_type="PUT", url=vm_customization_url, headers=headers, data=data
7224 )
7225 if response.status_code == 202:
7226 guest_task = self.get_task_from_response(response.text)
7227 self.client.get_task_monitor().wait_for_success(task=guest_task)
7228 self.logger.info(
7229 "guest_customization : customized guest os task "
7230 "completed for VM {}".format(vm_name)
7231 )
7232 else:
7233 self.logger.error(
7234 "guest_customization : task for customizing guest os "
7235 "failed for VM {}".format(vm_name)
7236 )
7237
7238 raise vimconn.VimConnException(
7239 "guest_customization : failed to perform "
7240 "guest os customization on VM {}".format(vm_name)
7241 )
7242
7243 def add_new_disk(self, vapp_uuid, disk_size):
7244 """
7245 Method to create an empty vm disk
7246
7247 Args:
7248 vapp_uuid - is vapp identifier.
7249 disk_size - size of disk to be created in GB
7250
7251 Returns:
7252 None
7253 """
7254 status = False
7255 vm_details = None
7256 try:
7257 # Disk size in GB, convert it into MB
7258 if disk_size is not None:
7259 disk_size_mb = int(disk_size) * 1024
7260 vm_details = self.get_vapp_details_rest(vapp_uuid)
7261
7262 if vm_details and "vm_virtual_hardware" in vm_details:
7263 self.logger.info(
7264 "Adding disk to VM: {} disk size:{}GB".format(
7265 vm_details["name"], disk_size
7266 )
7267 )
7268 disk_href = vm_details["vm_virtual_hardware"]["disk_edit_href"]
7269 status = self.add_new_disk_rest(disk_href, disk_size_mb)
7270 except Exception as exp:
7271 msg = "Error occurred while creating new disk {}.".format(exp)
7272 self.rollback_newvm(vapp_uuid, msg)
7273
7274 if status:
7275 self.logger.info(
7276 "Added new disk to VM: {} disk size:{}GB".format(
7277 vm_details["name"], disk_size
7278 )
7279 )
7280 else:
7281 # If failed to add disk, delete VM
7282 msg = "add_new_disk: Failed to add new disk to {}".format(
7283 vm_details["name"]
7284 )
7285 self.rollback_newvm(vapp_uuid, msg)
7286
7287 def add_new_disk_rest(self, disk_href, disk_size_mb):
7288 """
7289 Retrieves the vApp disks section and adds a new empty disk
7290
7291 Args:
7292 disk_href: Disk section href to add disk
7293 disk_size_mb: Disk size in MB
7294
7295 Returns: Status of add new disk task
7296 """
7297 status = False
7298 if self.client._session:
7299 headers = {
7300 "Accept": "application/*+xml;version=" + API_VERSION,
7301 "x-vcloud-authorization": self.client._session.headers[
7302 "x-vcloud-authorization"
7303 ],
7304 }
7305 response = self.perform_request(
7306 req_type="GET", url=disk_href, headers=headers
7307 )
7308
7309 if response.status_code == 403:
7310 response = self.retry_rest("GET", disk_href)
7311
7312 if response.status_code != requests.codes.ok:
7313 self.logger.error(
7314 "add_new_disk_rest: GET REST API call {} failed. Return status code {}".format(
7315 disk_href, response.status_code
7316 )
7317 )
7318
7319 return status
7320
7321 try:
7322 # Find bus type & max of instance IDs assigned to disks
7323 lxmlroot_respond = lxmlElementTree.fromstring(response.content)
7324 namespaces = {
7325 prefix: uri for prefix, uri in lxmlroot_respond.nsmap.items() if prefix
7326 }
7327 namespaces["xmlns"] = "http://www.vmware.com/vcloud/v1.5"
7328 instance_id = 0
7329
7330 for item in lxmlroot_respond.iterfind("xmlns:Item", namespaces):
7331 if item.find("rasd:Description", namespaces).text == "Hard disk":
7332 inst_id = int(item.find("rasd:InstanceID", namespaces).text)
7333
7334 if inst_id > instance_id:
7335 instance_id = inst_id
7336 disk_item = item.find("rasd:HostResource", namespaces)
7337 bus_subtype = disk_item.attrib[
7338 "{" + namespaces["xmlns"] + "}busSubType"
7339 ]
7340 bus_type = disk_item.attrib[
7341 "{" + namespaces["xmlns"] + "}busType"
7342 ]
7343
7344 instance_id = instance_id + 1
7345 new_item = """<Item>
7346 <rasd:Description>Hard disk</rasd:Description>
7347 <rasd:ElementName>New disk</rasd:ElementName>
7348 <rasd:HostResource
7349 xmlns:vcloud="http://www.vmware.com/vcloud/v1.5"
7350 vcloud:capacity="{}"
7351 vcloud:busSubType="{}"
7352 vcloud:busType="{}"></rasd:HostResource>
7353 <rasd:InstanceID>{}</rasd:InstanceID>
7354 <rasd:ResourceType>17</rasd:ResourceType>
7355 </Item>""".format(
7356 disk_size_mb, bus_subtype, bus_type, instance_id
7357 )
7358
7359 new_data = response.text
7360 # Add new item at the bottom
7361 new_data = new_data.replace(
7362 "</Item>\n</RasdItemsList>",
7363 "</Item>\n{}\n</RasdItemsList>".format(new_item),
7364 )
7365
7366 # Send PUT request to modify virtual hardware section with new disk
7367 headers[
7368 "Content-Type"
7369 ] = "application/vnd.vmware.vcloud.rasdItemsList+xml; charset=ISO-8859-1"
7370
7371 response = self.perform_request(
7372 req_type="PUT", url=disk_href, data=new_data, headers=headers
7373 )
7374
7375 if response.status_code == 403:
7376 add_headers = {"Content-Type": headers["Content-Type"]}
7377 response = self.retry_rest("PUT", disk_href, add_headers, new_data)
7378
7379 if response.status_code != 202:
7380 self.logger.error(
7381 "PUT REST API call {} failed. Return status code {}. response.text:{}".format(
7382 disk_href, response.status_code, response.text
7383 )
7384 )
7385 else:
7386 add_disk_task = self.get_task_from_response(response.text)
7387 result = self.client.get_task_monitor().wait_for_success(
7388 task=add_disk_task
7389 )
7390
7391 if result.get("status") == "success":
7392 status = True
7393 else:
7394 self.logger.error(
7395 "Add new disk REST task failed to add {} MB disk".format(
7396 disk_size_mb
7397 )
7398 )
7399 except Exception as exp:
7400 self.logger.error(
7401 "Error occurred calling rest api for creating new disk {}".format(exp)
7402 )
7403
7404 return status
7405
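    # NOTE (editor): sketch of the add_new_disk_rest() flow, not original code.
    # The method GETs the VM's RasdItemsList disk section, computes the next free
    # rasd:InstanceID, splices a new <Item> with ResourceType 17 (hard disk) into
    # the XML body and PUTs the whole list back:
    #
    #     GET  disk_href                  -> current RasdItemsList XML
    #     PUT  disk_href (patched XML)    -> 202 Accepted + vCD task
    #     wait_for_success(task)          -> disk attached
    #
    # The text-level replace of "</Item>\n</RasdItemsList>" assumes vCD returns the
    # list with that exact formatting; a namespace-aware lxml insert would be a
    # more robust (hypothetical) alternative.
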
7406 def add_existing_disk(
7407 self,
7408 catalogs=None,
7409 image_id=None,
7410 size=None,
7411 template_name=None,
7412 vapp_uuid=None,
7413 ):
7414 """
7415 Method to add existing disk to vm
7416 Args :
7417 catalogs - List of VDC catalogs
7418 image_id - Catalog ID
7419 template_name - Name of template in catalog
7420 vapp_uuid - UUID of vApp
7421 Returns:
7422 None
7423 """
7424 disk_info = None
7425 vcenter_conect, content = self.get_vcenter_content()
7426 # find moref-id of vm in image
7427 catalog_vm_info = self.get_vapp_template_details(
7428 catalogs=catalogs,
7429 image_id=image_id,
7430 )
7431
7432 if catalog_vm_info and "vm_vcenter_info" in catalog_vm_info:
7433 if "vm_moref_id" in catalog_vm_info["vm_vcenter_info"]:
7434 catalog_vm_moref_id = catalog_vm_info["vm_vcenter_info"].get(
7435 "vm_moref_id", None
7436 )
7437
7438 if catalog_vm_moref_id:
7439 self.logger.info(
7440 "Moref_id of VM in catalog : {}".format(catalog_vm_moref_id)
7441 )
7442 _, catalog_vm_obj = self.get_vm_obj(content, catalog_vm_moref_id)
7443
7444 if catalog_vm_obj:
7445 # find existing disk
7446 disk_info = self.find_disk(catalog_vm_obj)
7447 else:
7448 exp_msg = "No VM with image id {} found".format(image_id)
7449 self.rollback_newvm(vapp_uuid, exp_msg, exp_type="NotFound")
7450 else:
7451 exp_msg = "No Image found with image ID {} ".format(image_id)
7452 self.rollback_newvm(vapp_uuid, exp_msg, exp_type="NotFound")
7453
7454 if disk_info:
7455 self.logger.info("Existing disk_info : {}".format(disk_info))
7456 # get VM
7457 vm_moref_id = self.get_vm_moref_id(vapp_uuid)
7458 _, vm_obj = self.get_vm_obj(content, vm_moref_id)
7459
7460 if vm_obj:
7461 status = self.add_disk(
7462 vcenter_conect=vcenter_conect,
7463 vm=vm_obj,
7464 disk_info=disk_info,
7465 size=size,
7466 vapp_uuid=vapp_uuid,
7467 )
7468
7469 if status:
7470 self.logger.info(
7471 "Disk from image id {} added to {}".format(
7472 image_id, vm_obj.config.name
7473 )
7474 )
7475 else:
7476 msg = "No disk found with image id {} to add in VM {}".format(
7477 image_id, vm_obj.config.name
7478 )
7479 self.rollback_newvm(vapp_uuid, msg, exp_type="NotFound")
7480
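    # NOTE (editor): hypothetical invocation of add_existing_disk(), not original
    # code. It chains get_vapp_template_details() (catalog template moref),
    # find_disk() (backing VMDK of the catalog VM) and add_disk() (attach to the
    # target VM); all argument values below are placeholders:
    #
    #     vim_conn.add_existing_disk(
    #         catalogs=catalogs,
    #         image_id="1111-2222-3333",
    #         size=20,
    #         template_name="ubuntu-template",
    #         vapp_uuid=vapp_uuid,
    #     )
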
7481 def find_disk(self, vm_obj):
7482 """
7483 Method to find details of existing disk in VM
7484 Args:
7485 vm_obj - vCenter object of VM
7486 Returns:
7487 disk_info : dict of disk details
7488 """
7489 disk_info = {}
7490 if vm_obj:
7491 try:
7492 devices = vm_obj.config.hardware.device
7493
7494 for device in devices:
7495 if type(device) is vim.vm.device.VirtualDisk:
7496 if isinstance(
7497 device.backing,
7498 vim.vm.device.VirtualDisk.FlatVer2BackingInfo,
7499 ) and hasattr(device.backing, "fileName"):
7500 disk_info["full_path"] = device.backing.fileName
7501 disk_info["datastore"] = device.backing.datastore
7502 disk_info["capacityKB"] = device.capacityInKB
7503 break
7504 except Exception as exp:
7505 self.logger.error(
7506 "find_disk() : exception occurred while "
7507 "getting existing disk details :{}".format(exp)
7508 )
7509
7510 return disk_info
7511
7512 def add_disk(
7513 self, vcenter_conect=None, vm=None, size=None, vapp_uuid=None, disk_info={}
7514 ):
7515 """
7516 Method to add existing disk in VM
7517 Args :
7518             vcenter_conect - vCenter connection object
7519 vm - vCenter vm object
7520 disk_info : dict of disk details
7521 Returns:
7522 status : status of add disk task
7523 """
7524 datastore = disk_info["datastore"] if "datastore" in disk_info else None
7525 fullpath = disk_info["full_path"] if "full_path" in disk_info else None
7526 capacityKB = disk_info["capacityKB"] if "capacityKB" in disk_info else None
7527 if size is not None:
7528 # Convert size from GB to KB
7529 sizeKB = int(size) * 1024 * 1024
7530             # compare size of existing disk and user given size. Assign whichever is greater
7531 self.logger.info(
7532 "Add Existing disk : sizeKB {} , capacityKB {}".format(
7533 sizeKB, capacityKB
7534 )
7535 )
7536
7537 if sizeKB > capacityKB:
7538 capacityKB = sizeKB
7539
7540 if datastore and fullpath and capacityKB:
7541 try:
7542 spec = vim.vm.ConfigSpec()
7543 # get all disks on a VM, set unit_number to the next available
7544 unit_number = 0
7545 for dev in vm.config.hardware.device:
7546 if hasattr(dev.backing, "fileName"):
7547 unit_number = int(dev.unitNumber) + 1
7548 # unit_number 7 reserved for scsi controller
7549
7550 if unit_number == 7:
7551 unit_number += 1
7552
7553 if isinstance(dev, vim.vm.device.VirtualDisk):
7554 # vim.vm.device.VirtualSCSIController
7555 controller_key = dev.controllerKey
7556
7557 self.logger.info(
7558 "Add Existing disk : unit number {} , controller key {}".format(
7559 unit_number, controller_key
7560 )
7561 )
7562 # add disk here
7563 dev_changes = []
7564 disk_spec = vim.vm.device.VirtualDeviceSpec()
7565 disk_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
7566 disk_spec.device = vim.vm.device.VirtualDisk()
7567 disk_spec.device.backing = (
7568 vim.vm.device.VirtualDisk.FlatVer2BackingInfo()
7569 )
7570 disk_spec.device.backing.thinProvisioned = True
7571 disk_spec.device.backing.diskMode = "persistent"
7572 disk_spec.device.backing.datastore = datastore
7573 disk_spec.device.backing.fileName = fullpath
7574
7575 disk_spec.device.unitNumber = unit_number
7576 disk_spec.device.capacityInKB = capacityKB
7577 disk_spec.device.controllerKey = controller_key
7578 dev_changes.append(disk_spec)
7579 spec.deviceChange = dev_changes
7580 task = vm.ReconfigVM_Task(spec=spec)
7581 status = self.wait_for_vcenter_task(task, vcenter_conect)
7582
7583 return status
7584 except Exception as exp:
7585 exp_msg = (
7586 "add_disk() : exception {} occurred while adding disk "
7587 "{} to vm {}".format(exp, fullpath, vm.config.name)
7588 )
7589 self.rollback_newvm(vapp_uuid, exp_msg)
7590 else:
7591 msg = "add_disk() : Can not add disk to VM with disk info {} ".format(
7592 disk_info
7593 )
7594 self.rollback_newvm(vapp_uuid, msg)
7595
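    # NOTE (editor): clarification of add_disk(), not original code. The method
    # builds a pyVmomi VirtualDeviceSpec for an *existing* VMDK (FlatVer2 backing
    # pointing at the catalog VM's fileName) rather than creating a blank disk.
    # Unit number 7 is skipped because it is reserved for the SCSI controller, and
    # the controller key is reused from the last VirtualDisk already on the VM.
    # A hedged usage sketch (all names are placeholders):
    #
    #     status = vim_conn.add_disk(
    #         vcenter_conect=si, vm=vm_obj, size=20,
    #         vapp_uuid=vapp_uuid, disk_info=disk_info,
    #     )
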
7596 def get_vcenter_content(self):
7597 """
7598 Get the vsphere content object
7599 """
7600 try:
7601 vm_vcenter_info = self.get_vm_vcenter_info()
7602 except Exception as exp:
7603 self.logger.error(
7604                 "Error occurred while getting vCenter information"
7605 " for VM : {}".format(exp)
7606 )
7607
7608 raise vimconn.VimConnException(message=exp)
7609
7610 context = None
7611 if hasattr(ssl, "_create_unverified_context"):
7612 context = ssl._create_unverified_context()
7613
7614 vcenter_conect = SmartConnect(
7615 host=vm_vcenter_info["vm_vcenter_ip"],
7616 user=vm_vcenter_info["vm_vcenter_user"],
7617 pwd=vm_vcenter_info["vm_vcenter_password"],
7618 port=int(vm_vcenter_info["vm_vcenter_port"]),
7619 sslContext=context,
7620 )
7621 atexit.register(Disconnect, vcenter_conect)
7622 content = vcenter_conect.RetrieveContent()
7623
7624 return vcenter_conect, content
7625
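    # NOTE (editor): usage sketch for get_vcenter_content(), not original code.
    # It opens a pyVmomi session with an unverified SSL context and registers
    # Disconnect() via atexit so the session is closed on process exit. Typical
    # use elsewhere in this module:
    #
    #     vcenter_conect, content = self.get_vcenter_content()
    #     vm_moref_id = self.get_vm_moref_id(vapp_uuid)
    #     _, vm_obj = self.get_vm_obj(content, vm_moref_id)
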
7626 def get_vm_moref_id(self, vapp_uuid):
7627 """
7628 Get the moref_id of given VM
7629 """
7630 try:
7631 if vapp_uuid:
7632 vm_details = self.get_vapp_details_rest(
7633 vapp_uuid, need_admin_access=True
7634 )
7635
7636 if vm_details and "vm_vcenter_info" in vm_details:
7637 vm_moref_id = vm_details["vm_vcenter_info"].get("vm_moref_id", None)
7638
7639 return vm_moref_id
7640 except Exception as exp:
7641 self.logger.error(
7642                 "Error occurred while getting VM moref ID for VM : {}".format(exp)
7643 )
7644
7645 return None
7646
7647 def get_vapp_template_details(
7648 self, catalogs=None, image_id=None, template_name=None
7649 ):
7650 """
7651 Method to get vApp template details
7652 Args :
7653 catalogs - list of VDC catalogs
7654 image_id - Catalog ID to find
7655 template_name : template name in catalog
7656 Returns:
7657             parsed_response : dict of vApp template details
7658 """
7659 parsed_response = {}
7660
7661 vca = self.connect_as_admin()
7662 if not vca:
7663 raise vimconn.VimConnConnectionException("Failed to connect vCD")
7664
7665 try:
7666 org, _ = self.get_vdc_details()
7667 catalog = self.get_catalog_obj(image_id, catalogs)
7668 if catalog:
7669 items = org.get_catalog_item(catalog.get("name"), catalog.get("name"))
7670 catalog_items = [items.attrib]
7671
7672 if len(catalog_items) == 1:
7673 headers = {
7674 "Accept": "application/*+xml;version=" + API_VERSION,
7675 "x-vcloud-authorization": vca._session.headers[
7676 "x-vcloud-authorization"
7677 ],
7678 }
7679 response = self.perform_request(
7680 req_type="GET",
7681 url=catalog_items[0].get("href"),
7682 headers=headers,
7683 )
7684 catalogItem = XmlElementTree.fromstring(response.text)
7685 entity = [
7686 child
7687 for child in catalogItem
7688 if child.get("type")
7689 == "application/vnd.vmware.vcloud.vAppTemplate+xml"
7690 ][0]
7691 vapp_tempalte_href = entity.get("href")
7692 # get vapp details and parse moref id
7693
7694 namespaces = {
7695 "vssd": "http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData",
7696 "ovf": "http://schemas.dmtf.org/ovf/envelope/1",
7697 "vmw": "http://www.vmware.com/schema/ovf",
7698 "vm": "http://www.vmware.com/vcloud/v1.5",
7699 "rasd": "http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData",
7700 "vmext": "http://www.vmware.com/vcloud/extension/v1.5",
7701 "xmlns": "http://www.vmware.com/vcloud/v1.5",
7702 }
7703
7704 if vca._session:
7705 response = self.perform_request(
7706 req_type="GET", url=vapp_tempalte_href, headers=headers
7707 )
7708
7709 if response.status_code != requests.codes.ok:
7710 self.logger.debug(
7711 "REST API call {} failed. Return status code {}".format(
7712 vapp_tempalte_href, response.status_code
7713 )
7714 )
7715 else:
7716 xmlroot_respond = XmlElementTree.fromstring(response.text)
7717 children_section = xmlroot_respond.find(
7718 "vm:Children/", namespaces
7719 )
7720
7721 if children_section is not None:
7722 vCloud_extension_section = children_section.find(
7723 "xmlns:VCloudExtension", namespaces
7724 )
7725
7726 if vCloud_extension_section is not None:
7727 vm_vcenter_info = {}
7728 vim_info = vCloud_extension_section.find(
7729 "vmext:VmVimInfo", namespaces
7730 )
7731 vmext = vim_info.find(
7732 "vmext:VmVimObjectRef", namespaces
7733 )
7734
7735 if vmext is not None:
7736 vm_vcenter_info["vm_moref_id"] = vmext.find(
7737 "vmext:MoRef", namespaces
7738 ).text
7739
7740 parsed_response["vm_vcenter_info"] = vm_vcenter_info
7741 except Exception as exp:
7742 self.logger.info(
7743 "Error occurred calling rest api for getting vApp details {}".format(
7744 exp
7745 )
7746 )
7747
7748 return parsed_response
7749
7750 def rollback_newvm(self, vapp_uuid, msg, exp_type="Genric"):
7751 """
7752 Method to delete vApp
7753 Args :
7754 vapp_uuid - vApp UUID
7755 msg - Error message to be logged
7756 exp_type : Exception type
7757 Returns:
7758 None
7759 """
7760 if vapp_uuid:
7761 self.delete_vminstance(vapp_uuid)
7762 else:
7763 msg = "No vApp ID"
7764
7765 self.logger.error(msg)
7766
7767 if exp_type == "Genric":
7768 raise vimconn.VimConnException(msg)
7769 elif exp_type == "NotFound":
7770 raise vimconn.VimConnNotFoundException(message=msg)
7771
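    # NOTE (editor): clarification of rollback_newvm(), not original code. It is
    # the common error path for the disk/SRIOV helpers above: the half-created
    # vApp is deleted and the error is re-raised as VimConnException ("Genric")
    # or VimConnNotFoundException ("NotFound"), so callers never return normally
    # after invoking it, e.g.:
    #
    #     if not status:
    #         self.rollback_newvm(vapp_uuid, "Failed to add disk", exp_type="NotFound")
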
7772 def add_sriov(self, vapp_uuid, sriov_nets, vmname_andid):
7773 """
7774 Method to attach SRIOV adapters to VM
7775
7776 Args:
7777 vapp_uuid - uuid of vApp/VM
7778             sriov_nets - SRIOV devices information as specified in VNFD (flavor)
7779 vmname_andid - vmname
7780
7781 Returns:
7782             The status of add SRIOV adapter task, vm object and
7783 vcenter_conect object
7784 """
7785 vm_obj = None
7786 vcenter_conect, content = self.get_vcenter_content()
7787 vm_moref_id = self.get_vm_moref_id(vapp_uuid)
7788
7789 if vm_moref_id:
7790 try:
7791 no_of_sriov_devices = len(sriov_nets)
7792 if no_of_sriov_devices > 0:
7793 # Get VM and its host
7794 host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
7795 self.logger.info(
7796 "VM {} is currently on host {}".format(vm_obj, host_obj)
7797 )
7798
7799 if host_obj and vm_obj:
7800                     # get SRIOV devices from host on which vapp is currently installed
7801 avilable_sriov_devices = self.get_sriov_devices(
7802 host_obj,
7803 no_of_sriov_devices,
7804 )
7805
7806 if len(avilable_sriov_devices) == 0:
7807 # find other hosts with active pci devices
7808 (
7809 new_host_obj,
7810 avilable_sriov_devices,
7811 ) = self.get_host_and_sriov_devices(
7812 content,
7813 no_of_sriov_devices,
7814 )
7815
7816 if (
7817 new_host_obj is not None
7818 and len(avilable_sriov_devices) > 0
7819 ):
7820 # Migrate vm to the host where SRIOV devices are available
7821 self.logger.info(
7822 "Relocate VM {} on new host {}".format(
7823 vm_obj, new_host_obj
7824 )
7825 )
7826 task = self.relocate_vm(new_host_obj, vm_obj)
7827
7828 if task is not None:
7829 result = self.wait_for_vcenter_task(
7830 task, vcenter_conect
7831 )
7832 self.logger.info(
7833 "Migrate VM status: {}".format(result)
7834 )
7835 host_obj = new_host_obj
7836 else:
7837                                 self.logger.error(
7838                                     "Failed to migrate VM : {}".format(vmname_andid)
7839                                 )
7840
7841 raise vimconn.VimConnNotFoundException(
7842 "Fail to migrate VM : {} to host {}".format(
7843 vmname_andid, new_host_obj
7844 )
7845 )
7846
7847 if (
7848 host_obj is not None
7849 and avilable_sriov_devices is not None
7850 and len(avilable_sriov_devices) > 0
7851 ):
7852 # Add SRIOV devices one by one
7853 for sriov_net in sriov_nets:
7854 network_name = sriov_net.get("net_id")
7855 self.create_dvPort_group(network_name)
7856
7857 if (
7858 sriov_net.get("type") == "VF"
7859 or sriov_net.get("type") == "SR-IOV"
7860 ):
7861 # add vlan ID ,Modify portgroup for vlan ID
7862 self.configure_vlanID(
7863 content, vcenter_conect, network_name
7864 )
7865
7866 task = self.add_sriov_to_vm(
7867 content,
7868 vm_obj,
7869 host_obj,
7870 network_name,
7871 avilable_sriov_devices[0],
7872 )
7873
7874 if task:
7875 status = self.wait_for_vcenter_task(
7876 task, vcenter_conect
7877 )
7878
7879 if status:
7880 self.logger.info(
7881 "Added SRIOV {} to VM {}".format(
7882 no_of_sriov_devices, str(vm_obj)
7883 )
7884 )
7885 else:
7886 self.logger.error(
7887 "Fail to add SRIOV {} to VM {}".format(
7888 no_of_sriov_devices, str(vm_obj)
7889 )
7890 )
7891
7892 raise vimconn.VimConnUnexpectedResponse(
7893 "Fail to add SRIOV adapter in VM {}".format(
7894 str(vm_obj)
7895 )
7896 )
7897
7898 return True, vm_obj, vcenter_conect
7899 else:
7900 self.logger.error(
7901 "Currently there is no host with"
7902                             " {} number of available SRIOV "
7903 "VFs required for VM {}".format(
7904 no_of_sriov_devices, vmname_andid
7905 )
7906 )
7907
7908 raise vimconn.VimConnNotFoundException(
7909 "Currently there is no host with {} "
7910                             "number of available SRIOV devices required for VM {}".format(
7911 no_of_sriov_devices, vmname_andid
7912 )
7913 )
7914 else:
7915 self.logger.debug(
7916                         "No information about SRIOV devices {}".format(sriov_nets)
7917 )
7918 except vmodl.MethodFault as error:
7919                 self.logger.error("Error occurred while adding SRIOV: {}".format(error))
7920
7921 return None, vm_obj, vcenter_conect
7922
7923 def get_sriov_devices(self, host, no_of_vfs):
7924 """
7925 Method to get the details of SRIOV devices on given host
7926 Args:
7927 host - vSphere host object
7928 no_of_vfs - number of VFs needed on host
7929
7930 Returns:
7931 array of SRIOV devices
7932 """
7933 sriovInfo = []
7934
7935 if host:
7936 for device in host.config.pciPassthruInfo:
7937 if isinstance(device, vim.host.SriovInfo) and device.sriovActive:
7938 if device.numVirtualFunction >= no_of_vfs:
7939 sriovInfo.append(device)
7940 break
7941
7942 return sriovInfo
7943
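    # NOTE (editor): sketch for get_sriov_devices(), not original code. It filters
    # host.config.pciPassthruInfo for active vim.host.SriovInfo entries whose
    # numVirtualFunction covers the request and returns at most one matching
    # physical function:
    #
    #     devices = self.get_sriov_devices(host_obj, no_of_vfs=2)
    #     if devices:
    #         pf_id = devices[0].id   # later consumed by add_sriov_to_vm()
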
7944 def get_host_and_sriov_devices(self, content, no_of_vfs):
7945 """
7946         Method to get the details of SRIOV devices on all hosts
7947
7948 Args:
7949             content - vCenter content object
7950 no_of_vfs - number of pci VFs needed on host
7951
7952 Returns:
7953 array of SRIOV devices and host object
7954 """
7955 host_obj = None
7956 sriov_device_objs = None
7957
7958 try:
7959 if content:
7960 container = content.viewManager.CreateContainerView(
7961 content.rootFolder, [vim.HostSystem], True
7962 )
7963
7964 for host in container.view:
7965 devices = self.get_sriov_devices(host, no_of_vfs)
7966
7967 if devices:
7968 host_obj = host
7969 sriov_device_objs = devices
7970 break
7971 except Exception as exp:
7972 self.logger.error(
7973 "Error {} occurred while finding SRIOV devices on host: {}".format(
7974 exp, host_obj
7975 )
7976 )
7977
7978 return host_obj, sriov_device_objs
7979
7980 def add_sriov_to_vm(self, content, vm_obj, host_obj, network_name, sriov_device):
7981 """
7982 Method to add SRIOV adapter to vm
7983
7984 Args:
7985 host_obj - vSphere host object
7986 vm_obj - vSphere vm object
7987 content - vCenter content object
7988             network_name - name of distributed virtual portgroup
7989 sriov_device - SRIOV device info
7990
7991 Returns:
7992 task object
7993 """
7994 devices = []
7995 vnic_label = "sriov nic"
7996
7997 try:
7998 dvs_portgr = self.get_dvport_group(network_name)
7999 network_name = dvs_portgr.name
8000 nic = vim.vm.device.VirtualDeviceSpec()
8001 # VM device
8002 nic.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
8003 nic.device = vim.vm.device.VirtualSriovEthernetCard()
8004 nic.device.addressType = "assigned"
8005 # nic.device.key = 13016
8006 nic.device.deviceInfo = vim.Description()
8007 nic.device.deviceInfo.label = vnic_label
8008 nic.device.deviceInfo.summary = network_name
8009 nic.device.backing = vim.vm.device.VirtualEthernetCard.NetworkBackingInfo()
8010
8011 nic.device.backing.network = self.get_obj(
8012 content, [vim.Network], network_name
8013 )
8014 nic.device.backing.deviceName = network_name
8015 nic.device.backing.useAutoDetect = False
8016 nic.device.connectable = vim.vm.device.VirtualDevice.ConnectInfo()
8017 nic.device.connectable.startConnected = True
8018 nic.device.connectable.allowGuestControl = True
8019
8020 nic.device.sriovBacking = (
8021 vim.vm.device.VirtualSriovEthernetCard.SriovBackingInfo()
8022 )
8023 nic.device.sriovBacking.physicalFunctionBacking = (
8024 vim.vm.device.VirtualPCIPassthrough.DeviceBackingInfo()
8025 )
8026 nic.device.sriovBacking.physicalFunctionBacking.id = sriov_device.id
8027
8028 devices.append(nic)
8029 vmconf = vim.vm.ConfigSpec(deviceChange=devices)
8030 task = vm_obj.ReconfigVM_Task(vmconf)
8031
8032 return task
8033 except Exception as exp:
8034 self.logger.error(
8035 "Error {} occurred while adding SRIOV adapter in VM: {}".format(
8036 exp, vm_obj
8037 )
8038 )
8039
8040 return None
8041
8042 def create_dvPort_group(self, network_name):
8043 """
8044         Method to create distributed virtual portgroup
8045
8046 Args:
8047 network_name - name of network/portgroup
8048
8049 Returns:
8050 portgroup key
8051 """
8052 try:
8053 new_network_name = [network_name, "-", str(uuid.uuid4())]
8054 network_name = "".join(new_network_name)
8055 vcenter_conect, content = self.get_vcenter_content()
8056
8057 dv_switch = self.get_obj(
8058 content, [vim.DistributedVirtualSwitch], self.dvs_name
8059 )
8060
8061 if dv_switch:
8062 dv_pg_spec = vim.dvs.DistributedVirtualPortgroup.ConfigSpec()
8063 dv_pg_spec.name = network_name
8064
8065 dv_pg_spec.type = (
8066 vim.dvs.DistributedVirtualPortgroup.PortgroupType.earlyBinding
8067 )
8068 dv_pg_spec.defaultPortConfig = (
8069 vim.dvs.VmwareDistributedVirtualSwitch.VmwarePortConfigPolicy()
8070 )
8071 dv_pg_spec.defaultPortConfig.securityPolicy = (
8072 vim.dvs.VmwareDistributedVirtualSwitch.SecurityPolicy()
8073 )
8074 dv_pg_spec.defaultPortConfig.securityPolicy.allowPromiscuous = (
8075 vim.BoolPolicy(value=False)
8076 )
8077 dv_pg_spec.defaultPortConfig.securityPolicy.forgedTransmits = (
8078 vim.BoolPolicy(value=False)
8079 )
8080 dv_pg_spec.defaultPortConfig.securityPolicy.macChanges = vim.BoolPolicy(
8081 value=False
8082 )
8083
8084 task = dv_switch.AddDVPortgroup_Task([dv_pg_spec])
8085 self.wait_for_vcenter_task(task, vcenter_conect)
8086
8087 dvPort_group = self.get_obj(
8088 content, [vim.dvs.DistributedVirtualPortgroup], network_name
8089 )
8090
8091 if dvPort_group:
8092 self.logger.info(
8093                     "Created distributed virtual port group: {}".format(dvPort_group)
8094 )
8095 return dvPort_group.key
8096 else:
8097 self.logger.debug(
8098                     "No distributed virtual switch found with name {}".format(
8099 network_name
8100 )
8101 )
8102
8103 except Exception as exp:
8104 self.logger.error(
8105                 "Error occurred while creating distributed virtual port group {}"
8106 " : {}".format(network_name, exp)
8107 )
8108
8109 return None
8110
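    # NOTE (editor): clarification of create_dvPort_group(), not original code.
    # A random UUID is appended to the requested network name so every SRIOV
    # attachment gets its own early-binding portgroup on the DVS named by
    # self.dvs_name, with promiscuous mode, forged transmits and MAC changes all
    # disabled. The method returns the portgroup *key*; the portgroup itself can
    # later be looked up again by that key with get_dvport_group().
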
8111 def reconfig_portgroup(self, content, dvPort_group_name, config_info={}):
8112 """
8113         Method to reconfigure distributed virtual portgroup
8114
8115 Args:
8116             dvPort_group_name - name of distributed virtual portgroup
8117             content - vCenter content object
8118             config_info - distributed virtual portgroup configuration
8119
8120 Returns:
8121 task object
8122 """
8123 try:
8124 dvPort_group = self.get_dvport_group(dvPort_group_name)
8125
8126 if dvPort_group:
8127 dv_pg_spec = vim.dvs.DistributedVirtualPortgroup.ConfigSpec()
8128 dv_pg_spec.configVersion = dvPort_group.config.configVersion
8129 dv_pg_spec.defaultPortConfig = (
8130 vim.dvs.VmwareDistributedVirtualSwitch.VmwarePortConfigPolicy()
8131 )
8132
8133 if "vlanID" in config_info:
8134 dv_pg_spec.defaultPortConfig.vlan = (
8135 vim.dvs.VmwareDistributedVirtualSwitch.VlanIdSpec()
8136 )
8137 dv_pg_spec.defaultPortConfig.vlan.vlanId = config_info.get("vlanID")
8138
8139 task = dvPort_group.ReconfigureDVPortgroup_Task(spec=dv_pg_spec)
8140
8141 return task
8142 else:
8143 return None
8144 except Exception as exp:
8145 self.logger.error(
8146                 "Error occurred while reconfiguring distributed virtual port group {}"
8147 " : {}".format(dvPort_group_name, exp)
8148 )
8149
8150 return None
8151
8152 def destroy_dvport_group(self, dvPort_group_name):
8153 """
8154         Method to destroy distributed virtual portgroup
8155
8156 Args:
8157 network_name - name of network/portgroup
8158
8159 Returns:
8160             True if the portgroup was deleted successfully, else False
8161 """
8162 vcenter_conect, _ = self.get_vcenter_content()
8163
8164 try:
8165 status = None
8166 dvPort_group = self.get_dvport_group(dvPort_group_name)
8167
8168 if dvPort_group:
8169 task = dvPort_group.Destroy_Task()
8170 status = self.wait_for_vcenter_task(task, vcenter_conect)
8171
8172 return status
8173 except vmodl.MethodFault as exp:
8174 self.logger.error(
8175                 "Caught vmodl fault {} while deleting distributed virtual port group {}".format(
8176 exp, dvPort_group_name
8177 )
8178 )
8179
8180 return None
8181
8182 def get_dvport_group(self, dvPort_group_name):
8183 """
8184         Method to get distributed virtual portgroup
8185
8186 Args:
8187 network_name - name of network/portgroup
8188
8189 Returns:
8190 portgroup object
8191 """
8192 _, content = self.get_vcenter_content()
8193 dvPort_group = None
8194
8195 try:
8196 container = content.viewManager.CreateContainerView(
8197 content.rootFolder, [vim.dvs.DistributedVirtualPortgroup], True
8198 )
8199
8200 for item in container.view:
8201 if item.key == dvPort_group_name:
8202 dvPort_group = item
8203 break
8204
8205 return dvPort_group
8206 except vmodl.MethodFault as exp:
8207 self.logger.error(
8208                 "Caught vmodl fault {} for distributed virtual port group {}".format(
8209 exp, dvPort_group_name
8210 )
8211 )
8212
8213 return None
8214
8215 def get_vlanID_from_dvs_portgr(self, dvPort_group_name):
8216 """
8217         Method to get distributed virtual portgroup vlan ID
8218
8219 Args:
8220 network_name - name of network/portgroup
8221
8222 Returns:
8223 vlan ID
8224 """
8225 vlanId = None
8226
8227 try:
8228 dvPort_group = self.get_dvport_group(dvPort_group_name)
8229
8230 if dvPort_group:
8231 vlanId = dvPort_group.config.defaultPortConfig.vlan.vlanId
8232 except vmodl.MethodFault as exp:
8233 self.logger.error(
8234                 "Caught vmodl fault {} for distributed virtual port group {}".format(
8235 exp, dvPort_group_name
8236 )
8237 )
8238
8239 return vlanId
8240
8241 def configure_vlanID(self, content, vcenter_conect, dvPort_group_name):
8242 """
8243         Method to configure vlan ID in distributed virtual portgroup
8244
8245 Args:
8246 network_name - name of network/portgroup
8247
8248 Returns:
8249 None
8250 """
8251 vlanID = self.get_vlanID_from_dvs_portgr(dvPort_group_name)
8252
8253 if vlanID == 0:
8254 # configure vlanID
8255 vlanID = self.genrate_vlanID(dvPort_group_name)
8256 config = {"vlanID": vlanID}
8257 task = self.reconfig_portgroup(
8258 content, dvPort_group_name, config_info=config
8259 )
8260
8261 if task:
8262 status = self.wait_for_vcenter_task(task, vcenter_conect)
8263
8264 if status:
8265 self.logger.info(
8266 "Reconfigured Port group {} for vlan ID {}".format(
8267 dvPort_group_name, vlanID
8268 )
8269 )
8270 else:
8271 self.logger.error(
8272                     "Failed to reconfigure portgroup {} for vlan ID {}".format(
8273 dvPort_group_name, vlanID
8274 )
8275 )
8276
8277 def genrate_vlanID(self, network_name):
8278 """
8279 Method to get unused vlanID
8280 Args:
8281 network_name - name of network/portgroup
8282 Returns:
8283 vlanID
8284 """
8285 vlan_id = None
8286 used_ids = []
8287
8288 if self.config.get("vlanID_range") is None:
8289 raise vimconn.VimConnConflictException(
8290                 "You must provide a 'vlanID_range' "
8291                 "config value before creating an SRIOV network with a vlan tag"
8292 )
8293
8294 if "used_vlanIDs" not in self.persistent_info:
8295 self.persistent_info["used_vlanIDs"] = {}
8296 else:
8297 used_ids = list(self.persistent_info["used_vlanIDs"].values())
8298
8299 for vlanID_range in self.config.get("vlanID_range"):
8300 start_vlanid, end_vlanid = vlanID_range.split("-")
8301
8302             if int(start_vlanid) > int(end_vlanid):
8303 raise vimconn.VimConnConflictException(
8304 "Invalid vlan ID range {}".format(vlanID_range)
8305 )
8306
8307 for vid in range(int(start_vlanid), int(end_vlanid) + 1):
8308 if vid not in used_ids:
8309 vlan_id = vid
8310 self.persistent_info["used_vlanIDs"][network_name] = vlan_id
8311 return vlan_id
8312
8313 if vlan_id is None:
8314 raise vimconn.VimConnConflictException("All Vlan IDs are in use")
8315
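    # NOTE (editor): sketch for genrate_vlanID(), not original code. The VIM
    # config is expected to carry a list of "start-end" strings, and allocations
    # are recorded per portgroup name in persistent_info. With a hypothetical
    # config such as:
    #
    #     vlanID_range: ["3000-3100", "3200-3250"]
    #
    # the first call returns 3000, later calls return the next unused ID, and a
    # VimConnConflictException is raised once every ID in all ranges is in use.
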
8316 def get_obj(self, content, vimtype, name):
8317 """
8318 Get the vsphere object associated with a given text name
8319 """
8320 obj = None
8321 container = content.viewManager.CreateContainerView(
8322 content.rootFolder, vimtype, True
8323 )
8324
8325 for item in container.view:
8326 if item.name == name:
8327 obj = item
8328 break
8329
8330 return obj
8331
8332 def insert_media_to_vm(self, vapp, image_id):
8333 """
8334 Method to insert media CD-ROM (ISO image) from catalog to vm.
8335 vapp - vapp object to get vm id
8336         image_id - image id of the CD-ROM to be inserted into the vm
8337 """
8338 # create connection object
8339 vca = self.connect()
8340 try:
8341 # fetching catalog details
8342 rest_url = "{}/api/catalog/{}".format(self.url, image_id)
8343
8344 if vca._session:
8345 headers = {
8346 "Accept": "application/*+xml;version=" + API_VERSION,
8347 "x-vcloud-authorization": vca._session.headers[
8348 "x-vcloud-authorization"
8349 ],
8350 }
8351 response = self.perform_request(
8352 req_type="GET", url=rest_url, headers=headers
8353 )
8354
8355 if response.status_code != 200:
8356 self.logger.error(
8357                     "REST call {} failed reason : {} "
8358 "status code : {}".format(
8359 rest_url, response.text, response.status_code
8360 )
8361 )
8362
8363 raise vimconn.VimConnException(
8364 "insert_media_to_vm(): Failed to get " "catalog details"
8365 )
8366
8367 # searching iso name and id
8368 iso_name, media_id = self.get_media_details(vca, response.text)
8369
8370 if iso_name and media_id:
8371 data = """<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
8372 <ns6:MediaInsertOrEjectParams
8373 xmlns="http://www.vmware.com/vcloud/versions" xmlns:ns2="http://schemas.dmtf.org/ovf/envelope/1"
8374 xmlns:ns3="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData"
8375 xmlns:ns4="http://schemas.dmtf.org/wbem/wscim/1/common"
8376 xmlns:ns5="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData"
8377 xmlns:ns6="http://www.vmware.com/vcloud/v1.5"
8378 xmlns:ns7="http://www.vmware.com/schema/ovf"
8379 xmlns:ns8="http://schemas.dmtf.org/ovf/environment/1"
8380 xmlns:ns9="http://www.vmware.com/vcloud/extension/v1.5">
8381 <ns6:Media
8382 type="application/vnd.vmware.vcloud.media+xml"
8383 name="{}"
8384 id="urn:vcloud:media:{}"
8385 href="https://{}/api/media/{}"/>
8386 </ns6:MediaInsertOrEjectParams>""".format(
8387 iso_name, media_id, self.url, media_id
8388 )
8389
8390 for vms in vapp.get_all_vms():
8391 vm_id = vms.get("id").split(":")[-1]
8392
8393 headers[
8394 "Content-Type"
8395 ] = "application/vnd.vmware.vcloud.mediaInsertOrEjectParams+xml"
8396 rest_url = "{}/api/vApp/vm-{}/media/action/insertMedia".format(
8397 self.url, vm_id
8398 )
8399
8400 response = self.perform_request(
8401 req_type="POST", url=rest_url, data=data, headers=headers
8402 )
8403
8404 if response.status_code != 202:
8405 error_msg = (
8406 "insert_media_to_vm() : Failed to insert CD-ROM to vm. Reason {}. "
8407 "Status code {}".format(response.text, response.status_code)
8408 )
8409 self.logger.error(error_msg)
8410
8411 raise vimconn.VimConnException(error_msg)
8412 else:
8413 task = self.get_task_from_response(response.text)
8414 result = self.client.get_task_monitor().wait_for_success(
8415 task=task
8416 )
8417
8418 if result.get("status") == "success":
8419 self.logger.info(
8420                                 "insert_media_to_vm(): Successfully inserted media ISO"
8421 " image to vm {}".format(vm_id)
8422 )
8423 except Exception as exp:
8424 self.logger.error(
8425 "insert_media_to_vm() : exception occurred "
8426 "while inserting media CD-ROM"
8427 )
8428
8429 raise vimconn.VimConnException(message=exp)
8430
8431 def get_media_details(self, vca, content):
8432 """
8433 Method to get catalog item details
8434 vca - connection object
8435 content - Catalog details
8436 Return - Media name, media id
8437 """
8438 cataloghref_list = []
8439 try:
8440 if content:
8441 vm_list_xmlroot = XmlElementTree.fromstring(content)
8442
8443 for child in vm_list_xmlroot.iter():
8444 if "CatalogItem" in child.tag:
8445 cataloghref_list.append(child.attrib.get("href"))
8446
8447 if cataloghref_list is not None:
8448 for href in cataloghref_list:
8449 if href:
8450 headers = {
8451 "Accept": "application/*+xml;version=" + API_VERSION,
8452 "x-vcloud-authorization": vca._session.headers[
8453 "x-vcloud-authorization"
8454 ],
8455 }
8456 response = self.perform_request(
8457 req_type="GET", url=href, headers=headers
8458 )
8459
8460 if response.status_code != 200:
8461 self.logger.error(
8462                                 "REST call {} failed reason : {} "
8463 "status code : {}".format(
8464 href, response.text, response.status_code
8465 )
8466 )
8467
8468 raise vimconn.VimConnException(
8469 "get_media_details : Failed to get "
8470 "catalogitem details"
8471 )
8472
8473 list_xmlroot = XmlElementTree.fromstring(response.text)
8474
8475 for child in list_xmlroot.iter():
8476 if "Entity" in child.tag:
8477 if "media" in child.attrib.get("href"):
8478 name = child.attrib.get("name")
8479 media_id = (
8480 child.attrib.get("href").split("/").pop()
8481 )
8482
8483 return name, media_id
8484 else:
8485 self.logger.debug("Media name and id not found")
8486
8487 return False, False
8488 except Exception as exp:
8489 self.logger.error(
8490 "get_media_details : exception occurred " "getting media details"
8491 )
8492
8493 raise vimconn.VimConnException(message=exp)
8494
8495 def retry_rest(self, method, url, add_headers=None, data=None):
8496 """Method to get Token & retry respective REST request
8497 Args:
8498             method - HTTP method, one of 'GET', 'PUT', 'POST' or 'DELETE'
8499 url - request url to be used
8500 add_headers - Additional headers (optional)
8501 data - Request payload data to be passed in request
8502 Returns:
8503 response - Response of request
8504 """
8505 response = None
8506
8507 # Get token
8508 self.get_token()
8509
8510 if self.client._session:
8511 headers = {
8512 "Accept": "application/*+xml;version=" + API_VERSION,
8513 "x-vcloud-authorization": self.client._session.headers[
8514 "x-vcloud-authorization"
8515 ],
8516 }
8517
8518 if add_headers:
8519 headers.update(add_headers)
8520
8521 if method == "GET":
8522 response = self.perform_request(req_type="GET", url=url, headers=headers)
8523 elif method == "PUT":
8524 response = self.perform_request(
8525 req_type="PUT", url=url, headers=headers, data=data
8526 )
8527 elif method == "POST":
8528 response = self.perform_request(
8529 req_type="POST", url=url, headers=headers, data=data
8530 )
8531 elif method == "DELETE":
8532 response = self.perform_request(req_type="DELETE", url=url, headers=headers)
8533
8534 return response
8535
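    # NOTE (editor): sketch of the 403-recovery pattern around retry_rest(), not
    # original code. Callers first issue perform_request() and, only when vCD
    # answers 403 (expired token), call retry_rest(), which refreshes self.client
    # via get_token() and replays the request with a fresh x-vcloud-authorization
    # header:
    #
    #     response = self.perform_request(req_type="GET", url=href, headers=headers)
    #     if response.status_code == 403:
    #         response = self.retry_rest("GET", href)
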
8536 def get_token(self):
8537 """Generate a new token if expired
8538
8539 Returns:
8540             None. The refreshed client object is stored in self.client and can later be used to connect to vCloud director as admin for the VDC
8541 """
8542 self.client = self.connect()
8543
8544 def get_vdc_details(self):
8545 """Get VDC details using pyVcloud Lib
8546
8547 Returns org and vdc object
8548 """
8549 vdc = None
8550
8551 try:
8552 org = Org(self.client, resource=self.client.get_org())
8553 vdc = org.get_vdc(self.tenant_name)
8554 except Exception as e:
8555 # pyvcloud not giving a specific exception, Refresh nevertheless
8556 self.logger.debug("Received exception {}, refreshing token ".format(str(e)))
8557
8558 # Retry once, if failed by refreshing token
8559 if vdc is None:
8560 self.get_token()
8561 org = Org(self.client, resource=self.client.get_org())
8562 vdc = org.get_vdc(self.tenant_name)
8563
8564 return org, vdc
8565
8566 def perform_request(self, req_type, url, headers=None, data=None):
8567 """Perform the POST/PUT/GET/DELETE request."""
8568 # Log REST request details
8569 self.log_request(req_type, url=url, headers=headers, data=data)
8570 # perform request and return its result
8571
8572 if req_type == "GET":
8573 response = requests.get(url=url, headers=headers, verify=False)
8574 elif req_type == "PUT":
8575 response = requests.put(url=url, headers=headers, data=data, verify=False)
8576 elif req_type == "POST":
8577 response = requests.post(url=url, headers=headers, data=data, verify=False)
8578 elif req_type == "DELETE":
8579 response = requests.delete(url=url, headers=headers, verify=False)
8580
8581 # Log the REST response
8582 self.log_response(response)
8583
8584 return response
8585
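    # NOTE (editor): clarification of perform_request(), not original code. It is
    # a thin wrapper over the requests library with TLS verification disabled
    # (verify=False), which matches how this connector talks to vCloud Director
    # endpoints that use self-signed certificates. Every call goes through
    # log_request()/log_response(), so enabling debug logging traces the full
    # REST conversation, e.g.:
    #
    #     resp = self.perform_request(req_type="GET", url=rest_url, headers=headers)
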
8586 def log_request(self, req_type, url=None, headers=None, data=None):
8587 """Logs REST request details"""
8588
8589 if req_type is not None:
8590 self.logger.debug("Request type: {}".format(req_type))
8591
8592 if url is not None:
8593 self.logger.debug("Request url: {}".format(url))
8594
8595 if headers is not None:
8596 for header in headers:
8597 self.logger.debug(
8598 "Request header: {}: {}".format(header, headers[header])
8599 )
8600
8601 if data is not None:
8602 self.logger.debug("Request data: {}".format(data))
8603
8604 def log_response(self, response):
8605 """Logs REST response details"""
8606
8607 self.logger.debug("Response status code: {} ".format(response.status_code))
8608
8609 def get_task_from_response(self, content):
8610 """
8611         content - API response body (response.text)
8612 return task object
8613 """
8614 xmlroot = XmlElementTree.fromstring(content)
8615
8616 if xmlroot.tag.split("}")[1] == "Task":
8617 return xmlroot
8618 else:
8619 for ele in xmlroot:
8620 if ele.tag.split("}")[1] == "Tasks":
8621 task = ele[0]
8622 break
8623
8624 return task
8625
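    # NOTE (editor): clarification of get_task_from_response(), not original code.
    # vCD may return either a bare <Task> document or an entity embedding a
    # <Tasks> container whose first child is the task of interest; in both cases
    # the returned element is what gets handed to
    # self.client.get_task_monitor().wait_for_success(task=...).
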
8626 def power_on_vapp(self, vapp_id, vapp_name):
8627 """
8628 vapp_id - vApp uuid
8629         vapp_name - vApp name
8630 return - Task object
8631 """
8632 headers = {
8633 "Accept": "application/*+xml;version=" + API_VERSION,
8634 "x-vcloud-authorization": self.client._session.headers[
8635 "x-vcloud-authorization"
8636 ],
8637 }
8638
8639 poweron_href = "{}/api/vApp/vapp-{}/power/action/powerOn".format(
8640 self.url, vapp_id
8641 )
8642 response = self.perform_request(
8643 req_type="POST", url=poweron_href, headers=headers
8644 )
8645
8646 if response.status_code != 202:
8647 self.logger.error(
8648                 "REST call {} failed reason : {} "
8649 "status code : {} ".format(
8650 poweron_href, response.text, response.status_code
8651 )
8652 )
8653
8654 raise vimconn.VimConnException(
8655 "power_on_vapp() : Failed to power on " "vApp {}".format(vapp_name)
8656 )
8657 else:
8658 poweron_task = self.get_task_from_response(response.text)
8659
8660 return poweron_task
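
    # NOTE (editor): usage sketch for power_on_vapp(), not original code. The POST
    # to the power-on action URL must return 202 Accepted; the caller is expected
    # to wait on the returned <Task> element:
    #
    #     task = self.power_on_vapp(vapp_id, vapp_name)
    #     result = self.client.get_task_monitor().wait_for_success(task=task)
    #     if result.get("status") == "success":
    #         self.logger.info("vApp {} powered on".format(vapp_name))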