Feature 10906: Support for Anti-Affinity groups
[osm/RO.git] / RO-VIM-vmware / osm_rovim_vmware / vimconn_vmware.py
1 # -*- coding: utf-8 -*-
2
3 # #
4 # Copyright 2016-2019 VMware Inc.
5 # This file is part of ETSI OSM
6 # All Rights Reserved.
7 #
8 # Licensed under the Apache License, Version 2.0 (the "License"); you may
9 # not use this file except in compliance with the License. You may obtain
10 # a copy of the License at
11 #
12 # http://www.apache.org/licenses/LICENSE-2.0
13 #
14 # Unless required by applicable law or agreed to in writing, software
15 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
16 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
17 # License for the specific language governing permissions and limitations
18 # under the License.
19 #
20 # For those usages not covered by the Apache License, Version 2.0 please
21 # contact: osslegalrouting@vmware.com
22 # #
23
24 """
25 vimconn_vmware implementation an Abstract class in order to interact with VMware vCloud Director.
26 """
27
28 import atexit
29 import hashlib
30 import json
31 import logging
32 import os
33 import random
34 import re
35 import shutil
36 import socket
37 import ssl
38 import struct
39 import subprocess
40 import tempfile
41 import time
42 import traceback
43 import uuid
44 from xml.etree import ElementTree as XmlElementTree
45 from xml.sax.saxutils import escape
46
47 from lxml import etree as lxmlElementTree
48 import netaddr
49 from osm_ro_plugin import vimconn
50 from progressbar import Bar, ETA, FileTransferSpeed, Percentage, ProgressBar
51 from pyvcloud.vcd.client import BasicLoginCredentials, Client
52 from pyvcloud.vcd.org import Org
53 from pyvcloud.vcd.vapp import VApp
54 from pyvcloud.vcd.vdc import VDC
55 from pyVim.connect import Disconnect, SmartConnect
56 from pyVmomi import vim, vmodl # @UnresolvedImport
57 import requests
58 import yaml
59
60 # global variable for vcd connector type
61 STANDALONE = "standalone"
62
63 # key for flavor dicts
64 FLAVOR_RAM_KEY = "ram"
65 FLAVOR_VCPUS_KEY = "vcpus"
66 FLAVOR_DISK_KEY = "disk"
67 DEFAULT_IP_PROFILE = {"dhcp_count": 50, "dhcp_enabled": True, "ip_version": "IPv4"}
68 # global variable for wait time
69 INTERVAL_TIME = 5
70 MAX_WAIT_TIME = 1800
71
72 API_VERSION = "27.0"
73
74 # -1: "Could not be created",
75 # 0: "Unresolved",
76 # 1: "Resolved",
77 # 2: "Deployed",
78 # 3: "Suspended",
79 # 4: "Powered on",
80 # 5: "Waiting for user input",
81 # 6: "Unknown state",
82 # 7: "Unrecognized state",
83 # 8: "Powered off",
84 # 9: "Inconsistent state",
85 # 10: "Children do not all have the same status",
86 # 11: "Upload initiated, OVF descriptor pending",
87 # 12: "Upload initiated, copying contents",
88 # 13: "Upload initiated , disk contents pending",
89 # 14: "Upload has been quarantined",
90 # 15: "Upload quarantine period has expired"
91
92 # mapping vCD status to MANO
93 vcdStatusCode2manoFormat = {
94 4: "ACTIVE",
95 7: "PAUSED",
96 3: "SUSPENDED",
97 8: "INACTIVE",
98 12: "BUILD",
99 -1: "ERROR",
100 14: "DELETED",
101 }
102
103 #
104 netStatus2manoFormat = {
105 "ACTIVE": "ACTIVE",
106 "PAUSED": "PAUSED",
107 "INACTIVE": "INACTIVE",
108 "BUILD": "BUILD",
109 "ERROR": "ERROR",
110 "DELETED": "DELETED",
111 }
112
113
114 class vimconnector(vimconn.VimConnector):
115 # dict used to store flavor in memory
116 flavorlist = {}
117
118 def __init__(
119 self,
120 uuid=None,
121 name=None,
122 tenant_id=None,
123 tenant_name=None,
124 url=None,
125 url_admin=None,
126 user=None,
127 passwd=None,
128 log_level=None,
129 config={},
130 persistent_info={},
131 ):
132 """
133 Constructor create vmware connector to vCloud director.
134
135 By default construct doesn't validate connection state. So client can create object with None arguments.
136 If client specified username , password and host and VDC name. Connector initialize other missing attributes.
137
138 a) It initialize organization UUID
139 b) Initialize tenant_id/vdc ID. (This information derived from tenant name)
140
141 Args:
142 uuid - is organization uuid.
143 name - is organization name that must be presented in vCloud director.
144 tenant_id - is VDC uuid it must be presented in vCloud director
145 tenant_name - is VDC name.
146 url - is hostname or ip address of vCloud director
147 url_admin - same as above.
148 user - is user that administrator for organization. Caller must make sure that
149 username has right privileges.
150
151 password - is password for a user.
152
153 VMware connector also requires PVDC administrative privileges and separate account.
154 This variables must be passed via config argument dict contains keys
155
156 dict['admin_username']
157 dict['admin_password']
158 config - Provide NSX and vCenter information
159
160 Returns:
161 Nothing.
162 """
163
164 vimconn.VimConnector.__init__(
165 self,
166 uuid,
167 name,
168 tenant_id,
169 tenant_name,
170 url,
171 url_admin,
172 user,
173 passwd,
174 log_level,
175 config,
176 )
177
178 self.logger = logging.getLogger("ro.vim.vmware")
179 self.logger.setLevel(10)
180 self.persistent_info = persistent_info
181
182 self.name = name
183 self.id = uuid
184 self.url = url
185 self.url_admin = url_admin
186 self.tenant_id = tenant_id
187 self.tenant_name = tenant_name
188 self.user = user
189 self.passwd = passwd
190 self.config = config
191 self.admin_password = None
192 self.admin_user = None
193 self.org_name = ""
194 self.nsx_manager = None
195 self.nsx_user = None
196 self.nsx_password = None
197 self.availability_zone = None
198
199 # Disable warnings from self-signed certificates.
200 requests.packages.urllib3.disable_warnings()
201
202 if tenant_name is not None:
203 orgnameandtenant = tenant_name.split(":")
204
205 if len(orgnameandtenant) == 2:
206 self.tenant_name = orgnameandtenant[1]
207 self.org_name = orgnameandtenant[0]
208 else:
209 self.tenant_name = tenant_name
210
211 if "orgname" in config:
212 self.org_name = config["orgname"]
213
214 if log_level:
215 self.logger.setLevel(getattr(logging, log_level))
216
217 try:
218 self.admin_user = config["admin_username"]
219 self.admin_password = config["admin_password"]
220 except KeyError:
221 raise vimconn.VimConnException(
222 message="Error admin username or admin password is empty."
223 )
224
225 try:
226 self.nsx_manager = config["nsx_manager"]
227 self.nsx_user = config["nsx_user"]
228 self.nsx_password = config["nsx_password"]
229 except KeyError:
230 raise vimconn.VimConnException(
231 message="Error: nsx manager or nsx user or nsx password is empty in Config"
232 )
233
234 self.vcenter_ip = config.get("vcenter_ip", None)
235 self.vcenter_port = config.get("vcenter_port", None)
236 self.vcenter_user = config.get("vcenter_user", None)
237 self.vcenter_password = config.get("vcenter_password", None)
238
239 # Set availability zone for Affinity rules
240 self.availability_zone = self.set_availability_zones()
241
242 # ############# Stub code for SRIOV #################
243 # try:
244 # self.dvs_name = config['dv_switch_name']
245 # except KeyError:
246 # raise vimconn.VimConnException(message="Error:
247 # distributed virtaul switch name is empty in Config")
248 #
249 # self.vlanID_range = config.get("vlanID_range", None)
250
251 self.org_uuid = None
252 self.client = None
253
254 if not url:
255 raise vimconn.VimConnException("url param can not be NoneType")
256
257 if not self.url_admin: # try to use normal url
258 self.url_admin = self.url
259
260 logging.debug(
261 "UUID: {} name: {} tenant_id: {} tenant name {}".format(
262 self.id, self.org_name, self.tenant_id, self.tenant_name
263 )
264 )
265 logging.debug(
266 "vcd url {} vcd username: {} vcd password: {}".format(
267 self.url, self.user, self.passwd
268 )
269 )
270 logging.debug(
271 "vcd admin username {} vcd admin passowrd {}".format(
272 self.admin_user, self.admin_password
273 )
274 )
275
276 # initialize organization
277 if self.user is not None and self.passwd is not None and self.url:
278 self.init_organization()
279
280 def __getitem__(self, index):
281 if index == "name":
282 return self.name
283
284 if index == "tenant_id":
285 return self.tenant_id
286
287 if index == "tenant_name":
288 return self.tenant_name
289 elif index == "id":
290 return self.id
291 elif index == "org_name":
292 return self.org_name
293 elif index == "org_uuid":
294 return self.org_uuid
295 elif index == "user":
296 return self.user
297 elif index == "passwd":
298 return self.passwd
299 elif index == "url":
300 return self.url
301 elif index == "url_admin":
302 return self.url_admin
303 elif index == "config":
304 return self.config
305 else:
306 raise KeyError("Invalid key '{}'".format(index))
307
308 def __setitem__(self, index, value):
309 if index == "name":
310 self.name = value
311
312 if index == "tenant_id":
313 self.tenant_id = value
314
315 if index == "tenant_name":
316 self.tenant_name = value
317 elif index == "id":
318 self.id = value
319 elif index == "org_name":
320 self.org_name = value
321 elif index == "org_uuid":
322 self.org_uuid = value
323 elif index == "user":
324 self.user = value
325 elif index == "passwd":
326 self.passwd = value
327 elif index == "url":
328 self.url = value
329 elif index == "url_admin":
330 self.url_admin = value
331 else:
332 raise KeyError("Invalid key '{}'".format(index))
333
334 def connect_as_admin(self):
335 """Method connect as pvdc admin user to vCloud director.
336 There are certain action that can be done only by provider vdc admin user.
337 Organization creation / provider network creation etc.
338
339 Returns:
340 The return client object that latter can be used to connect to vcloud director as admin for provider vdc
341 """
342 self.logger.debug("Logging into vCD {} as admin.".format(self.org_name))
343
344 try:
345 host = self.url
346 org = "System"
347 client_as_admin = Client(
348 host, verify_ssl_certs=False, api_version=API_VERSION
349 )
350 client_as_admin.set_credentials(
351 BasicLoginCredentials(self.admin_user, org, self.admin_password)
352 )
353 except Exception as e:
354 raise vimconn.VimConnException(
355 "Can't connect to vCloud director as: {} with exception {}".format(
356 self.admin_user, e
357 )
358 )
359
360 return client_as_admin
361
362 def connect(self):
363 """Method connect as normal user to vCloud director.
364
365 Returns:
366 The return client object that latter can be used to connect to vCloud director as admin for VDC
367 """
368 try:
369 self.logger.debug(
370 "Logging into vCD {} as {} to datacenter {}.".format(
371 self.org_name, self.user, self.org_name
372 )
373 )
374 host = self.url
375 client = Client(host, verify_ssl_certs=False, api_version=API_VERSION)
376 client.set_credentials(
377 BasicLoginCredentials(self.user, self.org_name, self.passwd)
378 )
379 except Exception as e:
380 raise vimconn.VimConnConnectionException(
381 "Can't connect to vCloud director org: "
382 "{} as user {} with exception: {}".format(self.org_name, self.user, e)
383 )
384
385 return client
386
387 def init_organization(self):
388 """Method initialize organization UUID and VDC parameters.
389
390 At bare minimum client must provide organization name that present in vCloud director and VDC.
391
392 The VDC - UUID ( tenant_id) will be initialized at the run time if client didn't call constructor.
393 The Org - UUID will be initialized at the run time if data center present in vCloud director.
394
395 Returns:
396 The return vca object that letter can be used to connect to vcloud direct as admin
397 """
398 client = self.connect()
399
400 if not client:
401 raise vimconn.VimConnConnectionException("Failed to connect vCD.")
402
403 self.client = client
404 try:
405 if self.org_uuid is None:
406 org_list = client.get_org_list()
407 for org in org_list.Org:
408 # we set org UUID at the init phase but we can do it only when we have valid credential.
409 if org.get("name") == self.org_name:
410 self.org_uuid = org.get("href").split("/")[-1]
411 self.logger.debug(
412 "Setting organization UUID {}".format(self.org_uuid)
413 )
414 break
415 else:
416 raise vimconn.VimConnException(
417 "Vcloud director organization {} not found".format(
418 self.org_name
419 )
420 )
421
422 # if well good we require for org details
423 org_details_dict = self.get_org(org_uuid=self.org_uuid)
424
425 # we have two case if we want to initialize VDC ID or VDC name at run time
426 # tenant_name provided but no tenant id
427 if (
428 self.tenant_id is None
429 and self.tenant_name is not None
430 and "vdcs" in org_details_dict
431 ):
432 vdcs_dict = org_details_dict["vdcs"]
433 for vdc in vdcs_dict:
434 if vdcs_dict[vdc] == self.tenant_name:
435 self.tenant_id = vdc
436 self.logger.debug(
437 "Setting vdc uuid {} for organization UUID {}".format(
438 self.tenant_id, self.org_name
439 )
440 )
441 break
442 else:
443 raise vimconn.VimConnException(
444 "Tenant name indicated but not present in vcloud director."
445 )
446
447 # case two we have tenant_id but we don't have tenant name so we find and set it.
448 if (
449 self.tenant_id is not None
450 and self.tenant_name is None
451 and "vdcs" in org_details_dict
452 ):
453 vdcs_dict = org_details_dict["vdcs"]
454 for vdc in vdcs_dict:
455 if vdc == self.tenant_id:
456 self.tenant_name = vdcs_dict[vdc]
457 self.logger.debug(
458 "Setting vdc uuid {} for organization UUID {}".format(
459 self.tenant_id, self.org_name
460 )
461 )
462 break
463 else:
464 raise vimconn.VimConnException(
465 "Tenant id indicated but not present in vcloud director"
466 )
467
468 self.logger.debug("Setting organization uuid {}".format(self.org_uuid))
469 except Exception as e:
470 self.logger.debug(
471 "Failed initialize organization UUID for org {}: {}".format(
472 self.org_name, e
473 ),
474 )
475 self.logger.debug(traceback.format_exc())
476 self.org_uuid = None
477
478 def new_tenant(self, tenant_name=None, tenant_description=None):
479 """Method adds a new tenant to VIM with this name.
480 This action requires access to create VDC action in vCloud director.
481
482 Args:
483 tenant_name is tenant_name to be created.
484 tenant_description not used for this call
485
486 Return:
487 returns the tenant identifier in UUID format.
488 If action is failed method will throw vimconn.VimConnException method
489 """
490 vdc_task = self.create_vdc(vdc_name=tenant_name)
491 if vdc_task is not None:
492 vdc_uuid, _ = vdc_task.popitem()
493 self.logger.info(
494 "Created new vdc {} and uuid: {}".format(tenant_name, vdc_uuid)
495 )
496
497 return vdc_uuid
498 else:
499 raise vimconn.VimConnException(
500 "Failed create tenant {}".format(tenant_name)
501 )
502
503 def delete_tenant(self, tenant_id=None):
504 """Delete a tenant from VIM
505 Args:
506 tenant_id is tenant_id to be deleted.
507
508 Return:
509 returns the tenant identifier in UUID format.
510 If action is failed method will throw exception
511 """
512 vca = self.connect_as_admin()
513 if not vca:
514 raise vimconn.VimConnConnectionException("Failed to connect vCD")
515
516 if tenant_id is not None:
517 if vca._session:
518 # Get OrgVDC
519 url_list = [self.url, "/api/vdc/", tenant_id]
520 orgvdc_herf = "".join(url_list)
521
522 headers = {
523 "Accept": "application/*+xml;version=" + API_VERSION,
524 "x-vcloud-authorization": vca._session.headers[
525 "x-vcloud-authorization"
526 ],
527 }
528 response = self.perform_request(
529 req_type="GET", url=orgvdc_herf, headers=headers
530 )
531
532 if response.status_code != requests.codes.ok:
533 self.logger.debug(
534 "delete_tenant():GET REST API call {} failed. "
535 "Return status code {}".format(
536 orgvdc_herf, response.status_code
537 )
538 )
539
540 raise vimconn.VimConnNotFoundException(
541 "Fail to get tenant {}".format(tenant_id)
542 )
543
544 lxmlroot_respond = lxmlElementTree.fromstring(response.content)
545 namespaces = {
546 prefix: uri
547 for prefix, uri in lxmlroot_respond.nsmap.items()
548 if prefix
549 }
550 namespaces["xmlns"] = "http://www.vmware.com/vcloud/v1.5"
551 vdc_remove_href = lxmlroot_respond.find(
552 "xmlns:Link[@rel='remove']", namespaces
553 ).attrib["href"]
554 vdc_remove_href = vdc_remove_href + "?recursive=true&force=true"
555
556 response = self.perform_request(
557 req_type="DELETE", url=vdc_remove_href, headers=headers
558 )
559
560 if response.status_code == 202:
561 time.sleep(5)
562
563 return tenant_id
564 else:
565 self.logger.debug(
566 "delete_tenant(): DELETE REST API call {} failed. "
567 "Return status code {}".format(
568 vdc_remove_href, response.status_code
569 )
570 )
571
572 raise vimconn.VimConnException(
573 "Fail to delete tenant with ID {}".format(tenant_id)
574 )
575 else:
576 self.logger.debug(
577 "delete_tenant():Incorrect tenant ID {}".format(tenant_id)
578 )
579
580 raise vimconn.VimConnNotFoundException(
581 "Fail to get tenant {}".format(tenant_id)
582 )
583
584 def get_tenant_list(self, filter_dict={}):
585 """Obtain tenants of VIM
586 filter_dict can contain the following keys:
587 name: filter by tenant name
588 id: filter by tenant uuid/id
589 <other VIM specific>
590 Returns the tenant list of dictionaries:
591 [{'name':'<name>, 'id':'<id>, ...}, ...]
592
593 """
594 org_dict = self.get_org(self.org_uuid)
595 vdcs_dict = org_dict["vdcs"]
596
597 vdclist = []
598 try:
599 for k in vdcs_dict:
600 entry = {"name": vdcs_dict[k], "id": k}
601 # if caller didn't specify dictionary we return all tenants.
602
603 if filter_dict is not None and filter_dict:
604 filtered_entry = entry.copy()
605 filtered_dict = set(entry.keys()) - set(filter_dict)
606
607 for unwanted_key in filtered_dict:
608 del entry[unwanted_key]
609
610 if filter_dict == entry:
611 vdclist.append(filtered_entry)
612 else:
613 vdclist.append(entry)
614 except Exception:
615 self.logger.debug("Error in get_tenant_list()")
616 self.logger.debug(traceback.format_exc())
617
618 raise vimconn.VimConnException("Incorrect state. {}")
619
620 return vdclist
621
622 def new_network(
623 self,
624 net_name,
625 net_type,
626 ip_profile=None,
627 shared=False,
628 provider_network_profile=None,
629 ):
630 """Adds a tenant network to VIM
631 Params:
632 'net_name': name of the network
633 'net_type': one of:
634 'bridge': overlay isolated network
635 'data': underlay E-LAN network for Passthrough and SRIOV interfaces
636 'ptp': underlay E-LINE network for Passthrough and SRIOV interfaces.
637 'ip_profile': is a dict containing the IP parameters of the network
638 'ip_version': can be "IPv4" or "IPv6" (Currently only IPv4 is implemented)
639 'subnet_address': ip_prefix_schema, that is X.X.X.X/Y
640 'gateway_address': (Optional) ip_schema, that is X.X.X.X
641 'dns_address': (Optional) comma separated list of ip_schema, e.g. X.X.X.X[,X,X,X,X]
642 'dhcp_enabled': True or False
643 'dhcp_start_address': ip_schema, first IP to grant
644 'dhcp_count': number of IPs to grant.
645 'shared': if this network can be seen/use by other tenants/organization
646 'provider_network_profile': (optional) contains {segmentation-id: vlan, provider-network: vim_netowrk}
647 Returns a tuple with the network identifier and created_items, or raises an exception on error
648 created_items can be None or a dictionary where this method can include key-values that will be passed to
649 the method delete_network. Can be used to store created segments, created l2gw connections, etc.
650 Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
651 as not present.
652 """
653
654 self.logger.debug(
655 "new_network tenant {} net_type {} ip_profile {} shared {} provider_network_profile {}".format(
656 net_name, net_type, ip_profile, shared, provider_network_profile
657 )
658 )
659 # vlan = None
660 # if provider_network_profile:
661 # vlan = provider_network_profile.get("segmentation-id")
662
663 created_items = {}
664 isshared = "false"
665
666 if shared:
667 isshared = "true"
668
669 # ############# Stub code for SRIOV #################
670 # if net_type == "data" or net_type == "ptp":
671 # if self.config.get('dv_switch_name') == None:
672 # raise vimconn.VimConnConflictException("You must provide 'dv_switch_name' at config value")
673 # network_uuid = self.create_dvPort_group(net_name)
674 parent_network_uuid = None
675
676 if provider_network_profile is not None:
677 for k, v in provider_network_profile.items():
678 if k == "physical_network":
679 parent_network_uuid = self.get_physical_network_by_name(v)
680
681 network_uuid = self.create_network(
682 network_name=net_name,
683 net_type=net_type,
684 ip_profile=ip_profile,
685 isshared=isshared,
686 parent_network_uuid=parent_network_uuid,
687 )
688
689 if network_uuid is not None:
690 return network_uuid, created_items
691 else:
692 raise vimconn.VimConnUnexpectedResponse(
693 "Failed create a new network {}".format(net_name)
694 )
695
696 def get_vcd_network_list(self):
697 """Method available organization for a logged in tenant
698
699 Returns:
700 The return vca object that letter can be used to connect to vcloud direct as admin
701 """
702
703 self.logger.debug(
704 "get_vcd_network_list(): retrieving network list for vcd {}".format(
705 self.tenant_name
706 )
707 )
708
709 if not self.tenant_name:
710 raise vimconn.VimConnConnectionException("Tenant name is empty.")
711
712 _, vdc = self.get_vdc_details()
713 if vdc is None:
714 raise vimconn.VimConnConnectionException(
715 "Can't retrieve information for a VDC {}".format(self.tenant_name)
716 )
717
718 vdc_uuid = vdc.get("id").split(":")[3]
719 if self.client._session:
720 headers = {
721 "Accept": "application/*+xml;version=" + API_VERSION,
722 "x-vcloud-authorization": self.client._session.headers[
723 "x-vcloud-authorization"
724 ],
725 }
726 response = self.perform_request(
727 req_type="GET", url=vdc.get("href"), headers=headers
728 )
729
730 if response.status_code != 200:
731 self.logger.error("Failed to get vdc content")
732 raise vimconn.VimConnNotFoundException("Failed to get vdc content")
733 else:
734 content = XmlElementTree.fromstring(response.text)
735
736 network_list = []
737 try:
738 for item in content:
739 if item.tag.split("}")[-1] == "AvailableNetworks":
740 for net in item:
741 response = self.perform_request(
742 req_type="GET", url=net.get("href"), headers=headers
743 )
744
745 if response.status_code != 200:
746 self.logger.error("Failed to get network content")
747 raise vimconn.VimConnNotFoundException(
748 "Failed to get network content"
749 )
750 else:
751 net_details = XmlElementTree.fromstring(response.text)
752
753 filter_dict = {}
754 net_uuid = net_details.get("id").split(":")
755
756 if len(net_uuid) != 4:
757 continue
758 else:
759 net_uuid = net_uuid[3]
760 # create dict entry
761 self.logger.debug(
762 "get_vcd_network_list(): Adding network {} "
763 "to a list vcd id {} network {}".format(
764 net_uuid, vdc_uuid, net_details.get("name")
765 )
766 )
767 filter_dict["name"] = net_details.get("name")
768 filter_dict["id"] = net_uuid
769
770 if [
771 i.text
772 for i in net_details
773 if i.tag.split("}")[-1] == "IsShared"
774 ][0] == "true":
775 shared = True
776 else:
777 shared = False
778
779 filter_dict["shared"] = shared
780 filter_dict["tenant_id"] = vdc_uuid
781
782 if int(net_details.get("status")) == 1:
783 filter_dict["admin_state_up"] = True
784 else:
785 filter_dict["admin_state_up"] = False
786
787 filter_dict["status"] = "ACTIVE"
788 filter_dict["type"] = "bridge"
789 network_list.append(filter_dict)
790 self.logger.debug(
791 "get_vcd_network_list adding entry {}".format(
792 filter_dict
793 )
794 )
795 except Exception:
796 self.logger.debug("Error in get_vcd_network_list", exc_info=True)
797 pass
798
799 self.logger.debug("get_vcd_network_list returning {}".format(network_list))
800
801 return network_list
802
803 def get_network_list(self, filter_dict={}):
804 """Obtain tenant networks of VIM
805 Filter_dict can be:
806 name: network name OR/AND
807 id: network uuid OR/AND
808 shared: boolean OR/AND
809 tenant_id: tenant OR/AND
810 admin_state_up: boolean
811 status: 'ACTIVE'
812
813 [{key : value , key : value}]
814
815 Returns the network list of dictionaries:
816 [{<the fields at Filter_dict plus some VIM specific>}, ...]
817 List can be empty
818 """
819
820 self.logger.debug(
821 "get_network_list(): retrieving network list for vcd {}".format(
822 self.tenant_name
823 )
824 )
825
826 if not self.tenant_name:
827 raise vimconn.VimConnConnectionException("Tenant name is empty.")
828
829 _, vdc = self.get_vdc_details()
830 if vdc is None:
831 raise vimconn.VimConnConnectionException(
832 "Can't retrieve information for a VDC {}.".format(self.tenant_name)
833 )
834
835 try:
836 vdcid = vdc.get("id").split(":")[3]
837
838 if self.client._session:
839 headers = {
840 "Accept": "application/*+xml;version=" + API_VERSION,
841 "x-vcloud-authorization": self.client._session.headers[
842 "x-vcloud-authorization"
843 ],
844 }
845 response = self.perform_request(
846 req_type="GET", url=vdc.get("href"), headers=headers
847 )
848
849 if response.status_code != 200:
850 self.logger.error("Failed to get vdc content")
851 raise vimconn.VimConnNotFoundException("Failed to get vdc content")
852 else:
853 content = XmlElementTree.fromstring(response.text)
854
855 network_list = []
856 for item in content:
857 if item.tag.split("}")[-1] == "AvailableNetworks":
858 for net in item:
859 response = self.perform_request(
860 req_type="GET", url=net.get("href"), headers=headers
861 )
862
863 if response.status_code != 200:
864 self.logger.error("Failed to get network content")
865 raise vimconn.VimConnNotFoundException(
866 "Failed to get network content"
867 )
868 else:
869 net_details = XmlElementTree.fromstring(response.text)
870
871 filter_entry = {}
872 net_uuid = net_details.get("id").split(":")
873
874 if len(net_uuid) != 4:
875 continue
876 else:
877 net_uuid = net_uuid[3]
878 # create dict entry
879 self.logger.debug(
880 "get_network_list(): Adding net {}"
881 " to a list vcd id {} network {}".format(
882 net_uuid, vdcid, net_details.get("name")
883 )
884 )
885 filter_entry["name"] = net_details.get("name")
886 filter_entry["id"] = net_uuid
887
888 if [
889 i.text
890 for i in net_details
891 if i.tag.split("}")[-1] == "IsShared"
892 ][0] == "true":
893 shared = True
894 else:
895 shared = False
896
897 filter_entry["shared"] = shared
898 filter_entry["tenant_id"] = vdcid
899
900 if int(net_details.get("status")) == 1:
901 filter_entry["admin_state_up"] = True
902 else:
903 filter_entry["admin_state_up"] = False
904
905 filter_entry["status"] = "ACTIVE"
906 filter_entry["type"] = "bridge"
907 filtered_entry = filter_entry.copy()
908
909 if filter_dict is not None and filter_dict:
910 # we remove all the key : value we don't care and match only
911 # respected field
912 filtered_dict = set(filter_entry.keys()) - set(
913 filter_dict
914 )
915
916 for unwanted_key in filtered_dict:
917 del filter_entry[unwanted_key]
918
919 if filter_dict == filter_entry:
920 network_list.append(filtered_entry)
921 else:
922 network_list.append(filtered_entry)
923 except Exception as e:
924 self.logger.debug("Error in get_network_list", exc_info=True)
925
926 if isinstance(e, vimconn.VimConnException):
927 raise
928 else:
929 raise vimconn.VimConnNotFoundException(
930 "Failed : Networks list not found {} ".format(e)
931 )
932
933 self.logger.debug("Returning {}".format(network_list))
934
935 return network_list
936
937 def get_network(self, net_id):
938 """Method obtains network details of net_id VIM network
939 Return a dict with the fields at filter_dict (see get_network_list) plus some VIM specific>}, ...]"""
940 try:
941 _, vdc = self.get_vdc_details()
942 vdc_id = vdc.get("id").split(":")[3]
943
944 if self.client._session:
945 headers = {
946 "Accept": "application/*+xml;version=" + API_VERSION,
947 "x-vcloud-authorization": self.client._session.headers[
948 "x-vcloud-authorization"
949 ],
950 }
951 response = self.perform_request(
952 req_type="GET", url=vdc.get("href"), headers=headers
953 )
954
955 if response.status_code != 200:
956 self.logger.error("Failed to get vdc content")
957 raise vimconn.VimConnNotFoundException("Failed to get vdc content")
958 else:
959 content = XmlElementTree.fromstring(response.text)
960
961 filter_dict = {}
962
963 for item in content:
964 if item.tag.split("}")[-1] == "AvailableNetworks":
965 for net in item:
966 response = self.perform_request(
967 req_type="GET", url=net.get("href"), headers=headers
968 )
969
970 if response.status_code != 200:
971 self.logger.error("Failed to get network content")
972 raise vimconn.VimConnNotFoundException(
973 "Failed to get network content"
974 )
975 else:
976 net_details = XmlElementTree.fromstring(response.text)
977
978 vdc_network_id = net_details.get("id").split(":")
979 if len(vdc_network_id) == 4 and vdc_network_id[3] == net_id:
980 filter_dict["name"] = net_details.get("name")
981 filter_dict["id"] = vdc_network_id[3]
982
983 if [
984 i.text
985 for i in net_details
986 if i.tag.split("}")[-1] == "IsShared"
987 ][0] == "true":
988 shared = True
989 else:
990 shared = False
991
992 filter_dict["shared"] = shared
993 filter_dict["tenant_id"] = vdc_id
994
995 if int(net_details.get("status")) == 1:
996 filter_dict["admin_state_up"] = True
997 else:
998 filter_dict["admin_state_up"] = False
999
1000 filter_dict["status"] = "ACTIVE"
1001 filter_dict["type"] = "bridge"
1002 self.logger.debug("Returning {}".format(filter_dict))
1003
1004 return filter_dict
1005 else:
1006 raise vimconn.VimConnNotFoundException(
1007 "Network {} not found".format(net_id)
1008 )
1009 except Exception as e:
1010 self.logger.debug("Error in get_network")
1011 self.logger.debug(traceback.format_exc())
1012
1013 if isinstance(e, vimconn.VimConnException):
1014 raise
1015 else:
1016 raise vimconn.VimConnNotFoundException(
1017 "Failed : Network not found {} ".format(e)
1018 )
1019
1020 return filter_dict
1021
1022 def delete_network(self, net_id, created_items=None):
1023 """
1024 Removes a tenant network from VIM and its associated elements
1025 :param net_id: VIM identifier of the network, provided by method new_network
1026 :param created_items: dictionary with extra items to be deleted. provided by method new_network
1027 Returns the network identifier or raises an exception upon error or when network is not found
1028 """
1029
1030 # ############# Stub code for SRIOV #################
1031 # dvport_group = self.get_dvport_group(net_id)
1032 # if dvport_group:
1033 # #delete portgroup
1034 # status = self.destroy_dvport_group(net_id)
1035 # if status:
1036 # # Remove vlanID from persistent info
1037 # if net_id in self.persistent_info["used_vlanIDs"]:
1038 # del self.persistent_info["used_vlanIDs"][net_id]
1039 #
1040 # return net_id
1041
1042 vcd_network = self.get_vcd_network(network_uuid=net_id)
1043 if vcd_network is not None and vcd_network:
1044 if self.delete_network_action(network_uuid=net_id):
1045 return net_id
1046 else:
1047 raise vimconn.VimConnNotFoundException(
1048 "Network {} not found".format(net_id)
1049 )
1050
1051 def refresh_nets_status(self, net_list):
1052 """Get the status of the networks
1053 Params: the list of network identifiers
1054 Returns a dictionary with:
1055 net_id: #VIM id of this network
1056 status: #Mandatory. Text with one of:
1057 # DELETED (not found at vim)
1058 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
1059 # OTHER (Vim reported other status not understood)
1060 # ERROR (VIM indicates an ERROR status)
1061 # ACTIVE, INACTIVE, DOWN (admin down),
1062 # BUILD (on building process)
1063 #
1064 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
1065 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
1066
1067 """
1068 dict_entry = {}
1069 try:
1070 for net in net_list:
1071 errormsg = ""
1072 vcd_network = self.get_vcd_network(network_uuid=net)
1073 if vcd_network is not None and vcd_network:
1074 if vcd_network["status"] == "1":
1075 status = "ACTIVE"
1076 else:
1077 status = "DOWN"
1078 else:
1079 status = "DELETED"
1080 errormsg = "Network not found."
1081
1082 dict_entry[net] = {
1083 "status": status,
1084 "error_msg": errormsg,
1085 "vim_info": yaml.safe_dump(vcd_network),
1086 }
1087 except Exception:
1088 self.logger.debug("Error in refresh_nets_status")
1089 self.logger.debug(traceback.format_exc())
1090
1091 return dict_entry
1092
1093 def get_flavor(self, flavor_id):
1094 """Obtain flavor details from the VIM
1095 Returns the flavor dict details {'id':<>, 'name':<>, other vim specific } #TODO to concrete
1096 """
1097 if flavor_id not in vimconnector.flavorlist:
1098 raise vimconn.VimConnNotFoundException("Flavor not found.")
1099
1100 return vimconnector.flavorlist[flavor_id]
1101
1102 def new_flavor(self, flavor_data):
1103 """Adds a tenant flavor to VIM
1104 flavor_data contains a dictionary with information, keys:
1105 name: flavor name
1106 ram: memory (cloud type) in MBytes
1107 vpcus: cpus (cloud type)
1108 extended: EPA parameters
1109 - numas: #items requested in same NUMA
1110 memory: number of 1G huge pages memory
1111 paired-threads|cores|threads: number of paired hyperthreads, complete cores OR individual
1112 threads
1113 interfaces: # passthrough(PT) or SRIOV interfaces attached to this numa
1114 - name: interface name
1115 dedicated: yes|no|yes:sriov; for PT, SRIOV or only one SRIOV for the physical NIC
1116 bandwidth: X Gbps; requested guarantee bandwidth
1117 vpci: requested virtual PCI address
1118 disk: disk size
1119 is_public:
1120 #TODO to concrete
1121 Returns the flavor identifier"""
1122
1123 # generate a new uuid put to internal dict and return it.
1124 self.logger.debug("Creating new flavor - flavor_data: {}".format(flavor_data))
1125 new_flavor = flavor_data
1126 ram = flavor_data.get(FLAVOR_RAM_KEY, 1024)
1127 cpu = flavor_data.get(FLAVOR_VCPUS_KEY, 1)
1128 disk = flavor_data.get(FLAVOR_DISK_KEY, 0)
1129
1130 if not isinstance(ram, int):
1131 raise vimconn.VimConnException("Non-integer value for ram")
1132 elif not isinstance(cpu, int):
1133 raise vimconn.VimConnException("Non-integer value for cpu")
1134 elif not isinstance(disk, int):
1135 raise vimconn.VimConnException("Non-integer value for disk")
1136
1137 extended_flv = flavor_data.get("extended")
1138 if extended_flv:
1139 numas = extended_flv.get("numas")
1140 if numas:
1141 for numa in numas:
1142 # overwrite ram and vcpus
1143 if "memory" in numa:
1144 ram = numa["memory"] * 1024
1145
1146 if "paired-threads" in numa:
1147 cpu = numa["paired-threads"] * 2
1148 elif "cores" in numa:
1149 cpu = numa["cores"]
1150 elif "threads" in numa:
1151 cpu = numa["threads"]
1152
1153 new_flavor[FLAVOR_RAM_KEY] = ram
1154 new_flavor[FLAVOR_VCPUS_KEY] = cpu
1155 new_flavor[FLAVOR_DISK_KEY] = disk
1156 # generate a new uuid put to internal dict and return it.
1157 flavor_id = uuid.uuid4()
1158 vimconnector.flavorlist[str(flavor_id)] = new_flavor
1159 self.logger.debug("Created flavor - {} : {}".format(flavor_id, new_flavor))
1160
1161 return str(flavor_id)
1162
1163 def delete_flavor(self, flavor_id):
1164 """Deletes a tenant flavor from VIM identify by its id
1165
1166 Returns the used id or raise an exception
1167 """
1168 if flavor_id not in vimconnector.flavorlist:
1169 raise vimconn.VimConnNotFoundException("Flavor not found.")
1170
1171 vimconnector.flavorlist.pop(flavor_id, None)
1172
1173 return flavor_id
1174
1175 def new_image(self, image_dict):
1176 """
1177 Adds a tenant image to VIM
1178 Returns:
1179 200, image-id if the image is created
1180 <0, message if there is an error
1181 """
1182 return self.get_image_id_from_path(image_dict["location"])
1183
1184 def delete_image(self, image_id):
1185 """
1186 Deletes a tenant image from VIM
1187 Args:
1188 image_id is ID of Image to be deleted
1189 Return:
1190 returns the image identifier in UUID format or raises an exception on error
1191 """
1192 conn = self.connect_as_admin()
1193
1194 if not conn:
1195 raise vimconn.VimConnConnectionException("Failed to connect vCD")
1196
1197 # Get Catalog details
1198 url_list = [self.url, "/api/catalog/", image_id]
1199 catalog_herf = "".join(url_list)
1200
1201 headers = {
1202 "Accept": "application/*+xml;version=" + API_VERSION,
1203 "x-vcloud-authorization": conn._session.headers["x-vcloud-authorization"],
1204 }
1205
1206 response = self.perform_request(
1207 req_type="GET", url=catalog_herf, headers=headers
1208 )
1209
1210 if response.status_code != requests.codes.ok:
1211 self.logger.debug(
1212 "delete_image():GET REST API call {} failed. "
1213 "Return status code {}".format(catalog_herf, response.status_code)
1214 )
1215
1216 raise vimconn.VimConnNotFoundException(
1217 "Fail to get image {}".format(image_id)
1218 )
1219
1220 lxmlroot_respond = lxmlElementTree.fromstring(response.content)
1221 namespaces = {
1222 prefix: uri for prefix, uri in lxmlroot_respond.nsmap.items() if prefix
1223 }
1224 namespaces["xmlns"] = "http://www.vmware.com/vcloud/v1.5"
1225
1226 catalogItems_section = lxmlroot_respond.find("xmlns:CatalogItems", namespaces)
1227 catalogItems = catalogItems_section.iterfind("xmlns:CatalogItem", namespaces)
1228
1229 for catalogItem in catalogItems:
1230 catalogItem_href = catalogItem.attrib["href"]
1231
1232 response = self.perform_request(
1233 req_type="GET", url=catalogItem_href, headers=headers
1234 )
1235
1236 if response.status_code != requests.codes.ok:
1237 self.logger.debug(
1238 "delete_image():GET REST API call {} failed. "
1239 "Return status code {}".format(catalog_herf, response.status_code)
1240 )
1241 raise vimconn.VimConnNotFoundException(
1242 "Fail to get catalogItem {} for catalog {}".format(
1243 catalogItem, image_id
1244 )
1245 )
1246
1247 lxmlroot_respond = lxmlElementTree.fromstring(response.content)
1248 namespaces = {
1249 prefix: uri for prefix, uri in lxmlroot_respond.nsmap.items() if prefix
1250 }
1251 namespaces["xmlns"] = "http://www.vmware.com/vcloud/v1.5"
1252 catalogitem_remove_href = lxmlroot_respond.find(
1253 "xmlns:Link[@rel='remove']", namespaces
1254 ).attrib["href"]
1255
1256 # Remove catalogItem
1257 response = self.perform_request(
1258 req_type="DELETE", url=catalogitem_remove_href, headers=headers
1259 )
1260
1261 if response.status_code == requests.codes.no_content:
1262 self.logger.debug("Deleted Catalog item {}".format(catalogItem))
1263 else:
1264 raise vimconn.VimConnException(
1265 "Fail to delete Catalog Item {}".format(catalogItem)
1266 )
1267
1268 # Remove catalog
1269 url_list = [self.url, "/api/admin/catalog/", image_id]
1270 catalog_remove_herf = "".join(url_list)
1271 response = self.perform_request(
1272 req_type="DELETE", url=catalog_remove_herf, headers=headers
1273 )
1274
1275 if response.status_code == requests.codes.no_content:
1276 self.logger.debug("Deleted Catalog {}".format(image_id))
1277
1278 return image_id
1279 else:
1280 raise vimconn.VimConnException("Fail to delete Catalog {}".format(image_id))
1281
1282 def catalog_exists(self, catalog_name, catalogs):
1283 """
1284
1285 :param catalog_name:
1286 :param catalogs:
1287 :return:
1288 """
1289 for catalog in catalogs:
1290 if catalog["name"] == catalog_name:
1291 return catalog["id"]
1292
1293 def create_vimcatalog(self, vca=None, catalog_name=None):
1294 """Create new catalog entry in vCloud director.
1295
1296 Args
1297 vca: vCloud director.
1298 catalog_name catalog that client wish to create. Note no validation done for a name.
1299 Client must make sure that provide valid string representation.
1300
1301 Returns catalog id if catalog created else None.
1302
1303 """
1304 try:
1305 lxml_catalog_element = vca.create_catalog(catalog_name, catalog_name)
1306
1307 if lxml_catalog_element:
1308 id_attr_value = lxml_catalog_element.get("id")
1309 return id_attr_value.split(":")[-1]
1310
1311 catalogs = vca.list_catalogs()
1312 except Exception as ex:
1313 self.logger.error(
1314 'create_vimcatalog(): Creation of catalog "{}" failed with error: {}'.format(
1315 catalog_name, ex
1316 )
1317 )
1318 raise
1319 return self.catalog_exists(catalog_name, catalogs)
1320
1321 # noinspection PyIncorrectDocstring
1322 def upload_ovf(
1323 self,
1324 vca=None,
1325 catalog_name=None,
1326 image_name=None,
1327 media_file_name=None,
1328 description="",
1329 progress=False,
1330 chunk_bytes=128 * 1024,
1331 ):
1332 """
1333 Uploads a OVF file to a vCloud catalog
1334
1335 :param chunk_bytes:
1336 :param progress:
1337 :param description:
1338 :param image_name:
1339 :param vca:
1340 :param catalog_name: (str): The name of the catalog to upload the media.
1341 :param media_file_name: (str): The name of the local media file to upload.
1342 :return: (bool) True if the media file was successfully uploaded, false otherwise.
1343 """
1344 os.path.isfile(media_file_name)
1345 statinfo = os.stat(media_file_name)
1346
1347 # find a catalog entry where we upload OVF.
1348 # create vApp Template and check the status if vCD able to read OVF it will respond with appropirate
1349 # status change.
1350 # if VCD can parse OVF we upload VMDK file
1351 try:
1352 for catalog in vca.list_catalogs():
1353 if catalog_name != catalog["name"]:
1354 continue
1355 catalog_href = "{}/api/catalog/{}/action/upload".format(
1356 self.url, catalog["id"]
1357 )
1358 data = """
1359 <UploadVAppTemplateParams name="{}"
1360 xmlns="http://www.vmware.com/vcloud/v1.5"
1361 xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1">
1362 <Description>{} vApp Template</Description>
1363 </UploadVAppTemplateParams>
1364 """.format(
1365 catalog_name, description
1366 )
1367
1368 if self.client:
1369 headers = {
1370 "Accept": "application/*+xml;version=" + API_VERSION,
1371 "x-vcloud-authorization": self.client._session.headers[
1372 "x-vcloud-authorization"
1373 ],
1374 }
1375 headers[
1376 "Content-Type"
1377 ] = "application/vnd.vmware.vcloud.uploadVAppTemplateParams+xml"
1378
1379 response = self.perform_request(
1380 req_type="POST", url=catalog_href, headers=headers, data=data
1381 )
1382
1383 if response.status_code == requests.codes.created:
1384 catalogItem = XmlElementTree.fromstring(response.text)
1385 entity = [
1386 child
1387 for child in catalogItem
1388 if child.get("type")
1389 == "application/vnd.vmware.vcloud.vAppTemplate+xml"
1390 ][0]
1391 href = entity.get("href")
1392 template = href
1393
1394 response = self.perform_request(
1395 req_type="GET", url=href, headers=headers
1396 )
1397
1398 if response.status_code == requests.codes.ok:
1399 headers["Content-Type"] = "Content-Type text/xml"
1400 result = re.search(
1401 'rel="upload:default"\shref="(.*?\/descriptor.ovf)"',
1402 response.text,
1403 )
1404
1405 if result:
1406 transfer_href = result.group(1)
1407
1408 response = self.perform_request(
1409 req_type="PUT",
1410 url=transfer_href,
1411 headers=headers,
1412 data=open(media_file_name, "rb"),
1413 )
1414
1415 if response.status_code != requests.codes.ok:
1416 self.logger.debug(
1417 "Failed create vApp template for catalog name {} and image {}".format(
1418 catalog_name, media_file_name
1419 )
1420 )
1421 return False
1422
1423 # TODO fix this with aync block
1424 time.sleep(5)
1425
1426 self.logger.debug(
1427 "vApp template for catalog name {} and image {}".format(
1428 catalog_name, media_file_name
1429 )
1430 )
1431
1432 # uploading VMDK file
1433 # check status of OVF upload and upload remaining files.
1434 response = self.perform_request(
1435 req_type="GET", url=template, headers=headers
1436 )
1437
1438 if response.status_code == requests.codes.ok:
1439 result = re.search(
1440 'rel="upload:default"\s*href="(.*?vmdk)"', response.text
1441 )
1442
1443 if result:
1444 link_href = result.group(1)
1445
1446 # we skip ovf since it already uploaded.
1447 if "ovf" in link_href:
1448 continue
1449
1450 # The OVF file and VMDK must be in a same directory
1451 head, _ = os.path.split(media_file_name)
1452 file_vmdk = head + "/" + link_href.split("/")[-1]
1453
1454 if not os.path.isfile(file_vmdk):
1455 return False
1456
1457 statinfo = os.stat(file_vmdk)
1458 if statinfo.st_size == 0:
1459 return False
1460
1461 hrefvmdk = link_href
1462
1463 if progress:
1464 widgets = [
1465 "Uploading file: ",
1466 Percentage(),
1467 " ",
1468 Bar(),
1469 " ",
1470 ETA(),
1471 " ",
1472 FileTransferSpeed(),
1473 ]
1474 progress_bar = ProgressBar(
1475 widgets=widgets, maxval=statinfo.st_size
1476 ).start()
1477
1478 bytes_transferred = 0
1479 f = open(file_vmdk, "rb")
1480
1481 while bytes_transferred < statinfo.st_size:
1482 my_bytes = f.read(chunk_bytes)
1483 if len(my_bytes) <= chunk_bytes:
1484 headers["Content-Range"] = "bytes {}-{}/{}".format(
1485 bytes_transferred,
1486 len(my_bytes) - 1,
1487 statinfo.st_size,
1488 )
1489 headers["Content-Length"] = str(len(my_bytes))
1490 response = requests.put(
1491 url=hrefvmdk,
1492 headers=headers,
1493 data=my_bytes,
1494 verify=False,
1495 )
1496
1497 if response.status_code == requests.codes.ok:
1498 bytes_transferred += len(my_bytes)
1499 if progress:
1500 progress_bar.update(bytes_transferred)
1501 else:
1502 self.logger.debug(
1503 "file upload failed with error: [{}] {}".format(
1504 response.status_code, response.text
1505 )
1506 )
1507
1508 f.close()
1509
1510 return False
1511
1512 f.close()
1513 if progress:
1514 progress_bar.finish()
1515 time.sleep(10)
1516
1517 return True
1518 else:
1519 self.logger.debug(
1520 "Failed retrieve vApp template for catalog name {} for OVF {}".format(
1521 catalog_name, media_file_name
1522 )
1523 )
1524 return False
1525 except Exception as exp:
1526 self.logger.debug(
1527 "Failed while uploading OVF to catalog {} for OVF file {} with Exception {}".format(
1528 catalog_name, media_file_name, exp
1529 )
1530 )
1531
1532 raise vimconn.VimConnException(
1533 "Failed while uploading OVF to catalog {} for OVF file {} with Exception {}".format(
1534 catalog_name, media_file_name, exp
1535 )
1536 )
1537
1538 self.logger.debug(
1539 "Failed retrieve catalog name {} for OVF file {}".format(
1540 catalog_name, media_file_name
1541 )
1542 )
1543
1544 return False
1545
1546 def upload_vimimage(
1547 self,
1548 vca=None,
1549 catalog_name=None,
1550 media_name=None,
1551 medial_file_name=None,
1552 progress=False,
1553 ):
1554 """Upload media file"""
1555 # TODO add named parameters for readability
1556 return self.upload_ovf(
1557 vca=vca,
1558 catalog_name=catalog_name,
1559 image_name=media_name.split(".")[0],
1560 media_file_name=medial_file_name,
1561 description="medial_file_name",
1562 progress=progress,
1563 )
1564
1565 def validate_uuid4(self, uuid_string=None):
1566 """Method validate correct format of UUID.
1567
1568 Return: true if string represent valid uuid
1569 """
1570 try:
1571 uuid.UUID(uuid_string, version=4)
1572 except ValueError:
1573 return False
1574
1575 return True
1576
1577 def get_catalogid(self, catalog_name=None, catalogs=None):
1578 """Method check catalog and return catalog ID in UUID format.
1579
1580 Args
1581 catalog_name: catalog name as string
1582 catalogs: list of catalogs.
1583
1584 Return: catalogs uuid
1585 """
1586 for catalog in catalogs:
1587 if catalog["name"] == catalog_name:
1588 catalog_id = catalog["id"]
1589 return catalog_id
1590
1591 return None
1592
1593 def get_catalogbyid(self, catalog_uuid=None, catalogs=None):
1594 """Method check catalog and return catalog name lookup done by catalog UUID.
1595
1596 Args
1597 catalog_name: catalog name as string
1598 catalogs: list of catalogs.
1599
1600 Return: catalogs name or None
1601 """
1602 if not self.validate_uuid4(uuid_string=catalog_uuid):
1603 return None
1604
1605 for catalog in catalogs:
1606 catalog_id = catalog.get("id")
1607
1608 if catalog_id == catalog_uuid:
1609 return catalog.get("name")
1610
1611 return None
1612
1613 def get_catalog_obj(self, catalog_uuid=None, catalogs=None):
1614 """Method check catalog and return catalog name lookup done by catalog UUID.
1615
1616 Args
1617 catalog_name: catalog name as string
1618 catalogs: list of catalogs.
1619
1620 Return: catalogs name or None
1621 """
1622 if not self.validate_uuid4(uuid_string=catalog_uuid):
1623 return None
1624
1625 for catalog in catalogs:
1626 catalog_id = catalog.get("id")
1627
1628 if catalog_id == catalog_uuid:
1629 return catalog
1630
1631 return None
1632
1633 def get_image_id_from_path(self, path=None, progress=False):
1634 """Method upload OVF image to vCloud director.
1635
1636 Each OVF image represented as single catalog entry in vcloud director.
1637 The method check for existing catalog entry. The check done by file name without file extension.
1638
1639 if given catalog name already present method will respond with existing catalog uuid otherwise
1640 it will create new catalog entry and upload OVF file to newly created catalog.
1641
1642 If method can't create catalog entry or upload a file it will throw exception.
1643
1644 Method accept boolean flag progress that will output progress bar. It useful method
1645 for standalone upload use case. In case to test large file upload.
1646
1647 Args
1648 path: - valid path to OVF file.
1649 progress - boolean progress bar show progress bar.
1650
1651 Return: if image uploaded correct method will provide image catalog UUID.
1652 """
1653 if not path:
1654 raise vimconn.VimConnException("Image path can't be None.")
1655
1656 if not os.path.isfile(path):
1657 raise vimconn.VimConnException("Can't read file. File not found.")
1658
1659 if not os.access(path, os.R_OK):
1660 raise vimconn.VimConnException(
1661 "Can't read file. Check file permission to read."
1662 )
1663
1664 self.logger.debug("get_image_id_from_path() client requesting {} ".format(path))
1665
1666 _, filename = os.path.split(path)
1667 _, file_extension = os.path.splitext(path)
1668 if file_extension != ".ovf":
1669 self.logger.debug(
1670 "Wrong file extension {} connector support only OVF container.".format(
1671 file_extension
1672 )
1673 )
1674
1675 raise vimconn.VimConnException(
1676 "Wrong container. vCloud director supports only OVF."
1677 )
1678
1679 catalog_name = os.path.splitext(filename)[0]
1680 catalog_md5_name = hashlib.md5(path.encode("utf-8")).hexdigest()
1681 self.logger.debug(
1682 "File name {} Catalog Name {} file path {} "
1683 "vdc catalog name {}".format(filename, catalog_name, path, catalog_md5_name)
1684 )
1685
1686 try:
1687 org, _ = self.get_vdc_details()
1688 catalogs = org.list_catalogs()
1689 except Exception as exp:
1690 self.logger.debug("Failed get catalogs() with Exception {} ".format(exp))
1691
1692 raise vimconn.VimConnException(
1693 "Failed get catalogs() with Exception {} ".format(exp)
1694 )
1695
1696 if len(catalogs) == 0:
1697 self.logger.info(
1698 "Creating a new catalog entry {} in vcloud director".format(
1699 catalog_name
1700 )
1701 )
1702
1703 if self.create_vimcatalog(org, catalog_md5_name) is None:
1704 raise vimconn.VimConnException(
1705 "Failed create new catalog {} ".format(catalog_md5_name)
1706 )
1707
1708 result = self.upload_vimimage(
1709 vca=org,
1710 catalog_name=catalog_md5_name,
1711 media_name=filename,
1712 medial_file_name=path,
1713 progress=progress,
1714 )
1715
1716 if not result:
1717 raise vimconn.VimConnException(
1718 "Failed create vApp template for catalog {} ".format(catalog_name)
1719 )
1720
1721 return self.get_catalogid(catalog_name, catalogs)
1722 else:
1723 for catalog in catalogs:
1724 # search for existing catalog if we find same name we return ID
1725 # TODO optimize this
1726 if catalog["name"] == catalog_md5_name:
1727 self.logger.debug(
1728 "Found existing catalog entry for {} "
1729 "catalog id {}".format(
1730 catalog_name, self.get_catalogid(catalog_md5_name, catalogs)
1731 )
1732 )
1733
1734 return self.get_catalogid(catalog_md5_name, catalogs)
1735
1736 # if we didn't find existing catalog we create a new one and upload image.
1737 self.logger.debug(
1738 "Creating new catalog entry {} - {}".format(catalog_name, catalog_md5_name)
1739 )
1740 if self.create_vimcatalog(org, catalog_md5_name) is None:
1741 raise vimconn.VimConnException(
1742 "Failed create new catalog {} ".format(catalog_md5_name)
1743 )
1744
1745 result = self.upload_vimimage(
1746 vca=org,
1747 catalog_name=catalog_md5_name,
1748 media_name=filename,
1749 medial_file_name=path,
1750 progress=progress,
1751 )
1752 if not result:
1753 raise vimconn.VimConnException(
1754 "Failed create vApp template for catalog {} ".format(catalog_md5_name)
1755 )
1756
1757 return self.get_catalogid(catalog_md5_name, org.list_catalogs())
1758
1759 def get_image_list(self, filter_dict={}):
1760 """Obtain tenant images from VIM
1761 Filter_dict can be:
1762 name: image name
1763 id: image uuid
1764 checksum: image checksum
1765 location: image path
1766 Returns the image list of dictionaries:
1767 [{<the fields at Filter_dict plus some VIM specific>}, ...]
1768 List can be empty
1769 """
1770 try:
1771 org, _ = self.get_vdc_details()
1772 image_list = []
1773 catalogs = org.list_catalogs()
1774
1775 if len(catalogs) == 0:
1776 return image_list
1777 else:
1778 for catalog in catalogs:
1779 catalog_uuid = catalog.get("id")
1780 name = catalog.get("name")
1781 filtered_dict = {}
1782
1783 if filter_dict.get("name") and filter_dict["name"] != name:
1784 continue
1785
1786 if filter_dict.get("id") and filter_dict["id"] != catalog_uuid:
1787 continue
1788
1789 filtered_dict["name"] = name
1790 filtered_dict["id"] = catalog_uuid
1791 image_list.append(filtered_dict)
1792
1793 self.logger.debug(
1794 "List of already created catalog items: {}".format(image_list)
1795 )
1796
1797 return image_list
1798 except Exception as exp:
1799 raise vimconn.VimConnException(
1800 "Exception occured while retriving catalog items {}".format(exp)
1801 )
1802
1803 def get_vappid(self, vdc=None, vapp_name=None):
1804 """Method takes vdc object and vApp name and returns vapp uuid or None
1805
1806 Args:
1807 vdc: The VDC object.
1808 vapp_name: is application vappp name identifier
1809
1810 Returns:
1811 The return vApp name otherwise None
1812 """
1813 if vdc is None or vapp_name is None:
1814 return None
1815
1816 # UUID has following format https://host/api/vApp/vapp-30da58a3-e7c7-4d09-8f68-d4c8201169cf
1817 try:
1818 refs = [
1819 ref
1820 for ref in vdc.ResourceEntities.ResourceEntity
1821 if ref.name == vapp_name
1822 and ref.type_ == "application/vnd.vmware.vcloud.vApp+xml"
1823 ]
1824
1825 if len(refs) == 1:
1826 return refs[0].href.split("vapp")[1][1:]
1827 except Exception as e:
1828 self.logger.exception(e)
1829 return False
1830
1831 return None
1832
1833 def check_vapp(self, vdc=None, vapp_uuid=None):
1834 """Method Method returns True or False if vapp deployed in vCloud director
1835
1836 Args:
1837 vca: Connector to VCA
1838 vdc: The VDC object.
1839 vappid: vappid is application identifier
1840
1841 Returns:
1842 The return True if vApp deployed
1843 :param vdc:
1844 :param vapp_uuid:
1845 """
1846 try:
1847 refs = [
1848 ref
1849 for ref in vdc.ResourceEntities.ResourceEntity
1850 if ref.type_ == "application/vnd.vmware.vcloud.vApp+xml"
1851 ]
1852
1853 for ref in refs:
1854 vappid = ref.href.split("vapp")[1][1:]
1855 # find vapp with respected vapp uuid
1856
1857 if vappid == vapp_uuid:
1858 return True
1859 except Exception as e:
1860 self.logger.exception(e)
1861
1862 return False
1863
1864 return False
1865
1866 def get_namebyvappid(self, vapp_uuid=None):
1867 """Method returns vApp name from vCD and lookup done by vapp_id.
1868
1869 Args:
1870 vapp_uuid: vappid is application identifier
1871
1872 Returns:
1873 The return vApp name otherwise None
1874 """
1875 try:
1876 if self.client and vapp_uuid:
1877 vapp_call = "{}/api/vApp/vapp-{}".format(self.url, vapp_uuid)
1878 headers = {
1879 "Accept": "application/*+xml;version=" + API_VERSION,
1880 "x-vcloud-authorization": self.client._session.headers[
1881 "x-vcloud-authorization"
1882 ],
1883 }
1884
1885 response = self.perform_request(
1886 req_type="GET", url=vapp_call, headers=headers
1887 )
1888
1889 # Retry login if session expired & retry sending request
1890 if response.status_code == 403:
1891 response = self.retry_rest("GET", vapp_call)
1892
1893 tree = XmlElementTree.fromstring(response.text)
1894
1895 return tree.attrib["name"] if "name" in tree.attrib else None
1896 except Exception as e:
1897 self.logger.exception(e)
1898
1899 return None
1900
1901 return None
1902
1903 def new_vminstance(
1904 self,
1905 name=None,
1906 description="",
1907 start=False,
1908 image_id=None,
1909 flavor_id=None,
1910 affinity_group_list=[],
1911 net_list=[],
1912 cloud_config=None,
1913 disk_list=None,
1914 availability_zone_index=None,
1915 availability_zone_list=None,
1916 ):
1917 """Adds a VM instance to VIM
1918 Params:
1919 'start': (boolean) indicates if VM must start or created in pause mode.
1920 'image_id','flavor_id': image and flavor VIM id to use for the VM
1921 'net_list': list of interfaces, each one is a dictionary with:
1922 'name': (optional) name for the interface.
1923 'net_id': VIM network id where this interface must be connect to. Mandatory for type==virtual
1924 'vpci': (optional) virtual vPCI address to assign at the VM. Can be ignored depending on VIM
1925 capabilities
1926 'model': (optional and only have sense for type==virtual) interface model: virtio, e1000, ...
1927 'mac_address': (optional) mac address to assign to this interface
1928 #TODO: CHECK if an optional 'vlan' parameter is needed for VIMs when type if VF and net_id is not
1929 provided, the VLAN tag to be used. In case net_id is provided, the internal network vlan is used
1930 for tagging VF
1931 'type': (mandatory) can be one of:
1932 'virtual', in this case always connected to a network of type 'net_type=bridge'
1933 'PCI-PASSTHROUGH' or 'PF' (passthrough): depending on VIM capabilities it can be connected to a
1934 data/ptp network or it can created unconnected
1935 'SR-IOV' or 'VF' (SRIOV with VLAN tag): same as PF for network connectivity.
1936 'VFnotShared'(SRIOV without VLAN tag) same as PF for network connectivity. VF where no other VFs
1937 are allocated on the same physical NIC
1938 'bw': (optional) only for PF/VF/VFnotShared. Minimal Bandwidth required for the interface in GBPS
1939 'port_security': (optional) If False it must avoid any traffic filtering at this interface. If missing
1940 or True, it must apply the default VIM behaviour
1941 After execution the method will add the key:
1942 'vim_id': must be filled/added by this method with the VIM identifier generated by the VIM for this
1943 interface. 'net_list' is modified
1944 'cloud_config': (optional) dictionary with:
1945 'key-pairs': (optional) list of strings with the public key to be inserted to the default user
1946 'users': (optional) list of users to be inserted, each item is a dict with:
1947 'name': (mandatory) user name,
1948 'key-pairs': (optional) list of strings with the public key to be inserted to the user
1949 'user-data': (optional) can be a string with the text script to be passed directly to cloud-init,
1950 or a list of strings, each one contains a script to be passed, usually with a MIMEmultipart file
1951 'config-files': (optional). List of files to be transferred. Each item is a dict with:
1952 'dest': (mandatory) string with the destination absolute path
1953 'encoding': (optional, by default text). Can be one of:
1954 'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
1955 'content' (mandatory): string with the content of the file
1956 'permissions': (optional) string with file permissions, typically octal notation '0644'
1957 'owner': (optional) file owner, string with the format 'owner:group'
1958 'boot-data-drive': boolean to indicate if user-data must be passed using a boot drive (hard disk)
1959 'disk_list': (optional) list with additional disks to the VM. Each item is a dict with:
1960 'image_id': (optional). VIM id of an existing image. If not provided an empty disk must be mounted
1961 'size': (mandatory) string with the size of the disk in GB
1962 availability_zone_index: Index of availability_zone_list to use for this this VM. None if not AV required
1963 availability_zone_list: list of availability zones given by user in the VNFD descriptor. Ignore if
1964 availability_zone_index is None
1965 Returns a tuple with the instance identifier and created_items or raises an exception on error
1966 created_items can be None or a dictionary where this method can include key-values that will be passed to
1967 the method delete_vminstance and action_vminstance. Can be used to store created ports, volumes, etc.
1968 Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
1969 as not present.
1970 """
1971 self.logger.info("Creating new instance for entry {}".format(name))
1972 self.logger.debug(
1973 "desc {} boot {} image_id: {} flavor_id: {} net_list: {} cloud_config {} disk_list {} "
1974 "availability_zone_index {} availability_zone_list {}".format(
1975 description,
1976 start,
1977 image_id,
1978 flavor_id,
1979 net_list,
1980 cloud_config,
1981 disk_list,
1982 availability_zone_index,
1983 availability_zone_list,
1984 )
1985 )
1986
1987 # new vm name = vmname + tenant_id + uuid
1988 new_vm_name = [name, "-", str(uuid.uuid4())]
1989 vmname_andid = "".join(new_vm_name)
1990
1991 for net in net_list:
1992 if net["type"] == "PCI-PASSTHROUGH":
1993 raise vimconn.VimConnNotSupportedException(
1994 "Current vCD version does not support type : {}".format(net["type"])
1995 )
1996
1997 if len(net_list) > 10:
1998 raise vimconn.VimConnNotSupportedException(
1999 "The VM hardware versions 7 and above support upto 10 NICs only"
2000 )
2001
2002 # if vm already deployed we return existing uuid
2003 # we check for presence of VDC, Catalog entry and Flavor.
2004 org, vdc = self.get_vdc_details()
2005 if vdc is None:
2006 raise vimconn.VimConnNotFoundException(
2007 "new_vminstance(): Failed create vApp {}: (Failed retrieve VDC information)".format(
2008 name
2009 )
2010 )
2011
2012 catalogs = org.list_catalogs()
2013 if catalogs is None:
2014 # Retry once, if failed by refreshing token
2015 self.get_token()
2016 org = Org(self.client, resource=self.client.get_org())
2017 catalogs = org.list_catalogs()
2018
2019 if catalogs is None:
2020 raise vimconn.VimConnNotFoundException(
2021 "new_vminstance(): Failed create vApp {}: (Failed retrieve catalogs list)".format(
2022 name
2023 )
2024 )
2025
2026 catalog_hash_name = self.get_catalogbyid(
2027 catalog_uuid=image_id, catalogs=catalogs
2028 )
2029 if catalog_hash_name:
2030 self.logger.info(
2031 "Found catalog entry {} for image id {}".format(
2032 catalog_hash_name, image_id
2033 )
2034 )
2035 else:
2036 raise vimconn.VimConnNotFoundException(
2037 "new_vminstance(): Failed create vApp {}: "
2038 "(Failed retrieve catalog information {})".format(name, image_id)
2039 )
2040
2041 # Set vCPU and Memory based on flavor.
2042 vm_cpus = None
2043 vm_memory = None
2044 vm_disk = None
2045 numas = None
2046
2047 if flavor_id is not None:
2048 if flavor_id not in vimconnector.flavorlist:
2049 raise vimconn.VimConnNotFoundException(
2050 "new_vminstance(): Failed create vApp {}: "
2051 "Failed retrieve flavor information "
2052 "flavor id {}".format(name, flavor_id)
2053 )
2054 else:
2055 try:
2056 flavor = vimconnector.flavorlist[flavor_id]
2057 vm_cpus = flavor[FLAVOR_VCPUS_KEY]
2058 vm_memory = flavor[FLAVOR_RAM_KEY]
2059 vm_disk = flavor[FLAVOR_DISK_KEY]
2060 extended = flavor.get("extended", None)
2061
2062 if extended:
2063 numas = extended.get("numas", None)
2064 except Exception as exp:
2065 raise vimconn.VimConnException(
2066 "Corrupted flavor. {}.Exception: {}".format(flavor_id, exp)
2067 )
2068
2069 # image upload creates template name as catalog name space Template.
2070 templateName = self.get_catalogbyid(catalog_uuid=image_id, catalogs=catalogs)
2071 # power_on = 'false'
2072 # if start:
2073 # power_on = 'true'
2074
2075 # client must provide at least one entry in net_list if not we report error
2076 # If net type is mgmt, then configure it as primary net & use its NIC index as primary NIC
2077 # If no mgmt, then the 1st NN in netlist is considered as primary net.
2078 primary_net = None
2079 primary_netname = None
2080 primary_net_href = None
2081 # network_mode = 'bridged'
2082 if net_list is not None and len(net_list) > 0:
2083 for net in net_list:
2084 if "use" in net and net["use"] == "mgmt" and not primary_net:
2085 primary_net = net
2086
2087 if primary_net is None:
2088 primary_net = net_list[0]
2089
2090 try:
2091 primary_net_id = primary_net["net_id"]
2092 url_list = [self.url, "/api/network/", primary_net_id]
2093 primary_net_href = "".join(url_list)
2094 network_dict = self.get_vcd_network(network_uuid=primary_net_id)
2095
2096 if "name" in network_dict:
2097 primary_netname = network_dict["name"]
2098 except KeyError:
2099 raise vimconn.VimConnException(
2100 "Corrupted flavor. {}".format(primary_net)
2101 )
2102 else:
2103 raise vimconn.VimConnUnexpectedResponse(
2104 "new_vminstance(): Failed network list is empty."
2105 )
2106
2107 # use: 'data', 'bridge', 'mgmt'
2108 # create vApp. Set vcpu and ram based on flavor id.
2109 try:
2110 vdc_obj = VDC(self.client, resource=org.get_vdc(self.tenant_name))
2111 if not vdc_obj:
2112 raise vimconn.VimConnNotFoundException(
2113 "new_vminstance(): Failed to get VDC object"
2114 )
2115
2116 for retry in (1, 2):
2117 items = org.get_catalog_item(catalog_hash_name, catalog_hash_name)
2118 catalog_items = [items.attrib]
2119
2120 if len(catalog_items) == 1:
2121 if self.client:
2122 headers = {
2123 "Accept": "application/*+xml;version=" + API_VERSION,
2124 "x-vcloud-authorization": self.client._session.headers[
2125 "x-vcloud-authorization"
2126 ],
2127 }
2128
2129 response = self.perform_request(
2130 req_type="GET",
2131 url=catalog_items[0].get("href"),
2132 headers=headers,
2133 )
2134 catalogItem = XmlElementTree.fromstring(response.text)
2135 entity = [
2136 child
2137 for child in catalogItem
2138 if child.get("type")
2139 == "application/vnd.vmware.vcloud.vAppTemplate+xml"
2140 ][0]
2141 vapp_tempalte_href = entity.get("href")
2142
2143 response = self.perform_request(
2144 req_type="GET", url=vapp_tempalte_href, headers=headers
2145 )
2146
2147 if response.status_code != requests.codes.ok:
2148 self.logger.debug(
2149 "REST API call {} failed. Return status code {}".format(
2150 vapp_tempalte_href, response.status_code
2151 )
2152 )
2153 else:
2154 result = (response.text).replace("\n", " ")
2155
2156 vapp_template_tree = XmlElementTree.fromstring(response.text)
2157 children_element = [
2158 child for child in vapp_template_tree if "Children" in child.tag
2159 ][0]
2160 vm_element = [child for child in children_element if "Vm" in child.tag][
2161 0
2162 ]
2163 vm_name = vm_element.get("name")
2164 vm_id = vm_element.get("id")
2165 vm_href = vm_element.get("href")
2166
2167 # cpus = re.search('<rasd:Description>Number of Virtual CPUs</.*?>(\d+)</rasd:VirtualQuantity>',
2168 # result).group(1)
2169 memory_mb = re.search(
2170 "<rasd:Description>Memory Size</.*?>(\d+)</rasd:VirtualQuantity>",
2171 result,
2172 ).group(1)
2173 # cores = re.search('<vmw:CoresPerSocket ovf:required.*?>(\d+)</vmw:CoresPerSocket>', result).group(1)
2174
2175 headers[
2176 "Content-Type"
2177 ] = "application/vnd.vmware.vcloud.instantiateVAppTemplateParams+xml"
2178 vdc_id = vdc.get("id").split(":")[-1]
2179 instantiate_vapp_href = (
2180 "{}/api/vdc/{}/action/instantiateVAppTemplate".format(
2181 self.url, vdc_id
2182 )
2183 )
2184
2185 with open(
2186 os.path.join(
2187 os.path.dirname(__file__), "InstantiateVAppTemplateParams.xml"
2188 ),
2189 "r",
2190 ) as f:
2191 template = f.read()
2192
2193 data = template.format(
2194 vmname_andid,
2195 primary_netname,
2196 primary_net_href,
2197 vapp_tempalte_href,
2198 vm_href,
2199 vm_id,
2200 vm_name,
2201 primary_netname,
2202 cpu=vm_cpus,
2203 core=1,
2204 memory=vm_memory,
2205 )
2206
2207 response = self.perform_request(
2208 req_type="POST",
2209 url=instantiate_vapp_href,
2210 headers=headers,
2211 data=data,
2212 )
2213
2214 if response.status_code != 201:
2215 self.logger.error(
2216 "REST call {} failed reason : {}"
2217 "status code : {}".format(
2218 instantiate_vapp_href, response.text, response.status_code
2219 )
2220 )
2221 raise vimconn.VimConnException(
2222 "new_vminstance(): Failed to create"
2223 "vAapp {}".format(vmname_andid)
2224 )
2225 else:
2226 vapptask = self.get_task_from_response(response.text)
2227
2228 if vapptask is None and retry == 1:
2229 self.get_token() # Retry getting token
2230 continue
2231 else:
2232 break
2233
2234 if vapptask is None or vapptask is False:
2235 raise vimconn.VimConnUnexpectedResponse(
2236 "new_vminstance(): failed to create vApp {}".format(vmname_andid)
2237 )
2238
2239 # wait for task to complete
2240 result = self.client.get_task_monitor().wait_for_success(task=vapptask)
2241
2242 if result.get("status") == "success":
2243 self.logger.debug(
2244 "new_vminstance(): Sucessfully created Vapp {}".format(vmname_andid)
2245 )
2246 else:
2247 raise vimconn.VimConnUnexpectedResponse(
2248 "new_vminstance(): failed to create vApp {}".format(vmname_andid)
2249 )
2250 except Exception as exp:
2251 raise vimconn.VimConnUnexpectedResponse(
2252 "new_vminstance(): failed to create vApp {} with Exception:{}".format(
2253 vmname_andid, exp
2254 )
2255 )
2256
2257 # we should have now vapp in undeployed state.
2258 try:
2259 vdc_obj = VDC(self.client, href=vdc.get("href"))
2260 vapp_resource = vdc_obj.get_vapp(vmname_andid)
2261 vapp_uuid = vapp_resource.get("id").split(":")[-1]
2262 vapp = VApp(self.client, resource=vapp_resource)
2263 except Exception as exp:
2264 raise vimconn.VimConnUnexpectedResponse(
2265 "new_vminstance(): Failed to retrieve vApp {} after creation: Exception:{}".format(
2266 vmname_andid, exp
2267 )
2268 )
2269
2270 if vapp_uuid is None:
2271 raise vimconn.VimConnUnexpectedResponse(
2272 "new_vminstance(): Failed to retrieve vApp {} after creation".format(
2273 vmname_andid
2274 )
2275 )
2276
2277 # Add PCI passthrough/SRIOV configrations
2278 pci_devices_info = []
2279 reserve_memory = False
2280
2281 for net in net_list:
2282 if net["type"] == "PF" or net["type"] == "PCI-PASSTHROUGH":
2283 pci_devices_info.append(net)
2284 elif (
2285 net["type"] == "VF"
2286 or net["type"] == "SR-IOV"
2287 or net["type"] == "VFnotShared"
2288 ) and "net_id" in net:
2289 reserve_memory = True
2290
2291 # Add PCI
2292 if len(pci_devices_info) > 0:
2293 self.logger.info(
2294 "Need to add PCI devices {} into VM {}".format(
2295 pci_devices_info, vmname_andid
2296 )
2297 )
2298 PCI_devices_status, _, _ = self.add_pci_devices(
2299 vapp_uuid, pci_devices_info, vmname_andid
2300 )
2301
2302 if PCI_devices_status:
2303 self.logger.info(
2304 "Added PCI devives {} to VM {}".format(
2305 pci_devices_info, vmname_andid
2306 )
2307 )
2308 reserve_memory = True
2309 else:
2310 self.logger.info(
2311 "Fail to add PCI devives {} to VM {}".format(
2312 pci_devices_info, vmname_andid
2313 )
2314 )
2315
2316 # Add serial console - this allows cloud images to boot as if we are running under OpenStack
2317 self.add_serial_device(vapp_uuid)
2318
2319 if vm_disk:
2320 # Assuming there is only one disk in ovf and fast provisioning in organization vDC is disabled
2321 result = self.modify_vm_disk(vapp_uuid, vm_disk)
2322 if result:
2323 self.logger.debug("Modified Disk size of VM {} ".format(vmname_andid))
2324
2325 # Add new or existing disks to vApp
2326 if disk_list:
2327 added_existing_disk = False
2328 for disk in disk_list:
2329 if "device_type" in disk and disk["device_type"] == "cdrom":
2330 image_id = disk["image_id"]
2331 # Adding CD-ROM to VM
2332 # will revisit code once specification ready to support this feature
2333 self.insert_media_to_vm(vapp, image_id)
2334 elif "image_id" in disk and disk["image_id"] is not None:
2335 self.logger.debug(
2336 "Adding existing disk from image {} to vm {} ".format(
2337 disk["image_id"], vapp_uuid
2338 )
2339 )
2340 self.add_existing_disk(
2341 catalogs=catalogs,
2342 image_id=disk["image_id"],
2343 size=disk["size"],
2344 template_name=templateName,
2345 vapp_uuid=vapp_uuid,
2346 )
2347 added_existing_disk = True
2348 else:
2349 # Wait till added existing disk gets reflected into vCD database/API
2350 if added_existing_disk:
2351 time.sleep(5)
2352 added_existing_disk = False
2353 self.add_new_disk(vapp_uuid, disk["size"])
2354
2355 if numas:
2356 # Assigning numa affinity setting
2357 for numa in numas:
2358 if "paired-threads-id" in numa:
2359 paired_threads_id = numa["paired-threads-id"]
2360 self.set_numa_affinity(vapp_uuid, paired_threads_id)
2361
2362 # add NICs & connect to networks in netlist
2363 try:
2364 vdc_obj = VDC(self.client, href=vdc.get("href"))
2365 vapp_resource = vdc_obj.get_vapp(vmname_andid)
2366 vapp = VApp(self.client, resource=vapp_resource)
2367 vapp_id = vapp_resource.get("id").split(":")[-1]
2368
2369 self.logger.info("Removing primary NIC: ")
2370 # First remove all NICs so that NIC properties can be adjusted as needed
2371 self.remove_primary_network_adapter_from_all_vms(vapp)
2372
2373 self.logger.info("Request to connect VM to a network: {}".format(net_list))
2374 primary_nic_index = 0
2375 nicIndex = 0
2376 for net in net_list:
2377 # openmano uses network id in UUID format.
2378 # vCloud Director need a name so we do reverse operation from provided UUID we lookup a name
2379 # [{'use': 'bridge', 'net_id': '527d4bf7-566a-41e7-a9e7-ca3cdd9cef4f', 'type': 'virtual',
2380 # 'vpci': '0000:00:11.0', 'name': 'eth0'}]
2381
2382 if "net_id" not in net:
2383 continue
2384
2385 # Using net_id as a vim_id i.e. vim interface id, as do not have saperate vim interface id
2386 # Same will be returned in refresh_vms_status() as vim_interface_id
2387 net["vim_id"] = net[
2388 "net_id"
2389 ] # Provide the same VIM identifier as the VIM network
2390
2391 interface_net_id = net["net_id"]
2392 interface_net_name = self.get_network_name_by_id(
2393 network_uuid=interface_net_id
2394 )
2395 interface_network_mode = net["use"]
2396
2397 if interface_network_mode == "mgmt":
2398 primary_nic_index = nicIndex
2399
2400 """- POOL (A static IP address is allocated automatically from a pool of addresses.)
2401 - DHCP (The IP address is obtained from a DHCP service.)
2402 - MANUAL (The IP address is assigned manually in the IpAddress element.)
2403 - NONE (No IP addressing mode specified.)"""
2404
2405 if primary_netname is not None:
2406 self.logger.debug(
2407 "new_vminstance(): Filtering by net name {}".format(
2408 interface_net_name
2409 )
2410 )
2411 nets = [
2412 n
2413 for n in self.get_network_list()
2414 if n.get("name") == interface_net_name
2415 ]
2416
2417 if len(nets) == 1:
2418 self.logger.info(
2419 "new_vminstance(): Found requested network: {}".format(
2420 nets[0].get("name")
2421 )
2422 )
2423
2424 if interface_net_name != primary_netname:
2425 # connect network to VM - with all DHCP by default
2426 self.logger.info(
2427 "new_vminstance(): Attaching net {} to vapp".format(
2428 interface_net_name
2429 )
2430 )
2431 self.connect_vapp_to_org_vdc_network(
2432 vapp_id, nets[0].get("name")
2433 )
2434
2435 type_list = ("PF", "PCI-PASSTHROUGH", "VFnotShared")
2436 nic_type = "VMXNET3"
2437 if "type" in net and net["type"] not in type_list:
2438 # fetching nic type from vnf
2439 if "model" in net:
2440 if net["model"] is not None:
2441 if (
2442 net["model"].lower() == "paravirt"
2443 or net["model"].lower() == "virtio"
2444 ):
2445 nic_type = "VMXNET3"
2446 else:
2447 nic_type = net["model"]
2448
2449 self.logger.info(
2450 "new_vminstance(): adding network adapter "
2451 "to a network {}".format(nets[0].get("name"))
2452 )
2453 self.add_network_adapter_to_vms(
2454 vapp,
2455 nets[0].get("name"),
2456 primary_nic_index,
2457 nicIndex,
2458 net,
2459 nic_type=nic_type,
2460 )
2461 else:
2462 self.logger.info(
2463 "new_vminstance(): adding network adapter "
2464 "to a network {}".format(nets[0].get("name"))
2465 )
2466
2467 if net["type"] in ["SR-IOV", "VF"]:
2468 nic_type = net["type"]
2469 self.add_network_adapter_to_vms(
2470 vapp,
2471 nets[0].get("name"),
2472 primary_nic_index,
2473 nicIndex,
2474 net,
2475 nic_type=nic_type,
2476 )
2477 nicIndex += 1
2478
2479 # cloud-init for ssh-key injection
2480 if cloud_config:
2481 # Create a catalog which will be carrying the config drive ISO
2482 # This catalog is deleted during vApp deletion. The catalog name carries
2483 # vApp UUID and thats how it gets identified during its deletion.
2484 config_drive_catalog_name = "cfg_drv-" + vapp_uuid
2485 self.logger.info(
2486 'new_vminstance(): Creating catalog "{}" to carry config drive ISO'.format(
2487 config_drive_catalog_name
2488 )
2489 )
2490 config_drive_catalog_id = self.create_vimcatalog(
2491 org, config_drive_catalog_name
2492 )
2493
2494 if config_drive_catalog_id is None:
2495 error_msg = (
2496 "new_vminstance(): Failed to create new catalog '{}' to carry the config drive "
2497 "ISO".format(config_drive_catalog_name)
2498 )
2499 raise Exception(error_msg)
2500
2501 # Create config-drive ISO
2502 _, userdata = self._create_user_data(cloud_config)
2503 # self.logger.debug('new_vminstance(): The userdata for cloud-init: {}'.format(userdata))
2504 iso_path = self.create_config_drive_iso(userdata)
2505 self.logger.debug(
2506 "new_vminstance(): The ISO is successfully created. Path: {}".format(
2507 iso_path
2508 )
2509 )
2510
2511 self.logger.info(
2512 "new_vminstance(): uploading iso to catalog {}".format(
2513 config_drive_catalog_name
2514 )
2515 )
2516 self.upload_iso_to_catalog(config_drive_catalog_id, iso_path)
2517 # Attach the config-drive ISO to the VM
2518 self.logger.info(
2519 "new_vminstance(): Attaching the config-drive ISO to the VM"
2520 )
2521 self.insert_media_to_vm(vapp, config_drive_catalog_id)
2522 shutil.rmtree(os.path.dirname(iso_path), ignore_errors=True)
2523
2524 # If VM has PCI devices or SRIOV reserve memory for VM
2525 if reserve_memory:
2526 self.reserve_memory_for_all_vms(vapp, memory_mb)
2527
2528 self.logger.debug(
2529 "new_vminstance(): starting power on vApp {} ".format(vmname_andid)
2530 )
2531
2532 poweron_task = self.power_on_vapp(vapp_id, vmname_andid)
2533 result = self.client.get_task_monitor().wait_for_success(task=poweron_task)
2534 if result.get("status") == "success":
2535 self.logger.info(
2536 "new_vminstance(): Successfully power on "
2537 "vApp {}".format(vmname_andid)
2538 )
2539 else:
2540 self.logger.error(
2541 "new_vminstance(): failed to power on vApp "
2542 "{}".format(vmname_andid)
2543 )
2544
2545 except Exception as exp:
2546 try:
2547 self.delete_vminstance(vapp_uuid)
2548 except Exception as exp2:
2549 self.logger.error("new_vminstance rollback fail {}".format(exp2))
2550 # it might be a case if specific mandatory entry in dict is empty or some other pyVcloud exception
2551 self.logger.error(
2552 "new_vminstance(): Failed create new vm instance {} with exception {}".format(
2553 name, exp
2554 )
2555 )
2556 raise vimconn.VimConnException(
2557 "new_vminstance(): Failed create new vm instance {} with exception {}".format(
2558 name, exp
2559 )
2560 )
2561 # check if vApp deployed and if that the case return vApp UUID otherwise -1
2562 wait_time = 0
2563 vapp_uuid = None
2564 while wait_time <= MAX_WAIT_TIME:
2565 try:
2566 vapp_resource = vdc_obj.get_vapp(vmname_andid)
2567 vapp = VApp(self.client, resource=vapp_resource)
2568 except Exception as exp:
2569 raise vimconn.VimConnUnexpectedResponse(
2570 "new_vminstance(): Failed to retrieve vApp {} after creation: Exception:{}".format(
2571 vmname_andid, exp
2572 )
2573 )
2574
2575 # if vapp and vapp.me.deployed:
2576 if vapp and vapp_resource.get("deployed") == "true":
2577 vapp_uuid = vapp_resource.get("id").split(":")[-1]
2578 break
2579 else:
2580 self.logger.debug(
2581 "new_vminstance(): Wait for vApp {} to deploy".format(name)
2582 )
2583 time.sleep(INTERVAL_TIME)
2584
2585 wait_time += INTERVAL_TIME
2586
2587 # SET Affinity Rule for VM
2588 # Pre-requisites: User has created Hosh Groups in vCenter with respective Hosts to be used
2589 # While creating VIM account user has to pass the Host Group names in availability_zone list
2590 # "availability_zone" is a part of VIM "config" parameters
2591 # For example, in VIM config: "availability_zone":["HG_170","HG_174","HG_175"]
2592 # Host groups are referred as availability zones
2593 # With following procedure, deployed VM will be added into a VM group.
2594 # Then A VM to Host Affinity rule will be created using the VM group & Host group.
2595 if availability_zone_list:
2596 self.logger.debug(
2597 "Existing Host Groups in VIM {}".format(
2598 self.config.get("availability_zone")
2599 )
2600 )
2601 # Admin access required for creating Affinity rules
2602 client = self.connect_as_admin()
2603
2604 if not client:
2605 raise vimconn.VimConnConnectionException(
2606 "Failed to connect vCD as admin"
2607 )
2608 else:
2609 self.client = client
2610
2611 if self.client:
2612 headers = {
2613 "Accept": "application/*+xml;version=27.0",
2614 "x-vcloud-authorization": self.client._session.headers[
2615 "x-vcloud-authorization"
2616 ],
2617 }
2618
2619 # Step1: Get provider vdc details from organization
2620 pvdc_href = self.get_pvdc_for_org(self.tenant_name, headers)
2621 if pvdc_href is not None:
2622 # Step2: Found required pvdc, now get resource pool information
2623 respool_href = self.get_resource_pool_details(pvdc_href, headers)
2624 if respool_href is None:
2625 # Raise error if respool_href not found
2626 msg = "new_vminstance():Error in finding resource pool details in pvdc {}".format(
2627 pvdc_href
2628 )
2629 self.log_message(msg)
2630
2631 # Step3: Verify requested availability zone(hostGroup) is present in vCD
2632 # get availability Zone
2633 vm_az = self.get_vm_availability_zone(
2634 availability_zone_index, availability_zone_list
2635 )
2636
2637 # check if provided av zone(hostGroup) is present in vCD VIM
2638 status = self.check_availibility_zone(vm_az, respool_href, headers)
2639 if status is False:
2640 msg = (
2641 "new_vminstance(): Error in finding availability zone(Host Group): {} in "
2642 "resource pool {} status: {}"
2643 ).format(vm_az, respool_href, status)
2644 self.log_message(msg)
2645 else:
2646 self.logger.debug(
2647 "new_vminstance(): Availability zone {} found in VIM".format(vm_az)
2648 )
2649
2650 # Step4: Find VM group references to create vm group
2651 vmgrp_href = self.find_vmgroup_reference(respool_href, headers)
2652 if vmgrp_href is None:
2653 msg = "new_vminstance(): No reference to VmGroup found in resource pool"
2654 self.log_message(msg)
2655
2656 # Step5: Create a VmGroup with name az_VmGroup
2657 vmgrp_name = (
2658 vm_az + "_" + name
2659 ) # Formed VM Group name = Host Group name + VM name
2660 status = self.create_vmgroup(vmgrp_name, vmgrp_href, headers)
2661 if status is not True:
2662 msg = "new_vminstance(): Error in creating VM group {}".format(
2663 vmgrp_name
2664 )
2665 self.log_message(msg)
2666
2667 # VM Group url to add vms to vm group
2668 vmgrpname_url = self.url + "/api/admin/extension/vmGroup/name/" + vmgrp_name
2669
2670 # Step6: Add VM to VM Group
2671 # Find VM uuid from vapp_uuid
2672 vm_details = self.get_vapp_details_rest(vapp_uuid)
2673 vm_uuid = vm_details["vmuuid"]
2674
2675 status = self.add_vm_to_vmgroup(vm_uuid, vmgrpname_url, vmgrp_name, headers)
2676 if status is not True:
2677 msg = "new_vminstance(): Error in adding VM to VM group {}".format(
2678 vmgrp_name
2679 )
2680 self.log_message(msg)
2681
2682 # Step7: Create VM to Host affinity rule
2683 addrule_href = self.get_add_rule_reference(respool_href, headers)
2684 if addrule_href is None:
2685 msg = "new_vminstance(): Error in finding href to add rule in resource pool: {}".format(
2686 respool_href
2687 )
2688 self.log_message(msg)
2689
2690 status = self.create_vm_to_host_affinity_rule(
2691 addrule_href, vmgrp_name, vm_az, "Affinity", headers
2692 )
2693 if status is False:
2694 msg = "new_vminstance(): Error in creating affinity rule for VM {} in Host group {}".format(
2695 name, vm_az
2696 )
2697 self.log_message(msg)
2698 else:
2699 self.logger.debug(
2700 "new_vminstance(): Affinity rule created successfully. Added {} in Host group {}".format(
2701 name, vm_az
2702 )
2703 )
2704 # Reset token to a normal user to perform other operations
2705 self.get_token()
2706
2707 if vapp_uuid is not None:
2708 return vapp_uuid, None
2709 else:
2710 raise vimconn.VimConnUnexpectedResponse(
2711 "new_vminstance(): Failed create new vm instance {}".format(name)
2712 )
2713
2714 def create_config_drive_iso(self, user_data):
2715 tmpdir = tempfile.mkdtemp()
2716 iso_path = os.path.join(tmpdir, "ConfigDrive.iso")
2717 latest_dir = os.path.join(tmpdir, "openstack", "latest")
2718 os.makedirs(latest_dir)
2719 with open(
2720 os.path.join(latest_dir, "meta_data.json"), "w"
2721 ) as meta_file_obj, open(
2722 os.path.join(latest_dir, "user_data"), "w"
2723 ) as userdata_file_obj:
2724 userdata_file_obj.write(user_data)
2725 meta_file_obj.write(
2726 json.dumps(
2727 {
2728 "availability_zone": "nova",
2729 "launch_index": 0,
2730 "name": "ConfigDrive",
2731 "uuid": str(uuid.uuid4()),
2732 }
2733 )
2734 )
2735 genisoimage_cmd = (
2736 "genisoimage -J -r -V config-2 -o {iso_path} {source_dir_path}".format(
2737 iso_path=iso_path, source_dir_path=tmpdir
2738 )
2739 )
2740 self.logger.info(
2741 'create_config_drive_iso(): Creating ISO by running command "{}"'.format(
2742 genisoimage_cmd
2743 )
2744 )
2745
2746 try:
2747 FNULL = open(os.devnull, "w")
2748 subprocess.check_call(genisoimage_cmd, shell=True, stdout=FNULL)
2749 except subprocess.CalledProcessError as e:
2750 shutil.rmtree(tmpdir, ignore_errors=True)
2751 error_msg = "create_config_drive_iso(): Exception while running genisoimage command: {}".format(
2752 e
2753 )
2754 self.logger.error(error_msg)
2755 raise Exception(error_msg)
2756
2757 return iso_path
2758
2759 def upload_iso_to_catalog(self, catalog_id, iso_file_path):
2760 if not os.path.isfile(iso_file_path):
2761 error_msg = "upload_iso_to_catalog(): Given iso file is not present. Given path: {}".format(
2762 iso_file_path
2763 )
2764 self.logger.error(error_msg)
2765 raise Exception(error_msg)
2766
2767 iso_file_stat = os.stat(iso_file_path)
2768 xml_media_elem = """<?xml version="1.0" encoding="UTF-8"?>
2769 <Media
2770 xmlns="http://www.vmware.com/vcloud/v1.5"
2771 name="{iso_name}"
2772 size="{iso_size}"
2773 imageType="iso">
2774 <Description>ISO image for config-drive</Description>
2775 </Media>""".format(
2776 iso_name=os.path.basename(iso_file_path), iso_size=iso_file_stat.st_size
2777 )
2778 headers = {
2779 "Accept": "application/*+xml;version=" + API_VERSION,
2780 "x-vcloud-authorization": self.client._session.headers[
2781 "x-vcloud-authorization"
2782 ],
2783 }
2784 headers["Content-Type"] = "application/vnd.vmware.vcloud.media+xml"
2785 catalog_href = self.url + "/api/catalog/" + catalog_id + "/action/upload"
2786 response = self.perform_request(
2787 req_type="POST", url=catalog_href, headers=headers, data=xml_media_elem
2788 )
2789
2790 if response.status_code != 201:
2791 error_msg = "upload_iso_to_catalog(): Failed to POST an action/upload request to {}".format(
2792 catalog_href
2793 )
2794 self.logger.error(error_msg)
2795 raise Exception(error_msg)
2796
2797 catalogItem = XmlElementTree.fromstring(response.text)
2798 entity = [
2799 child
2800 for child in catalogItem
2801 if child.get("type") == "application/vnd.vmware.vcloud.media+xml"
2802 ][0]
2803 entity_href = entity.get("href")
2804
2805 response = self.perform_request(
2806 req_type="GET", url=entity_href, headers=headers
2807 )
2808 if response.status_code != 200:
2809 raise Exception(
2810 "upload_iso_to_catalog(): Failed to GET entity href {}".format(
2811 entity_href
2812 )
2813 )
2814
2815 match = re.search(
2816 r'<Files>\s+?<File.+?href="(.+?)"/>\s+?</File>\s+?</Files>',
2817 response.text,
2818 re.DOTALL,
2819 )
2820 if match:
2821 media_upload_href = match.group(1)
2822 else:
2823 raise Exception(
2824 "Could not parse the upload URL for the media file from the last response"
2825 )
2826 upload_iso_task = self.get_task_from_response(response.text)
2827 headers["Content-Type"] = "application/octet-stream"
2828 response = self.perform_request(
2829 req_type="PUT",
2830 url=media_upload_href,
2831 headers=headers,
2832 data=open(iso_file_path, "rb"),
2833 )
2834
2835 if response.status_code != 200:
2836 raise Exception('PUT request to "{}" failed'.format(media_upload_href))
2837
2838 result = self.client.get_task_monitor().wait_for_success(task=upload_iso_task)
2839 if result.get("status") != "success":
2840 raise Exception(
2841 "The upload iso task failed with status {}".format(result.get("status"))
2842 )
2843
2844 def get_vcd_availibility_zones(self, respool_href, headers):
2845 """Method to find presence of av zone is VIM resource pool
2846
2847 Args:
2848 respool_href - resource pool href
2849 headers - header information
2850
2851 Returns:
2852 vcd_az - list of azone present in vCD
2853 """
2854 vcd_az = []
2855 url = respool_href
2856 resp = self.perform_request(req_type="GET", url=respool_href, headers=headers)
2857
2858 if resp.status_code != requests.codes.ok:
2859 self.logger.debug(
2860 "REST API call {} failed. Return status code {}".format(
2861 url, resp.status_code
2862 )
2863 )
2864 else:
2865 # Get the href to hostGroups and find provided hostGroup is present in it
2866 resp_xml = XmlElementTree.fromstring(resp.content)
2867 for child in resp_xml:
2868 if "VMWProviderVdcResourcePool" in child.tag:
2869 for schild in child:
2870 if "Link" in schild.tag:
2871 if (
2872 schild.attrib.get("type")
2873 == "application/vnd.vmware.admin.vmwHostGroupsType+xml"
2874 ):
2875 hostGroup = schild.attrib.get("href")
2876 hg_resp = self.perform_request(
2877 req_type="GET", url=hostGroup, headers=headers
2878 )
2879
2880 if hg_resp.status_code != requests.codes.ok:
2881 self.logger.debug(
2882 "REST API call {} failed. Return status code {}".format(
2883 hostGroup, hg_resp.status_code
2884 )
2885 )
2886 else:
2887 hg_resp_xml = XmlElementTree.fromstring(
2888 hg_resp.content
2889 )
2890 for hostGroup in hg_resp_xml:
2891 if "HostGroup" in hostGroup.tag:
2892 # append host group name to the list
2893 vcd_az.append(hostGroup.attrib.get("name"))
2894
2895 return vcd_az
2896
2897 def set_availability_zones(self):
2898 """
2899 Set vim availability zone
2900 """
2901 vim_availability_zones = None
2902 availability_zone = None
2903
2904 if "availability_zone" in self.config:
2905 vim_availability_zones = self.config.get("availability_zone")
2906
2907 if isinstance(vim_availability_zones, str):
2908 availability_zone = [vim_availability_zones]
2909 elif isinstance(vim_availability_zones, list):
2910 availability_zone = vim_availability_zones
2911 else:
2912 return availability_zone
2913
2914 return availability_zone
2915
2916 def get_vm_availability_zone(self, availability_zone_index, availability_zone_list):
2917 """
2918 Return the availability zone to be used by the created VM.
2919 returns: The VIM availability zone to be used or None
2920 """
2921 if availability_zone_index is None:
2922 if not self.config.get("availability_zone"):
2923 return None
2924 elif isinstance(self.config.get("availability_zone"), str):
2925 return self.config["availability_zone"]
2926 else:
2927 return self.config["availability_zone"][0]
2928
2929 vim_availability_zones = self.availability_zone
2930
2931 # check if VIM offer enough availability zones describe in the VNFD
2932 if vim_availability_zones and len(availability_zone_list) <= len(
2933 vim_availability_zones
2934 ):
2935 # check if all the names of NFV AV match VIM AV names
2936 match_by_index = False
2937 for av in availability_zone_list:
2938 if av not in vim_availability_zones:
2939 match_by_index = True
2940 break
2941
2942 if match_by_index:
2943 self.logger.debug(
2944 "Required Availability zone or Host Group not found in VIM config"
2945 )
2946 self.logger.debug(
2947 "Input Availability zone list: {}".format(availability_zone_list)
2948 )
2949 self.logger.debug(
2950 "VIM configured Availability zones: {}".format(
2951 vim_availability_zones
2952 )
2953 )
2954 self.logger.debug("VIM Availability zones will be used by index")
2955 return vim_availability_zones[availability_zone_index]
2956 else:
2957 return availability_zone_list[availability_zone_index]
2958 else:
2959 raise vimconn.VimConnConflictException(
2960 "No enough availability zones at VIM for this deployment"
2961 )
2962
2963 def create_vm_to_host_affinity_rule(
2964 self, addrule_href, vmgrpname, hostgrpname, polarity, headers
2965 ):
2966 """Method to create VM to Host Affinity rule in vCD
2967
2968 Args:
2969 addrule_href - href to make a POST request
2970 vmgrpname - name of the VM group created
2971 hostgrpnmae - name of the host group created earlier
2972 polarity - Affinity or Anti-affinity (default: Affinity)
2973 headers - headers to make REST call
2974
2975 Returns:
2976 True- if rule is created
2977 False- Failed to create rule due to some error
2978
2979 """
2980 task_status = False
2981 rule_name = polarity + "_" + vmgrpname
2982 payload = """<?xml version="1.0" encoding="UTF-8"?>
2983 <vmext:VMWVmHostAffinityRule
2984 xmlns:vmext="http://www.vmware.com/vcloud/extension/v1.5"
2985 xmlns:vcloud="http://www.vmware.com/vcloud/v1.5"
2986 type="application/vnd.vmware.admin.vmwVmHostAffinityRule+xml">
2987 <vcloud:Name>{}</vcloud:Name>
2988 <vcloud:IsEnabled>true</vcloud:IsEnabled>
2989 <vcloud:IsMandatory>true</vcloud:IsMandatory>
2990 <vcloud:Polarity>{}</vcloud:Polarity>
2991 <vmext:HostGroupName>{}</vmext:HostGroupName>
2992 <vmext:VmGroupName>{}</vmext:VmGroupName>
2993 </vmext:VMWVmHostAffinityRule>""".format(
2994 rule_name, polarity, hostgrpname, vmgrpname
2995 )
2996
2997 resp = self.perform_request(
2998 req_type="POST", url=addrule_href, headers=headers, data=payload
2999 )
3000
3001 if resp.status_code != requests.codes.accepted:
3002 self.logger.debug(
3003 "REST API call {} failed. Return status code {}".format(
3004 addrule_href, resp.status_code
3005 )
3006 )
3007 task_status = False
3008
3009 return task_status
3010 else:
3011 affinity_task = self.get_task_from_response(resp.content)
3012 self.logger.debug("affinity_task: {}".format(affinity_task))
3013
3014 if affinity_task is None or affinity_task is False:
3015 raise vimconn.VimConnUnexpectedResponse("failed to find affinity task")
3016 # wait for task to complete
3017 result = self.client.get_task_monitor().wait_for_success(task=affinity_task)
3018
3019 if result.get("status") == "success":
3020 self.logger.debug(
3021 "Successfully created affinity rule {}".format(rule_name)
3022 )
3023 return True
3024 else:
3025 raise vimconn.VimConnUnexpectedResponse(
3026 "failed to create affinity rule {}".format(rule_name)
3027 )
3028
3029 def get_add_rule_reference(self, respool_href, headers):
3030 """This method finds href to add vm to host affinity rule to vCD
3031
3032 Args:
3033 respool_href- href to resource pool
3034 headers- header information to make REST call
3035
3036 Returns:
3037 None - if no valid href to add rule found or
3038 addrule_href - href to add vm to host affinity rule of resource pool
3039 """
3040 addrule_href = None
3041 resp = self.perform_request(req_type="GET", url=respool_href, headers=headers)
3042
3043 if resp.status_code != requests.codes.ok:
3044 self.logger.debug(
3045 "REST API call {} failed. Return status code {}".format(
3046 respool_href, resp.status_code
3047 )
3048 )
3049 else:
3050 resp_xml = XmlElementTree.fromstring(resp.content)
3051 for child in resp_xml:
3052 if "VMWProviderVdcResourcePool" in child.tag:
3053 for schild in child:
3054 if "Link" in schild.tag:
3055 if (
3056 schild.attrib.get("type")
3057 == "application/vnd.vmware.admin.vmwVmHostAffinityRule+xml"
3058 and schild.attrib.get("rel") == "add"
3059 ):
3060 addrule_href = schild.attrib.get("href")
3061 break
3062
3063 return addrule_href
3064
3065 def add_vm_to_vmgroup(self, vm_uuid, vmGroupNameURL, vmGroup_name, headers):
3066 """Method to add deployed VM to newly created VM Group.
3067 This is required to create VM to Host affinity in vCD
3068
3069 Args:
3070 vm_uuid- newly created vm uuid
3071 vmGroupNameURL- URL to VM Group name
3072 vmGroup_name- Name of VM group created
3073 headers- Headers for REST request
3074
3075 Returns:
3076 True- if VM added to VM group successfully
3077 False- if any error encounter
3078 """
3079 addvm_resp = self.perform_request(
3080 req_type="GET", url=vmGroupNameURL, headers=headers
3081 ) # , data=payload)
3082
3083 if addvm_resp.status_code != requests.codes.ok:
3084 self.logger.debug(
3085 "REST API call to get VM Group Name url {} failed. Return status code {}".format(
3086 vmGroupNameURL, addvm_resp.status_code
3087 )
3088 )
3089 return False
3090 else:
3091 resp_xml = XmlElementTree.fromstring(addvm_resp.content)
3092 for child in resp_xml:
3093 if child.tag.split("}")[1] == "Link":
3094 if child.attrib.get("rel") == "addVms":
3095 addvmtogrpURL = child.attrib.get("href")
3096
3097 # Get vm details
3098 url_list = [self.url, "/api/vApp/vm-", vm_uuid]
3099 vmdetailsURL = "".join(url_list)
3100
3101 resp = self.perform_request(req_type="GET", url=vmdetailsURL, headers=headers)
3102
3103 if resp.status_code != requests.codes.ok:
3104 self.logger.debug(
3105 "REST API call {} failed. Return status code {}".format(
3106 vmdetailsURL, resp.status_code
3107 )
3108 )
3109 return False
3110
3111 # Parse VM details
3112 resp_xml = XmlElementTree.fromstring(resp.content)
3113 if resp_xml.tag.split("}")[1] == "Vm":
3114 vm_id = resp_xml.attrib.get("id")
3115 vm_name = resp_xml.attrib.get("name")
3116 vm_href = resp_xml.attrib.get("href")
3117 # print vm_id, vm_name, vm_href
3118
3119 # Add VM into VMgroup
3120 payload = """<?xml version="1.0" encoding="UTF-8"?>\
3121 <ns2:Vms xmlns:ns2="http://www.vmware.com/vcloud/v1.5" \
3122 xmlns="http://www.vmware.com/vcloud/versions" \
3123 xmlns:ns3="http://schemas.dmtf.org/ovf/envelope/1" \
3124 xmlns:ns4="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData" \
3125 xmlns:ns5="http://schemas.dmtf.org/wbem/wscim/1/common" \
3126 xmlns:ns6="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData" \
3127 xmlns:ns7="http://www.vmware.com/schema/ovf" \
3128 xmlns:ns8="http://schemas.dmtf.org/ovf/environment/1" \
3129 xmlns:ns9="http://www.vmware.com/vcloud/extension/v1.5">\
3130 <ns2:VmReference href="{}" id="{}" name="{}" \
3131 type="application/vnd.vmware.vcloud.vm+xml" />\
3132 </ns2:Vms>""".format(
3133 vm_href, vm_id, vm_name
3134 )
3135
3136 addvmtogrp_resp = self.perform_request(
3137 req_type="POST", url=addvmtogrpURL, headers=headers, data=payload
3138 )
3139
3140 if addvmtogrp_resp.status_code != requests.codes.accepted:
3141 self.logger.debug(
3142 "REST API call {} failed. Return status code {}".format(
3143 addvmtogrpURL, addvmtogrp_resp.status_code
3144 )
3145 )
3146
3147 return False
3148 else:
3149 self.logger.debug(
3150 "Done adding VM {} to VMgroup {}".format(vm_name, vmGroup_name)
3151 )
3152
3153 return True
3154
3155 def create_vmgroup(self, vmgroup_name, vmgroup_href, headers):
3156 """Method to create a VM group in vCD
3157
3158 Args:
3159 vmgroup_name : Name of VM group to be created
3160 vmgroup_href : href for vmgroup
3161 headers- Headers for REST request
3162 """
3163 # POST to add URL with required data
3164 vmgroup_status = False
3165 payload = """<VMWVmGroup xmlns="http://www.vmware.com/vcloud/extension/v1.5" \
3166 xmlns:vcloud_v1.5="http://www.vmware.com/vcloud/v1.5" name="{}">\
3167 <vmCount>1</vmCount>\
3168 </VMWVmGroup>""".format(
3169 vmgroup_name
3170 )
3171 resp = self.perform_request(
3172 req_type="POST", url=vmgroup_href, headers=headers, data=payload
3173 )
3174
3175 if resp.status_code != requests.codes.accepted:
3176 self.logger.debug(
3177 "REST API call {} failed. Return status code {}".format(
3178 vmgroup_href, resp.status_code
3179 )
3180 )
3181
3182 return vmgroup_status
3183 else:
3184 vmgroup_task = self.get_task_from_response(resp.content)
3185 if vmgroup_task is None or vmgroup_task is False:
3186 raise vimconn.VimConnUnexpectedResponse(
3187 "create_vmgroup(): failed to create VM group {}".format(
3188 vmgroup_name
3189 )
3190 )
3191
3192 # wait for task to complete
3193 result = self.client.get_task_monitor().wait_for_success(task=vmgroup_task)
3194
3195 if result.get("status") == "success":
3196 self.logger.debug(
3197 "create_vmgroup(): Successfully created VM group {}".format(
3198 vmgroup_name
3199 )
3200 )
3201 # time.sleep(10)
3202 vmgroup_status = True
3203
3204 return vmgroup_status
3205 else:
3206 raise vimconn.VimConnUnexpectedResponse(
3207 "create_vmgroup(): failed to create VM group {}".format(
3208 vmgroup_name
3209 )
3210 )
3211
3212 def find_vmgroup_reference(self, url, headers):
3213 """Method to create a new VMGroup which is required to add created VM
3214 Args:
3215 url- resource pool href
3216 headers- header information
3217
3218 Returns:
3219 returns href to VM group to create VM group
3220 """
3221 # Perform GET on resource pool to find 'add' link to create VMGroup
3222 # https://vcd-ip/api/admin/extension/providervdc/<providervdc id>/resourcePools
3223 vmgrp_href = None
3224 resp = self.perform_request(req_type="GET", url=url, headers=headers)
3225
3226 if resp.status_code != requests.codes.ok:
3227 self.logger.debug(
3228 "REST API call {} failed. Return status code {}".format(
3229 url, resp.status_code
3230 )
3231 )
3232 else:
3233 # Get the href to add vmGroup to vCD
3234 resp_xml = XmlElementTree.fromstring(resp.content)
3235 for child in resp_xml:
3236 if "VMWProviderVdcResourcePool" in child.tag:
3237 for schild in child:
3238 if "Link" in schild.tag:
3239 # Find href with type VMGroup and rel with add
3240 if (
3241 schild.attrib.get("type")
3242 == "application/vnd.vmware.admin.vmwVmGroupType+xml"
3243 and schild.attrib.get("rel") == "add"
3244 ):
3245 vmgrp_href = schild.attrib.get("href")
3246
3247 return vmgrp_href
3248
3249 def check_availibility_zone(self, az, respool_href, headers):
3250 """Method to verify requested av zone is present or not in provided
3251 resource pool
3252
3253 Args:
3254 az - name of hostgroup (availibility_zone)
3255 respool_href - Resource Pool href
3256 headers - Headers to make REST call
3257 Returns:
3258 az_found - True if availibility_zone is found else False
3259 """
3260 az_found = False
3261 headers["Accept"] = "application/*+xml;version=27.0"
3262 resp = self.perform_request(req_type="GET", url=respool_href, headers=headers)
3263
3264 if resp.status_code != requests.codes.ok:
3265 self.logger.debug(
3266 "REST API call {} failed. Return status code {}".format(
3267 respool_href, resp.status_code
3268 )
3269 )
3270 else:
3271 # Get the href to hostGroups and find provided hostGroup is present in it
3272 resp_xml = XmlElementTree.fromstring(resp.content)
3273
3274 for child in resp_xml:
3275 if "VMWProviderVdcResourcePool" in child.tag:
3276 for schild in child:
3277 if "Link" in schild.tag:
3278 if (
3279 schild.attrib.get("type")
3280 == "application/vnd.vmware.admin.vmwHostGroupsType+xml"
3281 ):
3282 hostGroup_href = schild.attrib.get("href")
3283 hg_resp = self.perform_request(
3284 req_type="GET", url=hostGroup_href, headers=headers
3285 )
3286
3287 if hg_resp.status_code != requests.codes.ok:
3288 self.logger.debug(
3289 "REST API call {} failed. Return status code {}".format(
3290 hostGroup_href, hg_resp.status_code
3291 )
3292 )
3293 else:
3294 hg_resp_xml = XmlElementTree.fromstring(
3295 hg_resp.content
3296 )
3297 for hostGroup in hg_resp_xml:
3298 if "HostGroup" in hostGroup.tag:
3299 if hostGroup.attrib.get("name") == az:
3300 az_found = True
3301 break
3302
3303 return az_found
3304
3305 def get_pvdc_for_org(self, org_vdc, headers):
3306 """This method gets provider vdc references from organisation
3307
3308 Args:
3309 org_vdc - name of the organisation VDC to find pvdc
3310 headers - headers to make REST call
3311
3312 Returns:
3313 None - if no pvdc href found else
3314 pvdc_href - href to pvdc
3315 """
3316 # Get provider VDC references from vCD
3317 pvdc_href = None
3318 # url = '<vcd url>/api/admin/extension/providerVdcReferences'
3319 url_list = [self.url, "/api/admin/extension/providerVdcReferences"]
3320 url = "".join(url_list)
3321
3322 response = self.perform_request(req_type="GET", url=url, headers=headers)
3323 if response.status_code != requests.codes.ok:
3324 self.logger.debug(
3325 "REST API call {} failed. Return status code {}".format(
3326 url, response.status_code
3327 )
3328 )
3329 else:
3330 xmlroot_response = XmlElementTree.fromstring(response.text)
3331 for child in xmlroot_response:
3332 if "ProviderVdcReference" in child.tag:
3333 pvdc_href = child.attrib.get("href")
3334 # Get vdcReferences to find org
3335 pvdc_resp = self.perform_request(
3336 req_type="GET", url=pvdc_href, headers=headers
3337 )
3338
3339 if pvdc_resp.status_code != requests.codes.ok:
3340 raise vimconn.VimConnException(
3341 "REST API call {} failed. "
3342 "Return status code {}".format(url, pvdc_resp.status_code)
3343 )
3344
3345 pvdc_resp_xml = XmlElementTree.fromstring(pvdc_resp.content)
3346 for child in pvdc_resp_xml:
3347 if "Link" in child.tag:
3348 if (
3349 child.attrib.get("type")
3350 == "application/vnd.vmware.admin.vdcReferences+xml"
3351 ):
3352 vdc_href = child.attrib.get("href")
3353
3354 # Check if provided org is present in vdc
3355 vdc_resp = self.perform_request(
3356 req_type="GET", url=vdc_href, headers=headers
3357 )
3358
3359 if vdc_resp.status_code != requests.codes.ok:
3360 raise vimconn.VimConnException(
3361 "REST API call {} failed. "
3362 "Return status code {}".format(
3363 url, vdc_resp.status_code
3364 )
3365 )
3366 vdc_resp_xml = XmlElementTree.fromstring(
3367 vdc_resp.content
3368 )
3369
3370 for child in vdc_resp_xml:
3371 if "VdcReference" in child.tag:
3372 if child.attrib.get("name") == org_vdc:
3373 return pvdc_href
3374
3375 def get_resource_pool_details(self, pvdc_href, headers):
3376 """Method to get resource pool information.
3377 Host groups are property of resource group.
3378 To get host groups, we need to GET details of resource pool.
3379
3380 Args:
3381 pvdc_href: href to pvdc details
3382 headers: headers
3383
3384 Returns:
3385 respool_href - Returns href link reference to resource pool
3386 """
3387 respool_href = None
3388 resp = self.perform_request(req_type="GET", url=pvdc_href, headers=headers)
3389
3390 if resp.status_code != requests.codes.ok:
3391 self.logger.debug(
3392 "REST API call {} failed. Return status code {}".format(
3393 pvdc_href, resp.status_code
3394 )
3395 )
3396 else:
3397 respool_resp_xml = XmlElementTree.fromstring(resp.content)
3398 for child in respool_resp_xml:
3399 if "Link" in child.tag:
3400 if (
3401 child.attrib.get("type")
3402 == "application/vnd.vmware.admin.vmwProviderVdcResourcePoolSet+xml"
3403 ):
3404 respool_href = child.attrib.get("href")
3405 break
3406
3407 return respool_href
3408
3409 def log_message(self, msg):
3410 """
3411 Method to log error messages related to Affinity rule creation
3412 in new_vminstance & raise Exception
3413 Args :
3414 msg - Error message to be logged
3415
3416 """
3417 # get token to connect vCD as a normal user
3418 self.get_token()
3419 self.logger.debug(msg)
3420
3421 raise vimconn.VimConnException(msg)
3422
3423 # #
3424 # #
3425 # # based on current discussion
3426 # #
3427 # #
3428 # # server:
3429 # created: '2016-09-08T11:51:58'
3430 # description: simple-instance.linux1.1
3431 # flavor: ddc6776e-75a9-11e6-ad5f-0800273e724c
3432 # hostId: e836c036-74e7-11e6-b249-0800273e724c
3433 # image: dde30fe6-75a9-11e6-ad5f-0800273e724c
3434 # status: ACTIVE
3435 # error_msg:
3436 # interfaces: …
3437 #
3438 def get_vminstance(self, vim_vm_uuid=None):
3439 """Returns the VM instance information from VIM"""
3440 self.logger.debug("Client requesting vm instance {} ".format(vim_vm_uuid))
3441
3442 _, vdc = self.get_vdc_details()
3443 if vdc is None:
3444 raise vimconn.VimConnConnectionException(
3445 "Failed to get a reference of VDC for a tenant {}".format(
3446 self.tenant_name
3447 )
3448 )
3449
3450 vm_info_dict = self.get_vapp_details_rest(vapp_uuid=vim_vm_uuid)
3451 if not vm_info_dict:
3452 self.logger.debug(
3453 "get_vminstance(): Failed to get vApp name by UUID {}".format(
3454 vim_vm_uuid
3455 )
3456 )
3457 raise vimconn.VimConnNotFoundException(
3458 "Failed to get vApp name by UUID {}".format(vim_vm_uuid)
3459 )
3460
3461 status_key = vm_info_dict["status"]
3462 error = ""
3463 try:
3464 vm_dict = {
3465 "created": vm_info_dict["created"],
3466 "description": vm_info_dict["name"],
3467 "status": vcdStatusCode2manoFormat[int(status_key)],
3468 "hostId": vm_info_dict["vmuuid"],
3469 "error_msg": error,
3470 "vim_info": yaml.safe_dump(vm_info_dict),
3471 "interfaces": [],
3472 }
3473
3474 if "interfaces" in vm_info_dict:
3475 vm_dict["interfaces"] = vm_info_dict["interfaces"]
3476 else:
3477 vm_dict["interfaces"] = []
3478 except KeyError:
3479 vm_dict = {
3480 "created": "",
3481 "description": "",
3482 "status": vcdStatusCode2manoFormat[int(-1)],
3483 "hostId": vm_info_dict["vmuuid"],
3484 "error_msg": "Inconsistency state",
3485 "vim_info": yaml.safe_dump(vm_info_dict),
3486 "interfaces": [],
3487 }
3488
3489 return vm_dict
3490
3491 def delete_vminstance(self, vm__vim_uuid, created_items=None):
3492 """Method poweroff and remove VM instance from vcloud director network.
3493
3494 Args:
3495 vm__vim_uuid: VM UUID
3496
3497 Returns:
3498 Returns the instance identifier
3499 """
3500 self.logger.debug(
3501 "Client requesting delete vm instance {} ".format(vm__vim_uuid)
3502 )
3503
3504 _, vdc = self.get_vdc_details()
3505 vdc_obj = VDC(self.client, href=vdc.get("href"))
3506 if vdc_obj is None:
3507 self.logger.debug(
3508 "delete_vminstance(): Failed to get a reference of VDC for a tenant {}".format(
3509 self.tenant_name
3510 )
3511 )
3512 raise vimconn.VimConnException(
3513 "delete_vminstance(): Failed to get a reference of VDC for a tenant {}".format(
3514 self.tenant_name
3515 )
3516 )
3517
3518 try:
3519 vapp_name = self.get_namebyvappid(vm__vim_uuid)
3520 if vapp_name is None:
3521 self.logger.debug(
3522 "delete_vminstance(): Failed to get vm by given {} vm uuid".format(
3523 vm__vim_uuid
3524 )
3525 )
3526
3527 return (
3528 -1,
3529 "delete_vminstance(): Failed to get vm by given {} vm uuid".format(
3530 vm__vim_uuid
3531 ),
3532 )
3533
3534 self.logger.info(
3535 "Deleting vApp {} and UUID {}".format(vapp_name, vm__vim_uuid)
3536 )
3537 vapp_resource = vdc_obj.get_vapp(vapp_name)
3538 vapp = VApp(self.client, resource=vapp_resource)
3539
3540 # Delete vApp and wait for status change if task executed and vApp is None.
3541 if vapp:
3542 if vapp_resource.get("deployed") == "true":
3543 self.logger.info("Powering off vApp {}".format(vapp_name))
3544 # Power off vApp
3545 powered_off = False
3546 wait_time = 0
3547
3548 while wait_time <= MAX_WAIT_TIME:
3549 power_off_task = vapp.power_off()
3550 result = self.client.get_task_monitor().wait_for_success(
3551 task=power_off_task
3552 )
3553
3554 if result.get("status") == "success":
3555 powered_off = True
3556 break
3557 else:
3558 self.logger.info(
3559 "Wait for vApp {} to power off".format(vapp_name)
3560 )
3561 time.sleep(INTERVAL_TIME)
3562
3563 wait_time += INTERVAL_TIME
3564
3565 if not powered_off:
3566 self.logger.debug(
3567 "delete_vminstance(): Failed to power off VM instance {} ".format(
3568 vm__vim_uuid
3569 )
3570 )
3571 else:
3572 self.logger.info(
3573 "delete_vminstance(): Powered off VM instance {} ".format(
3574 vm__vim_uuid
3575 )
3576 )
3577
3578 # Undeploy vApp
3579 self.logger.info("Undeploy vApp {}".format(vapp_name))
3580 wait_time = 0
3581 undeployed = False
3582 while wait_time <= MAX_WAIT_TIME:
3583 vapp = VApp(self.client, resource=vapp_resource)
3584 if not vapp:
3585 self.logger.debug(
3586 "delete_vminstance(): Failed to get vm by given {} vm uuid".format(
3587 vm__vim_uuid
3588 )
3589 )
3590
3591 return (
3592 -1,
3593 "delete_vminstance(): Failed to get vm by given {} vm uuid".format(
3594 vm__vim_uuid
3595 ),
3596 )
3597
3598 undeploy_task = vapp.undeploy()
3599 result = self.client.get_task_monitor().wait_for_success(
3600 task=undeploy_task
3601 )
3602
3603 if result.get("status") == "success":
3604 undeployed = True
3605 break
3606 else:
3607 self.logger.debug(
3608 "Wait for vApp {} to undeploy".format(vapp_name)
3609 )
3610 time.sleep(INTERVAL_TIME)
3611
3612 wait_time += INTERVAL_TIME
3613
3614 if not undeployed:
3615 self.logger.debug(
3616 "delete_vminstance(): Failed to undeploy vApp {} ".format(
3617 vm__vim_uuid
3618 )
3619 )
3620
3621 # delete vapp
3622 self.logger.info("Start deletion of vApp {} ".format(vapp_name))
3623 if vapp is not None:
3624 wait_time = 0
3625 result = False
3626
3627 while wait_time <= MAX_WAIT_TIME:
3628 vapp = VApp(self.client, resource=vapp_resource)
3629 if not vapp:
3630 self.logger.debug(
3631 "delete_vminstance(): Failed to get vm by given {} vm uuid".format(
3632 vm__vim_uuid
3633 )
3634 )
3635
3636 return (
3637 -1,
3638 "delete_vminstance(): Failed to get vm by given {} vm uuid".format(
3639 vm__vim_uuid
3640 ),
3641 )
3642
3643 delete_task = vdc_obj.delete_vapp(vapp.name, force=True)
3644 result = self.client.get_task_monitor().wait_for_success(
3645 task=delete_task
3646 )
3647 if result.get("status") == "success":
3648 break
3649 else:
3650 self.logger.debug(
3651 "Wait for vApp {} to delete".format(vapp_name)
3652 )
3653 time.sleep(INTERVAL_TIME)
3654
3655 wait_time += INTERVAL_TIME
3656
3657 if result is None:
3658 self.logger.debug(
3659 "delete_vminstance(): Failed delete uuid {} ".format(
3660 vm__vim_uuid
3661 )
3662 )
3663 else:
3664 self.logger.info(
3665 "Deleted vm instance {} sccessfully".format(vm__vim_uuid)
3666 )
3667 config_drive_catalog_name, config_drive_catalog_id = (
3668 "cfg_drv-" + vm__vim_uuid,
3669 None,
3670 )
3671 catalog_list = self.get_image_list()
3672
3673 try:
3674 config_drive_catalog_id = [
3675 catalog_["id"]
3676 for catalog_ in catalog_list
3677 if catalog_["name"] == config_drive_catalog_name
3678 ][0]
3679 except IndexError:
3680 pass
3681
3682 if config_drive_catalog_id:
3683 self.logger.debug(
3684 "delete_vminstance(): Found a config drive catalog {} matching "
3685 'vapp_name"{}". Deleting it.'.format(
3686 config_drive_catalog_id, vapp_name
3687 )
3688 )
3689 self.delete_image(config_drive_catalog_id)
3690
3691 return vm__vim_uuid
3692 except Exception:
3693 self.logger.debug(traceback.format_exc())
3694
3695 raise vimconn.VimConnException(
3696 "delete_vminstance(): Failed delete vm instance {}".format(vm__vim_uuid)
3697 )
3698
3699 def refresh_vms_status(self, vm_list):
3700 """Get the status of the virtual machines and their interfaces/ports
3701 Params: the list of VM identifiers
3702 Returns a dictionary with:
3703 vm_id: #VIM id of this Virtual Machine
3704 status: #Mandatory. Text with one of:
3705 # DELETED (not found at vim)
3706 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
3707 # OTHER (Vim reported other status not understood)
3708 # ERROR (VIM indicates an ERROR status)
3709 # ACTIVE, PAUSED, SUSPENDED, INACTIVE (not running),
3710 # CREATING (on building process), ERROR
3711 # ACTIVE:NoMgmtIP (Active but any of its interface has an IP address
3712 #
3713 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
3714 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
3715 interfaces:
3716 - vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
3717 mac_address: #Text format XX:XX:XX:XX:XX:XX
3718 vim_net_id: #network id where this interface is connected
3719 vim_interface_id: #interface/port VIM id
3720 ip_address: #null, or text with IPv4, IPv6 address
3721 """
3722 self.logger.debug("Client requesting refresh vm status for {} ".format(vm_list))
3723
3724 _, vdc = self.get_vdc_details()
3725 if vdc is None:
3726 raise vimconn.VimConnException(
3727 "Failed to get a reference of VDC for a tenant {}".format(
3728 self.tenant_name
3729 )
3730 )
3731
3732 vms_dict = {}
3733 nsx_edge_list = []
3734 for vmuuid in vm_list:
3735 vapp_name = self.get_namebyvappid(vmuuid)
3736 if vapp_name is not None:
3737 try:
3738 vm_pci_details = self.get_vm_pci_details(vmuuid)
3739 vdc_obj = VDC(self.client, href=vdc.get("href"))
3740 vapp_resource = vdc_obj.get_vapp(vapp_name)
3741 the_vapp = VApp(self.client, resource=vapp_resource)
3742
3743 vm_details = {}
3744 for vm in the_vapp.get_all_vms():
3745 headers = {
3746 "Accept": "application/*+xml;version=" + API_VERSION,
3747 "x-vcloud-authorization": self.client._session.headers[
3748 "x-vcloud-authorization"
3749 ],
3750 }
3751 response = self.perform_request(
3752 req_type="GET", url=vm.get("href"), headers=headers
3753 )
3754
3755 if response.status_code != 200:
3756 self.logger.error(
3757 "refresh_vms_status : REST call {} failed reason : {}"
3758 "status code : {}".format(
3759 vm.get("href"), response.text, response.status_code
3760 )
3761 )
3762 raise vimconn.VimConnException(
3763 "refresh_vms_status : Failed to get VM details"
3764 )
3765
3766 xmlroot = XmlElementTree.fromstring(response.text)
3767 result = response.text.replace("\n", " ")
3768 hdd_match = re.search(
3769 'vcloud:capacity="(\d+)"\svcloud:storageProfileOverrideVmDefault=',
3770 result,
3771 )
3772
3773 if hdd_match:
3774 hdd_mb = hdd_match.group(1)
3775 vm_details["hdd_mb"] = int(hdd_mb) if hdd_mb else None
3776
3777 cpus_match = re.search(
3778 "<rasd:Description>Number of Virtual CPUs</.*?>(\d+)</rasd:VirtualQuantity>",
3779 result,
3780 )
3781
3782 if cpus_match:
3783 cpus = cpus_match.group(1)
3784 vm_details["cpus"] = int(cpus) if cpus else None
3785
3786 memory_mb = re.search(
3787 "<rasd:Description>Memory Size</.*?>(\d+)</rasd:VirtualQuantity>",
3788 result,
3789 ).group(1)
3790 vm_details["memory_mb"] = int(memory_mb) if memory_mb else None
3791 vm_details["status"] = vcdStatusCode2manoFormat[
3792 int(xmlroot.get("status"))
3793 ]
3794 vm_details["id"] = xmlroot.get("id")
3795 vm_details["name"] = xmlroot.get("name")
3796 vm_info = [vm_details]
3797
3798 if vm_pci_details:
3799 vm_info[0].update(vm_pci_details)
3800
3801 vm_dict = {
3802 "status": vcdStatusCode2manoFormat[
3803 int(vapp_resource.get("status"))
3804 ],
3805 "error_msg": vcdStatusCode2manoFormat[
3806 int(vapp_resource.get("status"))
3807 ],
3808 "vim_info": yaml.safe_dump(vm_info),
3809 "interfaces": [],
3810 }
3811
3812 # get networks
3813 vm_ip = None
3814 vm_mac = None
3815 networks = re.findall(
3816 "<NetworkConnection needsCustomization=.*?</NetworkConnection>",
3817 result,
3818 )
3819
3820 for network in networks:
3821 mac_s = re.search("<MACAddress>(.*?)</MACAddress>", network)
3822 vm_mac = mac_s.group(1) if mac_s else None
3823 ip_s = re.search("<IpAddress>(.*?)</IpAddress>", network)
3824 vm_ip = ip_s.group(1) if ip_s else None
3825
3826 if vm_ip is None:
3827 if not nsx_edge_list:
3828 nsx_edge_list = self.get_edge_details()
3829 if nsx_edge_list is None:
3830 raise vimconn.VimConnException(
3831 "refresh_vms_status:"
3832 "Failed to get edge details from NSX Manager"
3833 )
3834
3835 if vm_mac is not None:
3836 vm_ip = self.get_ipaddr_from_NSXedge(
3837 nsx_edge_list, vm_mac
3838 )
3839
3840 net_s = re.search('network="(.*?)"', network)
3841 network_name = net_s.group(1) if net_s else None
3842 vm_net_id = self.get_network_id_by_name(network_name)
3843 interface = {
3844 "mac_address": vm_mac,
3845 "vim_net_id": vm_net_id,
3846 "vim_interface_id": vm_net_id,
3847 "ip_address": vm_ip,
3848 }
3849 vm_dict["interfaces"].append(interface)
3850
3851 # add a vm to vm dict
3852 vms_dict.setdefault(vmuuid, vm_dict)
3853 self.logger.debug("refresh_vms_status : vm info {}".format(vm_dict))
3854 except Exception as exp:
3855 self.logger.debug("Error in response {}".format(exp))
3856 self.logger.debug(traceback.format_exc())
3857
3858 return vms_dict
3859
3860 def get_edge_details(self):
3861 """Get the NSX edge list from NSX Manager
3862 Returns list of NSX edges
3863 """
3864 edge_list = []
3865 rheaders = {"Content-Type": "application/xml"}
3866 nsx_api_url = "/api/4.0/edges"
3867
3868 self.logger.debug(
3869 "Get edge details from NSX Manager {} {}".format(
3870 self.nsx_manager, nsx_api_url
3871 )
3872 )
3873
3874 try:
3875 resp = requests.get(
3876 self.nsx_manager + nsx_api_url,
3877 auth=(self.nsx_user, self.nsx_password),
3878 verify=False,
3879 headers=rheaders,
3880 )
3881 if resp.status_code == requests.codes.ok:
3882 paged_Edge_List = XmlElementTree.fromstring(resp.text)
3883 for edge_pages in paged_Edge_List:
3884 if edge_pages.tag == "edgePage":
3885 for edge_summary in edge_pages:
3886 if edge_summary.tag == "pagingInfo":
3887 for element in edge_summary:
3888 if (
3889 element.tag == "totalCount"
3890 and element.text == "0"
3891 ):
3892 raise vimconn.VimConnException(
3893 "get_edge_details: No NSX edges details found: {}".format(
3894 self.nsx_manager
3895 )
3896 )
3897
3898 if edge_summary.tag == "edgeSummary":
3899 for element in edge_summary:
3900 if element.tag == "id":
3901 edge_list.append(element.text)
3902 else:
3903 raise vimconn.VimConnException(
3904 "get_edge_details: No NSX edge details found: {}".format(
3905 self.nsx_manager
3906 )
3907 )
3908
3909 if not edge_list:
3910 raise vimconn.VimConnException(
3911 "get_edge_details: "
3912 "No NSX edge details found: {}".format(self.nsx_manager)
3913 )
3914 else:
3915 self.logger.debug(
3916 "get_edge_details: Found NSX edges {}".format(edge_list)
3917 )
3918
3919 return edge_list
3920 else:
3921 self.logger.debug(
3922 "get_edge_details: "
3923 "Failed to get NSX edge details from NSX Manager: {}".format(
3924 resp.content
3925 )
3926 )
3927
3928 return None
3929
3930 except Exception as exp:
3931 self.logger.debug(
3932 "get_edge_details: "
3933 "Failed to get NSX edge details from NSX Manager: {}".format(exp)
3934 )
3935 raise vimconn.VimConnException(
3936 "get_edge_details: "
3937 "Failed to get NSX edge details from NSX Manager: {}".format(exp)
3938 )
3939
3940 def get_ipaddr_from_NSXedge(self, nsx_edges, mac_address):
3941 """Get IP address details from NSX edges, using the MAC address
3942 PARAMS: nsx_edges : List of NSX edges
3943 mac_address : Find IP address corresponding to this MAC address
3944 Returns: IP address corrresponding to the provided MAC address
3945 """
3946 ip_addr = None
3947 rheaders = {"Content-Type": "application/xml"}
3948
3949 self.logger.debug("get_ipaddr_from_NSXedge: Finding IP addr from NSX edge")
3950
3951 try:
3952 for edge in nsx_edges:
3953 nsx_api_url = "/api/4.0/edges/" + edge + "/dhcp/leaseInfo"
3954
3955 resp = requests.get(
3956 self.nsx_manager + nsx_api_url,
3957 auth=(self.nsx_user, self.nsx_password),
3958 verify=False,
3959 headers=rheaders,
3960 )
3961
3962 if resp.status_code == requests.codes.ok:
3963 dhcp_leases = XmlElementTree.fromstring(resp.text)
3964 for child in dhcp_leases:
3965 if child.tag == "dhcpLeaseInfo":
3966 dhcpLeaseInfo = child
3967 for leaseInfo in dhcpLeaseInfo:
3968 for elem in leaseInfo:
3969 if (elem.tag) == "macAddress":
3970 edge_mac_addr = elem.text
3971
3972 if (elem.tag) == "ipAddress":
3973 ip_addr = elem.text
3974
3975 if edge_mac_addr is not None:
3976 if edge_mac_addr == mac_address:
3977 self.logger.debug(
3978 "Found ip addr {} for mac {} at NSX edge {}".format(
3979 ip_addr, mac_address, edge
3980 )
3981 )
3982
3983 return ip_addr
3984 else:
3985 self.logger.debug(
3986 "get_ipaddr_from_NSXedge: "
3987 "Error occurred while getting DHCP lease info from NSX Manager: {}".format(
3988 resp.content
3989 )
3990 )
3991
3992 self.logger.debug(
3993 "get_ipaddr_from_NSXedge: No IP addr found in any NSX edge"
3994 )
3995
3996 return None
3997
3998 except XmlElementTree.ParseError as Err:
3999 self.logger.debug(
4000 "ParseError in response from NSX Manager {}".format(Err.message),
4001 exc_info=True,
4002 )
4003
4004 def action_vminstance(self, vm__vim_uuid=None, action_dict=None, created_items={}):
4005 """Send and action over a VM instance from VIM
4006 Returns the vm_id if the action was successfully sent to the VIM"""
4007
4008 self.logger.debug(
4009 "Received action for vm {} and action dict {}".format(
4010 vm__vim_uuid, action_dict
4011 )
4012 )
4013
4014 if vm__vim_uuid is None or action_dict is None:
4015 raise vimconn.VimConnException("Invalid request. VM id or action is None.")
4016
4017 _, vdc = self.get_vdc_details()
4018 if vdc is None:
4019 raise vimconn.VimConnException(
4020 "Failed to get a reference of VDC for a tenant {}".format(
4021 self.tenant_name
4022 )
4023 )
4024
4025 vapp_name = self.get_namebyvappid(vm__vim_uuid)
4026 if vapp_name is None:
4027 self.logger.debug(
4028 "action_vminstance(): Failed to get vm by given {} vm uuid".format(
4029 vm__vim_uuid
4030 )
4031 )
4032
4033 raise vimconn.VimConnException(
4034 "Failed to get vm by given {} vm uuid".format(vm__vim_uuid)
4035 )
4036 else:
4037 self.logger.info(
4038 "Action_vminstance vApp {} and UUID {}".format(vapp_name, vm__vim_uuid)
4039 )
4040
4041 try:
4042 vdc_obj = VDC(self.client, href=vdc.get("href"))
4043 vapp_resource = vdc_obj.get_vapp(vapp_name)
4044 vapp = VApp(self.client, resource=vapp_resource)
4045
4046 if "start" in action_dict:
4047 self.logger.info(
4048 "action_vminstance: Power on vApp: {}".format(vapp_name)
4049 )
4050 poweron_task = self.power_on_vapp(vm__vim_uuid, vapp_name)
4051 result = self.client.get_task_monitor().wait_for_success(
4052 task=poweron_task
4053 )
4054 self.instance_actions_result("start", result, vapp_name)
4055 elif "rebuild" in action_dict:
4056 self.logger.info(
4057 "action_vminstance: Rebuild vApp: {}".format(vapp_name)
4058 )
4059 rebuild_task = vapp.deploy(power_on=True)
4060 result = self.client.get_task_monitor().wait_for_success(
4061 task=rebuild_task
4062 )
4063 self.instance_actions_result("rebuild", result, vapp_name)
4064 elif "pause" in action_dict:
4065 self.logger.info("action_vminstance: pause vApp: {}".format(vapp_name))
4066 pause_task = vapp.undeploy(action="suspend")
4067 result = self.client.get_task_monitor().wait_for_success(
4068 task=pause_task
4069 )
4070 self.instance_actions_result("pause", result, vapp_name)
4071 elif "resume" in action_dict:
4072 self.logger.info("action_vminstance: resume vApp: {}".format(vapp_name))
4073 poweron_task = self.power_on_vapp(vm__vim_uuid, vapp_name)
4074 result = self.client.get_task_monitor().wait_for_success(
4075 task=poweron_task
4076 )
4077 self.instance_actions_result("resume", result, vapp_name)
4078 elif "shutoff" in action_dict or "shutdown" in action_dict:
4079 action_name, _ = list(action_dict.items())[0]
4080 self.logger.info(
4081 "action_vminstance: {} vApp: {}".format(action_name, vapp_name)
4082 )
4083 shutdown_task = vapp.shutdown()
4084 result = self.client.get_task_monitor().wait_for_success(
4085 task=shutdown_task
4086 )
4087 if action_name == "shutdown":
4088 self.instance_actions_result("shutdown", result, vapp_name)
4089 else:
4090 self.instance_actions_result("shutoff", result, vapp_name)
4091 elif "forceOff" in action_dict:
4092 result = vapp.undeploy(action="powerOff")
4093 self.instance_actions_result("forceOff", result, vapp_name)
4094 elif "reboot" in action_dict:
4095 self.logger.info("action_vminstance: reboot vApp: {}".format(vapp_name))
4096 reboot_task = vapp.reboot()
4097 self.client.get_task_monitor().wait_for_success(task=reboot_task)
4098 else:
4099 raise vimconn.VimConnException(
4100 "action_vminstance: Invalid action {} or action is None.".format(
4101 action_dict
4102 )
4103 )
4104
4105 return vm__vim_uuid
4106 except Exception as exp:
4107 self.logger.debug("action_vminstance: Failed with Exception {}".format(exp))
4108
4109 raise vimconn.VimConnException(
4110 "action_vminstance: Failed with Exception {}".format(exp)
4111 )
4112
4113 def instance_actions_result(self, action, result, vapp_name):
4114 if result.get("status") == "success":
4115 self.logger.info(
4116 "action_vminstance: Sucessfully {} the vApp: {}".format(
4117 action, vapp_name
4118 )
4119 )
4120 else:
4121 self.logger.error(
4122 "action_vminstance: Failed to {} vApp: {}".format(action, vapp_name)
4123 )
4124
4125 def get_vminstance_console(self, vm_id, console_type="novnc"):
4126 """
4127 Get a console for the virtual machine
4128 Params:
4129 vm_id: uuid of the VM
4130 console_type, can be:
4131 "novnc" (by default), "xvpvnc" for VNC types,
4132 "rdp-html5" for RDP types, "spice-html5" for SPICE types
4133 Returns dict with the console parameters:
4134 protocol: ssh, ftp, http, https, ...
4135 server: usually ip address
4136 port: the http, ssh, ... port
4137 suffix: extra text, e.g. the http path and query string
4138 """
4139 console_dict = {}
4140
4141 if console_type is None or console_type == "novnc":
4142 url_rest_call = "{}/api/vApp/vm-{}/screen/action/acquireMksTicket".format(
4143 self.url, vm_id
4144 )
4145 headers = {
4146 "Accept": "application/*+xml;version=" + API_VERSION,
4147 "x-vcloud-authorization": self.client._session.headers[
4148 "x-vcloud-authorization"
4149 ],
4150 }
4151 response = self.perform_request(
4152 req_type="POST", url=url_rest_call, headers=headers
4153 )
4154
4155 if response.status_code == 403:
4156 response = self.retry_rest("GET", url_rest_call)
4157
4158 if response.status_code != 200:
4159 self.logger.error(
4160 "REST call {} failed reason : {}"
4161 "status code : {}".format(
4162 url_rest_call, response.text, response.status_code
4163 )
4164 )
4165 raise vimconn.VimConnException(
4166 "get_vminstance_console : Failed to get " "VM Mks ticket details"
4167 )
4168
4169 s = re.search("<Host>(.*?)</Host>", response.text)
4170 console_dict["server"] = s.group(1) if s else None
4171 s1 = re.search("<Port>(\d+)</Port>", response.text)
4172 console_dict["port"] = s1.group(1) if s1 else None
4173 url_rest_call = "{}/api/vApp/vm-{}/screen/action/acquireTicket".format(
4174 self.url, vm_id
4175 )
4176 headers = {
4177 "Accept": "application/*+xml;version=" + API_VERSION,
4178 "x-vcloud-authorization": self.client._session.headers[
4179 "x-vcloud-authorization"
4180 ],
4181 }
4182 response = self.perform_request(
4183 req_type="POST", url=url_rest_call, headers=headers
4184 )
4185
4186 if response.status_code == 403:
4187 response = self.retry_rest("GET", url_rest_call)
4188
4189 if response.status_code != 200:
4190 self.logger.error(
4191 "REST call {} failed reason : {}"
4192 "status code : {}".format(
4193 url_rest_call, response.text, response.status_code
4194 )
4195 )
4196 raise vimconn.VimConnException(
4197 "get_vminstance_console : Failed to get " "VM console details"
4198 )
4199
4200 s = re.search(">.*?/(vm-\d+.*)</", response.text)
4201 console_dict["suffix"] = s.group(1) if s else None
4202 console_dict["protocol"] = "https"
4203
4204 return console_dict
4205
4206 # NOT USED METHODS in current version
4207
4208 def host_vim2gui(self, host, server_dict):
4209 """Transform host dictionary from VIM format to GUI format,
4210 and append to the server_dict
4211 """
4212 raise vimconn.VimConnNotImplemented("Should have implemented this")
4213
4214 def get_hosts_info(self):
4215 """Get the information of deployed hosts
4216 Returns the hosts content"""
4217 raise vimconn.VimConnNotImplemented("Should have implemented this")
4218
4219 def get_hosts(self, vim_tenant):
4220 """Get the hosts and deployed instances
4221 Returns the hosts content"""
4222 raise vimconn.VimConnNotImplemented("Should have implemented this")
4223
4224 def get_processor_rankings(self):
4225 """Get the processor rankings in the VIM database"""
4226 raise vimconn.VimConnNotImplemented("Should have implemented this")
4227
4228 def new_host(self, host_data):
4229 """Adds a new host to VIM"""
4230 """Returns status code of the VIM response"""
4231 raise vimconn.VimConnNotImplemented("Should have implemented this")
4232
4233 def new_external_port(self, port_data):
4234 """Adds a external port to VIM"""
4235 """Returns the port identifier"""
4236 raise vimconn.VimConnNotImplemented("Should have implemented this")
4237
4238 def new_external_network(self, net_name, net_type):
4239 """Adds a external network to VIM (shared)"""
4240 """Returns the network identifier"""
4241 raise vimconn.VimConnNotImplemented("Should have implemented this")
4242
4243 def connect_port_network(self, port_id, network_id, admin=False):
4244 """Connects a external port to a network"""
4245 """Returns status code of the VIM response"""
4246 raise vimconn.VimConnNotImplemented("Should have implemented this")
4247
4248 def new_vminstancefromJSON(self, vm_data):
4249 """Adds a VM instance to VIM"""
4250 """Returns the instance identifier"""
4251 raise vimconn.VimConnNotImplemented("Should have implemented this")
4252
4253 def get_network_name_by_id(self, network_uuid=None):
4254 """Method gets vcloud director network named based on supplied uuid.
4255
4256 Args:
4257 network_uuid: network_id
4258
4259 Returns:
4260 The return network name.
4261 """
4262
4263 if not network_uuid:
4264 return None
4265
4266 try:
4267 org_dict = self.get_org(self.org_uuid)
4268 if "networks" in org_dict:
4269 org_network_dict = org_dict["networks"]
4270
4271 for net_uuid in org_network_dict:
4272 if net_uuid == network_uuid:
4273 return org_network_dict[net_uuid]
4274 except Exception:
4275 self.logger.debug("Exception in get_network_name_by_id")
4276 self.logger.debug(traceback.format_exc())
4277
4278 return None
4279
4280 def get_network_id_by_name(self, network_name=None):
4281 """Method gets vcloud director network uuid based on supplied name.
4282
4283 Args:
4284 network_name: network_name
4285 Returns:
4286 The return network uuid.
4287 network_uuid: network_id
4288 """
4289 if not network_name:
4290 self.logger.debug("get_network_id_by_name() : Network name is empty")
4291 return None
4292
4293 try:
4294 org_dict = self.get_org(self.org_uuid)
4295 if org_dict and "networks" in org_dict:
4296 org_network_dict = org_dict["networks"]
4297
4298 for net_uuid, net_name in org_network_dict.items():
4299 if net_name == network_name:
4300 return net_uuid
4301
4302 except KeyError as exp:
4303 self.logger.debug("get_network_id_by_name() : KeyError- {} ".format(exp))
4304
4305 return None
4306
4307 def get_physical_network_by_name(self, physical_network_name):
4308 """
4309 Methos returns uuid of physical network which passed
4310 Args:
4311 physical_network_name: physical network name
4312 Returns:
4313 UUID of physical_network_name
4314 """
4315 try:
4316 client_as_admin = self.connect_as_admin()
4317
4318 if not client_as_admin:
4319 raise vimconn.VimConnConnectionException("Failed to connect vCD.")
4320
4321 url_list = [self.url, "/api/admin/vdc/", self.tenant_id]
4322 vm_list_rest_call = "".join(url_list)
4323
4324 if client_as_admin._session:
4325 headers = {
4326 "Accept": "application/*+xml;version=" + API_VERSION,
4327 "x-vcloud-authorization": client_as_admin._session.headers[
4328 "x-vcloud-authorization"
4329 ],
4330 }
4331 response = self.perform_request(
4332 req_type="GET", url=vm_list_rest_call, headers=headers
4333 )
4334 provider_network = None
4335 available_network = None
4336 # add_vdc_rest_url = None
4337
4338 if response.status_code != requests.codes.ok:
4339 self.logger.debug(
4340 "REST API call {} failed. Return status code {}".format(
4341 vm_list_rest_call, response.status_code
4342 )
4343 )
4344 return None
4345 else:
4346 try:
4347 vm_list_xmlroot = XmlElementTree.fromstring(response.text)
4348 for child in vm_list_xmlroot:
4349 if child.tag.split("}")[1] == "ProviderVdcReference":
4350 provider_network = child.attrib.get("href")
4351 # application/vnd.vmware.admin.providervdc+xml
4352
4353 if child.tag.split("}")[1] == "Link":
4354 if (
4355 child.attrib.get("type")
4356 == "application/vnd.vmware.vcloud.orgVdcNetwork+xml"
4357 and child.attrib.get("rel") == "add"
4358 ):
4359 child.attrib.get("href")
4360 except Exception:
4361 self.logger.debug(
4362 "Failed parse respond for rest api call {}".format(
4363 vm_list_rest_call
4364 )
4365 )
4366 self.logger.debug("Respond body {}".format(response.text))
4367
4368 return None
4369
4370 # find pvdc provided available network
4371 response = self.perform_request(
4372 req_type="GET", url=provider_network, headers=headers
4373 )
4374
4375 if response.status_code != requests.codes.ok:
4376 self.logger.debug(
4377 "REST API call {} failed. Return status code {}".format(
4378 vm_list_rest_call, response.status_code
4379 )
4380 )
4381
4382 return None
4383
4384 try:
4385 vm_list_xmlroot = XmlElementTree.fromstring(response.text)
4386 for child in vm_list_xmlroot.iter():
4387 if child.tag.split("}")[1] == "AvailableNetworks":
4388 for networks in child.iter():
4389 if (
4390 networks.attrib.get("href") is not None
4391 and networks.attrib.get("name") is not None
4392 ):
4393 if (
4394 networks.attrib.get("name")
4395 == physical_network_name
4396 ):
4397 network_url = networks.attrib.get("href")
4398 available_network = network_url[
4399 network_url.rindex("/") + 1 :
4400 ]
4401 break
4402 except Exception:
4403 return None
4404
4405 return available_network
4406 except Exception as e:
4407 self.logger.error("Error while getting physical network: {}".format(e))
4408
4409 def list_org_action(self):
4410 """
4411 Method leverages vCloud director and query for available organization for particular user
4412
4413 Args:
4414 vca - is active VCA connection.
4415 vdc_name - is a vdc name that will be used to query vms action
4416
4417 Returns:
4418 The return XML respond
4419 """
4420 url_list = [self.url, "/api/org"]
4421 vm_list_rest_call = "".join(url_list)
4422
4423 if self.client._session:
4424 headers = {
4425 "Accept": "application/*+xml;version=" + API_VERSION,
4426 "x-vcloud-authorization": self.client._session.headers[
4427 "x-vcloud-authorization"
4428 ],
4429 }
4430
4431 response = self.perform_request(
4432 req_type="GET", url=vm_list_rest_call, headers=headers
4433 )
4434
4435 if response.status_code == 403:
4436 response = self.retry_rest("GET", vm_list_rest_call)
4437
4438 if response.status_code == requests.codes.ok:
4439 return response.text
4440
4441 return None
4442
4443 def get_org_action(self, org_uuid=None):
4444 """
4445 Method leverages vCloud director and retrieve available object for organization.
4446
4447 Args:
4448 org_uuid - vCD organization uuid
4449 self.client - is active connection.
4450
4451 Returns:
4452 The return XML respond
4453 """
4454
4455 if org_uuid is None:
4456 return None
4457
4458 url_list = [self.url, "/api/org/", org_uuid]
4459 vm_list_rest_call = "".join(url_list)
4460
4461 if self.client._session:
4462 headers = {
4463 "Accept": "application/*+xml;version=" + API_VERSION,
4464 "x-vcloud-authorization": self.client._session.headers[
4465 "x-vcloud-authorization"
4466 ],
4467 }
4468
4469 # response = requests.get(vm_list_rest_call, headers=headers, verify=False)
4470 response = self.perform_request(
4471 req_type="GET", url=vm_list_rest_call, headers=headers
4472 )
4473
4474 if response.status_code == 403:
4475 response = self.retry_rest("GET", vm_list_rest_call)
4476
4477 if response.status_code == requests.codes.ok:
4478 return response.text
4479
4480 return None
4481
4482 def get_org(self, org_uuid=None):
4483 """
4484 Method retrieves available organization in vCloud Director
4485
4486 Args:
4487 org_uuid - is a organization uuid.
4488
4489 Returns:
4490 The return dictionary with following key
4491 "network" - for network list under the org
4492 "catalogs" - for network list under the org
4493 "vdcs" - for vdc list under org
4494 """
4495
4496 org_dict = {}
4497
4498 if org_uuid is None:
4499 return org_dict
4500
4501 content = self.get_org_action(org_uuid=org_uuid)
4502 try:
4503 vdc_list = {}
4504 network_list = {}
4505 catalog_list = {}
4506 vm_list_xmlroot = XmlElementTree.fromstring(content)
4507 for child in vm_list_xmlroot:
4508 if child.attrib["type"] == "application/vnd.vmware.vcloud.vdc+xml":
4509 vdc_list[child.attrib["href"].split("/")[-1:][0]] = child.attrib[
4510 "name"
4511 ]
4512 org_dict["vdcs"] = vdc_list
4513
4514 if (
4515 child.attrib["type"]
4516 == "application/vnd.vmware.vcloud.orgNetwork+xml"
4517 ):
4518 network_list[
4519 child.attrib["href"].split("/")[-1:][0]
4520 ] = child.attrib["name"]
4521 org_dict["networks"] = network_list
4522
4523 if child.attrib["type"] == "application/vnd.vmware.vcloud.catalog+xml":
4524 catalog_list[
4525 child.attrib["href"].split("/")[-1:][0]
4526 ] = child.attrib["name"]
4527 org_dict["catalogs"] = catalog_list
4528 except Exception:
4529 pass
4530
4531 return org_dict
4532
4533 def get_org_list(self):
4534 """
4535 Method retrieves available organization in vCloud Director
4536
4537 Args:
4538 vca - is active VCA connection.
4539
4540 Returns:
4541 The return dictionary and key for each entry VDC UUID
4542 """
4543 org_dict = {}
4544
4545 content = self.list_org_action()
4546 try:
4547 vm_list_xmlroot = XmlElementTree.fromstring(content)
4548
4549 for vm_xml in vm_list_xmlroot:
4550 if vm_xml.tag.split("}")[1] == "Org":
4551 org_uuid = vm_xml.attrib["href"].split("/")[-1:]
4552 org_dict[org_uuid[0]] = vm_xml.attrib["name"]
4553 except Exception:
4554 pass
4555
4556 return org_dict
4557
4558 def vms_view_action(self, vdc_name=None):
4559 """Method leverages vCloud director vms query call
4560
4561 Args:
4562 vca - is active VCA connection.
4563 vdc_name - is a vdc name that will be used to query vms action
4564
4565 Returns:
4566 The return XML respond
4567 """
4568 vca = self.connect()
4569 if vdc_name is None:
4570 return None
4571
4572 url_list = [vca.host, "/api/vms/query"]
4573 vm_list_rest_call = "".join(url_list)
4574
4575 if not (not vca.vcloud_session or not vca.vcloud_session.organization):
4576 refs = [
4577 ref
4578 for ref in vca.vcloud_session.organization.Link
4579 if ref.name == vdc_name
4580 and ref.type_ == "application/vnd.vmware.vcloud.vdc+xml"
4581 ]
4582
4583 if len(refs) == 1:
4584 response = self.perform_request(
4585 req_type="GET",
4586 url=vm_list_rest_call,
4587 headers=vca.vcloud_session.get_vcloud_headers(),
4588 verify=vca.verify,
4589 logger=vca.logger,
4590 )
4591
4592 if response.status_code == requests.codes.ok:
4593 return response.text
4594
4595 return None
4596
4597 def get_vapp_list(self, vdc_name=None):
4598 """
4599 Method retrieves vApp list deployed vCloud director and returns a dictionary
4600 contains a list of all vapp deployed for queried VDC.
4601 The key for a dictionary is vApp UUID
4602
4603
4604 Args:
4605 vca - is active VCA connection.
4606 vdc_name - is a vdc name that will be used to query vms action
4607
4608 Returns:
4609 The return dictionary and key for each entry vapp UUID
4610 """
4611 vapp_dict = {}
4612
4613 if vdc_name is None:
4614 return vapp_dict
4615
4616 content = self.vms_view_action(vdc_name=vdc_name)
4617 try:
4618 vm_list_xmlroot = XmlElementTree.fromstring(content)
4619 for vm_xml in vm_list_xmlroot:
4620 if vm_xml.tag.split("}")[1] == "VMRecord":
4621 if vm_xml.attrib["isVAppTemplate"] == "true":
4622 rawuuid = vm_xml.attrib["container"].split("/")[-1:]
4623 if "vappTemplate-" in rawuuid[0]:
4624 # vm in format vappTemplate-e63d40e7-4ff5-4c6d-851f-96c1e4da86a5 we remove
4625 # vm and use raw UUID as key
4626 vapp_dict[rawuuid[0][13:]] = vm_xml.attrib
4627 except Exception:
4628 pass
4629
4630 return vapp_dict
4631
4632 def get_vm_list(self, vdc_name=None):
4633 """
4634 Method retrieves VM's list deployed vCloud director. It returns a dictionary
4635 contains a list of all VM's deployed for queried VDC.
4636 The key for a dictionary is VM UUID
4637
4638
4639 Args:
4640 vca - is active VCA connection.
4641 vdc_name - is a vdc name that will be used to query vms action
4642
4643 Returns:
4644 The return dictionary and key for each entry vapp UUID
4645 """
4646 vm_dict = {}
4647
4648 if vdc_name is None:
4649 return vm_dict
4650
4651 content = self.vms_view_action(vdc_name=vdc_name)
4652 try:
4653 vm_list_xmlroot = XmlElementTree.fromstring(content)
4654 for vm_xml in vm_list_xmlroot:
4655 if vm_xml.tag.split("}")[1] == "VMRecord":
4656 if vm_xml.attrib["isVAppTemplate"] == "false":
4657 rawuuid = vm_xml.attrib["href"].split("/")[-1:]
4658 if "vm-" in rawuuid[0]:
4659 # vm in format vm-e63d40e7-4ff5-4c6d-851f-96c1e4da86a5 we remove
4660 # vm and use raw UUID as key
4661 vm_dict[rawuuid[0][3:]] = vm_xml.attrib
4662 except Exception:
4663 pass
4664
4665 return vm_dict
4666
4667 def get_vapp(self, vdc_name=None, vapp_name=None, isuuid=False):
4668 """
4669 Method retrieves VM deployed vCloud director. It returns VM attribute as dictionary
4670 contains a list of all VM's deployed for queried VDC.
4671 The key for a dictionary is VM UUID
4672
4673
4674 Args:
4675 vca - is active VCA connection.
4676 vdc_name - is a vdc name that will be used to query vms action
4677
4678 Returns:
4679 The return dictionary and key for each entry vapp UUID
4680 """
4681 vm_dict = {}
4682 vca = self.connect()
4683
4684 if not vca:
4685 raise vimconn.VimConnConnectionException("self.connect() is failed")
4686
4687 if vdc_name is None:
4688 return vm_dict
4689
4690 content = self.vms_view_action(vdc_name=vdc_name)
4691 try:
4692 vm_list_xmlroot = XmlElementTree.fromstring(content)
4693 for vm_xml in vm_list_xmlroot:
4694 if (
4695 vm_xml.tag.split("}")[1] == "VMRecord"
4696 and vm_xml.attrib["isVAppTemplate"] == "false"
4697 ):
4698 # lookup done by UUID
4699 if isuuid:
4700 if vapp_name in vm_xml.attrib["container"]:
4701 rawuuid = vm_xml.attrib["href"].split("/")[-1:]
4702 if "vm-" in rawuuid[0]:
4703 vm_dict[rawuuid[0][3:]] = vm_xml.attrib
4704 break
4705 # lookup done by Name
4706 else:
4707 if vapp_name in vm_xml.attrib["name"]:
4708 rawuuid = vm_xml.attrib["href"].split("/")[-1:]
4709 if "vm-" in rawuuid[0]:
4710 vm_dict[rawuuid[0][3:]] = vm_xml.attrib
4711 break
4712 except Exception:
4713 pass
4714
4715 return vm_dict
4716
4717 def get_network_action(self, network_uuid=None):
4718 """
4719 Method leverages vCloud director and query network based on network uuid
4720
4721 Args:
4722 vca - is active VCA connection.
4723 network_uuid - is a network uuid
4724
4725 Returns:
4726 The return XML respond
4727 """
4728 if network_uuid is None:
4729 return None
4730
4731 url_list = [self.url, "/api/network/", network_uuid]
4732 vm_list_rest_call = "".join(url_list)
4733
4734 if self.client._session:
4735 headers = {
4736 "Accept": "application/*+xml;version=" + API_VERSION,
4737 "x-vcloud-authorization": self.client._session.headers[
4738 "x-vcloud-authorization"
4739 ],
4740 }
4741 response = self.perform_request(
4742 req_type="GET", url=vm_list_rest_call, headers=headers
4743 )
4744
4745 # Retry login if session expired & retry sending request
4746 if response.status_code == 403:
4747 response = self.retry_rest("GET", vm_list_rest_call)
4748
4749 if response.status_code == requests.codes.ok:
4750 return response.text
4751
4752 return None
4753
4754 def get_vcd_network(self, network_uuid=None):
4755 """
4756 Method retrieves available network from vCloud Director
4757
4758 Args:
4759 network_uuid - is VCD network UUID
4760
4761 Each element serialized as key : value pair
4762
4763 Following keys available for access. network_configuration['Gateway'}
4764 <Configuration>
4765 <IpScopes>
4766 <IpScope>
4767 <IsInherited>true</IsInherited>
4768 <Gateway>172.16.252.100</Gateway>
4769 <Netmask>255.255.255.0</Netmask>
4770 <Dns1>172.16.254.201</Dns1>
4771 <Dns2>172.16.254.202</Dns2>
4772 <DnsSuffix>vmwarelab.edu</DnsSuffix>
4773 <IsEnabled>true</IsEnabled>
4774 <IpRanges>
4775 <IpRange>
4776 <StartAddress>172.16.252.1</StartAddress>
4777 <EndAddress>172.16.252.99</EndAddress>
4778 </IpRange>
4779 </IpRanges>
4780 </IpScope>
4781 </IpScopes>
4782 <FenceMode>bridged</FenceMode>
4783
4784 Returns:
4785 The return dictionary and key for each entry vapp UUID
4786 """
4787 network_configuration = {}
4788
4789 if network_uuid is None:
4790 return network_uuid
4791
4792 try:
4793 content = self.get_network_action(network_uuid=network_uuid)
4794 if content is not None:
4795 vm_list_xmlroot = XmlElementTree.fromstring(content)
4796 network_configuration["status"] = vm_list_xmlroot.get("status")
4797 network_configuration["name"] = vm_list_xmlroot.get("name")
4798 network_configuration["uuid"] = vm_list_xmlroot.get("id").split(":")[3]
4799
4800 for child in vm_list_xmlroot:
4801 if child.tag.split("}")[1] == "IsShared":
4802 network_configuration["isShared"] = child.text.strip()
4803
4804 if child.tag.split("}")[1] == "Configuration":
4805 for configuration in child.iter():
4806 tagKey = configuration.tag.split("}")[1].strip()
4807 if tagKey != "":
4808 network_configuration[
4809 tagKey
4810 ] = configuration.text.strip()
4811 except Exception as exp:
4812 self.logger.debug("get_vcd_network: Failed with Exception {}".format(exp))
4813
4814 raise vimconn.VimConnException(
4815 "get_vcd_network: Failed with Exception {}".format(exp)
4816 )
4817
4818 return network_configuration
4819
4820 def delete_network_action(self, network_uuid=None):
4821 """
4822 Method delete given network from vCloud director
4823
4824 Args:
4825 network_uuid - is a network uuid that client wish to delete
4826
4827 Returns:
4828 The return None or XML respond or false
4829 """
4830 client = self.connect_as_admin()
4831
4832 if not client:
4833 raise vimconn.VimConnConnectionException("Failed to connect vCD as admin")
4834
4835 if network_uuid is None:
4836 return False
4837
4838 url_list = [self.url, "/api/admin/network/", network_uuid]
4839 vm_list_rest_call = "".join(url_list)
4840
4841 if client._session:
4842 headers = {
4843 "Accept": "application/*+xml;version=" + API_VERSION,
4844 "x-vcloud-authorization": client._session.headers[
4845 "x-vcloud-authorization"
4846 ],
4847 }
4848 response = self.perform_request(
4849 req_type="DELETE", url=vm_list_rest_call, headers=headers
4850 )
4851
4852 if response.status_code == 202:
4853 return True
4854
4855 return False
4856
4857 def create_network(
4858 self,
4859 network_name=None,
4860 net_type="bridge",
4861 parent_network_uuid=None,
4862 ip_profile=None,
4863 isshared="true",
4864 ):
4865 """
4866 Method create network in vCloud director
4867
4868 Args:
4869 network_name - is network name to be created.
4870 net_type - can be 'bridge','data','ptp','mgmt'.
4871 ip_profile is a dict containing the IP parameters of the network
4872 isshared - is a boolean
4873 parent_network_uuid - is parent provider vdc network that will be used for mapping.
4874 It optional attribute. by default if no parent network indicate the first available will be used.
4875
4876 Returns:
4877 The return network uuid or return None
4878 """
4879 new_network_name = [network_name, "-", str(uuid.uuid4())]
4880 content = self.create_network_rest(
4881 network_name="".join(new_network_name),
4882 ip_profile=ip_profile,
4883 net_type=net_type,
4884 parent_network_uuid=parent_network_uuid,
4885 isshared=isshared,
4886 )
4887
4888 if content is None:
4889 self.logger.debug("Failed create network {}.".format(network_name))
4890
4891 return None
4892
4893 try:
4894 vm_list_xmlroot = XmlElementTree.fromstring(content)
4895 vcd_uuid = vm_list_xmlroot.get("id").split(":")
4896 if len(vcd_uuid) == 4:
4897 self.logger.info(
4898 "Created new network name: {} uuid: {}".format(
4899 network_name, vcd_uuid[3]
4900 )
4901 )
4902
4903 return vcd_uuid[3]
4904 except Exception:
4905 self.logger.debug("Failed create network {}".format(network_name))
4906
4907 return None
4908
4909 def create_network_rest(
4910 self,
4911 network_name=None,
4912 net_type="bridge",
4913 parent_network_uuid=None,
4914 ip_profile=None,
4915 isshared="true",
4916 ):
4917 """
4918 Method create network in vCloud director
4919
4920 Args:
4921 network_name - is network name to be created.
4922 net_type - can be 'bridge','data','ptp','mgmt'.
4923 ip_profile is a dict containing the IP parameters of the network
4924 isshared - is a boolean
4925 parent_network_uuid - is parent provider vdc network that will be used for mapping.
4926 It optional attribute. by default if no parent network indicate the first available will be used.
4927
4928 Returns:
4929 The return network uuid or return None
4930 """
4931 client_as_admin = self.connect_as_admin()
4932
4933 if not client_as_admin:
4934 raise vimconn.VimConnConnectionException("Failed to connect vCD.")
4935
4936 if network_name is None:
4937 return None
4938
4939 url_list = [self.url, "/api/admin/vdc/", self.tenant_id]
4940 vm_list_rest_call = "".join(url_list)
4941
4942 if client_as_admin._session:
4943 headers = {
4944 "Accept": "application/*+xml;version=" + API_VERSION,
4945 "x-vcloud-authorization": client_as_admin._session.headers[
4946 "x-vcloud-authorization"
4947 ],
4948 }
4949 response = self.perform_request(
4950 req_type="GET", url=vm_list_rest_call, headers=headers
4951 )
4952 provider_network = None
4953 available_networks = None
4954 add_vdc_rest_url = None
4955
4956 if response.status_code != requests.codes.ok:
4957 self.logger.debug(
4958 "REST API call {} failed. Return status code {}".format(
4959 vm_list_rest_call, response.status_code
4960 )
4961 )
4962
4963 return None
4964 else:
4965 try:
4966 vm_list_xmlroot = XmlElementTree.fromstring(response.text)
4967 for child in vm_list_xmlroot:
4968 if child.tag.split("}")[1] == "ProviderVdcReference":
4969 provider_network = child.attrib.get("href")
4970 # application/vnd.vmware.admin.providervdc+xml
4971
4972 if child.tag.split("}")[1] == "Link":
4973 if (
4974 child.attrib.get("type")
4975 == "application/vnd.vmware.vcloud.orgVdcNetwork+xml"
4976 and child.attrib.get("rel") == "add"
4977 ):
4978 add_vdc_rest_url = child.attrib.get("href")
4979 except Exception:
4980 self.logger.debug(
4981 "Failed parse respond for rest api call {}".format(
4982 vm_list_rest_call
4983 )
4984 )
4985 self.logger.debug("Respond body {}".format(response.text))
4986
4987 return None
4988
4989 # find pvdc provided available network
4990 response = self.perform_request(
4991 req_type="GET", url=provider_network, headers=headers
4992 )
4993
4994 if response.status_code != requests.codes.ok:
4995 self.logger.debug(
4996 "REST API call {} failed. Return status code {}".format(
4997 vm_list_rest_call, response.status_code
4998 )
4999 )
5000
5001 return None
5002
5003 if parent_network_uuid is None:
5004 try:
5005 vm_list_xmlroot = XmlElementTree.fromstring(response.text)
5006 for child in vm_list_xmlroot.iter():
5007 if child.tag.split("}")[1] == "AvailableNetworks":
5008 for networks in child.iter():
5009 # application/vnd.vmware.admin.network+xml
5010 if networks.attrib.get("href") is not None:
5011 available_networks = networks.attrib.get("href")
5012 break
5013 except Exception:
5014 return None
5015
5016 try:
5017 # Configure IP profile of the network
5018 ip_profile = (
5019 ip_profile if ip_profile is not None else DEFAULT_IP_PROFILE
5020 )
5021
5022 if (
5023 "subnet_address" not in ip_profile
5024 or ip_profile["subnet_address"] is None
5025 ):
5026 subnet_rand = random.randint(0, 255)
5027 ip_base = "192.168.{}.".format(subnet_rand)
5028 ip_profile["subnet_address"] = ip_base + "0/24"
5029 else:
5030 ip_base = ip_profile["subnet_address"].rsplit(".", 1)[0] + "."
5031
5032 if (
5033 "gateway_address" not in ip_profile
5034 or ip_profile["gateway_address"] is None
5035 ):
5036 ip_profile["gateway_address"] = ip_base + "1"
5037
5038 if "dhcp_count" not in ip_profile or ip_profile["dhcp_count"] is None:
5039 ip_profile["dhcp_count"] = DEFAULT_IP_PROFILE["dhcp_count"]
5040
5041 if (
5042 "dhcp_enabled" not in ip_profile
5043 or ip_profile["dhcp_enabled"] is None
5044 ):
5045 ip_profile["dhcp_enabled"] = DEFAULT_IP_PROFILE["dhcp_enabled"]
5046
5047 if (
5048 "dhcp_start_address" not in ip_profile
5049 or ip_profile["dhcp_start_address"] is None
5050 ):
5051 ip_profile["dhcp_start_address"] = ip_base + "3"
5052
5053 if "ip_version" not in ip_profile or ip_profile["ip_version"] is None:
5054 ip_profile["ip_version"] = DEFAULT_IP_PROFILE["ip_version"]
5055
5056 if "dns_address" not in ip_profile or ip_profile["dns_address"] is None:
5057 ip_profile["dns_address"] = ip_base + "2"
5058
5059 gateway_address = ip_profile["gateway_address"]
5060 dhcp_count = int(ip_profile["dhcp_count"])
5061 subnet_address = self.convert_cidr_to_netmask(
5062 ip_profile["subnet_address"]
5063 )
5064
5065 if ip_profile["dhcp_enabled"] is True:
5066 dhcp_enabled = "true"
5067 else:
5068 dhcp_enabled = "false"
5069
5070 dhcp_start_address = ip_profile["dhcp_start_address"]
5071
5072 # derive dhcp_end_address from dhcp_start_address & dhcp_count
5073 end_ip_int = int(netaddr.IPAddress(dhcp_start_address))
5074 end_ip_int += dhcp_count - 1
5075 dhcp_end_address = str(netaddr.IPAddress(end_ip_int))
5076
5077 # ip_version = ip_profile['ip_version']
5078 dns_address = ip_profile["dns_address"]
5079 except KeyError as exp:
5080 self.logger.debug("Create Network REST: Key error {}".format(exp))
5081
5082 raise vimconn.VimConnException(
5083 "Create Network REST: Key error{}".format(exp)
5084 )
5085
5086 # either use client provided UUID or search for a first available
5087 # if both are not defined we return none
5088 if parent_network_uuid is not None:
5089 provider_network = None
5090 available_networks = None
5091 add_vdc_rest_url = None
5092 url_list = [self.url, "/api/admin/vdc/", self.tenant_id, "/networks"]
5093 add_vdc_rest_url = "".join(url_list)
5094 url_list = [self.url, "/api/admin/network/", parent_network_uuid]
5095 available_networks = "".join(url_list)
5096
5097 # Creating all networks as Direct Org VDC type networks.
5098 # Unused in case of Underlay (data/ptp) network interface.
5099 fence_mode = "isolated"
5100 is_inherited = "false"
5101 dns_list = dns_address.split(";")
5102 dns1 = dns_list[0]
5103 dns2_text = ""
5104
5105 if len(dns_list) >= 2:
5106 dns2_text = "\n <Dns2>{}</Dns2>\n".format(
5107 dns_list[1]
5108 )
5109
5110 if net_type == "isolated":
5111 fence_mode = "isolated"
5112 data = """ <OrgVdcNetwork name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5">
5113 <Description>Openmano created</Description>
5114 <Configuration>
5115 <IpScopes>
5116 <IpScope>
5117 <IsInherited>{1:s}</IsInherited>
5118 <Gateway>{2:s}</Gateway>
5119 <Netmask>{3:s}</Netmask>
5120 <Dns1>{4:s}</Dns1>{5:s}
5121 <IsEnabled>{6:s}</IsEnabled>
5122 <IpRanges>
5123 <IpRange>
5124 <StartAddress>{7:s}</StartAddress>
5125 <EndAddress>{8:s}</EndAddress>
5126 </IpRange>
5127 </IpRanges>
5128 </IpScope>
5129 </IpScopes>
5130 <FenceMode>{9:s}</FenceMode>
5131 </Configuration>
5132 <IsShared>{10:s}</IsShared>
5133 </OrgVdcNetwork> """.format(
5134 escape(network_name),
5135 is_inherited,
5136 gateway_address,
5137 subnet_address,
5138 dns1,
5139 dns2_text,
5140 dhcp_enabled,
5141 dhcp_start_address,
5142 dhcp_end_address,
5143 fence_mode,
5144 isshared,
5145 )
5146 else:
5147 fence_mode = "bridged"
5148 data = """ <OrgVdcNetwork name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5">
5149 <Description>Openmano created</Description>
5150 <Configuration>
5151 <IpScopes>
5152 <IpScope>
5153 <IsInherited>{1:s}</IsInherited>
5154 <Gateway>{2:s}</Gateway>
5155 <Netmask>{3:s}</Netmask>
5156 <Dns1>{4:s}</Dns1>{5:s}
5157 <IsEnabled>{6:s}</IsEnabled>
5158 <IpRanges>
5159 <IpRange>
5160 <StartAddress>{7:s}</StartAddress>
5161 <EndAddress>{8:s}</EndAddress>
5162 </IpRange>
5163 </IpRanges>
5164 </IpScope>
5165 </IpScopes>
5166 <ParentNetwork href="{9:s}"/>
5167 <FenceMode>{10:s}</FenceMode>
5168 </Configuration>
5169 <IsShared>{11:s}</IsShared>
5170 </OrgVdcNetwork> """.format(
5171 escape(network_name),
5172 is_inherited,
5173 gateway_address,
5174 subnet_address,
5175 dns1,
5176 dns2_text,
5177 dhcp_enabled,
5178 dhcp_start_address,
5179 dhcp_end_address,
5180 available_networks,
5181 fence_mode,
5182 isshared,
5183 )
5184
5185 headers["Content-Type"] = "application/vnd.vmware.vcloud.orgVdcNetwork+xml"
5186 try:
5187 response = self.perform_request(
5188 req_type="POST", url=add_vdc_rest_url, headers=headers, data=data
5189 )
5190
5191 if response.status_code != 201:
5192 self.logger.debug(
5193 "Create Network POST REST API call failed. "
5194 "Return status code {}, response.text: {}".format(
5195 response.status_code, response.text
5196 )
5197 )
5198 else:
5199 network_task = self.get_task_from_response(response.text)
5200 self.logger.debug(
5201 "Create Network REST : Waiting for Network creation complete"
5202 )
5203 time.sleep(5)
5204 result = self.client.get_task_monitor().wait_for_success(
5205 task=network_task
5206 )
5207
5208 if result.get("status") == "success":
5209 return response.text
5210 else:
5211 self.logger.debug(
5212 "create_network_rest task failed. Network Create response : {}".format(
5213 response.text
5214 )
5215 )
5216 except Exception as exp:
5217 self.logger.debug("create_network_rest : Exception : {} ".format(exp))
5218
5219 return None
5220
5221 def convert_cidr_to_netmask(self, cidr_ip=None):
5222 """
5223 Method sets convert CIDR netmask address to normal IP format
5224 Args:
5225 cidr_ip : CIDR IP address
5226 Returns:
5227 netmask : Converted netmask
5228 """
5229 if cidr_ip is not None:
5230 if "/" in cidr_ip:
5231 _, net_bits = cidr_ip.split("/")
5232 netmask = socket.inet_ntoa(
5233 struct.pack(">I", (0xFFFFFFFF << (32 - int(net_bits))) & 0xFFFFFFFF)
5234 )
5235 else:
5236 netmask = cidr_ip
5237
5238 return netmask
5239
5240 return None
5241
5242 def get_provider_rest(self, vca=None):
5243 """
5244 Method gets provider vdc view from vcloud director
5245
5246 Args:
5247 network_name - is network name to be created.
5248 parent_network_uuid - is parent provider vdc network that will be used for mapping.
5249 It optional attribute. by default if no parent network indicate the first available will be used.
5250
5251 Returns:
5252 The return xml content of respond or None
5253 """
5254 url_list = [self.url, "/api/admin"]
5255
5256 if vca:
5257 headers = {
5258 "Accept": "application/*+xml;version=" + API_VERSION,
5259 "x-vcloud-authorization": self.client._session.headers[
5260 "x-vcloud-authorization"
5261 ],
5262 }
5263 response = self.perform_request(
5264 req_type="GET", url="".join(url_list), headers=headers
5265 )
5266
5267 if response.status_code == requests.codes.ok:
5268 return response.text
5269
5270 return None
5271
5272 def create_vdc(self, vdc_name=None):
5273 vdc_dict = {}
5274 xml_content = self.create_vdc_from_tmpl_rest(vdc_name=vdc_name)
5275
5276 if xml_content is not None:
5277 try:
5278 task_resp_xmlroot = XmlElementTree.fromstring(xml_content)
5279 for child in task_resp_xmlroot:
5280 if child.tag.split("}")[1] == "Owner":
5281 vdc_id = child.attrib.get("href").split("/")[-1]
5282 vdc_dict[vdc_id] = task_resp_xmlroot.get("href")
5283
5284 return vdc_dict
5285 except Exception:
5286 self.logger.debug("Respond body {}".format(xml_content))
5287
5288 return None
5289
5290 def create_vdc_from_tmpl_rest(self, vdc_name=None):
5291 """
5292 Method create vdc in vCloud director based on VDC template.
5293 it uses pre-defined template.
5294
5295 Args:
5296 vdc_name - name of a new vdc.
5297
5298 Returns:
5299 The return xml content of respond or None
5300 """
5301 # pre-requesite atleast one vdc template should be available in vCD
5302 self.logger.info("Creating new vdc {}".format(vdc_name))
5303 vca = self.connect_as_admin()
5304
5305 if not vca:
5306 raise vimconn.VimConnConnectionException("Failed to connect vCD")
5307
5308 if vdc_name is None:
5309 return None
5310
5311 url_list = [self.url, "/api/vdcTemplates"]
5312 vm_list_rest_call = "".join(url_list)
5313 headers = {
5314 "Accept": "application/*+xml;version=" + API_VERSION,
5315 "x-vcloud-authorization": vca._session.headers["x-vcloud-authorization"],
5316 }
5317 response = self.perform_request(
5318 req_type="GET", url=vm_list_rest_call, headers=headers
5319 )
5320
5321 # container url to a template
5322 vdc_template_ref = None
5323 try:
5324 vm_list_xmlroot = XmlElementTree.fromstring(response.text)
5325 for child in vm_list_xmlroot:
5326 # application/vnd.vmware.admin.providervdc+xml
5327 # we need find a template from witch we instantiate VDC
5328 if child.tag.split("}")[1] == "VdcTemplate":
5329 if (
5330 child.attrib.get("type")
5331 == "application/vnd.vmware.admin.vdcTemplate+xml"
5332 ):
5333 vdc_template_ref = child.attrib.get("href")
5334 except Exception:
5335 self.logger.debug(
5336 "Failed parse respond for rest api call {}".format(vm_list_rest_call)
5337 )
5338 self.logger.debug("Respond body {}".format(response.text))
5339
5340 return None
5341
5342 # if we didn't found required pre defined template we return None
5343 if vdc_template_ref is None:
5344 return None
5345
5346 try:
5347 # instantiate vdc
5348 url_list = [self.url, "/api/org/", self.org_uuid, "/action/instantiate"]
5349 vm_list_rest_call = "".join(url_list)
5350 data = """<InstantiateVdcTemplateParams name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5">
5351 <Source href="{1:s}"></Source>
5352 <Description>opnemano</Description>
5353 </InstantiateVdcTemplateParams>""".format(
5354 vdc_name, vdc_template_ref
5355 )
5356 headers[
5357 "Content-Type"
5358 ] = "application/vnd.vmware.vcloud.instantiateVdcTemplateParams+xml"
5359 response = self.perform_request(
5360 req_type="POST", url=vm_list_rest_call, headers=headers, data=data
5361 )
5362 vdc_task = self.get_task_from_response(response.text)
5363 self.client.get_task_monitor().wait_for_success(task=vdc_task)
5364
5365 # if we all ok we respond with content otherwise by default None
5366 if response.status_code >= 200 and response.status_code < 300:
5367 return response.text
5368
5369 return None
5370 except Exception:
5371 self.logger.debug(
5372 "Failed parse respond for rest api call {}".format(vm_list_rest_call)
5373 )
5374 self.logger.debug("Respond body {}".format(response.text))
5375
5376 return None
5377
5378 def create_vdc_rest(self, vdc_name=None):
5379 """
5380 Method create network in vCloud director
5381
5382 Args:
5383 vdc_name - vdc name to be created
5384 Returns:
5385 The return response
5386 """
5387 self.logger.info("Creating new vdc {}".format(vdc_name))
5388 vca = self.connect_as_admin()
5389
5390 if not vca:
5391 raise vimconn.VimConnConnectionException("Failed to connect vCD")
5392
5393 if vdc_name is None:
5394 return None
5395
5396 url_list = [self.url, "/api/admin/org/", self.org_uuid]
5397 vm_list_rest_call = "".join(url_list)
5398
5399 if vca._session:
5400 headers = {
5401 "Accept": "application/*+xml;version=" + API_VERSION,
5402 "x-vcloud-authorization": self.client._session.headers[
5403 "x-vcloud-authorization"
5404 ],
5405 }
5406 response = self.perform_request(
5407 req_type="GET", url=vm_list_rest_call, headers=headers
5408 )
5409 provider_vdc_ref = None
5410 add_vdc_rest_url = None
5411 # available_networks = None
5412
5413 if response.status_code != requests.codes.ok:
5414 self.logger.debug(
5415 "REST API call {} failed. Return status code {}".format(
5416 vm_list_rest_call, response.status_code
5417 )
5418 )
5419
5420 return None
5421 else:
5422 try:
5423 vm_list_xmlroot = XmlElementTree.fromstring(response.text)
5424 for child in vm_list_xmlroot:
5425 # application/vnd.vmware.admin.providervdc+xml
5426 if child.tag.split("}")[1] == "Link":
5427 if (
5428 child.attrib.get("type")
5429 == "application/vnd.vmware.admin.createVdcParams+xml"
5430 and child.attrib.get("rel") == "add"
5431 ):
5432 add_vdc_rest_url = child.attrib.get("href")
5433 except Exception:
5434 self.logger.debug(
5435 "Failed parse respond for rest api call {}".format(
5436 vm_list_rest_call
5437 )
5438 )
5439 self.logger.debug("Respond body {}".format(response.text))
5440
5441 return None
5442
5443 response = self.get_provider_rest(vca=vca)
5444 try:
5445 vm_list_xmlroot = XmlElementTree.fromstring(response)
5446 for child in vm_list_xmlroot:
5447 if child.tag.split("}")[1] == "ProviderVdcReferences":
5448 for sub_child in child:
5449 provider_vdc_ref = sub_child.attrib.get("href")
5450 except Exception:
5451 self.logger.debug(
5452 "Failed parse respond for rest api call {}".format(
5453 vm_list_rest_call
5454 )
5455 )
5456 self.logger.debug("Respond body {}".format(response))
5457
5458 return None
5459
5460 if add_vdc_rest_url is not None and provider_vdc_ref is not None:
5461 data = """ <CreateVdcParams name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5"><Description>{1:s}</Description>
5462 <AllocationModel>ReservationPool</AllocationModel>
5463 <ComputeCapacity><Cpu><Units>MHz</Units><Allocated>2048</Allocated><Limit>2048</Limit></Cpu>
5464 <Memory><Units>MB</Units><Allocated>2048</Allocated><Limit>2048</Limit></Memory>
5465 </ComputeCapacity><NicQuota>0</NicQuota><NetworkQuota>100</NetworkQuota>
5466 <VdcStorageProfile><Enabled>true</Enabled><Units>MB</Units><Limit>20480</Limit><Default>true</Default></VdcStorageProfile>
5467 <ProviderVdcReference
5468 name="Main Provider"
5469 href="{2:s}" />
5470 <UsesFastProvisioning>true</UsesFastProvisioning></CreateVdcParams>""".format(
5471 escape(vdc_name), escape(vdc_name), provider_vdc_ref
5472 )
5473 headers[
5474 "Content-Type"
5475 ] = "application/vnd.vmware.admin.createVdcParams+xml"
5476 response = self.perform_request(
5477 req_type="POST",
5478 url=add_vdc_rest_url,
5479 headers=headers,
5480 data=data,
5481 )
5482
5483 # if we all ok we respond with content otherwise by default None
5484 if response.status_code == 201:
5485 return response.text
5486
5487 return None
5488
5489 def get_vapp_details_rest(self, vapp_uuid=None, need_admin_access=False):
5490 """
5491 Method retrieve vapp detail from vCloud director
5492
5493 Args:
5494 vapp_uuid - is vapp identifier.
5495
5496 Returns:
5497 The return network uuid or return None
5498 """
5499 parsed_respond = {}
5500 vca = None
5501
5502 if need_admin_access:
5503 vca = self.connect_as_admin()
5504 else:
5505 vca = self.client
5506
5507 if not vca:
5508 raise vimconn.VimConnConnectionException("Failed to connect vCD")
5509 if vapp_uuid is None:
5510 return None
5511
5512 url_list = [self.url, "/api/vApp/vapp-", vapp_uuid]
5513 get_vapp_restcall = "".join(url_list)
5514
5515 if vca._session:
5516 headers = {
5517 "Accept": "application/*+xml;version=" + API_VERSION,
5518 "x-vcloud-authorization": vca._session.headers[
5519 "x-vcloud-authorization"
5520 ],
5521 }
5522 response = self.perform_request(
5523 req_type="GET", url=get_vapp_restcall, headers=headers
5524 )
5525
5526 if response.status_code == 403:
5527 if need_admin_access is False:
5528 response = self.retry_rest("GET", get_vapp_restcall)
5529
5530 if response.status_code != requests.codes.ok:
5531 self.logger.debug(
5532 "REST API call {} failed. Return status code {}".format(
5533 get_vapp_restcall, response.status_code
5534 )
5535 )
5536
5537 return parsed_respond
5538
5539 try:
5540 xmlroot_respond = XmlElementTree.fromstring(response.text)
5541 parsed_respond["ovfDescriptorUploaded"] = xmlroot_respond.attrib[
5542 "ovfDescriptorUploaded"
5543 ]
5544 namespaces = {
5545 "vssd": "http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData",
5546 "ovf": "http://schemas.dmtf.org/ovf/envelope/1",
5547 "vmw": "http://www.vmware.com/schema/ovf",
5548 "vm": "http://www.vmware.com/vcloud/v1.5",
5549 "rasd": "http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData",
5550 "vmext": "http://www.vmware.com/vcloud/extension/v1.5",
5551 "xmlns": "http://www.vmware.com/vcloud/v1.5",
5552 }
5553
5554 created_section = xmlroot_respond.find("vm:DateCreated", namespaces)
5555 if created_section is not None:
5556 parsed_respond["created"] = created_section.text
5557
5558 network_section = xmlroot_respond.find(
5559 "vm:NetworkConfigSection/vm:NetworkConfig", namespaces
5560 )
5561 if (
5562 network_section is not None
5563 and "networkName" in network_section.attrib
5564 ):
5565 parsed_respond["networkname"] = network_section.attrib[
5566 "networkName"
5567 ]
5568
5569 ipscopes_section = xmlroot_respond.find(
5570 "vm:NetworkConfigSection/vm:NetworkConfig/vm:Configuration/vm:IpScopes",
5571 namespaces,
5572 )
5573 if ipscopes_section is not None:
5574 for ipscope in ipscopes_section:
5575 for scope in ipscope:
5576 tag_key = scope.tag.split("}")[1]
5577 if tag_key == "IpRanges":
5578 ip_ranges = scope.getchildren()
5579 for ipblock in ip_ranges:
5580 for block in ipblock:
5581 parsed_respond[
5582 block.tag.split("}")[1]
5583 ] = block.text
5584 else:
5585 parsed_respond[tag_key] = scope.text
5586
5587 # parse children section for other attrib
5588 children_section = xmlroot_respond.find("vm:Children/", namespaces)
5589 if children_section is not None:
5590 parsed_respond["name"] = children_section.attrib["name"]
5591 parsed_respond["nestedHypervisorEnabled"] = (
5592 children_section.attrib["nestedHypervisorEnabled"]
5593 if "nestedHypervisorEnabled" in children_section.attrib
5594 else None
5595 )
5596 parsed_respond["deployed"] = children_section.attrib["deployed"]
5597 parsed_respond["status"] = children_section.attrib["status"]
5598 parsed_respond["vmuuid"] = children_section.attrib["id"].split(":")[
5599 -1
5600 ]
5601 network_adapter = children_section.find(
5602 "vm:NetworkConnectionSection", namespaces
5603 )
5604 nic_list = []
5605 for adapters in network_adapter:
5606 adapter_key = adapters.tag.split("}")[1]
5607 if adapter_key == "PrimaryNetworkConnectionIndex":
5608 parsed_respond["primarynetwork"] = adapters.text
5609
5610 if adapter_key == "NetworkConnection":
5611 vnic = {}
5612 if "network" in adapters.attrib:
5613 vnic["network"] = adapters.attrib["network"]
5614 for adapter in adapters:
5615 setting_key = adapter.tag.split("}")[1]
5616 vnic[setting_key] = adapter.text
5617 nic_list.append(vnic)
5618
5619 for link in children_section:
5620 if link.tag.split("}")[1] == "Link" and "rel" in link.attrib:
5621 if link.attrib["rel"] == "screen:acquireTicket":
5622 parsed_respond["acquireTicket"] = link.attrib
5623
5624 if link.attrib["rel"] == "screen:acquireMksTicket":
5625 parsed_respond["acquireMksTicket"] = link.attrib
5626
5627 parsed_respond["interfaces"] = nic_list
5628 vCloud_extension_section = children_section.find(
5629 "xmlns:VCloudExtension", namespaces
5630 )
5631 if vCloud_extension_section is not None:
5632 vm_vcenter_info = {}
5633 vim_info = vCloud_extension_section.find(
5634 "vmext:VmVimInfo", namespaces
5635 )
5636 vmext = vim_info.find("vmext:VmVimObjectRef", namespaces)
5637
5638 if vmext is not None:
5639 vm_vcenter_info["vm_moref_id"] = vmext.find(
5640 "vmext:MoRef", namespaces
5641 ).text
5642
5643 parsed_respond["vm_vcenter_info"] = vm_vcenter_info
5644
5645 virtual_hardware_section = children_section.find(
5646 "ovf:VirtualHardwareSection", namespaces
5647 )
5648 vm_virtual_hardware_info = {}
5649 if virtual_hardware_section is not None:
5650 for item in virtual_hardware_section.iterfind(
5651 "ovf:Item", namespaces
5652 ):
5653 if (
5654 item.find("rasd:Description", namespaces).text
5655 == "Hard disk"
5656 ):
5657 disk_size = item.find(
5658 "rasd:HostResource", namespaces
5659 ).attrib["{" + namespaces["vm"] + "}capacity"]
5660 vm_virtual_hardware_info["disk_size"] = disk_size
5661 break
5662
5663 for link in virtual_hardware_section:
5664 if (
5665 link.tag.split("}")[1] == "Link"
5666 and "rel" in link.attrib
5667 ):
5668 if link.attrib["rel"] == "edit" and link.attrib[
5669 "href"
5670 ].endswith("/disks"):
5671 vm_virtual_hardware_info[
5672 "disk_edit_href"
5673 ] = link.attrib["href"]
5674 break
5675
5676 parsed_respond["vm_virtual_hardware"] = vm_virtual_hardware_info
5677 except Exception as exp:
5678 self.logger.info(
5679 "Error occurred calling rest api for getting vApp details {}".format(
5680 exp
5681 )
5682 )
5683
5684 return parsed_respond
5685
5686 def acquire_console(self, vm_uuid=None):
5687 if vm_uuid is None:
5688 return None
5689
5690 if self.client._session:
5691 headers = {
5692 "Accept": "application/*+xml;version=" + API_VERSION,
5693 "x-vcloud-authorization": self.client._session.headers[
5694 "x-vcloud-authorization"
5695 ],
5696 }
5697 vm_dict = self.get_vapp_details_rest(vapp_uuid=vm_uuid)
5698 console_dict = vm_dict["acquireTicket"]
5699 console_rest_call = console_dict["href"]
5700
5701 response = self.perform_request(
5702 req_type="POST", url=console_rest_call, headers=headers
5703 )
5704
5705 if response.status_code == 403:
5706 response = self.retry_rest("POST", console_rest_call)
5707
5708 if response.status_code == requests.codes.ok:
5709 return response.text
5710
5711 return None
5712
5713 def modify_vm_disk(self, vapp_uuid, flavor_disk):
5714 """
5715 Method retrieve vm disk details
5716
5717 Args:
5718 vapp_uuid - is vapp identifier.
5719 flavor_disk - disk size as specified in VNFD (flavor)
5720
5721 Returns:
5722 The return network uuid or return None
5723 """
5724 status = None
5725 try:
5726 # Flavor disk is in GB convert it into MB
5727 flavor_disk = int(flavor_disk) * 1024
5728 vm_details = self.get_vapp_details_rest(vapp_uuid)
5729
5730 if vm_details:
5731 vm_name = vm_details["name"]
5732 self.logger.info("VM: {} flavor_disk :{}".format(vm_name, flavor_disk))
5733
5734 if vm_details and "vm_virtual_hardware" in vm_details:
5735 vm_disk = int(vm_details["vm_virtual_hardware"]["disk_size"])
5736 disk_edit_href = vm_details["vm_virtual_hardware"]["disk_edit_href"]
5737 self.logger.info("VM: {} VM_disk :{}".format(vm_name, vm_disk))
5738
5739 if flavor_disk > vm_disk:
5740 status = self.modify_vm_disk_rest(disk_edit_href, flavor_disk)
5741 self.logger.info(
5742 "Modify disk of VM {} from {} to {} MB".format(
5743 vm_name, vm_disk, flavor_disk
5744 )
5745 )
5746 else:
5747 status = True
5748 self.logger.info("No need to modify disk of VM {}".format(vm_name))
5749
5750 return status
5751 except Exception as exp:
5752 self.logger.info("Error occurred while modifing disk size {}".format(exp))
5753
5754 def modify_vm_disk_rest(self, disk_href, disk_size):
5755 """
5756 Method retrieve modify vm disk size
5757
5758 Args:
5759 disk_href - vCD API URL to GET and PUT disk data
5760 disk_size - disk size as specified in VNFD (flavor)
5761
5762 Returns:
5763 The return network uuid or return None
5764 """
5765 if disk_href is None or disk_size is None:
5766 return None
5767
5768 if self.client._session:
5769 headers = {
5770 "Accept": "application/*+xml;version=" + API_VERSION,
5771 "x-vcloud-authorization": self.client._session.headers[
5772 "x-vcloud-authorization"
5773 ],
5774 }
5775 response = self.perform_request(
5776 req_type="GET", url=disk_href, headers=headers
5777 )
5778
5779 if response.status_code == 403:
5780 response = self.retry_rest("GET", disk_href)
5781
5782 if response.status_code != requests.codes.ok:
5783 self.logger.debug(
5784 "GET REST API call {} failed. Return status code {}".format(
5785 disk_href, response.status_code
5786 )
5787 )
5788
5789 return None
5790
5791 try:
5792 lxmlroot_respond = lxmlElementTree.fromstring(response.content)
5793 namespaces = {
5794 prefix: uri for prefix, uri in lxmlroot_respond.nsmap.items() if prefix
5795 }
5796 namespaces["xmlns"] = "http://www.vmware.com/vcloud/v1.5"
5797
5798 for item in lxmlroot_respond.iterfind("xmlns:Item", namespaces):
5799 if item.find("rasd:Description", namespaces).text == "Hard disk":
5800 disk_item = item.find("rasd:HostResource", namespaces)
5801 if disk_item is not None:
5802 disk_item.attrib["{" + namespaces["xmlns"] + "}capacity"] = str(
5803 disk_size
5804 )
5805 break
5806
5807 data = lxmlElementTree.tostring(
5808 lxmlroot_respond, encoding="utf8", method="xml", xml_declaration=True
5809 )
5810
5811 # Send PUT request to modify disk size
5812 headers[
5813 "Content-Type"
5814 ] = "application/vnd.vmware.vcloud.rasdItemsList+xml; charset=ISO-8859-1"
5815
5816 response = self.perform_request(
5817 req_type="PUT", url=disk_href, headers=headers, data=data
5818 )
5819 if response.status_code == 403:
5820 add_headers = {"Content-Type": headers["Content-Type"]}
5821 response = self.retry_rest("PUT", disk_href, add_headers, data)
5822
5823 if response.status_code != 202:
5824 self.logger.debug(
5825 "PUT REST API call {} failed. Return status code {}".format(
5826 disk_href, response.status_code
5827 )
5828 )
5829 else:
5830 modify_disk_task = self.get_task_from_response(response.text)
5831 result = self.client.get_task_monitor().wait_for_success(
5832 task=modify_disk_task
5833 )
5834 if result.get("status") == "success":
5835 return True
5836 else:
5837 return False
5838
5839 return None
5840 except Exception as exp:
5841 self.logger.info(
5842 "Error occurred calling rest api for modifing disk size {}".format(exp)
5843 )
5844
5845 return None
5846
5847 def add_serial_device(self, vapp_uuid):
5848 """
5849 Method to attach a serial device to a VM
5850
5851 Args:
5852 vapp_uuid - uuid of vApp/VM
5853
5854 Returns:
5855 """
5856 self.logger.info("Add serial devices into vApp {}".format(vapp_uuid))
5857 _, content = self.get_vcenter_content()
5858 vm_moref_id = self.get_vm_moref_id(vapp_uuid)
5859
5860 if vm_moref_id:
5861 try:
5862 host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
5863 self.logger.info(
5864 "VM {} is currently on host {}".format(vm_obj, host_obj)
5865 )
5866 if host_obj and vm_obj:
5867 spec = vim.vm.ConfigSpec()
5868 spec.deviceChange = []
5869 serial_spec = vim.vm.device.VirtualDeviceSpec()
5870 serial_spec.operation = "add"
5871 serial_port = vim.vm.device.VirtualSerialPort()
5872 serial_port.yieldOnPoll = True
5873 backing = serial_port.URIBackingInfo()
5874 backing.serviceURI = "tcp://:65500"
5875 backing.direction = "server"
5876 serial_port.backing = backing
5877 serial_spec.device = serial_port
5878 spec.deviceChange.append(serial_spec)
5879 vm_obj.ReconfigVM_Task(spec=spec)
5880 self.logger.info("Adding serial device to VM {}".format(vm_obj))
5881 except vmodl.MethodFault as error:
5882 self.logger.error("Error occurred while adding PCI devices {} ", error)
5883
5884 def add_pci_devices(self, vapp_uuid, pci_devices, vmname_andid):
5885 """
5886 Method to attach pci devices to VM
5887
5888 Args:
5889 vapp_uuid - uuid of vApp/VM
5890 pci_devices - pci devices infromation as specified in VNFD (flavor)
5891
5892 Returns:
5893 The status of add pci device task , vm object and
5894 vcenter_conect object
5895 """
5896 vm_obj = None
5897 self.logger.info(
5898 "Add pci devices {} into vApp {}".format(pci_devices, vapp_uuid)
5899 )
5900 vcenter_conect, content = self.get_vcenter_content()
5901 vm_moref_id = self.get_vm_moref_id(vapp_uuid)
5902
5903 if vm_moref_id:
5904 try:
5905 no_of_pci_devices = len(pci_devices)
5906 if no_of_pci_devices > 0:
5907 # Get VM and its host
5908 host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
5909 self.logger.info(
5910 "VM {} is currently on host {}".format(vm_obj, host_obj)
5911 )
5912
5913 if host_obj and vm_obj:
5914 # get PCI devies from host on which vapp is currently installed
5915 avilable_pci_devices = self.get_pci_devices(
5916 host_obj, no_of_pci_devices
5917 )
5918
5919 if avilable_pci_devices is None:
5920 # find other hosts with active pci devices
5921 (
5922 new_host_obj,
5923 avilable_pci_devices,
5924 ) = self.get_host_and_PCIdevices(content, no_of_pci_devices)
5925
5926 if (
5927 new_host_obj is not None
5928 and avilable_pci_devices is not None
5929 and len(avilable_pci_devices) > 0
5930 ):
5931 # Migrate vm to the host where PCI devices are availble
5932 self.logger.info(
5933 "Relocate VM {} on new host {}".format(
5934 vm_obj, new_host_obj
5935 )
5936 )
5937
5938 task = self.relocate_vm(new_host_obj, vm_obj)
5939 if task is not None:
5940 result = self.wait_for_vcenter_task(
5941 task, vcenter_conect
5942 )
5943 self.logger.info(
5944 "Migrate VM status: {}".format(result)
5945 )
5946 host_obj = new_host_obj
5947 else:
5948 self.logger.info(
5949 "Fail to migrate VM : {}".format(result)
5950 )
5951 raise vimconn.VimConnNotFoundException(
5952 "Fail to migrate VM : {} to host {}".format(
5953 vmname_andid, new_host_obj
5954 )
5955 )
5956
5957 if (
5958 host_obj is not None
5959 and avilable_pci_devices is not None
5960 and len(avilable_pci_devices) > 0
5961 ):
5962 # Add PCI devices one by one
5963 for pci_device in avilable_pci_devices:
5964 task = self.add_pci_to_vm(host_obj, vm_obj, pci_device)
5965 if task:
5966 status = self.wait_for_vcenter_task(
5967 task, vcenter_conect
5968 )
5969
5970 if status:
5971 self.logger.info(
5972 "Added PCI device {} to VM {}".format(
5973 pci_device, str(vm_obj)
5974 )
5975 )
5976 else:
5977 self.logger.error(
5978 "Fail to add PCI device {} to VM {}".format(
5979 pci_device, str(vm_obj)
5980 )
5981 )
5982
5983 return True, vm_obj, vcenter_conect
5984 else:
5985 self.logger.error(
5986 "Currently there is no host with"
5987 " {} number of avaialble PCI devices required for VM {}".format(
5988 no_of_pci_devices, vmname_andid
5989 )
5990 )
5991
5992 raise vimconn.VimConnNotFoundException(
5993 "Currently there is no host with {} "
5994 "number of avaialble PCI devices required for VM {}".format(
5995 no_of_pci_devices, vmname_andid
5996 )
5997 )
5998 else:
5999 self.logger.debug(
6000 "No infromation about PCI devices {} ", pci_devices
6001 )
6002 except vmodl.MethodFault as error:
6003 self.logger.error("Error occurred while adding PCI devices {} ", error)
6004
6005 return None, vm_obj, vcenter_conect
6006
6007 def get_vm_obj(self, content, mob_id):
6008 """
6009 Method to get the vsphere VM object associated with a given morf ID
6010 Args:
6011 vapp_uuid - uuid of vApp/VM
6012 content - vCenter content object
6013 mob_id - mob_id of VM
6014
6015 Returns:
6016 VM and host object
6017 """
6018 vm_obj = None
6019 host_obj = None
6020
6021 try:
6022 container = content.viewManager.CreateContainerView(
6023 content.rootFolder, [vim.VirtualMachine], True
6024 )
6025 for vm in container.view:
6026 mobID = vm._GetMoId()
6027
6028 if mobID == mob_id:
6029 vm_obj = vm
6030 host_obj = vm_obj.runtime.host
6031 break
6032 except Exception as exp:
6033 self.logger.error("Error occurred while finding VM object : {}".format(exp))
6034
6035 return host_obj, vm_obj
6036
6037 def get_pci_devices(self, host, need_devices):
6038 """
6039 Method to get the details of pci devices on given host
6040 Args:
6041 host - vSphere host object
6042 need_devices - number of pci devices needed on host
6043
6044 Returns:
6045 array of pci devices
6046 """
6047 all_devices = []
6048 all_device_ids = []
6049 used_devices_ids = []
6050
6051 try:
6052 if host:
6053 pciPassthruInfo = host.config.pciPassthruInfo
6054 pciDevies = host.hardware.pciDevice
6055
6056 for pci_status in pciPassthruInfo:
6057 if pci_status.passthruActive:
6058 for device in pciDevies:
6059 if device.id == pci_status.id:
6060 all_device_ids.append(device.id)
6061 all_devices.append(device)
6062
6063 # check if devices are in use
6064 avalible_devices = all_devices
6065 for vm in host.vm:
6066 if vm.runtime.powerState == vim.VirtualMachinePowerState.poweredOn:
6067 vm_devices = vm.config.hardware.device
6068 for device in vm_devices:
6069 if type(device) is vim.vm.device.VirtualPCIPassthrough:
6070 if device.backing.id in all_device_ids:
6071 for use_device in avalible_devices:
6072 if use_device.id == device.backing.id:
6073 avalible_devices.remove(use_device)
6074
6075 used_devices_ids.append(device.backing.id)
6076 self.logger.debug(
6077 "Device {} from devices {}"
6078 "is in use".format(device.backing.id, device)
6079 )
6080 if len(avalible_devices) < need_devices:
6081 self.logger.debug(
6082 "Host {} don't have {} number of active devices".format(
6083 host, need_devices
6084 )
6085 )
6086 self.logger.debug(
6087 "found only {} devices {}".format(
6088 len(avalible_devices), avalible_devices
6089 )
6090 )
6091
6092 return None
6093 else:
6094 required_devices = avalible_devices[:need_devices]
6095 self.logger.info(
6096 "Found {} PCI devices on host {} but required only {}".format(
6097 len(avalible_devices), host, need_devices
6098 )
6099 )
6100 self.logger.info(
6101 "Retruning {} devices as {}".format(need_devices, required_devices)
6102 )
6103
6104 return required_devices
6105 except Exception as exp:
6106 self.logger.error(
6107 "Error {} occurred while finding pci devices on host: {}".format(
6108 exp, host
6109 )
6110 )
6111
6112 return None
6113
6114 def get_host_and_PCIdevices(self, content, need_devices):
6115 """
6116 Method to get the details of pci devices infromation on all hosts
6117
6118 Args:
6119 content - vSphere host object
6120 need_devices - number of pci devices needed on host
6121
6122 Returns:
6123 array of pci devices and host object
6124 """
6125 host_obj = None
6126 pci_device_objs = None
6127
6128 try:
6129 if content:
6130 container = content.viewManager.CreateContainerView(
6131 content.rootFolder, [vim.HostSystem], True
6132 )
6133 for host in container.view:
6134 devices = self.get_pci_devices(host, need_devices)
6135
6136 if devices:
6137 host_obj = host
6138 pci_device_objs = devices
6139 break
6140 except Exception as exp:
6141 self.logger.error(
6142 "Error {} occurred while finding pci devices on host: {}".format(
6143 exp, host_obj
6144 )
6145 )
6146
6147 return host_obj, pci_device_objs
6148
6149 def relocate_vm(self, dest_host, vm):
6150 """
6151 Method to get the relocate VM to new host
6152
6153 Args:
6154 dest_host - vSphere host object
6155 vm - vSphere VM object
6156
6157 Returns:
6158 task object
6159 """
6160 task = None
6161
6162 try:
6163 relocate_spec = vim.vm.RelocateSpec(host=dest_host)
6164 task = vm.Relocate(relocate_spec)
6165 self.logger.info(
6166 "Migrating {} to destination host {}".format(vm, dest_host)
6167 )
6168 except Exception as exp:
6169 self.logger.error(
6170 "Error occurred while relocate VM {} to new host {}: {}".format(
6171 dest_host, vm, exp
6172 )
6173 )
6174
6175 return task
6176
6177 def wait_for_vcenter_task(self, task, actionName="job", hideResult=False):
6178 """
6179 Waits and provides updates on a vSphere task
6180 """
6181 while task.info.state == vim.TaskInfo.State.running:
6182 time.sleep(2)
6183
6184 if task.info.state == vim.TaskInfo.State.success:
6185 if task.info.result is not None and not hideResult:
6186 self.logger.info(
6187 "{} completed successfully, result: {}".format(
6188 actionName, task.info.result
6189 )
6190 )
6191 else:
6192 self.logger.info("Task {} completed successfully.".format(actionName))
6193 else:
6194 self.logger.error(
6195 "{} did not complete successfully: {} ".format(
6196 actionName, task.info.error
6197 )
6198 )
6199
6200 return task.info.result
6201
6202 def add_pci_to_vm(self, host_object, vm_object, host_pci_dev):
6203 """
6204 Method to add pci device in given VM
6205
6206 Args:
6207 host_object - vSphere host object
6208 vm_object - vSphere VM object
6209 host_pci_dev - host_pci_dev must be one of the devices from the
6210 host_object.hardware.pciDevice list
6211 which is configured as a PCI passthrough device
6212
6213 Returns:
6214 task object
6215 """
6216 task = None
6217
6218 if vm_object and host_object and host_pci_dev:
6219 try:
6220 # Add PCI device to VM
6221 pci_passthroughs = vm_object.environmentBrowser.QueryConfigTarget(
6222 host=None
6223 ).pciPassthrough
6224 systemid_by_pciid = {
6225 item.pciDevice.id: item.systemId for item in pci_passthroughs
6226 }
6227
6228 if host_pci_dev.id not in systemid_by_pciid:
6229 self.logger.error(
6230 "Device {} is not a passthrough device ".format(host_pci_dev)
6231 )
6232 return None
6233
6234 deviceId = hex(host_pci_dev.deviceId % 2**16).lstrip("0x")
6235 backing = vim.VirtualPCIPassthroughDeviceBackingInfo(
6236 deviceId=deviceId,
6237 id=host_pci_dev.id,
6238 systemId=systemid_by_pciid[host_pci_dev.id],
6239 vendorId=host_pci_dev.vendorId,
6240 deviceName=host_pci_dev.deviceName,
6241 )
6242
6243 hba_object = vim.VirtualPCIPassthrough(key=-100, backing=backing)
6244 new_device_config = vim.VirtualDeviceConfigSpec(device=hba_object)
6245 new_device_config.operation = "add"
6246 vmConfigSpec = vim.vm.ConfigSpec()
6247 vmConfigSpec.deviceChange = [new_device_config]
6248 task = vm_object.ReconfigVM_Task(spec=vmConfigSpec)
6249 self.logger.info(
6250 "Adding PCI device {} into VM {} from host {} ".format(
6251 host_pci_dev, vm_object, host_object
6252 )
6253 )
6254 except Exception as exp:
6255 self.logger.error(
6256 "Error occurred while adding pci devive {} to VM {}: {}".format(
6257 host_pci_dev, vm_object, exp
6258 )
6259 )
6260
6261 return task
6262
6263 def get_vm_vcenter_info(self):
6264 """
6265 Method to get details of vCenter and vm
6266
6267 Args:
6268 vapp_uuid - uuid of vApp or VM
6269
6270 Returns:
6271 Moref Id of VM and deails of vCenter
6272 """
6273 vm_vcenter_info = {}
6274
6275 if self.vcenter_ip is not None:
6276 vm_vcenter_info["vm_vcenter_ip"] = self.vcenter_ip
6277 else:
6278 raise vimconn.VimConnException(
6279 message="vCenter IP is not provided."
6280 " Please provide vCenter IP while attaching datacenter "
6281 "to tenant in --config"
6282 )
6283
6284 if self.vcenter_port is not None:
6285 vm_vcenter_info["vm_vcenter_port"] = self.vcenter_port
6286 else:
6287 raise vimconn.VimConnException(
6288 message="vCenter port is not provided."
6289 " Please provide vCenter port while attaching datacenter "
6290 "to tenant in --config"
6291 )
6292
6293 if self.vcenter_user is not None:
6294 vm_vcenter_info["vm_vcenter_user"] = self.vcenter_user
6295 else:
6296 raise vimconn.VimConnException(
6297 message="vCenter user is not provided."
6298 " Please provide vCenter user while attaching datacenter "
6299 "to tenant in --config"
6300 )
6301
6302 if self.vcenter_password is not None:
6303 vm_vcenter_info["vm_vcenter_password"] = self.vcenter_password
6304 else:
6305 raise vimconn.VimConnException(
6306 message="vCenter user password is not provided."
6307 " Please provide vCenter user password while attaching datacenter "
6308 "to tenant in --config"
6309 )
6310
6311 return vm_vcenter_info
6312
6313 def get_vm_pci_details(self, vmuuid):
6314 """
6315 Method to get VM PCI device details from vCenter
6316
6317 Args:
6318 vm_obj - vSphere VM object
6319
6320 Returns:
6321 dict of PCI devives attached to VM
6322
6323 """
6324 vm_pci_devices_info = {}
6325
6326 try:
6327 _, content = self.get_vcenter_content()
6328 vm_moref_id = self.get_vm_moref_id(vmuuid)
6329 if vm_moref_id:
6330 # Get VM and its host
6331 if content:
6332 host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
6333 if host_obj and vm_obj:
6334 vm_pci_devices_info["host_name"] = host_obj.name
6335 vm_pci_devices_info["host_ip"] = host_obj.config.network.vnic[
6336 0
6337 ].spec.ip.ipAddress
6338
6339 for device in vm_obj.config.hardware.device:
6340 if type(device) == vim.vm.device.VirtualPCIPassthrough:
6341 device_details = {
6342 "devide_id": device.backing.id,
6343 "pciSlotNumber": device.slotInfo.pciSlotNumber,
6344 }
6345 vm_pci_devices_info[
6346 device.deviceInfo.label
6347 ] = device_details
6348 else:
6349 self.logger.error(
6350 "Can not connect to vCenter while getting "
6351 "PCI devices infromationn"
6352 )
6353
6354 return vm_pci_devices_info
6355 except Exception as exp:
6356 self.logger.error(
6357 "Error occurred while getting VM information" " for VM : {}".format(exp)
6358 )
6359
6360 raise vimconn.VimConnException(message=exp)
6361
6362 def reserve_memory_for_all_vms(self, vapp, memory_mb):
6363 """
6364 Method to reserve memory for all VMs
6365 Args :
6366 vapp - VApp
6367 memory_mb - Memory in MB
6368 Returns:
6369 None
6370 """
6371 self.logger.info("Reserve memory for all VMs")
6372
6373 for vms in vapp.get_all_vms():
6374 vm_id = vms.get("id").split(":")[-1]
6375 url_rest_call = "{}/api/vApp/vm-{}/virtualHardwareSection/memory".format(
6376 self.url, vm_id
6377 )
6378 headers = {
6379 "Accept": "application/*+xml;version=" + API_VERSION,
6380 "x-vcloud-authorization": self.client._session.headers[
6381 "x-vcloud-authorization"
6382 ],
6383 }
6384 headers["Content-Type"] = "application/vnd.vmware.vcloud.rasdItem+xml"
6385 response = self.perform_request(
6386 req_type="GET", url=url_rest_call, headers=headers
6387 )
6388
6389 if response.status_code == 403:
6390 response = self.retry_rest("GET", url_rest_call)
6391
6392 if response.status_code != 200:
6393 self.logger.error(
6394 "REST call {} failed reason : {}"
6395 "status code : {}".format(
6396 url_rest_call, response.text, response.status_code
6397 )
6398 )
6399 raise vimconn.VimConnException(
6400 "reserve_memory_for_all_vms : Failed to get " "memory"
6401 )
6402
6403 bytexml = bytes(bytearray(response.text, encoding="utf-8"))
6404 contentelem = lxmlElementTree.XML(bytexml)
6405 namespaces = {
6406 prefix: uri for prefix, uri in contentelem.nsmap.items() if prefix
6407 }
6408 namespaces["xmlns"] = "http://www.vmware.com/vcloud/v1.5"
6409
6410 # Find the reservation element in the response
6411 memelem_list = contentelem.findall(".//rasd:Reservation", namespaces)
6412 for memelem in memelem_list:
6413 memelem.text = str(memory_mb)
6414
6415 newdata = lxmlElementTree.tostring(contentelem, pretty_print=True)
6416
6417 response = self.perform_request(
6418 req_type="PUT", url=url_rest_call, headers=headers, data=newdata
6419 )
6420
6421 if response.status_code == 403:
6422 add_headers = {"Content-Type": headers["Content-Type"]}
6423 response = self.retry_rest("PUT", url_rest_call, add_headers, newdata)
6424
6425 if response.status_code != 202:
6426 self.logger.error(
6427 "REST call {} failed reason : {}"
6428 "status code : {} ".format(
6429 url_rest_call, response.text, response.status_code
6430 )
6431 )
6432 raise vimconn.VimConnException(
6433 "reserve_memory_for_all_vms : Failed to update "
6434 "virtual hardware memory section"
6435 )
6436 else:
6437 mem_task = self.get_task_from_response(response.text)
6438 result = self.client.get_task_monitor().wait_for_success(task=mem_task)
6439
6440 if result.get("status") == "success":
6441 self.logger.info(
6442 "reserve_memory_for_all_vms(): VM {} succeeded ".format(vm_id)
6443 )
6444 else:
6445 self.logger.error(
6446 "reserve_memory_for_all_vms(): VM {} failed ".format(vm_id)
6447 )
6448
6449 def connect_vapp_to_org_vdc_network(self, vapp_id, net_name):
6450 """
6451 Configure VApp network config with org vdc network
6452 Args :
6453 vapp - VApp
6454 Returns:
6455 None
6456 """
6457
6458 self.logger.info(
6459 "Connecting vapp {} to org vdc network {}".format(vapp_id, net_name)
6460 )
6461
6462 url_rest_call = "{}/api/vApp/vapp-{}/networkConfigSection/".format(
6463 self.url, vapp_id
6464 )
6465
6466 headers = {
6467 "Accept": "application/*+xml;version=" + API_VERSION,
6468 "x-vcloud-authorization": self.client._session.headers[
6469 "x-vcloud-authorization"
6470 ],
6471 }
6472 response = self.perform_request(
6473 req_type="GET", url=url_rest_call, headers=headers
6474 )
6475
6476 if response.status_code == 403:
6477 response = self.retry_rest("GET", url_rest_call)
6478
6479 if response.status_code != 200:
6480 self.logger.error(
6481 "REST call {} failed reason : {}"
6482 "status code : {}".format(
6483 url_rest_call, response.text, response.status_code
6484 )
6485 )
6486 raise vimconn.VimConnException(
6487 "connect_vapp_to_org_vdc_network : Failed to get "
6488 "network config section"
6489 )
6490
6491 data = response.text
6492 headers[
6493 "Content-Type"
6494 ] = "application/vnd.vmware.vcloud.networkConfigSection+xml"
6495 net_id = self.get_network_id_by_name(net_name)
6496 if not net_id:
6497 raise vimconn.VimConnException(
6498 "connect_vapp_to_org_vdc_network : Failed to find " "existing network"
6499 )
6500
6501 bytexml = bytes(bytearray(data, encoding="utf-8"))
6502 newelem = lxmlElementTree.XML(bytexml)
6503 namespaces = {prefix: uri for prefix, uri in newelem.nsmap.items() if prefix}
6504 namespaces["xmlns"] = "http://www.vmware.com/vcloud/v1.5"
6505 nwcfglist = newelem.findall(".//xmlns:NetworkConfig", namespaces)
6506
6507 # VCD 9.7 returns an incorrect parentnetwork element. Fix it before PUT operation
6508 parentnetworklist = newelem.findall(".//xmlns:ParentNetwork", namespaces)
6509 if parentnetworklist:
6510 for pn in parentnetworklist:
6511 if "href" not in pn.keys():
6512 id_val = pn.get("id")
6513 href_val = "{}/api/network/{}".format(self.url, id_val)
6514 pn.set("href", href_val)
6515
6516 newstr = """<NetworkConfig networkName="{}">
6517 <Configuration>
6518 <ParentNetwork href="{}/api/network/{}"/>
6519 <FenceMode>bridged</FenceMode>
6520 </Configuration>
6521 </NetworkConfig>
6522 """.format(
6523 net_name, self.url, net_id
6524 )
6525 newcfgelem = lxmlElementTree.fromstring(newstr)
6526 if nwcfglist:
6527 nwcfglist[0].addnext(newcfgelem)
6528
6529 newdata = lxmlElementTree.tostring(newelem, pretty_print=True)
6530
6531 response = self.perform_request(
6532 req_type="PUT", url=url_rest_call, headers=headers, data=newdata
6533 )
6534
6535 if response.status_code == 403:
6536 add_headers = {"Content-Type": headers["Content-Type"]}
6537 response = self.retry_rest("PUT", url_rest_call, add_headers, newdata)
6538
6539 if response.status_code != 202:
6540 self.logger.error(
6541 "REST call {} failed reason : {}"
6542 "status code : {} ".format(
6543 url_rest_call, response.text, response.status_code
6544 )
6545 )
6546 raise vimconn.VimConnException(
6547 "connect_vapp_to_org_vdc_network : Failed to update "
6548 "network config section"
6549 )
6550 else:
6551 vapp_task = self.get_task_from_response(response.text)
6552 result = self.client.get_task_monitor().wait_for_success(task=vapp_task)
6553 if result.get("status") == "success":
6554 self.logger.info(
6555 "connect_vapp_to_org_vdc_network(): Vapp {} connected to "
6556 "network {}".format(vapp_id, net_name)
6557 )
6558 else:
6559 self.logger.error(
6560 "connect_vapp_to_org_vdc_network(): Vapp {} failed to "
6561 "connect to network {}".format(vapp_id, net_name)
6562 )
6563
6564 def remove_primary_network_adapter_from_all_vms(self, vapp):
6565 """
6566 Method to remove network adapter type to vm
6567 Args :
6568 vapp - VApp
6569 Returns:
6570 None
6571 """
6572 self.logger.info("Removing network adapter from all VMs")
6573
6574 for vms in vapp.get_all_vms():
6575 vm_id = vms.get("id").split(":")[-1]
6576
6577 url_rest_call = "{}/api/vApp/vm-{}/networkConnectionSection/".format(
6578 self.url, vm_id
6579 )
6580
6581 headers = {
6582 "Accept": "application/*+xml;version=" + API_VERSION,
6583 "x-vcloud-authorization": self.client._session.headers[
6584 "x-vcloud-authorization"
6585 ],
6586 }
6587 response = self.perform_request(
6588 req_type="GET", url=url_rest_call, headers=headers
6589 )
6590
6591 if response.status_code == 403:
6592 response = self.retry_rest("GET", url_rest_call)
6593
6594 if response.status_code != 200:
6595 self.logger.error(
6596 "REST call {} failed reason : {}"
6597 "status code : {}".format(
6598 url_rest_call, response.text, response.status_code
6599 )
6600 )
6601 raise vimconn.VimConnException(
6602 "remove_primary_network_adapter : Failed to get "
6603 "network connection section"
6604 )
6605
6606 data = response.text
6607 data = data.split('<Link rel="edit"')[0]
6608
6609 headers[
6610 "Content-Type"
6611 ] = "application/vnd.vmware.vcloud.networkConnectionSection+xml"
6612
6613 newdata = """<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
6614 <NetworkConnectionSection xmlns="http://www.vmware.com/vcloud/v1.5"
6615 xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1"
6616 xmlns:vssd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData"
6617 xmlns:common="http://schemas.dmtf.org/wbem/wscim/1/common"
6618 xmlns:rasd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData"
6619 xmlns:vmw="http://www.vmware.com/schema/ovf"
6620 xmlns:ovfenv="http://schemas.dmtf.org/ovf/environment/1"
6621 xmlns:vmext="http://www.vmware.com/vcloud/extension/v1.5"
6622 xmlns:ns9="http://www.vmware.com/vcloud/versions"
6623 href="{url}" type="application/vnd.vmware.vcloud.networkConnectionSection+xml"
6624 ovf:required="false">
6625 <ovf:Info>Specifies the available VM network connections</ovf:Info>
6626 <PrimaryNetworkConnectionIndex>0</PrimaryNetworkConnectionIndex>
6627 <Link rel="edit" href="{url}"
6628 type="application/vnd.vmware.vcloud.networkConnectionSection+xml"/>
6629 </NetworkConnectionSection>""".format(
6630 url=url_rest_call
6631 )
6632 response = self.perform_request(
6633 req_type="PUT", url=url_rest_call, headers=headers, data=newdata
6634 )
6635
6636 if response.status_code == 403:
6637 add_headers = {"Content-Type": headers["Content-Type"]}
6638 response = self.retry_rest("PUT", url_rest_call, add_headers, newdata)
6639
6640 if response.status_code != 202:
6641 self.logger.error(
6642 "REST call {} failed reason : {}"
6643 "status code : {} ".format(
6644 url_rest_call, response.text, response.status_code
6645 )
6646 )
6647 raise vimconn.VimConnException(
6648 "remove_primary_network_adapter : Failed to update "
6649 "network connection section"
6650 )
6651 else:
6652 nic_task = self.get_task_from_response(response.text)
6653 result = self.client.get_task_monitor().wait_for_success(task=nic_task)
6654 if result.get("status") == "success":
6655 self.logger.info(
6656 "remove_primary_network_adapter(): VM {} conneced to "
6657 "default NIC type".format(vm_id)
6658 )
6659 else:
6660 self.logger.error(
6661 "remove_primary_network_adapter(): VM {} failed to "
6662 "connect NIC type".format(vm_id)
6663 )
6664
6665 def add_network_adapter_to_vms(
6666 self, vapp, network_name, primary_nic_index, nicIndex, net, nic_type=None
6667 ):
6668 """
6669 Method to add network adapter type to vm
6670 Args :
6671 network_name - name of network
6672 primary_nic_index - int value for primary nic index
6673 nicIndex - int value for nic index
6674 nic_type - specify model name to which add to vm
6675 Returns:
6676 None
6677 """
6678
6679 self.logger.info(
6680 "Add network adapter to VM: network_name {} nicIndex {} nic_type {}".format(
6681 network_name, nicIndex, nic_type
6682 )
6683 )
6684 try:
6685 ip_address = None
6686 floating_ip = False
6687 mac_address = None
6688 if "floating_ip" in net:
6689 floating_ip = net["floating_ip"]
6690
6691 # Stub for ip_address feature
6692 if "ip_address" in net:
6693 ip_address = net["ip_address"]
6694
6695 if "mac_address" in net:
6696 mac_address = net["mac_address"]
6697
6698 if floating_ip:
6699 allocation_mode = "POOL"
6700 elif ip_address:
6701 allocation_mode = "MANUAL"
6702 else:
6703 allocation_mode = "DHCP"
6704
6705 if not nic_type:
6706 for vms in vapp.get_all_vms():
6707 vm_id = vms.get("id").split(":")[-1]
6708
6709 url_rest_call = (
6710 "{}/api/vApp/vm-{}/networkConnectionSection/".format(
6711 self.url, vm_id
6712 )
6713 )
6714
6715 headers = {
6716 "Accept": "application/*+xml;version=" + API_VERSION,
6717 "x-vcloud-authorization": self.client._session.headers[
6718 "x-vcloud-authorization"
6719 ],
6720 }
6721 response = self.perform_request(
6722 req_type="GET", url=url_rest_call, headers=headers
6723 )
6724
6725 if response.status_code == 403:
6726 response = self.retry_rest("GET", url_rest_call)
6727
6728 if response.status_code != 200:
6729 self.logger.error(
6730 "REST call {} failed reason : {}"
6731 "status code : {}".format(
6732 url_rest_call, response.text, response.status_code
6733 )
6734 )
6735 raise vimconn.VimConnException(
6736 "add_network_adapter_to_vms : Failed to get "
6737 "network connection section"
6738 )
6739
6740 data = response.text
6741 data = data.split('<Link rel="edit"')[0]
6742 if "<PrimaryNetworkConnectionIndex>" not in data:
6743 self.logger.debug("add_network_adapter PrimaryNIC not in data")
6744 item = """<PrimaryNetworkConnectionIndex>{}</PrimaryNetworkConnectionIndex>
6745 <NetworkConnection network="{}">
6746 <NetworkConnectionIndex>{}</NetworkConnectionIndex>
6747 <IsConnected>true</IsConnected>
6748 <IpAddressAllocationMode>{}</IpAddressAllocationMode>
6749 </NetworkConnection>""".format(
6750 primary_nic_index, network_name, nicIndex, allocation_mode
6751 )
6752
6753 # Stub for ip_address feature
6754 if ip_address:
6755 ip_tag = "<IpAddress>{}</IpAddress>".format(ip_address)
6756 item = item.replace(
6757 "</NetworkConnectionIndex>\n",
6758 "</NetworkConnectionIndex>\n{}\n".format(ip_tag),
6759 )
6760
6761 if mac_address:
6762 mac_tag = "<MACAddress>{}</MACAddress>".format(mac_address)
6763 item = item.replace(
6764 "</IsConnected>\n",
6765 "</IsConnected>\n{}\n".format(mac_tag),
6766 )
6767
6768 data = data.replace(
6769 "</ovf:Info>\n",
6770 "</ovf:Info>\n{}\n</NetworkConnectionSection>".format(item),
6771 )
6772 else:
6773 self.logger.debug("add_network_adapter PrimaryNIC in data")
6774 new_item = """<NetworkConnection network="{}">
6775 <NetworkConnectionIndex>{}</NetworkConnectionIndex>
6776 <IsConnected>true</IsConnected>
6777 <IpAddressAllocationMode>{}</IpAddressAllocationMode>
6778 </NetworkConnection>""".format(
6779 network_name, nicIndex, allocation_mode
6780 )
6781
6782 # Stub for ip_address feature
6783 if ip_address:
6784 ip_tag = "<IpAddress>{}</IpAddress>".format(ip_address)
6785 new_item = new_item.replace(
6786 "</NetworkConnectionIndex>\n",
6787 "</NetworkConnectionIndex>\n{}\n".format(ip_tag),
6788 )
6789
6790 if mac_address:
6791 mac_tag = "<MACAddress>{}</MACAddress>".format(mac_address)
6792 new_item = new_item.replace(
6793 "</IsConnected>\n",
6794 "</IsConnected>\n{}\n".format(mac_tag),
6795 )
6796
6797 data = data + new_item + "</NetworkConnectionSection>"
6798
6799 headers[
6800 "Content-Type"
6801 ] = "application/vnd.vmware.vcloud.networkConnectionSection+xml"
6802
6803 response = self.perform_request(
6804 req_type="PUT", url=url_rest_call, headers=headers, data=data
6805 )
6806
6807 if response.status_code == 403:
6808 add_headers = {"Content-Type": headers["Content-Type"]}
6809 response = self.retry_rest(
6810 "PUT", url_rest_call, add_headers, data
6811 )
6812
6813 if response.status_code != 202:
6814 self.logger.error(
6815 "REST call {} failed reason : {}"
6816 "status code : {} ".format(
6817 url_rest_call, response.text, response.status_code
6818 )
6819 )
6820 raise vimconn.VimConnException(
6821 "add_network_adapter_to_vms : Failed to update "
6822 "network connection section"
6823 )
6824 else:
6825 nic_task = self.get_task_from_response(response.text)
6826 result = self.client.get_task_monitor().wait_for_success(
6827 task=nic_task
6828 )
6829
6830 if result.get("status") == "success":
6831 self.logger.info(
6832 "add_network_adapter_to_vms(): VM {} conneced to "
6833 "default NIC type".format(vm_id)
6834 )
6835 else:
6836 self.logger.error(
6837 "add_network_adapter_to_vms(): VM {} failed to "
6838 "connect NIC type".format(vm_id)
6839 )
6840 else:
6841 for vms in vapp.get_all_vms():
6842 vm_id = vms.get("id").split(":")[-1]
6843
6844 url_rest_call = (
6845 "{}/api/vApp/vm-{}/networkConnectionSection/".format(
6846 self.url, vm_id
6847 )
6848 )
6849
6850 headers = {
6851 "Accept": "application/*+xml;version=" + API_VERSION,
6852 "x-vcloud-authorization": self.client._session.headers[
6853 "x-vcloud-authorization"
6854 ],
6855 }
6856 response = self.perform_request(
6857 req_type="GET", url=url_rest_call, headers=headers
6858 )
6859
6860 if response.status_code == 403:
6861 response = self.retry_rest("GET", url_rest_call)
6862
6863 if response.status_code != 200:
6864 self.logger.error(
6865 "REST call {} failed reason : {}"
6866 "status code : {}".format(
6867 url_rest_call, response.text, response.status_code
6868 )
6869 )
6870 raise vimconn.VimConnException(
6871 "add_network_adapter_to_vms : Failed to get "
6872 "network connection section"
6873 )
6874 data = response.text
6875 data = data.split('<Link rel="edit"')[0]
6876 vcd_netadapter_type = nic_type
6877
6878 if nic_type in ["SR-IOV", "VF"]:
6879 vcd_netadapter_type = "SRIOVETHERNETCARD"
6880
6881 if "<PrimaryNetworkConnectionIndex>" not in data:
6882 self.logger.debug(
6883 "add_network_adapter PrimaryNIC not in data nic_type {}".format(
6884 nic_type
6885 )
6886 )
6887 item = """<PrimaryNetworkConnectionIndex>{}</PrimaryNetworkConnectionIndex>
6888 <NetworkConnection network="{}">
6889 <NetworkConnectionIndex>{}</NetworkConnectionIndex>
6890 <IsConnected>true</IsConnected>
6891 <IpAddressAllocationMode>{}</IpAddressAllocationMode>
6892 <NetworkAdapterType>{}</NetworkAdapterType>
6893 </NetworkConnection>""".format(
6894 primary_nic_index,
6895 network_name,
6896 nicIndex,
6897 allocation_mode,
6898 vcd_netadapter_type,
6899 )
6900
6901 # Stub for ip_address feature
6902 if ip_address:
6903 ip_tag = "<IpAddress>{}</IpAddress>".format(ip_address)
6904 item = item.replace(
6905 "</NetworkConnectionIndex>\n",
6906 "</NetworkConnectionIndex>\n{}\n".format(ip_tag),
6907 )
6908
6909 if mac_address:
6910 mac_tag = "<MACAddress>{}</MACAddress>".format(mac_address)
6911 item = item.replace(
6912 "</IsConnected>\n",
6913 "</IsConnected>\n{}\n".format(mac_tag),
6914 )
6915
6916 data = data.replace(
6917 "</ovf:Info>\n",
6918 "</ovf:Info>\n{}\n</NetworkConnectionSection>".format(item),
6919 )
6920 else:
6921 self.logger.debug(
6922 "add_network_adapter PrimaryNIC in data nic_type {}".format(
6923 nic_type
6924 )
6925 )
6926 new_item = """<NetworkConnection network="{}">
6927 <NetworkConnectionIndex>{}</NetworkConnectionIndex>
6928 <IsConnected>true</IsConnected>
6929 <IpAddressAllocationMode>{}</IpAddressAllocationMode>
6930 <NetworkAdapterType>{}</NetworkAdapterType>
6931 </NetworkConnection>""".format(
6932 network_name, nicIndex, allocation_mode, vcd_netadapter_type
6933 )
6934
6935 # Stub for ip_address feature
6936 if ip_address:
6937 ip_tag = "<IpAddress>{}</IpAddress>".format(ip_address)
6938 new_item = new_item.replace(
6939 "</NetworkConnectionIndex>\n",
6940 "</NetworkConnectionIndex>\n{}\n".format(ip_tag),
6941 )
6942
6943 if mac_address:
6944 mac_tag = "<MACAddress>{}</MACAddress>".format(mac_address)
6945 new_item = new_item.replace(
6946 "</IsConnected>\n",
6947 "</IsConnected>\n{}\n".format(mac_tag),
6948 )
6949
6950 data = data + new_item + "</NetworkConnectionSection>"
6951
6952 headers[
6953 "Content-Type"
6954 ] = "application/vnd.vmware.vcloud.networkConnectionSection+xml"
6955
6956 response = self.perform_request(
6957 req_type="PUT", url=url_rest_call, headers=headers, data=data
6958 )
6959
6960 if response.status_code == 403:
6961 add_headers = {"Content-Type": headers["Content-Type"]}
6962 response = self.retry_rest(
6963 "PUT", url_rest_call, add_headers, data
6964 )
6965
6966 if response.status_code != 202:
6967 self.logger.error(
6968 "REST call {} failed reason : {}"
6969 "status code : {}".format(
6970 url_rest_call, response.text, response.status_code
6971 )
6972 )
6973 raise vimconn.VimConnException(
6974 "add_network_adapter_to_vms : Failed to update "
6975 "network connection section"
6976 )
6977 else:
6978 nic_task = self.get_task_from_response(response.text)
6979 result = self.client.get_task_monitor().wait_for_success(
6980 task=nic_task
6981 )
6982
6983 if result.get("status") == "success":
6984 self.logger.info(
6985 "add_network_adapter_to_vms(): VM {} "
6986 "conneced to NIC type {}".format(vm_id, nic_type)
6987 )
6988 else:
6989 self.logger.error(
6990 "add_network_adapter_to_vms(): VM {} "
6991 "failed to connect NIC type {}".format(vm_id, nic_type)
6992 )
6993 except Exception as exp:
6994 self.logger.error(
6995 "add_network_adapter_to_vms() : exception occurred "
6996 "while adding Network adapter"
6997 )
6998
6999 raise vimconn.VimConnException(message=exp)
7000
7001 def set_numa_affinity(self, vmuuid, paired_threads_id):
7002 """
7003 Method to assign numa affinity in vm configuration parammeters
7004 Args :
7005 vmuuid - vm uuid
7006 paired_threads_id - one or more virtual processor
7007 numbers
7008 Returns:
7009 return if True
7010 """
7011 try:
7012 vcenter_conect, content = self.get_vcenter_content()
7013 vm_moref_id = self.get_vm_moref_id(vmuuid)
7014 _, vm_obj = self.get_vm_obj(content, vm_moref_id)
7015
7016 if vm_obj:
7017 config_spec = vim.vm.ConfigSpec()
7018 config_spec.extraConfig = []
7019 opt = vim.option.OptionValue()
7020 opt.key = "numa.nodeAffinity"
7021 opt.value = str(paired_threads_id)
7022 config_spec.extraConfig.append(opt)
7023 task = vm_obj.ReconfigVM_Task(config_spec)
7024
7025 if task:
7026 self.wait_for_vcenter_task(task, vcenter_conect)
7027 extra_config = vm_obj.config.extraConfig
7028 flag = False
7029
7030 for opts in extra_config:
7031 if "numa.nodeAffinity" in opts.key:
7032 flag = True
7033 self.logger.info(
7034 "set_numa_affinity: Sucessfully assign numa affinity "
7035 "value {} for vm {}".format(opt.value, vm_obj)
7036 )
7037
7038 if flag:
7039 return
7040 else:
7041 self.logger.error("set_numa_affinity: Failed to assign numa affinity")
7042 except Exception as exp:
7043 self.logger.error(
7044 "set_numa_affinity : exception occurred while setting numa affinity "
7045 "for VM {} : {}".format(vm_obj, vm_moref_id)
7046 )
7047
7048 raise vimconn.VimConnException(
7049 "set_numa_affinity : Error {} failed to assign numa "
7050 "affinity".format(exp)
7051 )
7052
7053 def cloud_init(self, vapp, cloud_config):
7054 """
7055 Method to inject ssh-key
7056 vapp - vapp object
7057 cloud_config a dictionary with:
7058 'key-pairs': (optional) list of strings with the public key to be inserted to the default user
7059 'users': (optional) list of users to be inserted, each item is a dict with:
7060 'name': (mandatory) user name,
7061 'key-pairs': (optional) list of strings with the public key to be inserted to the user
7062 'user-data': (optional) can be a string with the text script to be passed directly to cloud-init,
7063 or a list of strings, each one contains a script to be passed, usually with a MIMEmultipart file
7064 'config-files': (optional). List of files to be transferred. Each item is a dict with:
7065 'dest': (mandatory) string with the destination absolute path
7066 'encoding': (optional, by default text). Can be one of:
7067 'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
7068 'content' (mandatory): string with the content of the file
7069 'permissions': (optional) string with file permissions, typically octal notation '0644'
7070 'owner': (optional) file owner, string with the format 'owner:group'
7071 'boot-data-drive': boolean to indicate if user-data must be passed using a boot drive (hard disk
7072 """
7073 try:
7074 if not isinstance(cloud_config, dict):
7075 raise Exception(
7076 "cloud_init : parameter cloud_config is not a dictionary"
7077 )
7078 else:
7079 key_pairs = []
7080 userdata = []
7081
7082 if "key-pairs" in cloud_config:
7083 key_pairs = cloud_config["key-pairs"]
7084
7085 if "users" in cloud_config:
7086 userdata = cloud_config["users"]
7087
7088 self.logger.debug("cloud_init : Guest os customization started..")
7089 customize_script = self.format_script(
7090 key_pairs=key_pairs, users_list=userdata
7091 )
7092 customize_script = customize_script.replace("&", "&amp;")
7093 self.guest_customization(vapp, customize_script)
7094 except Exception as exp:
7095 self.logger.error(
7096 "cloud_init : exception occurred while injecting " "ssh-key"
7097 )
7098
7099 raise vimconn.VimConnException(
7100 "cloud_init : Error {} failed to inject " "ssh-key".format(exp)
7101 )
7102
7103 def format_script(self, key_pairs=[], users_list=[]):
7104 bash_script = """#!/bin/sh
7105 echo performing customization tasks with param $1 at `date "+DATE: %Y-%m-%d - TIME: %H:%M:%S"`>> /root/customization.log
7106 if [ "$1" = "precustomization" ];then
7107 echo performing precustomization tasks on `date "+DATE: %Y-%m-%d - TIME: %H:%M:%S"` >> /root/customization.log
7108 """
7109
7110 keys = "\n".join(key_pairs)
7111 if keys:
7112 keys_data = """
7113 if [ ! -d /root/.ssh ];then
7114 mkdir /root/.ssh
7115 chown root:root /root/.ssh
7116 chmod 700 /root/.ssh
7117 touch /root/.ssh/authorized_keys
7118 chown root:root /root/.ssh/authorized_keys
7119 chmod 600 /root/.ssh/authorized_keys
7120 # make centos with selinux happy
7121 which restorecon && restorecon -Rv /root/.ssh
7122 else
7123 touch /root/.ssh/authorized_keys
7124 chown root:root /root/.ssh/authorized_keys
7125 chmod 600 /root/.ssh/authorized_keys
7126 fi
7127 echo '{key}' >> /root/.ssh/authorized_keys
7128 """.format(
7129 key=keys
7130 )
7131
7132 bash_script += keys_data
7133
7134 for user in users_list:
7135 if "name" in user:
7136 user_name = user["name"]
7137
7138 if "key-pairs" in user:
7139 user_keys = "\n".join(user["key-pairs"])
7140 else:
7141 user_keys = None
7142
7143 add_user_name = """
7144 useradd -d /home/{user_name} -m -g users -s /bin/bash {user_name}
7145 """.format(
7146 user_name=user_name
7147 )
7148
7149 bash_script += add_user_name
7150
7151 if user_keys:
7152 user_keys_data = """
7153 mkdir /home/{user_name}/.ssh
7154 chown {user_name}:{user_name} /home/{user_name}/.ssh
7155 chmod 700 /home/{user_name}/.ssh
7156 touch /home/{user_name}/.ssh/authorized_keys
7157 chown {user_name}:{user_name} /home/{user_name}/.ssh/authorized_keys
7158 chmod 600 /home/{user_name}/.ssh/authorized_keys
7159 # make centos with selinux happy
7160 which restorecon && restorecon -Rv /home/{user_name}/.ssh
7161 echo '{user_key}' >> /home/{user_name}/.ssh/authorized_keys
7162 """.format(
7163 user_name=user_name, user_key=user_keys
7164 )
7165 bash_script += user_keys_data
7166
7167 return bash_script + "\n\tfi"
7168
7169 def guest_customization(self, vapp, customize_script):
7170 """
7171 Method to customize guest os
7172 vapp - Vapp object
7173 customize_script - Customize script to be run at first boot of VM.
7174 """
7175 for vm in vapp.get_all_vms():
7176 vm_id = vm.get("id").split(":")[-1]
7177 vm_name = vm.get("name")
7178 vm_name = vm_name.replace("_", "-")
7179
7180 vm_customization_url = (
7181 "{}/api/vApp/vm-{}/guestCustomizationSection/".format(self.url, vm_id)
7182 )
7183 headers = {
7184 "Accept": "application/*+xml;version=" + API_VERSION,
7185 "x-vcloud-authorization": self.client._session.headers[
7186 "x-vcloud-authorization"
7187 ],
7188 }
7189
7190 headers[
7191 "Content-Type"
7192 ] = "application/vnd.vmware.vcloud.guestCustomizationSection+xml"
7193
7194 data = """<GuestCustomizationSection
7195 xmlns="http://www.vmware.com/vcloud/v1.5"
7196 xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1"
7197 ovf:required="false" href="{}"
7198 type="application/vnd.vmware.vcloud.guestCustomizationSection+xml">
7199 <ovf:Info>Specifies Guest OS Customization Settings</ovf:Info>
7200 <Enabled>true</Enabled>
7201 <ChangeSid>false</ChangeSid>
7202 <VirtualMachineId>{}</VirtualMachineId>
7203 <JoinDomainEnabled>false</JoinDomainEnabled>
7204 <UseOrgSettings>false</UseOrgSettings>
7205 <AdminPasswordEnabled>false</AdminPasswordEnabled>
7206 <AdminPasswordAuto>true</AdminPasswordAuto>
7207 <AdminAutoLogonEnabled>false</AdminAutoLogonEnabled>
7208 <AdminAutoLogonCount>0</AdminAutoLogonCount>
7209 <ResetPasswordRequired>false</ResetPasswordRequired>
7210 <CustomizationScript>{}</CustomizationScript>
7211 <ComputerName>{}</ComputerName>
7212 <Link href="{}"
7213 type="application/vnd.vmware.vcloud.guestCustomizationSection+xml" rel="edit"/>
7214 </GuestCustomizationSection>
7215 """.format(
7216 vm_customization_url,
7217 vm_id,
7218 customize_script,
7219 vm_name,
7220 vm_customization_url,
7221 )
7222
7223 response = self.perform_request(
7224 req_type="PUT", url=vm_customization_url, headers=headers, data=data
7225 )
7226 if response.status_code == 202:
7227 guest_task = self.get_task_from_response(response.text)
7228 self.client.get_task_monitor().wait_for_success(task=guest_task)
7229 self.logger.info(
7230 "guest_customization : customized guest os task "
7231 "completed for VM {}".format(vm_name)
7232 )
7233 else:
7234 self.logger.error(
7235 "guest_customization : task for customized guest os"
7236 "failed for VM {}".format(vm_name)
7237 )
7238
7239 raise vimconn.VimConnException(
7240 "guest_customization : failed to perform"
7241 "guest os customization on VM {}".format(vm_name)
7242 )
7243
7244 def add_new_disk(self, vapp_uuid, disk_size):
7245 """
7246 Method to create an empty vm disk
7247
7248 Args:
7249 vapp_uuid - is vapp identifier.
7250 disk_size - size of disk to be created in GB
7251
7252 Returns:
7253 None
7254 """
7255 status = False
7256 vm_details = None
7257 try:
7258 # Disk size in GB, convert it into MB
7259 if disk_size is not None:
7260 disk_size_mb = int(disk_size) * 1024
7261 vm_details = self.get_vapp_details_rest(vapp_uuid)
7262
7263 if vm_details and "vm_virtual_hardware" in vm_details:
7264 self.logger.info(
7265 "Adding disk to VM: {} disk size:{}GB".format(
7266 vm_details["name"], disk_size
7267 )
7268 )
7269 disk_href = vm_details["vm_virtual_hardware"]["disk_edit_href"]
7270 status = self.add_new_disk_rest(disk_href, disk_size_mb)
7271 except Exception as exp:
7272 msg = "Error occurred while creating new disk {}.".format(exp)
7273 self.rollback_newvm(vapp_uuid, msg)
7274
7275 if status:
7276 self.logger.info(
7277 "Added new disk to VM: {} disk size:{}GB".format(
7278 vm_details["name"], disk_size
7279 )
7280 )
7281 else:
7282 # If failed to add disk, delete VM
7283 msg = "add_new_disk: Failed to add new disk to {}".format(
7284 vm_details["name"]
7285 )
7286 self.rollback_newvm(vapp_uuid, msg)
7287
7288 def add_new_disk_rest(self, disk_href, disk_size_mb):
7289 """
7290 Retrives vApp Disks section & add new empty disk
7291
7292 Args:
7293 disk_href: Disk section href to addd disk
7294 disk_size_mb: Disk size in MB
7295
7296 Returns: Status of add new disk task
7297 """
7298 status = False
7299 if self.client._session:
7300 headers = {
7301 "Accept": "application/*+xml;version=" + API_VERSION,
7302 "x-vcloud-authorization": self.client._session.headers[
7303 "x-vcloud-authorization"
7304 ],
7305 }
7306 response = self.perform_request(
7307 req_type="GET", url=disk_href, headers=headers
7308 )
7309
7310 if response.status_code == 403:
7311 response = self.retry_rest("GET", disk_href)
7312
7313 if response.status_code != requests.codes.ok:
7314 self.logger.error(
7315 "add_new_disk_rest: GET REST API call {} failed. Return status code {}".format(
7316 disk_href, response.status_code
7317 )
7318 )
7319
7320 return status
7321
7322 try:
7323 # Find but type & max of instance IDs assigned to disks
7324 lxmlroot_respond = lxmlElementTree.fromstring(response.content)
7325 namespaces = {
7326 prefix: uri for prefix, uri in lxmlroot_respond.nsmap.items() if prefix
7327 }
7328 namespaces["xmlns"] = "http://www.vmware.com/vcloud/v1.5"
7329 instance_id = 0
7330
7331 for item in lxmlroot_respond.iterfind("xmlns:Item", namespaces):
7332 if item.find("rasd:Description", namespaces).text == "Hard disk":
7333 inst_id = int(item.find("rasd:InstanceID", namespaces).text)
7334
7335 if inst_id > instance_id:
7336 instance_id = inst_id
7337 disk_item = item.find("rasd:HostResource", namespaces)
7338 bus_subtype = disk_item.attrib[
7339 "{" + namespaces["xmlns"] + "}busSubType"
7340 ]
7341 bus_type = disk_item.attrib[
7342 "{" + namespaces["xmlns"] + "}busType"
7343 ]
7344
7345 instance_id = instance_id + 1
7346 new_item = """<Item>
7347 <rasd:Description>Hard disk</rasd:Description>
7348 <rasd:ElementName>New disk</rasd:ElementName>
7349 <rasd:HostResource
7350 xmlns:vcloud="http://www.vmware.com/vcloud/v1.5"
7351 vcloud:capacity="{}"
7352 vcloud:busSubType="{}"
7353 vcloud:busType="{}"></rasd:HostResource>
7354 <rasd:InstanceID>{}</rasd:InstanceID>
7355 <rasd:ResourceType>17</rasd:ResourceType>
7356 </Item>""".format(
7357 disk_size_mb, bus_subtype, bus_type, instance_id
7358 )
7359
7360 new_data = response.text
7361 # Add new item at the bottom
7362 new_data = new_data.replace(
7363 "</Item>\n</RasdItemsList>",
7364 "</Item>\n{}\n</RasdItemsList>".format(new_item),
7365 )
7366
7367 # Send PUT request to modify virtual hardware section with new disk
7368 headers[
7369 "Content-Type"
7370 ] = "application/vnd.vmware.vcloud.rasdItemsList+xml; charset=ISO-8859-1"
7371
7372 response = self.perform_request(
7373 req_type="PUT", url=disk_href, data=new_data, headers=headers
7374 )
7375
7376 if response.status_code == 403:
7377 add_headers = {"Content-Type": headers["Content-Type"]}
7378 response = self.retry_rest("PUT", disk_href, add_headers, new_data)
7379
7380 if response.status_code != 202:
7381 self.logger.error(
7382 "PUT REST API call {} failed. Return status code {}. response.text:{}".format(
7383 disk_href, response.status_code, response.text
7384 )
7385 )
7386 else:
7387 add_disk_task = self.get_task_from_response(response.text)
7388 result = self.client.get_task_monitor().wait_for_success(
7389 task=add_disk_task
7390 )
7391
7392 if result.get("status") == "success":
7393 status = True
7394 else:
7395 self.logger.error(
7396 "Add new disk REST task failed to add {} MB disk".format(
7397 disk_size_mb
7398 )
7399 )
7400 except Exception as exp:
7401 self.logger.error(
7402 "Error occurred calling rest api for creating new disk {}".format(exp)
7403 )
7404
7405 return status
7406
7407 def add_existing_disk(
7408 self,
7409 catalogs=None,
7410 image_id=None,
7411 size=None,
7412 template_name=None,
7413 vapp_uuid=None,
7414 ):
7415 """
7416 Method to add existing disk to vm
7417 Args :
7418 catalogs - List of VDC catalogs
7419 image_id - Catalog ID
7420 template_name - Name of template in catalog
7421 vapp_uuid - UUID of vApp
7422 Returns:
7423 None
7424 """
7425 disk_info = None
7426 vcenter_conect, content = self.get_vcenter_content()
7427 # find moref-id of vm in image
7428 catalog_vm_info = self.get_vapp_template_details(
7429 catalogs=catalogs,
7430 image_id=image_id,
7431 )
7432
7433 if catalog_vm_info and "vm_vcenter_info" in catalog_vm_info:
7434 if "vm_moref_id" in catalog_vm_info["vm_vcenter_info"]:
7435 catalog_vm_moref_id = catalog_vm_info["vm_vcenter_info"].get(
7436 "vm_moref_id", None
7437 )
7438
7439 if catalog_vm_moref_id:
7440 self.logger.info(
7441 "Moref_id of VM in catalog : {}".format(catalog_vm_moref_id)
7442 )
7443 _, catalog_vm_obj = self.get_vm_obj(content, catalog_vm_moref_id)
7444
7445 if catalog_vm_obj:
7446 # find existing disk
7447 disk_info = self.find_disk(catalog_vm_obj)
7448 else:
7449 exp_msg = "No VM with image id {} found".format(image_id)
7450 self.rollback_newvm(vapp_uuid, exp_msg, exp_type="NotFound")
7451 else:
7452 exp_msg = "No Image found with image ID {} ".format(image_id)
7453 self.rollback_newvm(vapp_uuid, exp_msg, exp_type="NotFound")
7454
7455 if disk_info:
7456 self.logger.info("Existing disk_info : {}".format(disk_info))
7457 # get VM
7458 vm_moref_id = self.get_vm_moref_id(vapp_uuid)
7459 _, vm_obj = self.get_vm_obj(content, vm_moref_id)
7460
7461 if vm_obj:
7462 status = self.add_disk(
7463 vcenter_conect=vcenter_conect,
7464 vm=vm_obj,
7465 disk_info=disk_info,
7466 size=size,
7467 vapp_uuid=vapp_uuid,
7468 )
7469
7470 if status:
7471 self.logger.info(
7472 "Disk from image id {} added to {}".format(
7473 image_id, vm_obj.config.name
7474 )
7475 )
7476 else:
7477 msg = "No disk found with image id {} to add in VM {}".format(
7478 image_id, vm_obj.config.name
7479 )
7480 self.rollback_newvm(vapp_uuid, msg, exp_type="NotFound")
7481
7482 def find_disk(self, vm_obj):
7483 """
7484 Method to find details of existing disk in VM
7485 Args:
7486 vm_obj - vCenter object of VM
7487 Returns:
7488 disk_info : dict of disk details
7489 """
7490 disk_info = {}
7491 if vm_obj:
7492 try:
7493 devices = vm_obj.config.hardware.device
7494
7495 for device in devices:
7496 if type(device) is vim.vm.device.VirtualDisk:
7497 if isinstance(
7498 device.backing,
7499 vim.vm.device.VirtualDisk.FlatVer2BackingInfo,
7500 ) and hasattr(device.backing, "fileName"):
7501 disk_info["full_path"] = device.backing.fileName
7502 disk_info["datastore"] = device.backing.datastore
7503 disk_info["capacityKB"] = device.capacityInKB
7504 break
7505 except Exception as exp:
7506 self.logger.error(
7507 "find_disk() : exception occurred while "
7508 "getting existing disk details :{}".format(exp)
7509 )
7510
7511 return disk_info
7512
7513 def add_disk(
7514 self, vcenter_conect=None, vm=None, size=None, vapp_uuid=None, disk_info={}
7515 ):
7516 """
7517 Method to add existing disk in VM
7518 Args :
7519 vcenter_conect - vCenter content object
7520 vm - vCenter vm object
7521 disk_info : dict of disk details
7522 Returns:
7523 status : status of add disk task
7524 """
7525 datastore = disk_info["datastore"] if "datastore" in disk_info else None
7526 fullpath = disk_info["full_path"] if "full_path" in disk_info else None
7527 capacityKB = disk_info["capacityKB"] if "capacityKB" in disk_info else None
7528 if size is not None:
7529 # Convert size from GB to KB
7530 sizeKB = int(size) * 1024 * 1024
7531 # compare size of existing disk and user given size.Assign whicherver is greater
7532 self.logger.info(
7533 "Add Existing disk : sizeKB {} , capacityKB {}".format(
7534 sizeKB, capacityKB
7535 )
7536 )
7537
7538 if sizeKB > capacityKB:
7539 capacityKB = sizeKB
7540
7541 if datastore and fullpath and capacityKB:
7542 try:
7543 spec = vim.vm.ConfigSpec()
7544 # get all disks on a VM, set unit_number to the next available
7545 unit_number = 0
7546 for dev in vm.config.hardware.device:
7547 if hasattr(dev.backing, "fileName"):
7548 unit_number = int(dev.unitNumber) + 1
7549 # unit_number 7 reserved for scsi controller
7550
7551 if unit_number == 7:
7552 unit_number += 1
7553
7554 if isinstance(dev, vim.vm.device.VirtualDisk):
7555 # vim.vm.device.VirtualSCSIController
7556 controller_key = dev.controllerKey
7557
7558 self.logger.info(
7559 "Add Existing disk : unit number {} , controller key {}".format(
7560 unit_number, controller_key
7561 )
7562 )
7563 # add disk here
7564 dev_changes = []
7565 disk_spec = vim.vm.device.VirtualDeviceSpec()
7566 disk_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
7567 disk_spec.device = vim.vm.device.VirtualDisk()
7568 disk_spec.device.backing = (
7569 vim.vm.device.VirtualDisk.FlatVer2BackingInfo()
7570 )
7571 disk_spec.device.backing.thinProvisioned = True
7572 disk_spec.device.backing.diskMode = "persistent"
7573 disk_spec.device.backing.datastore = datastore
7574 disk_spec.device.backing.fileName = fullpath
7575
7576 disk_spec.device.unitNumber = unit_number
7577 disk_spec.device.capacityInKB = capacityKB
7578 disk_spec.device.controllerKey = controller_key
7579 dev_changes.append(disk_spec)
7580 spec.deviceChange = dev_changes
7581 task = vm.ReconfigVM_Task(spec=spec)
7582 status = self.wait_for_vcenter_task(task, vcenter_conect)
7583
7584 return status
7585 except Exception as exp:
7586 exp_msg = (
7587 "add_disk() : exception {} occurred while adding disk "
7588 "{} to vm {}".format(exp, fullpath, vm.config.name)
7589 )
7590 self.rollback_newvm(vapp_uuid, exp_msg)
7591 else:
7592 msg = "add_disk() : Can not add disk to VM with disk info {} ".format(
7593 disk_info
7594 )
7595 self.rollback_newvm(vapp_uuid, msg)
7596
7597 def get_vcenter_content(self):
7598 """
7599 Get the vsphere content object
7600 """
7601 try:
7602 vm_vcenter_info = self.get_vm_vcenter_info()
7603 except Exception as exp:
7604 self.logger.error(
7605 "Error occurred while getting vCenter infromationn"
7606 " for VM : {}".format(exp)
7607 )
7608
7609 raise vimconn.VimConnException(message=exp)
7610
7611 context = None
7612 if hasattr(ssl, "_create_unverified_context"):
7613 context = ssl._create_unverified_context()
7614
7615 vcenter_conect = SmartConnect(
7616 host=vm_vcenter_info["vm_vcenter_ip"],
7617 user=vm_vcenter_info["vm_vcenter_user"],
7618 pwd=vm_vcenter_info["vm_vcenter_password"],
7619 port=int(vm_vcenter_info["vm_vcenter_port"]),
7620 sslContext=context,
7621 )
7622 atexit.register(Disconnect, vcenter_conect)
7623 content = vcenter_conect.RetrieveContent()
7624
7625 return vcenter_conect, content
7626
7627 def get_vm_moref_id(self, vapp_uuid):
7628 """
7629 Get the moref_id of given VM
7630 """
7631 try:
7632 if vapp_uuid:
7633 vm_details = self.get_vapp_details_rest(
7634 vapp_uuid, need_admin_access=True
7635 )
7636
7637 if vm_details and "vm_vcenter_info" in vm_details:
7638 vm_moref_id = vm_details["vm_vcenter_info"].get("vm_moref_id", None)
7639
7640 return vm_moref_id
7641 except Exception as exp:
7642 self.logger.error(
7643 "Error occurred while getting VM moref ID " " for VM : {}".format(exp)
7644 )
7645
7646 return None
7647
7648 def get_vapp_template_details(
7649 self, catalogs=None, image_id=None, template_name=None
7650 ):
7651 """
7652 Method to get vApp template details
7653 Args :
7654 catalogs - list of VDC catalogs
7655 image_id - Catalog ID to find
7656 template_name : template name in catalog
7657 Returns:
7658 parsed_respond : dict of vApp tempalte details
7659 """
7660 parsed_response = {}
7661
7662 vca = self.connect_as_admin()
7663 if not vca:
7664 raise vimconn.VimConnConnectionException("Failed to connect vCD")
7665
7666 try:
7667 org, _ = self.get_vdc_details()
7668 catalog = self.get_catalog_obj(image_id, catalogs)
7669 if catalog:
7670 items = org.get_catalog_item(catalog.get("name"), catalog.get("name"))
7671 catalog_items = [items.attrib]
7672
7673 if len(catalog_items) == 1:
7674 headers = {
7675 "Accept": "application/*+xml;version=" + API_VERSION,
7676 "x-vcloud-authorization": vca._session.headers[
7677 "x-vcloud-authorization"
7678 ],
7679 }
7680 response = self.perform_request(
7681 req_type="GET",
7682 url=catalog_items[0].get("href"),
7683 headers=headers,
7684 )
7685 catalogItem = XmlElementTree.fromstring(response.text)
7686 entity = [
7687 child
7688 for child in catalogItem
7689 if child.get("type")
7690 == "application/vnd.vmware.vcloud.vAppTemplate+xml"
7691 ][0]
7692 vapp_tempalte_href = entity.get("href")
7693 # get vapp details and parse moref id
7694
7695 namespaces = {
7696 "vssd": "http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData",
7697 "ovf": "http://schemas.dmtf.org/ovf/envelope/1",
7698 "vmw": "http://www.vmware.com/schema/ovf",
7699 "vm": "http://www.vmware.com/vcloud/v1.5",
7700 "rasd": "http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData",
7701 "vmext": "http://www.vmware.com/vcloud/extension/v1.5",
7702 "xmlns": "http://www.vmware.com/vcloud/v1.5",
7703 }
7704
7705 if vca._session:
7706 response = self.perform_request(
7707 req_type="GET", url=vapp_tempalte_href, headers=headers
7708 )
7709
7710 if response.status_code != requests.codes.ok:
7711 self.logger.debug(
7712 "REST API call {} failed. Return status code {}".format(
7713 vapp_tempalte_href, response.status_code
7714 )
7715 )
7716 else:
7717 xmlroot_respond = XmlElementTree.fromstring(response.text)
7718 children_section = xmlroot_respond.find(
7719 "vm:Children/", namespaces
7720 )
7721
7722 if children_section is not None:
7723 vCloud_extension_section = children_section.find(
7724 "xmlns:VCloudExtension", namespaces
7725 )
7726
7727 if vCloud_extension_section is not None:
7728 vm_vcenter_info = {}
7729 vim_info = vCloud_extension_section.find(
7730 "vmext:VmVimInfo", namespaces
7731 )
7732 vmext = vim_info.find(
7733 "vmext:VmVimObjectRef", namespaces
7734 )
7735
7736 if vmext is not None:
7737 vm_vcenter_info["vm_moref_id"] = vmext.find(
7738 "vmext:MoRef", namespaces
7739 ).text
7740
7741 parsed_response["vm_vcenter_info"] = vm_vcenter_info
7742 except Exception as exp:
7743 self.logger.info(
7744 "Error occurred calling rest api for getting vApp details {}".format(
7745 exp
7746 )
7747 )
7748
7749 return parsed_response
7750
7751 def rollback_newvm(self, vapp_uuid, msg, exp_type="Genric"):
7752 """
7753 Method to delete vApp
7754 Args :
7755 vapp_uuid - vApp UUID
7756 msg - Error message to be logged
7757 exp_type : Exception type
7758 Returns:
7759 None
7760 """
7761 if vapp_uuid:
7762 self.delete_vminstance(vapp_uuid)
7763 else:
7764 msg = "No vApp ID"
7765
7766 self.logger.error(msg)
7767
7768 if exp_type == "Genric":
7769 raise vimconn.VimConnException(msg)
7770 elif exp_type == "NotFound":
7771 raise vimconn.VimConnNotFoundException(message=msg)
7772
7773 def add_sriov(self, vapp_uuid, sriov_nets, vmname_andid):
7774 """
7775 Method to attach SRIOV adapters to VM
7776
7777 Args:
7778 vapp_uuid - uuid of vApp/VM
7779 sriov_nets - SRIOV devices infromation as specified in VNFD (flavor)
7780 vmname_andid - vmname
7781
7782 Returns:
7783 The status of add SRIOV adapter task , vm object and
7784 vcenter_conect object
7785 """
7786 vm_obj = None
7787 vcenter_conect, content = self.get_vcenter_content()
7788 vm_moref_id = self.get_vm_moref_id(vapp_uuid)
7789
7790 if vm_moref_id:
7791 try:
7792 no_of_sriov_devices = len(sriov_nets)
7793 if no_of_sriov_devices > 0:
7794 # Get VM and its host
7795 host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
7796 self.logger.info(
7797 "VM {} is currently on host {}".format(vm_obj, host_obj)
7798 )
7799
7800 if host_obj and vm_obj:
7801 # get SRIOV devies from host on which vapp is currently installed
7802 avilable_sriov_devices = self.get_sriov_devices(
7803 host_obj,
7804 no_of_sriov_devices,
7805 )
7806
7807 if len(avilable_sriov_devices) == 0:
7808 # find other hosts with active pci devices
7809 (
7810 new_host_obj,
7811 avilable_sriov_devices,
7812 ) = self.get_host_and_sriov_devices(
7813 content,
7814 no_of_sriov_devices,
7815 )
7816
7817 if (
7818 new_host_obj is not None
7819 and len(avilable_sriov_devices) > 0
7820 ):
7821 # Migrate vm to the host where SRIOV devices are available
7822 self.logger.info(
7823 "Relocate VM {} on new host {}".format(
7824 vm_obj, new_host_obj
7825 )
7826 )
7827 task = self.relocate_vm(new_host_obj, vm_obj)
7828
7829 if task is not None:
7830 result = self.wait_for_vcenter_task(
7831 task, vcenter_conect
7832 )
7833 self.logger.info(
7834 "Migrate VM status: {}".format(result)
7835 )
7836 host_obj = new_host_obj
7837 else:
7838 self.logger.info(
7839 "Fail to migrate VM : {}".format(result)
7840 )
7841
7842 raise vimconn.VimConnNotFoundException(
7843 "Fail to migrate VM : {} to host {}".format(
7844 vmname_andid, new_host_obj
7845 )
7846 )
7847
7848 if (
7849 host_obj is not None
7850 and avilable_sriov_devices is not None
7851 and len(avilable_sriov_devices) > 0
7852 ):
7853 # Add SRIOV devices one by one
7854 for sriov_net in sriov_nets:
7855 network_name = sriov_net.get("net_id")
7856 self.create_dvPort_group(network_name)
7857
7858 if (
7859 sriov_net.get("type") == "VF"
7860 or sriov_net.get("type") == "SR-IOV"
7861 ):
7862 # add vlan ID ,Modify portgroup for vlan ID
7863 self.configure_vlanID(
7864 content, vcenter_conect, network_name
7865 )
7866
7867 task = self.add_sriov_to_vm(
7868 content,
7869 vm_obj,
7870 host_obj,
7871 network_name,
7872 avilable_sriov_devices[0],
7873 )
7874
7875 if task:
7876 status = self.wait_for_vcenter_task(
7877 task, vcenter_conect
7878 )
7879
7880 if status:
7881 self.logger.info(
7882 "Added SRIOV {} to VM {}".format(
7883 no_of_sriov_devices, str(vm_obj)
7884 )
7885 )
7886 else:
7887 self.logger.error(
7888 "Fail to add SRIOV {} to VM {}".format(
7889 no_of_sriov_devices, str(vm_obj)
7890 )
7891 )
7892
7893 raise vimconn.VimConnUnexpectedResponse(
7894 "Fail to add SRIOV adapter in VM {}".format(
7895 str(vm_obj)
7896 )
7897 )
7898
7899 return True, vm_obj, vcenter_conect
7900 else:
7901 self.logger.error(
7902 "Currently there is no host with"
7903 " {} number of avaialble SRIOV "
7904 "VFs required for VM {}".format(
7905 no_of_sriov_devices, vmname_andid
7906 )
7907 )
7908
7909 raise vimconn.VimConnNotFoundException(
7910 "Currently there is no host with {} "
7911 "number of avaialble SRIOV devices required for VM {}".format(
7912 no_of_sriov_devices, vmname_andid
7913 )
7914 )
7915 else:
7916 self.logger.debug(
7917 "No infromation about SRIOV devices {} ", sriov_nets
7918 )
7919 except vmodl.MethodFault as error:
7920 self.logger.error("Error occurred while adding SRIOV {} ", error)
7921
7922 return None, vm_obj, vcenter_conect
7923
7924 def get_sriov_devices(self, host, no_of_vfs):
7925 """
7926 Method to get the details of SRIOV devices on given host
7927 Args:
7928 host - vSphere host object
7929 no_of_vfs - number of VFs needed on host
7930
7931 Returns:
7932 array of SRIOV devices
7933 """
7934 sriovInfo = []
7935
7936 if host:
7937 for device in host.config.pciPassthruInfo:
7938 if isinstance(device, vim.host.SriovInfo) and device.sriovActive:
7939 if device.numVirtualFunction >= no_of_vfs:
7940 sriovInfo.append(device)
7941 break
7942
7943 return sriovInfo
7944
7945 def get_host_and_sriov_devices(self, content, no_of_vfs):
7946 """
7947 Method to get the details of SRIOV devices infromation on all hosts
7948
7949 Args:
7950 content - vSphere host object
7951 no_of_vfs - number of pci VFs needed on host
7952
7953 Returns:
7954 array of SRIOV devices and host object
7955 """
7956 host_obj = None
7957 sriov_device_objs = None
7958
7959 try:
7960 if content:
7961 container = content.viewManager.CreateContainerView(
7962 content.rootFolder, [vim.HostSystem], True
7963 )
7964
7965 for host in container.view:
7966 devices = self.get_sriov_devices(host, no_of_vfs)
7967
7968 if devices:
7969 host_obj = host
7970 sriov_device_objs = devices
7971 break
7972 except Exception as exp:
7973 self.logger.error(
7974 "Error {} occurred while finding SRIOV devices on host: {}".format(
7975 exp, host_obj
7976 )
7977 )
7978
7979 return host_obj, sriov_device_objs
7980
7981 def add_sriov_to_vm(self, content, vm_obj, host_obj, network_name, sriov_device):
7982 """
7983 Method to add SRIOV adapter to vm
7984
7985 Args:
7986 host_obj - vSphere host object
7987 vm_obj - vSphere vm object
7988 content - vCenter content object
7989 network_name - name of distributed virtaul portgroup
7990 sriov_device - SRIOV device info
7991
7992 Returns:
7993 task object
7994 """
7995 devices = []
7996 vnic_label = "sriov nic"
7997
7998 try:
7999 dvs_portgr = self.get_dvport_group(network_name)
8000 network_name = dvs_portgr.name
8001 nic = vim.vm.device.VirtualDeviceSpec()
8002 # VM device
8003 nic.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
8004 nic.device = vim.vm.device.VirtualSriovEthernetCard()
8005 nic.device.addressType = "assigned"
8006 # nic.device.key = 13016
8007 nic.device.deviceInfo = vim.Description()
8008 nic.device.deviceInfo.label = vnic_label
8009 nic.device.deviceInfo.summary = network_name
8010 nic.device.backing = vim.vm.device.VirtualEthernetCard.NetworkBackingInfo()
8011
8012 nic.device.backing.network = self.get_obj(
8013 content, [vim.Network], network_name
8014 )
8015 nic.device.backing.deviceName = network_name
8016 nic.device.backing.useAutoDetect = False
8017 nic.device.connectable = vim.vm.device.VirtualDevice.ConnectInfo()
8018 nic.device.connectable.startConnected = True
8019 nic.device.connectable.allowGuestControl = True
8020
8021 nic.device.sriovBacking = (
8022 vim.vm.device.VirtualSriovEthernetCard.SriovBackingInfo()
8023 )
8024 nic.device.sriovBacking.physicalFunctionBacking = (
8025 vim.vm.device.VirtualPCIPassthrough.DeviceBackingInfo()
8026 )
8027 nic.device.sriovBacking.physicalFunctionBacking.id = sriov_device.id
8028
8029 devices.append(nic)
8030 vmconf = vim.vm.ConfigSpec(deviceChange=devices)
8031 task = vm_obj.ReconfigVM_Task(vmconf)
8032
8033 return task
8034 except Exception as exp:
8035 self.logger.error(
8036 "Error {} occurred while adding SRIOV adapter in VM: {}".format(
8037 exp, vm_obj
8038 )
8039 )
8040
8041 return None
8042
8043 def create_dvPort_group(self, network_name):
8044 """
8045 Method to create disributed virtual portgroup
8046
8047 Args:
8048 network_name - name of network/portgroup
8049
8050 Returns:
8051 portgroup key
8052 """
8053 try:
8054 new_network_name = [network_name, "-", str(uuid.uuid4())]
8055 network_name = "".join(new_network_name)
8056 vcenter_conect, content = self.get_vcenter_content()
8057
8058 dv_switch = self.get_obj(
8059 content, [vim.DistributedVirtualSwitch], self.dvs_name
8060 )
8061
8062 if dv_switch:
8063 dv_pg_spec = vim.dvs.DistributedVirtualPortgroup.ConfigSpec()
8064 dv_pg_spec.name = network_name
8065
8066 dv_pg_spec.type = (
8067 vim.dvs.DistributedVirtualPortgroup.PortgroupType.earlyBinding
8068 )
8069 dv_pg_spec.defaultPortConfig = (
8070 vim.dvs.VmwareDistributedVirtualSwitch.VmwarePortConfigPolicy()
8071 )
8072 dv_pg_spec.defaultPortConfig.securityPolicy = (
8073 vim.dvs.VmwareDistributedVirtualSwitch.SecurityPolicy()
8074 )
8075 dv_pg_spec.defaultPortConfig.securityPolicy.allowPromiscuous = (
8076 vim.BoolPolicy(value=False)
8077 )
8078 dv_pg_spec.defaultPortConfig.securityPolicy.forgedTransmits = (
8079 vim.BoolPolicy(value=False)
8080 )
8081 dv_pg_spec.defaultPortConfig.securityPolicy.macChanges = vim.BoolPolicy(
8082 value=False
8083 )
8084
8085 task = dv_switch.AddDVPortgroup_Task([dv_pg_spec])
8086 self.wait_for_vcenter_task(task, vcenter_conect)
8087
8088 dvPort_group = self.get_obj(
8089 content, [vim.dvs.DistributedVirtualPortgroup], network_name
8090 )
8091
8092 if dvPort_group:
8093 self.logger.info(
8094 "Created disributed virtaul port group: {}".format(dvPort_group)
8095 )
8096 return dvPort_group.key
8097 else:
8098 self.logger.debug(
8099 "No disributed virtual switch found with name {}".format(
8100 network_name
8101 )
8102 )
8103
8104 except Exception as exp:
8105 self.logger.error(
8106 "Error occurred while creating disributed virtaul port group {}"
8107 " : {}".format(network_name, exp)
8108 )
8109
8110 return None
8111
8112 def reconfig_portgroup(self, content, dvPort_group_name, config_info={}):
8113 """
8114 Method to reconfigure disributed virtual portgroup
8115
8116 Args:
8117 dvPort_group_name - name of disributed virtual portgroup
8118 content - vCenter content object
8119 config_info - disributed virtual portgroup configuration
8120
8121 Returns:
8122 task object
8123 """
8124 try:
8125 dvPort_group = self.get_dvport_group(dvPort_group_name)
8126
8127 if dvPort_group:
8128 dv_pg_spec = vim.dvs.DistributedVirtualPortgroup.ConfigSpec()
8129 dv_pg_spec.configVersion = dvPort_group.config.configVersion
8130 dv_pg_spec.defaultPortConfig = (
8131 vim.dvs.VmwareDistributedVirtualSwitch.VmwarePortConfigPolicy()
8132 )
8133
8134 if "vlanID" in config_info:
8135 dv_pg_spec.defaultPortConfig.vlan = (
8136 vim.dvs.VmwareDistributedVirtualSwitch.VlanIdSpec()
8137 )
8138 dv_pg_spec.defaultPortConfig.vlan.vlanId = config_info.get("vlanID")
8139
8140 task = dvPort_group.ReconfigureDVPortgroup_Task(spec=dv_pg_spec)
8141
8142 return task
8143 else:
8144 return None
8145 except Exception as exp:
8146 self.logger.error(
8147 "Error occurred while reconfiguraing disributed virtaul port group {}"
8148 " : {}".format(dvPort_group_name, exp)
8149 )
8150
8151 return None
8152
8153 def destroy_dvport_group(self, dvPort_group_name):
8154 """
8155 Method to destroy disributed virtual portgroup
8156
8157 Args:
8158 network_name - name of network/portgroup
8159
8160 Returns:
8161 True if portgroup successfully got deleted else false
8162 """
8163 vcenter_conect, _ = self.get_vcenter_content()
8164
8165 try:
8166 status = None
8167 dvPort_group = self.get_dvport_group(dvPort_group_name)
8168
8169 if dvPort_group:
8170 task = dvPort_group.Destroy_Task()
8171 status = self.wait_for_vcenter_task(task, vcenter_conect)
8172
8173 return status
8174 except vmodl.MethodFault as exp:
8175 self.logger.error(
8176 "Caught vmodl fault {} while deleting disributed virtaul port group {}".format(
8177 exp, dvPort_group_name
8178 )
8179 )
8180
8181 return None
8182
8183 def get_dvport_group(self, dvPort_group_name):
8184 """
8185 Method to get disributed virtual portgroup
8186
8187 Args:
8188 network_name - name of network/portgroup
8189
8190 Returns:
8191 portgroup object
8192 """
8193 _, content = self.get_vcenter_content()
8194 dvPort_group = None
8195
8196 try:
8197 container = content.viewManager.CreateContainerView(
8198 content.rootFolder, [vim.dvs.DistributedVirtualPortgroup], True
8199 )
8200
8201 for item in container.view:
8202 if item.key == dvPort_group_name:
8203 dvPort_group = item
8204 break
8205
8206 return dvPort_group
8207 except vmodl.MethodFault as exp:
8208 self.logger.error(
8209 "Caught vmodl fault {} for disributed virtual port group {}".format(
8210 exp, dvPort_group_name
8211 )
8212 )
8213
8214 return None
8215
8216 def get_vlanID_from_dvs_portgr(self, dvPort_group_name):
8217 """
8218 Method to get disributed virtual portgroup vlanID
8219
8220 Args:
8221 network_name - name of network/portgroup
8222
8223 Returns:
8224 vlan ID
8225 """
8226 vlanId = None
8227
8228 try:
8229 dvPort_group = self.get_dvport_group(dvPort_group_name)
8230
8231 if dvPort_group:
8232 vlanId = dvPort_group.config.defaultPortConfig.vlan.vlanId
8233 except vmodl.MethodFault as exp:
8234 self.logger.error(
8235 "Caught vmodl fault {} for disributed virtaul port group {}".format(
8236 exp, dvPort_group_name
8237 )
8238 )
8239
8240 return vlanId
8241
8242 def configure_vlanID(self, content, vcenter_conect, dvPort_group_name):
8243 """
8244 Method to configure vlanID in disributed virtual portgroup vlanID
8245
8246 Args:
8247 network_name - name of network/portgroup
8248
8249 Returns:
8250 None
8251 """
8252 vlanID = self.get_vlanID_from_dvs_portgr(dvPort_group_name)
8253
8254 if vlanID == 0:
8255 # configure vlanID
8256 vlanID = self.genrate_vlanID(dvPort_group_name)
8257 config = {"vlanID": vlanID}
8258 task = self.reconfig_portgroup(
8259 content, dvPort_group_name, config_info=config
8260 )
8261
8262 if task:
8263 status = self.wait_for_vcenter_task(task, vcenter_conect)
8264
8265 if status:
8266 self.logger.info(
8267 "Reconfigured Port group {} for vlan ID {}".format(
8268 dvPort_group_name, vlanID
8269 )
8270 )
8271 else:
8272 self.logger.error(
8273 "Fail reconfigure portgroup {} for vlanID{}".format(
8274 dvPort_group_name, vlanID
8275 )
8276 )
8277
8278 def genrate_vlanID(self, network_name):
8279 """
8280 Method to get unused vlanID
8281 Args:
8282 network_name - name of network/portgroup
8283 Returns:
8284 vlanID
8285 """
8286 vlan_id = None
8287 used_ids = []
8288
8289 if self.config.get("vlanID_range") is None:
8290 raise vimconn.VimConnConflictException(
8291 "You must provide a 'vlanID_range' "
8292 "at config value before creating sriov network with vlan tag"
8293 )
8294
8295 if "used_vlanIDs" not in self.persistent_info:
8296 self.persistent_info["used_vlanIDs"] = {}
8297 else:
8298 used_ids = list(self.persistent_info["used_vlanIDs"].values())
8299
8300 for vlanID_range in self.config.get("vlanID_range"):
8301 start_vlanid, end_vlanid = vlanID_range.split("-")
8302
8303 if start_vlanid > end_vlanid:
8304 raise vimconn.VimConnConflictException(
8305 "Invalid vlan ID range {}".format(vlanID_range)
8306 )
8307
8308 for vid in range(int(start_vlanid), int(end_vlanid) + 1):
8309 if vid not in used_ids:
8310 vlan_id = vid
8311 self.persistent_info["used_vlanIDs"][network_name] = vlan_id
8312 return vlan_id
8313
8314 if vlan_id is None:
8315 raise vimconn.VimConnConflictException("All Vlan IDs are in use")
8316
8317 def get_obj(self, content, vimtype, name):
8318 """
8319 Get the vsphere object associated with a given text name
8320 """
8321 obj = None
8322 container = content.viewManager.CreateContainerView(
8323 content.rootFolder, vimtype, True
8324 )
8325
8326 for item in container.view:
8327 if item.name == name:
8328 obj = item
8329 break
8330
8331 return obj
8332
8333 def insert_media_to_vm(self, vapp, image_id):
8334 """
8335 Method to insert media CD-ROM (ISO image) from catalog to vm.
8336 vapp - vapp object to get vm id
8337 Image_id - image id for cdrom to be inerted to vm
8338 """
8339 # create connection object
8340 vca = self.connect()
8341 try:
8342 # fetching catalog details
8343 rest_url = "{}/api/catalog/{}".format(self.url, image_id)
8344
8345 if vca._session:
8346 headers = {
8347 "Accept": "application/*+xml;version=" + API_VERSION,
8348 "x-vcloud-authorization": vca._session.headers[
8349 "x-vcloud-authorization"
8350 ],
8351 }
8352 response = self.perform_request(
8353 req_type="GET", url=rest_url, headers=headers
8354 )
8355
8356 if response.status_code != 200:
8357 self.logger.error(
8358 "REST call {} failed reason : {}"
8359 "status code : {}".format(
8360 rest_url, response.text, response.status_code
8361 )
8362 )
8363
8364 raise vimconn.VimConnException(
8365 "insert_media_to_vm(): Failed to get " "catalog details"
8366 )
8367
8368 # searching iso name and id
8369 iso_name, media_id = self.get_media_details(vca, response.text)
8370
8371 if iso_name and media_id:
8372 data = """<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
8373 <ns6:MediaInsertOrEjectParams
8374 xmlns="http://www.vmware.com/vcloud/versions" xmlns:ns2="http://schemas.dmtf.org/ovf/envelope/1"
8375 xmlns:ns3="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData"
8376 xmlns:ns4="http://schemas.dmtf.org/wbem/wscim/1/common"
8377 xmlns:ns5="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData"
8378 xmlns:ns6="http://www.vmware.com/vcloud/v1.5"
8379 xmlns:ns7="http://www.vmware.com/schema/ovf"
8380 xmlns:ns8="http://schemas.dmtf.org/ovf/environment/1"
8381 xmlns:ns9="http://www.vmware.com/vcloud/extension/v1.5">
8382 <ns6:Media
8383 type="application/vnd.vmware.vcloud.media+xml"
8384 name="{}"
8385 id="urn:vcloud:media:{}"
8386 href="https://{}/api/media/{}"/>
8387 </ns6:MediaInsertOrEjectParams>""".format(
8388 iso_name, media_id, self.url, media_id
8389 )
8390
8391 for vms in vapp.get_all_vms():
8392 vm_id = vms.get("id").split(":")[-1]
8393
8394 headers[
8395 "Content-Type"
8396 ] = "application/vnd.vmware.vcloud.mediaInsertOrEjectParams+xml"
8397 rest_url = "{}/api/vApp/vm-{}/media/action/insertMedia".format(
8398 self.url, vm_id
8399 )
8400
8401 response = self.perform_request(
8402 req_type="POST", url=rest_url, data=data, headers=headers
8403 )
8404
8405 if response.status_code != 202:
8406 error_msg = (
8407 "insert_media_to_vm() : Failed to insert CD-ROM to vm. Reason {}. "
8408 "Status code {}".format(response.text, response.status_code)
8409 )
8410 self.logger.error(error_msg)
8411
8412 raise vimconn.VimConnException(error_msg)
8413 else:
8414 task = self.get_task_from_response(response.text)
8415 result = self.client.get_task_monitor().wait_for_success(
8416 task=task
8417 )
8418
8419 if result.get("status") == "success":
8420 self.logger.info(
8421 "insert_media_to_vm(): Sucessfully inserted media ISO"
8422 " image to vm {}".format(vm_id)
8423 )
8424 except Exception as exp:
8425 self.logger.error(
8426 "insert_media_to_vm() : exception occurred "
8427 "while inserting media CD-ROM"
8428 )
8429
8430 raise vimconn.VimConnException(message=exp)
8431
8432 def get_media_details(self, vca, content):
8433 """
8434 Method to get catalog item details
8435 vca - connection object
8436 content - Catalog details
8437 Return - Media name, media id
8438 """
8439 cataloghref_list = []
8440 try:
8441 if content:
8442 vm_list_xmlroot = XmlElementTree.fromstring(content)
8443
8444 for child in vm_list_xmlroot.iter():
8445 if "CatalogItem" in child.tag:
8446 cataloghref_list.append(child.attrib.get("href"))
8447
8448 if cataloghref_list is not None:
8449 for href in cataloghref_list:
8450 if href:
8451 headers = {
8452 "Accept": "application/*+xml;version=" + API_VERSION,
8453 "x-vcloud-authorization": vca._session.headers[
8454 "x-vcloud-authorization"
8455 ],
8456 }
8457 response = self.perform_request(
8458 req_type="GET", url=href, headers=headers
8459 )
8460
8461 if response.status_code != 200:
8462 self.logger.error(
8463 "REST call {} failed reason : {}"
8464 "status code : {}".format(
8465 href, response.text, response.status_code
8466 )
8467 )
8468
8469 raise vimconn.VimConnException(
8470 "get_media_details : Failed to get "
8471 "catalogitem details"
8472 )
8473
8474 list_xmlroot = XmlElementTree.fromstring(response.text)
8475
8476 for child in list_xmlroot.iter():
8477 if "Entity" in child.tag:
8478 if "media" in child.attrib.get("href"):
8479 name = child.attrib.get("name")
8480 media_id = (
8481 child.attrib.get("href").split("/").pop()
8482 )
8483
8484 return name, media_id
8485 else:
8486 self.logger.debug("Media name and id not found")
8487
8488 return False, False
8489 except Exception as exp:
8490 self.logger.error(
8491 "get_media_details : exception occurred " "getting media details"
8492 )
8493
8494 raise vimconn.VimConnException(message=exp)
8495
8496 def retry_rest(self, method, url, add_headers=None, data=None):
8497 """Method to get Token & retry respective REST request
8498 Args:
8499 api - REST API - Can be one of 'GET' or 'PUT' or 'POST'
8500 url - request url to be used
8501 add_headers - Additional headers (optional)
8502 data - Request payload data to be passed in request
8503 Returns:
8504 response - Response of request
8505 """
8506 response = None
8507
8508 # Get token
8509 self.get_token()
8510
8511 if self.client._session:
8512 headers = {
8513 "Accept": "application/*+xml;version=" + API_VERSION,
8514 "x-vcloud-authorization": self.client._session.headers[
8515 "x-vcloud-authorization"
8516 ],
8517 }
8518
8519 if add_headers:
8520 headers.update(add_headers)
8521
8522 if method == "GET":
8523 response = self.perform_request(req_type="GET", url=url, headers=headers)
8524 elif method == "PUT":
8525 response = self.perform_request(
8526 req_type="PUT", url=url, headers=headers, data=data
8527 )
8528 elif method == "POST":
8529 response = self.perform_request(
8530 req_type="POST", url=url, headers=headers, data=data
8531 )
8532 elif method == "DELETE":
8533 response = self.perform_request(req_type="DELETE", url=url, headers=headers)
8534
8535 return response
8536
8537 def get_token(self):
8538 """Generate a new token if expired
8539
8540 Returns:
8541 The return client object that letter can be used to connect to vCloud director as admin for VDC
8542 """
8543 self.client = self.connect()
8544
8545 def get_vdc_details(self):
8546 """Get VDC details using pyVcloud Lib
8547
8548 Returns org and vdc object
8549 """
8550 vdc = None
8551
8552 try:
8553 org = Org(self.client, resource=self.client.get_org())
8554 vdc = org.get_vdc(self.tenant_name)
8555 except Exception as e:
8556 # pyvcloud not giving a specific exception, Refresh nevertheless
8557 self.logger.debug("Received exception {}, refreshing token ".format(str(e)))
8558
8559 # Retry once, if failed by refreshing token
8560 if vdc is None:
8561 self.get_token()
8562 org = Org(self.client, resource=self.client.get_org())
8563 vdc = org.get_vdc(self.tenant_name)
8564
8565 return org, vdc
8566
8567 def perform_request(self, req_type, url, headers=None, data=None):
8568 """Perform the POST/PUT/GET/DELETE request."""
8569 # Log REST request details
8570 self.log_request(req_type, url=url, headers=headers, data=data)
8571 # perform request and return its result
8572
8573 if req_type == "GET":
8574 response = requests.get(url=url, headers=headers, verify=False)
8575 elif req_type == "PUT":
8576 response = requests.put(url=url, headers=headers, data=data, verify=False)
8577 elif req_type == "POST":
8578 response = requests.post(url=url, headers=headers, data=data, verify=False)
8579 elif req_type == "DELETE":
8580 response = requests.delete(url=url, headers=headers, verify=False)
8581
8582 # Log the REST response
8583 self.log_response(response)
8584
8585 return response
8586
8587 def log_request(self, req_type, url=None, headers=None, data=None):
8588 """Logs REST request details"""
8589
8590 if req_type is not None:
8591 self.logger.debug("Request type: {}".format(req_type))
8592
8593 if url is not None:
8594 self.logger.debug("Request url: {}".format(url))
8595
8596 if headers is not None:
8597 for header in headers:
8598 self.logger.debug(
8599 "Request header: {}: {}".format(header, headers[header])
8600 )
8601
8602 if data is not None:
8603 self.logger.debug("Request data: {}".format(data))
8604
8605 def log_response(self, response):
8606 """Logs REST response details"""
8607
8608 self.logger.debug("Response status code: {} ".format(response.status_code))
8609
8610 def get_task_from_response(self, content):
8611 """
8612 content - API response.text(response.text)
8613 return task object
8614 """
8615 xmlroot = XmlElementTree.fromstring(content)
8616
8617 if xmlroot.tag.split("}")[1] == "Task":
8618 return xmlroot
8619 else:
8620 for ele in xmlroot:
8621 if ele.tag.split("}")[1] == "Tasks":
8622 task = ele[0]
8623 break
8624
8625 return task
8626
8627 def power_on_vapp(self, vapp_id, vapp_name):
8628 """
8629 vapp_id - vApp uuid
8630 vapp_name - vAapp name
8631 return - Task object
8632 """
8633 headers = {
8634 "Accept": "application/*+xml;version=" + API_VERSION,
8635 "x-vcloud-authorization": self.client._session.headers[
8636 "x-vcloud-authorization"
8637 ],
8638 }
8639
8640 poweron_href = "{}/api/vApp/vapp-{}/power/action/powerOn".format(
8641 self.url, vapp_id
8642 )
8643 response = self.perform_request(
8644 req_type="POST", url=poweron_href, headers=headers
8645 )
8646
8647 if response.status_code != 202:
8648 self.logger.error(
8649 "REST call {} failed reason : {}"
8650 "status code : {} ".format(
8651 poweron_href, response.text, response.status_code
8652 )
8653 )
8654
8655 raise vimconn.VimConnException(
8656 "power_on_vapp() : Failed to power on " "vApp {}".format(vapp_name)
8657 )
8658 else:
8659 poweron_task = self.get_task_from_response(response.text)
8660
8661 return poweron_task