Fix black issues
[osm/RO.git] / RO-VIM-vmware / osm_rovim_vmware / vimconn_vmware.py
1 # -*- coding: utf-8 -*-
2
3 # #
4 # Copyright 2016-2019 VMware Inc.
5 # This file is part of ETSI OSM
6 # All Rights Reserved.
7 #
8 # Licensed under the Apache License, Version 2.0 (the "License"); you may
9 # not use this file except in compliance with the License. You may obtain
10 # a copy of the License at
11 #
12 # http://www.apache.org/licenses/LICENSE-2.0
13 #
14 # Unless required by applicable law or agreed to in writing, software
15 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
16 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
17 # License for the specific language governing permissions and limitations
18 # under the License.
19 #
20 # For those usages not covered by the Apache License, Version 2.0 please
21 # contact: osslegalrouting@vmware.com
22 # #
23
24 """
25 vimconn_vmware implements an abstract class in order to interact with VMware vCloud Director.
26 """
27
28 import atexit
29 import hashlib
30 import json
31 import logging
32 import os
33 import random
34 import re
35 import shutil
36 import socket
37 import ssl
38 import struct
39 import subprocess
40 import tempfile
41 import time
42 import traceback
43 import uuid
44 from xml.etree import ElementTree as XmlElementTree
45 from xml.sax.saxutils import escape
46
47 from lxml import etree as lxmlElementTree
48 import netaddr
49 from osm_ro_plugin import vimconn
50 from progressbar import Bar, ETA, FileTransferSpeed, Percentage, ProgressBar
51 from pyvcloud.vcd.client import BasicLoginCredentials, Client
52 from pyvcloud.vcd.org import Org
53 from pyvcloud.vcd.vapp import VApp
54 from pyvcloud.vcd.vdc import VDC
55 from pyVim.connect import Disconnect, SmartConnect
56 from pyVmomi import vim, vmodl # @UnresolvedImport
57 import requests
58 import yaml
59
60 # global variable for vcd connector type
61 STANDALONE = "standalone"
62
63 # key for flavor dicts
64 FLAVOR_RAM_KEY = "ram"
65 FLAVOR_VCPUS_KEY = "vcpus"
66 FLAVOR_DISK_KEY = "disk"
67 DEFAULT_IP_PROFILE = {"dhcp_count": 50, "dhcp_enabled": True, "ip_version": "IPv4"}
68 # global variable for wait time
69 INTERVAL_TIME = 5
70 MAX_WAIT_TIME = 1800
71
72 API_VERSION = "27.0"
73
74 # -1: "Could not be created",
75 # 0: "Unresolved",
76 # 1: "Resolved",
77 # 2: "Deployed",
78 # 3: "Suspended",
79 # 4: "Powered on",
80 # 5: "Waiting for user input",
81 # 6: "Unknown state",
82 # 7: "Unrecognized state",
83 # 8: "Powered off",
84 # 9: "Inconsistent state",
85 # 10: "Children do not all have the same status",
86 # 11: "Upload initiated, OVF descriptor pending",
87 # 12: "Upload initiated, copying contents",
88 # 13: "Upload initiated , disk contents pending",
89 # 14: "Upload has been quarantined",
90 # 15: "Upload quarantine period has expired"
91
92 # mapping vCD status to MANO
93 vcdStatusCode2manoFormat = {
94 4: "ACTIVE",
95 7: "PAUSED",
96 3: "SUSPENDED",
97 8: "INACTIVE",
98 12: "BUILD",
99 -1: "ERROR",
100 14: "DELETED",
101 }
102
103 # mapping network status to MANO format
104 netStatus2manoFormat = {
105 "ACTIVE": "ACTIVE",
106 "PAUSED": "PAUSED",
107 "INACTIVE": "INACTIVE",
108 "BUILD": "BUILD",
109 "ERROR": "ERROR",
110 "DELETED": "DELETED",
111 }
112
113
114 class vimconnector(vimconn.VimConnector):
115 # dict used to store flavor in memory
116 flavorlist = {}
117
118 def __init__(
119 self,
120 uuid=None,
121 name=None,
122 tenant_id=None,
123 tenant_name=None,
124 url=None,
125 url_admin=None,
126 user=None,
127 passwd=None,
128 log_level=None,
129 config={},
130 persistent_info={},
131 ):
132 """
133 Constructor create vmware connector to vCloud director.
134
135 By default construct doesn't validate connection state. So client can create object with None arguments.
136 If client specified username , password and host and VDC name. Connector initialize other missing attributes.
137
138 a) It initialize organization UUID
139 b) Initialize tenant_id/vdc ID. (This information derived from tenant name)
140
141 Args:
142 uuid - is organization uuid.
143 name - is organization name that must be presented in vCloud director.
144 tenant_id - is VDC uuid it must be presented in vCloud director
145 tenant_name - is VDC name.
146 url - is hostname or ip address of vCloud director
147 url_admin - same as above.
148 user - is user that administrator for organization. Caller must make sure that
149 username has right privileges.
150
151 password - is password for a user.
152
153 VMware connector also requires PVDC administrative privileges and separate account.
154 This variables must be passed via config argument dict contains keys
155
156 dict['admin_username']
157 dict['admin_password']
158 config - Provide NSX and vCenter information
159
160 Returns:
161 Nothing.
162 """
163
164 vimconn.VimConnector.__init__(
165 self,
166 uuid,
167 name,
168 tenant_id,
169 tenant_name,
170 url,
171 url_admin,
172 user,
173 passwd,
174 log_level,
175 config,
176 )
177
178 self.logger = logging.getLogger("ro.vim.vmware")
179 self.logger.setLevel(10)
180 self.persistent_info = persistent_info
181
182 self.name = name
183 self.id = uuid
184 self.url = url
185 self.url_admin = url_admin
186 self.tenant_id = tenant_id
187 self.tenant_name = tenant_name
188 self.user = user
189 self.passwd = passwd
190 self.config = config
191 self.admin_password = None
192 self.admin_user = None
193 self.org_name = ""
194 self.nsx_manager = None
195 self.nsx_user = None
196 self.nsx_password = None
197 self.availability_zone = None
198
199 # Disable warnings from self-signed certificates.
200 requests.packages.urllib3.disable_warnings()
201
202 if tenant_name is not None:
203 orgnameandtenant = tenant_name.split(":")
204
205 if len(orgnameandtenant) == 2:
206 self.tenant_name = orgnameandtenant[1]
207 self.org_name = orgnameandtenant[0]
208 else:
209 self.tenant_name = tenant_name
210
211 if "orgname" in config:
212 self.org_name = config["orgname"]
213
214 if log_level:
215 self.logger.setLevel(getattr(logging, log_level))
216
217 try:
218 self.admin_user = config["admin_username"]
219 self.admin_password = config["admin_password"]
220 except KeyError:
221 raise vimconn.VimConnException(
222 message="Error admin username or admin password is empty."
223 )
224
225 try:
226 self.nsx_manager = config["nsx_manager"]
227 self.nsx_user = config["nsx_user"]
228 self.nsx_password = config["nsx_password"]
229 except KeyError:
230 raise vimconn.VimConnException(
231 message="Error: nsx manager or nsx user or nsx password is empty in Config"
232 )
233
234 self.vcenter_ip = config.get("vcenter_ip", None)
235 self.vcenter_port = config.get("vcenter_port", None)
236 self.vcenter_user = config.get("vcenter_user", None)
237 self.vcenter_password = config.get("vcenter_password", None)
238
239 # Set availability zone for Affinity rules
240 self.availability_zone = self.set_availability_zones()
241
242 # ############# Stub code for SRIOV #################
243 # try:
244 # self.dvs_name = config['dv_switch_name']
245 # except KeyError:
246 # raise vimconn.VimConnException(message="Error:
247 # distributed virtual switch name is empty in Config")
248 #
249 # self.vlanID_range = config.get("vlanID_range", None)
250
251 self.org_uuid = None
252 self.client = None
253
254 if not url:
255 raise vimconn.VimConnException("url param can not be NoneType")
256
257 if not self.url_admin: # try to use normal url
258 self.url_admin = self.url
259
260 logging.debug(
261 "UUID: {} name: {} tenant_id: {} tenant name {}".format(
262 self.id, self.org_name, self.tenant_id, self.tenant_name
263 )
264 )
265 logging.debug(
266 "vcd url {} vcd username: {} vcd password: {}".format(
267 self.url, self.user, self.passwd
268 )
269 )
270 logging.debug(
271 "vcd admin username {} vcd admin passowrd {}".format(
272 self.admin_user, self.admin_password
273 )
274 )
275
276 # initialize organization
277 if self.user is not None and self.passwd is not None and self.url:
278 self.init_organization()
279
280 def __getitem__(self, index):
281 if index == "name":
282 return self.name
283
284 if index == "tenant_id":
285 return self.tenant_id
286
287 if index == "tenant_name":
288 return self.tenant_name
289 elif index == "id":
290 return self.id
291 elif index == "org_name":
292 return self.org_name
293 elif index == "org_uuid":
294 return self.org_uuid
295 elif index == "user":
296 return self.user
297 elif index == "passwd":
298 return self.passwd
299 elif index == "url":
300 return self.url
301 elif index == "url_admin":
302 return self.url_admin
303 elif index == "config":
304 return self.config
305 else:
306 raise KeyError("Invalid key '{}'".format(index))
307
308 def __setitem__(self, index, value):
309 if index == "name":
310 self.name = value
311
312 if index == "tenant_id":
313 self.tenant_id = value
314
315 if index == "tenant_name":
316 self.tenant_name = value
317 elif index == "id":
318 self.id = value
319 elif index == "org_name":
320 self.org_name = value
321 elif index == "org_uuid":
322 self.org_uuid = value
323 elif index == "user":
324 self.user = value
325 elif index == "passwd":
326 self.passwd = value
327 elif index == "url":
328 self.url = value
329 elif index == "url_admin":
330 self.url_admin = value
331 else:
332 raise KeyError("Invalid key '{}'".format(index))
333
334 def connect_as_admin(self):
335 """Method connect as pvdc admin user to vCloud director.
336 There are certain action that can be done only by provider vdc admin user.
337 Organization creation / provider network creation etc.
338
339 Returns:
340 The return client object that latter can be used to connect to vcloud director as admin for provider vdc
341 """
342 self.logger.debug("Logging into vCD {} as admin.".format(self.org_name))
343
344 try:
345 host = self.url
346 org = "System"
347 client_as_admin = Client(
348 host, verify_ssl_certs=False, api_version=API_VERSION
349 )
350 client_as_admin.set_credentials(
351 BasicLoginCredentials(self.admin_user, org, self.admin_password)
352 )
353 except Exception as e:
354 raise vimconn.VimConnException(
355 "Can't connect to vCloud director as: {} with exception {}".format(
356 self.admin_user, e
357 )
358 )
359
360 return client_as_admin
361
362 def connect(self):
363 """Method connect as normal user to vCloud director.
364
365 Returns:
366 The return client object that latter can be used to connect to vCloud director as admin for VDC
367 """
368 try:
369 self.logger.debug(
370 "Logging into vCD {} as {} to datacenter {}.".format(
371 self.org_name, self.user, self.org_name
372 )
373 )
374 host = self.url
375 client = Client(host, verify_ssl_certs=False, api_version=API_VERSION)
376 client.set_credentials(
377 BasicLoginCredentials(self.user, self.org_name, self.passwd)
378 )
379 except Exception as e:
380 raise vimconn.VimConnConnectionException(
381 "Can't connect to vCloud director org: "
382 "{} as user {} with exception: {}".format(self.org_name, self.user, e)
383 )
384
385 return client
386
387 def init_organization(self):
388 """Method initialize organization UUID and VDC parameters.
389
390 At bare minimum client must provide organization name that present in vCloud director and VDC.
391
392 The VDC - UUID ( tenant_id) will be initialized at the run time if client didn't call constructor.
393 The Org - UUID will be initialized at the run time if data center present in vCloud director.
394
395 Returns:
396 The return vca object that letter can be used to connect to vcloud direct as admin
397 """
398 client = self.connect()
399
400 if not client:
401 raise vimconn.VimConnConnectionException("Failed to connect vCD.")
402
403 self.client = client
404 try:
405 if self.org_uuid is None:
406 org_list = client.get_org_list()
407 for org in org_list.Org:
408 # we set org UUID at the init phase but we can do it only when we have valid credential.
409 if org.get("name") == self.org_name:
410 self.org_uuid = org.get("href").split("/")[-1]
411 self.logger.debug(
412 "Setting organization UUID {}".format(self.org_uuid)
413 )
414 break
415 else:
416 raise vimconn.VimConnException(
417 "Vcloud director organization {} not found".format(
418 self.org_name
419 )
420 )
421
422 # if all is well, request the org details
423 org_details_dict = self.get_org(org_uuid=self.org_uuid)
424
425 # there are two cases when we initialize the VDC ID or VDC name at run time
426 # tenant_name provided but no tenant id
427 if (
428 self.tenant_id is None
429 and self.tenant_name is not None
430 and "vdcs" in org_details_dict
431 ):
432 vdcs_dict = org_details_dict["vdcs"]
433 for vdc in vdcs_dict:
434 if vdcs_dict[vdc] == self.tenant_name:
435 self.tenant_id = vdc
436 self.logger.debug(
437 "Setting vdc uuid {} for organization UUID {}".format(
438 self.tenant_id, self.org_name
439 )
440 )
441 break
442 else:
443 raise vimconn.VimConnException(
444 "Tenant name indicated but not present in vcloud director."
445 )
446
447 # case two we have tenant_id but we don't have tenant name so we find and set it.
448 if (
449 self.tenant_id is not None
450 and self.tenant_name is None
451 and "vdcs" in org_details_dict
452 ):
453 vdcs_dict = org_details_dict["vdcs"]
454 for vdc in vdcs_dict:
455 if vdc == self.tenant_id:
456 self.tenant_name = vdcs_dict[vdc]
457 self.logger.debug(
458 "Setting vdc uuid {} for organization UUID {}".format(
459 self.tenant_id, self.org_name
460 )
461 )
462 break
463 else:
464 raise vimconn.VimConnException(
465 "Tenant id indicated but not present in vcloud director"
466 )
467
468 self.logger.debug("Setting organization uuid {}".format(self.org_uuid))
469 except Exception as e:
470 self.logger.debug(
471 "Failed initialize organization UUID for org {}: {}".format(
472 self.org_name, e
473 ),
474 )
475 self.logger.debug(traceback.format_exc())
476 self.org_uuid = None
477
478 def new_tenant(self, tenant_name=None, tenant_description=None):
479 """Method adds a new tenant to VIM with this name.
480 This action requires permission to create a VDC in vCloud Director.
481
482 Args:
483 tenant_name is the name of the tenant to be created.
484 tenant_description not used for this call
485
486 Return:
487 returns the tenant identifier in UUID format.
488 If the action fails the method raises vimconn.VimConnException
489 """
490 vdc_task = self.create_vdc(vdc_name=tenant_name)
491 if vdc_task is not None:
492 vdc_uuid, _ = vdc_task.popitem()
493 self.logger.info(
494 "Created new vdc {} and uuid: {}".format(tenant_name, vdc_uuid)
495 )
496
497 return vdc_uuid
498 else:
499 raise vimconn.VimConnException(
500 "Failed create tenant {}".format(tenant_name)
501 )
502
503 def delete_tenant(self, tenant_id=None):
504 """Delete a tenant from VIM
505 Args:
506 tenant_id is the id of the tenant to be deleted.
507
508 Return:
509 returns the tenant identifier in UUID format.
510 If the action fails the method raises an exception
511 """
512 vca = self.connect_as_admin()
513 if not vca:
514 raise vimconn.VimConnConnectionException("Failed to connect vCD")
515
516 if tenant_id is not None:
517 if vca._session:
518 # Get OrgVDC
519 url_list = [self.url, "/api/vdc/", tenant_id]
520 orgvdc_herf = "".join(url_list)
521
522 headers = {
523 "Accept": "application/*+xml;version=" + API_VERSION,
524 "x-vcloud-authorization": vca._session.headers[
525 "x-vcloud-authorization"
526 ],
527 }
528 response = self.perform_request(
529 req_type="GET", url=orgvdc_herf, headers=headers
530 )
531
532 if response.status_code != requests.codes.ok:
533 self.logger.debug(
534 "delete_tenant():GET REST API call {} failed. "
535 "Return status code {}".format(
536 orgvdc_herf, response.status_code
537 )
538 )
539
540 raise vimconn.VimConnNotFoundException(
541 "Fail to get tenant {}".format(tenant_id)
542 )
543
544 lxmlroot_respond = lxmlElementTree.fromstring(response.content)
545 namespaces = {
546 prefix: uri
547 for prefix, uri in lxmlroot_respond.nsmap.items()
548 if prefix
549 }
550 namespaces["xmlns"] = "http://www.vmware.com/vcloud/v1.5"
551 vdc_remove_href = lxmlroot_respond.find(
552 "xmlns:Link[@rel='remove']", namespaces
553 ).attrib["href"]
554 vdc_remove_href = vdc_remove_href + "?recursive=true&force=true"
555
556 response = self.perform_request(
557 req_type="DELETE", url=vdc_remove_href, headers=headers
558 )
559
560 if response.status_code == 202:
561 time.sleep(5)
562
563 return tenant_id
564 else:
565 self.logger.debug(
566 "delete_tenant(): DELETE REST API call {} failed. "
567 "Return status code {}".format(
568 vdc_remove_href, response.status_code
569 )
570 )
571
572 raise vimconn.VimConnException(
573 "Fail to delete tenant with ID {}".format(tenant_id)
574 )
575 else:
576 self.logger.debug(
577 "delete_tenant():Incorrect tenant ID {}".format(tenant_id)
578 )
579
580 raise vimconn.VimConnNotFoundException(
581 "Fail to get tenant {}".format(tenant_id)
582 )
583
584 def get_tenant_list(self, filter_dict={}):
585 """Obtain tenants of VIM
586 filter_dict can contain the following keys:
587 name: filter by tenant name
588 id: filter by tenant uuid/id
589 <other VIM specific>
590 Returns the tenant list of dictionaries:
591 [{'name': '<name>', 'id': '<id>', ...}, ...]
592
593 """
594 org_dict = self.get_org(self.org_uuid)
595 vdcs_dict = org_dict["vdcs"]
596
597 vdclist = []
598 try:
599 for k in vdcs_dict:
600 entry = {"name": vdcs_dict[k], "id": k}
601 # if caller didn't specify dictionary we return all tenants.
602
603 if filter_dict is not None and filter_dict:
604 filtered_entry = entry.copy()
605 filtered_dict = set(entry.keys()) - set(filter_dict)
606
607 for unwanted_key in filtered_dict:
608 del entry[unwanted_key]
609
610 if filter_dict == entry:
611 vdclist.append(filtered_entry)
612 else:
613 vdclist.append(entry)
614 except Exception:
615 self.logger.debug("Error in get_tenant_list()")
616 self.logger.debug(traceback.format_exc())
617
618 raise vimconn.VimConnException("Incorrect state. {}")
619
620 return vdclist
621
622 def new_network(
623 self,
624 net_name,
625 net_type,
626 ip_profile=None,
627 shared=False,
628 provider_network_profile=None,
629 ):
630 """Adds a tenant network to VIM
631 Params:
632 'net_name': name of the network
633 'net_type': one of:
634 'bridge': overlay isolated network
635 'data': underlay E-LAN network for Passthrough and SRIOV interfaces
636 'ptp': underlay E-LINE network for Passthrough and SRIOV interfaces.
637 'ip_profile': is a dict containing the IP parameters of the network
638 'ip_version': can be "IPv4" or "IPv6" (Currently only IPv4 is implemented)
639 'subnet_address': ip_prefix_schema, that is X.X.X.X/Y
640 'gateway_address': (Optional) ip_schema, that is X.X.X.X
641 'dns_address': (Optional) comma separated list of ip_schema, e.g. X.X.X.X[,X,X,X,X]
642 'dhcp_enabled': True or False
643 'dhcp_start_address': ip_schema, first IP to grant
644 'dhcp_count': number of IPs to grant.
645 'shared': if this network can be seen/use by other tenants/organization
646 'provider_network_profile': (optional) contains {segmentation-id: vlan, provider-network: vim_network}
647 Returns a tuple with the network identifier and created_items, or raises an exception on error
648 created_items can be None or a dictionary where this method can include key-values that will be passed to
649 the method delete_network. Can be used to store created segments, created l2gw connections, etc.
650 Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
651 as not present.
652 """
653
654 self.logger.debug(
655 "new_network tenant {} net_type {} ip_profile {} shared {} provider_network_profile {}".format(
656 net_name, net_type, ip_profile, shared, provider_network_profile
657 )
658 )
659 # vlan = None
660 # if provider_network_profile:
661 # vlan = provider_network_profile.get("segmentation-id")
662
663 created_items = {}
664 isshared = "false"
665
666 if shared:
667 isshared = "true"
668
669 # ############# Stub code for SRIOV #################
670 # if net_type == "data" or net_type == "ptp":
671 # if self.config.get('dv_switch_name') == None:
672 # raise vimconn.VimConnConflictException("You must provide 'dv_switch_name' at config value")
673 # network_uuid = self.create_dvPort_group(net_name)
674 parent_network_uuid = None
675
676 if provider_network_profile is not None:
677 for k, v in provider_network_profile.items():
678 if k == "physical_network":
679 parent_network_uuid = self.get_physical_network_by_name(v)
680
681 network_uuid = self.create_network(
682 network_name=net_name,
683 net_type=net_type,
684 ip_profile=ip_profile,
685 isshared=isshared,
686 parent_network_uuid=parent_network_uuid,
687 )
688
689 if network_uuid is not None:
690 return network_uuid, created_items
691 else:
692 raise vimconn.VimConnUnexpectedResponse(
693 "Failed create a new network {}".format(net_name)
694 )
695
696 def get_vcd_network_list(self):
697 """Method available organization for a logged in tenant
698
699 Returns:
700 The return vca object that letter can be used to connect to vcloud direct as admin
701 """
702
703 self.logger.debug(
704 "get_vcd_network_list(): retrieving network list for vcd {}".format(
705 self.tenant_name
706 )
707 )
708
709 if not self.tenant_name:
710 raise vimconn.VimConnConnectionException("Tenant name is empty.")
711
712 _, vdc = self.get_vdc_details()
713 if vdc is None:
714 raise vimconn.VimConnConnectionException(
715 "Can't retrieve information for a VDC {}".format(self.tenant_name)
716 )
717
718 vdc_uuid = vdc.get("id").split(":")[3]
719 if self.client._session:
720 headers = {
721 "Accept": "application/*+xml;version=" + API_VERSION,
722 "x-vcloud-authorization": self.client._session.headers[
723 "x-vcloud-authorization"
724 ],
725 }
726 response = self.perform_request(
727 req_type="GET", url=vdc.get("href"), headers=headers
728 )
729
730 if response.status_code != 200:
731 self.logger.error("Failed to get vdc content")
732 raise vimconn.VimConnNotFoundException("Failed to get vdc content")
733 else:
734 content = XmlElementTree.fromstring(response.text)
735
736 network_list = []
737 try:
738 for item in content:
739 if item.tag.split("}")[-1] == "AvailableNetworks":
740 for net in item:
741 response = self.perform_request(
742 req_type="GET", url=net.get("href"), headers=headers
743 )
744
745 if response.status_code != 200:
746 self.logger.error("Failed to get network content")
747 raise vimconn.VimConnNotFoundException(
748 "Failed to get network content"
749 )
750 else:
751 net_details = XmlElementTree.fromstring(response.text)
752
753 filter_dict = {}
754 net_uuid = net_details.get("id").split(":")
755
756 if len(net_uuid) != 4:
757 continue
758 else:
759 net_uuid = net_uuid[3]
760 # create dict entry
761 self.logger.debug(
762 "get_vcd_network_list(): Adding network {} "
763 "to a list vcd id {} network {}".format(
764 net_uuid, vdc_uuid, net_details.get("name")
765 )
766 )
767 filter_dict["name"] = net_details.get("name")
768 filter_dict["id"] = net_uuid
769
770 if [
771 i.text
772 for i in net_details
773 if i.tag.split("}")[-1] == "IsShared"
774 ][0] == "true":
775 shared = True
776 else:
777 shared = False
778
779 filter_dict["shared"] = shared
780 filter_dict["tenant_id"] = vdc_uuid
781
782 if int(net_details.get("status")) == 1:
783 filter_dict["admin_state_up"] = True
784 else:
785 filter_dict["admin_state_up"] = False
786
787 filter_dict["status"] = "ACTIVE"
788 filter_dict["type"] = "bridge"
789 network_list.append(filter_dict)
790 self.logger.debug(
791 "get_vcd_network_list adding entry {}".format(
792 filter_dict
793 )
794 )
795 except Exception:
796 self.logger.debug("Error in get_vcd_network_list", exc_info=True)
797 pass
798
799 self.logger.debug("get_vcd_network_list returning {}".format(network_list))
800
801 return network_list
802
803 def get_network_list(self, filter_dict={}):
804 """Obtain tenant networks of VIM
805 Filter_dict can be:
806 name: network name OR/AND
807 id: network uuid OR/AND
808 shared: boolean OR/AND
809 tenant_id: tenant OR/AND
810 admin_state_up: boolean
811 status: 'ACTIVE'
812
813 [{key : value , key : value}]
814
815 Returns the network list of dictionaries:
816 [{<the fields at Filter_dict plus some VIM specific>}, ...]
817 List can be empty
818 """
819
820 self.logger.debug(
821 "get_network_list(): retrieving network list for vcd {}".format(
822 self.tenant_name
823 )
824 )
825
826 if not self.tenant_name:
827 raise vimconn.VimConnConnectionException("Tenant name is empty.")
828
829 _, vdc = self.get_vdc_details()
830 if vdc is None:
831 raise vimconn.VimConnConnectionException(
832 "Can't retrieve information for a VDC {}.".format(self.tenant_name)
833 )
834
835 try:
836 vdcid = vdc.get("id").split(":")[3]
837
838 if self.client._session:
839 headers = {
840 "Accept": "application/*+xml;version=" + API_VERSION,
841 "x-vcloud-authorization": self.client._session.headers[
842 "x-vcloud-authorization"
843 ],
844 }
845 response = self.perform_request(
846 req_type="GET", url=vdc.get("href"), headers=headers
847 )
848
849 if response.status_code != 200:
850 self.logger.error("Failed to get vdc content")
851 raise vimconn.VimConnNotFoundException("Failed to get vdc content")
852 else:
853 content = XmlElementTree.fromstring(response.text)
854
855 network_list = []
856 for item in content:
857 if item.tag.split("}")[-1] == "AvailableNetworks":
858 for net in item:
859 response = self.perform_request(
860 req_type="GET", url=net.get("href"), headers=headers
861 )
862
863 if response.status_code != 200:
864 self.logger.error("Failed to get network content")
865 raise vimconn.VimConnNotFoundException(
866 "Failed to get network content"
867 )
868 else:
869 net_details = XmlElementTree.fromstring(response.text)
870
871 filter_entry = {}
872 net_uuid = net_details.get("id").split(":")
873
874 if len(net_uuid) != 4:
875 continue
876 else:
877 net_uuid = net_uuid[3]
878 # create dict entry
879 self.logger.debug(
880 "get_network_list(): Adding net {}"
881 " to a list vcd id {} network {}".format(
882 net_uuid, vdcid, net_details.get("name")
883 )
884 )
885 filter_entry["name"] = net_details.get("name")
886 filter_entry["id"] = net_uuid
887
888 if [
889 i.text
890 for i in net_details
891 if i.tag.split("}")[-1] == "IsShared"
892 ][0] == "true":
893 shared = True
894 else:
895 shared = False
896
897 filter_entry["shared"] = shared
898 filter_entry["tenant_id"] = vdcid
899
900 if int(net_details.get("status")) == 1:
901 filter_entry["admin_state_up"] = True
902 else:
903 filter_entry["admin_state_up"] = False
904
905 filter_entry["status"] = "ACTIVE"
906 filter_entry["type"] = "bridge"
907 filtered_entry = filter_entry.copy()
908
909 if filter_dict is not None and filter_dict:
910 # we remove all the key:value pairs we don't care about and match only
911 # the requested fields
912 filtered_dict = set(filter_entry.keys()) - set(
913 filter_dict
914 )
915
916 for unwanted_key in filtered_dict:
917 del filter_entry[unwanted_key]
918
919 if filter_dict == filter_entry:
920 network_list.append(filtered_entry)
921 else:
922 network_list.append(filtered_entry)
923 except Exception as e:
924 self.logger.debug("Error in get_network_list", exc_info=True)
925
926 if isinstance(e, vimconn.VimConnException):
927 raise
928 else:
929 raise vimconn.VimConnNotFoundException(
930 "Failed : Networks list not found {} ".format(e)
931 )
932
933 self.logger.debug("Returning {}".format(network_list))
934
935 return network_list
936
937 def get_network(self, net_id):
938 """Method obtains network details of net_id VIM network
939 Returns a dict with the fields at filter_dict (see get_network_list) plus some VIM specific fields
940 """
941 try:
942 _, vdc = self.get_vdc_details()
943 vdc_id = vdc.get("id").split(":")[3]
944
945 if self.client._session:
946 headers = {
947 "Accept": "application/*+xml;version=" + API_VERSION,
948 "x-vcloud-authorization": self.client._session.headers[
949 "x-vcloud-authorization"
950 ],
951 }
952 response = self.perform_request(
953 req_type="GET", url=vdc.get("href"), headers=headers
954 )
955
956 if response.status_code != 200:
957 self.logger.error("Failed to get vdc content")
958 raise vimconn.VimConnNotFoundException("Failed to get vdc content")
959 else:
960 content = XmlElementTree.fromstring(response.text)
961
962 filter_dict = {}
963
964 for item in content:
965 if item.tag.split("}")[-1] == "AvailableNetworks":
966 for net in item:
967 response = self.perform_request(
968 req_type="GET", url=net.get("href"), headers=headers
969 )
970
971 if response.status_code != 200:
972 self.logger.error("Failed to get network content")
973 raise vimconn.VimConnNotFoundException(
974 "Failed to get network content"
975 )
976 else:
977 net_details = XmlElementTree.fromstring(response.text)
978
979 vdc_network_id = net_details.get("id").split(":")
980 if len(vdc_network_id) == 4 and vdc_network_id[3] == net_id:
981 filter_dict["name"] = net_details.get("name")
982 filter_dict["id"] = vdc_network_id[3]
983
984 if [
985 i.text
986 for i in net_details
987 if i.tag.split("}")[-1] == "IsShared"
988 ][0] == "true":
989 shared = True
990 else:
991 shared = False
992
993 filter_dict["shared"] = shared
994 filter_dict["tenant_id"] = vdc_id
995
996 if int(net_details.get("status")) == 1:
997 filter_dict["admin_state_up"] = True
998 else:
999 filter_dict["admin_state_up"] = False
1000
1001 filter_dict["status"] = "ACTIVE"
1002 filter_dict["type"] = "bridge"
1003 self.logger.debug("Returning {}".format(filter_dict))
1004
1005 return filter_dict
1006 else:
1007 raise vimconn.VimConnNotFoundException(
1008 "Network {} not found".format(net_id)
1009 )
1010 except Exception as e:
1011 self.logger.debug("Error in get_network")
1012 self.logger.debug(traceback.format_exc())
1013
1014 if isinstance(e, vimconn.VimConnException):
1015 raise
1016 else:
1017 raise vimconn.VimConnNotFoundException(
1018 "Failed : Network not found {} ".format(e)
1019 )
1020
1021 return filter_dict
1022
1023 def delete_network(self, net_id, created_items=None):
1024 """
1025 Removes a tenant network from VIM and its associated elements
1026 :param net_id: VIM identifier of the network, provided by method new_network
1027 :param created_items: dictionary with extra items to be deleted. provided by method new_network
1028 Returns the network identifier or raises an exception upon error or when network is not found
1029 """
1030
1031 # ############# Stub code for SRIOV #################
1032 # dvport_group = self.get_dvport_group(net_id)
1033 # if dvport_group:
1034 # #delete portgroup
1035 # status = self.destroy_dvport_group(net_id)
1036 # if status:
1037 # # Remove vlanID from persistent info
1038 # if net_id in self.persistent_info["used_vlanIDs"]:
1039 # del self.persistent_info["used_vlanIDs"][net_id]
1040 #
1041 # return net_id
1042
1043 vcd_network = self.get_vcd_network(network_uuid=net_id)
1044 if vcd_network is not None and vcd_network:
1045 if self.delete_network_action(network_uuid=net_id):
1046 return net_id
1047 else:
1048 raise vimconn.VimConnNotFoundException(
1049 "Network {} not found".format(net_id)
1050 )
1051
1052 def refresh_nets_status(self, net_list):
1053 """Get the status of the networks
1054 Params: the list of network identifiers
1055 Returns a dictionary with:
1056 net_id: #VIM id of this network
1057 status: #Mandatory. Text with one of:
1058 # DELETED (not found at vim)
1059 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
1060 # OTHER (Vim reported other status not understood)
1061 # ERROR (VIM indicates an ERROR status)
1062 # ACTIVE, INACTIVE, DOWN (admin down),
1063 # BUILD (on building process)
1064 #
1065 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
1066 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
1067
1068 """
1069 dict_entry = {}
1070 try:
1071 for net in net_list:
1072 errormsg = ""
1073 vcd_network = self.get_vcd_network(network_uuid=net)
1074 if vcd_network is not None and vcd_network:
1075 if vcd_network["status"] == "1":
1076 status = "ACTIVE"
1077 else:
1078 status = "DOWN"
1079 else:
1080 status = "DELETED"
1081 errormsg = "Network not found."
1082
1083 dict_entry[net] = {
1084 "status": status,
1085 "error_msg": errormsg,
1086 "vim_info": yaml.safe_dump(vcd_network),
1087 }
1088 except Exception:
1089 self.logger.debug("Error in refresh_nets_status")
1090 self.logger.debug(traceback.format_exc())
1091
1092 return dict_entry
1093
1094 def get_flavor(self, flavor_id):
1095 """Obtain flavor details from the VIM
1096 Returns the flavor dict details {'id':<>, 'name':<>, other vim specific } #TODO to concrete
1097 """
1098 if flavor_id not in vimconnector.flavorlist:
1099 raise vimconn.VimConnNotFoundException("Flavor not found.")
1100
1101 return vimconnector.flavorlist[flavor_id]
1102
1103 def new_flavor(self, flavor_data):
1104 """Adds a tenant flavor to VIM
1105 flavor_data contains a dictionary with information, keys:
1106 name: flavor name
1107 ram: memory (cloud type) in MBytes
1108 vcpus: cpus (cloud type)
1109 extended: EPA parameters
1110 - numas: #items requested in same NUMA
1111 memory: number of 1G huge pages memory
1112 paired-threads|cores|threads: number of paired hyperthreads, complete cores OR individual
1113 threads
1114 interfaces: # passthrough(PT) or SRIOV interfaces attached to this numa
1115 - name: interface name
1116 dedicated: yes|no|yes:sriov; for PT, SRIOV or only one SRIOV for the physical NIC
1117 bandwidth: X Gbps; requested guarantee bandwidth
1118 vpci: requested virtual PCI address
1119 disk: disk size
1120 is_public:
1121 #TODO to concrete
1122 Returns the flavor identifier"""
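# Illustrative flavor_data sketch (keys as documented above; values are placeholders):
#   flavor_data = {
#       "name": "small", "ram": 2048, "vcpus": 2, "disk": 10,
#       "extended": {
#           "numas": [
#               {"memory": 2, "paired-threads": 2,
#                "interfaces": [{"name": "xe0", "dedicated": "no"}]}
#           ]
#       },
#   }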
1123
1124 # generate a new uuid put to internal dict and return it.
1125 self.logger.debug("Creating new flavor - flavor_data: {}".format(flavor_data))
1126 new_flavor = flavor_data
1127 ram = flavor_data.get(FLAVOR_RAM_KEY, 1024)
1128 cpu = flavor_data.get(FLAVOR_VCPUS_KEY, 1)
1129 disk = flavor_data.get(FLAVOR_DISK_KEY, 0)
1130
1131 if not isinstance(ram, int):
1132 raise vimconn.VimConnException("Non-integer value for ram")
1133 elif not isinstance(cpu, int):
1134 raise vimconn.VimConnException("Non-integer value for cpu")
1135 elif not isinstance(disk, int):
1136 raise vimconn.VimConnException("Non-integer value for disk")
1137
1138 extended_flv = flavor_data.get("extended")
1139 if extended_flv:
1140 numas = extended_flv.get("numas")
1141 if numas:
1142 for numa in numas:
1143 # overwrite ram and vcpus
1144 if "memory" in numa:
1145 ram = numa["memory"] * 1024
1146
1147 if "paired-threads" in numa:
1148 cpu = numa["paired-threads"] * 2
1149 elif "cores" in numa:
1150 cpu = numa["cores"]
1151 elif "threads" in numa:
1152 cpu = numa["threads"]
1153
1154 new_flavor[FLAVOR_RAM_KEY] = ram
1155 new_flavor[FLAVOR_VCPUS_KEY] = cpu
1156 new_flavor[FLAVOR_DISK_KEY] = disk
1157 # generate a new uuid put to internal dict and return it.
1158 flavor_id = uuid.uuid4()
1159 vimconnector.flavorlist[str(flavor_id)] = new_flavor
1160 self.logger.debug("Created flavor - {} : {}".format(flavor_id, new_flavor))
1161
1162 return str(flavor_id)
1163
1164 def delete_flavor(self, flavor_id):
1165 """Deletes a tenant flavor from VIM identify by its id
1166
1167 Returns the used id or raise an exception
1168 """
1169 if flavor_id not in vimconnector.flavorlist:
1170 raise vimconn.VimConnNotFoundException("Flavor not found.")
1171
1172 vimconnector.flavorlist.pop(flavor_id, None)
1173
1174 return flavor_id
1175
1176 def new_image(self, image_dict):
1177 """
1178 Adds a tenant image to VIM
1179 Returns:
1180 the image-id (catalog UUID) if the image is created
1181 raises an exception if there is an error
1182 """
1183 return self.get_image_id_from_path(image_dict["location"])
1184
1185 def delete_image(self, image_id):
1186 """
1187 Deletes a tenant image from VIM
1188 Args:
1189 image_id is ID of Image to be deleted
1190 Return:
1191 returns the image identifier in UUID format or raises an exception on error
1192 """
1193 conn = self.connect_as_admin()
1194
1195 if not conn:
1196 raise vimconn.VimConnConnectionException("Failed to connect vCD")
1197
1198 # Get Catalog details
1199 url_list = [self.url, "/api/catalog/", image_id]
1200 catalog_herf = "".join(url_list)
1201
1202 headers = {
1203 "Accept": "application/*+xml;version=" + API_VERSION,
1204 "x-vcloud-authorization": conn._session.headers["x-vcloud-authorization"],
1205 }
1206
1207 response = self.perform_request(
1208 req_type="GET", url=catalog_herf, headers=headers
1209 )
1210
1211 if response.status_code != requests.codes.ok:
1212 self.logger.debug(
1213 "delete_image():GET REST API call {} failed. "
1214 "Return status code {}".format(catalog_herf, response.status_code)
1215 )
1216
1217 raise vimconn.VimConnNotFoundException(
1218 "Fail to get image {}".format(image_id)
1219 )
1220
1221 lxmlroot_respond = lxmlElementTree.fromstring(response.content)
1222 namespaces = {
1223 prefix: uri for prefix, uri in lxmlroot_respond.nsmap.items() if prefix
1224 }
1225 namespaces["xmlns"] = "http://www.vmware.com/vcloud/v1.5"
1226
1227 catalogItems_section = lxmlroot_respond.find("xmlns:CatalogItems", namespaces)
1228 catalogItems = catalogItems_section.iterfind("xmlns:CatalogItem", namespaces)
1229
1230 for catalogItem in catalogItems:
1231 catalogItem_href = catalogItem.attrib["href"]
1232
1233 response = self.perform_request(
1234 req_type="GET", url=catalogItem_href, headers=headers
1235 )
1236
1237 if response.status_code != requests.codes.ok:
1238 self.logger.debug(
1239 "delete_image():GET REST API call {} failed. "
1240 "Return status code {}".format(catalog_herf, response.status_code)
1241 )
1242 raise vimconn.VimConnNotFoundException(
1243 "Fail to get catalogItem {} for catalog {}".format(
1244 catalogItem, image_id
1245 )
1246 )
1247
1248 lxmlroot_respond = lxmlElementTree.fromstring(response.content)
1249 namespaces = {
1250 prefix: uri for prefix, uri in lxmlroot_respond.nsmap.items() if prefix
1251 }
1252 namespaces["xmlns"] = "http://www.vmware.com/vcloud/v1.5"
1253 catalogitem_remove_href = lxmlroot_respond.find(
1254 "xmlns:Link[@rel='remove']", namespaces
1255 ).attrib["href"]
1256
1257 # Remove catalogItem
1258 response = self.perform_request(
1259 req_type="DELETE", url=catalogitem_remove_href, headers=headers
1260 )
1261
1262 if response.status_code == requests.codes.no_content:
1263 self.logger.debug("Deleted Catalog item {}".format(catalogItem))
1264 else:
1265 raise vimconn.VimConnException(
1266 "Fail to delete Catalog Item {}".format(catalogItem)
1267 )
1268
1269 # Remove catalog
1270 url_list = [self.url, "/api/admin/catalog/", image_id]
1271 catalog_remove_herf = "".join(url_list)
1272 response = self.perform_request(
1273 req_type="DELETE", url=catalog_remove_herf, headers=headers
1274 )
1275
1276 if response.status_code == requests.codes.no_content:
1277 self.logger.debug("Deleted Catalog {}".format(image_id))
1278
1279 return image_id
1280 else:
1281 raise vimconn.VimConnException("Fail to delete Catalog {}".format(image_id))
1282
1283 def catalog_exists(self, catalog_name, catalogs):
1284 """
1285
1286 :param catalog_name:
1287 :param catalogs:
1288 :return:
1289 """
1290 for catalog in catalogs:
1291 if catalog["name"] == catalog_name:
1292 return catalog["id"]
1293
1294 def create_vimcatalog(self, vca=None, catalog_name=None):
1295 """Create new catalog entry in vCloud director.
1296
1297 Args
1298 vca: vCloud director.
1299 catalog_name: catalog that the client wishes to create. Note: no validation is done on the name.
1300 The client must make sure to provide a valid string representation.
1301
1302 Returns catalog id if catalog created else None.
1303
1304 """
1305 try:
1306 lxml_catalog_element = vca.create_catalog(catalog_name, catalog_name)
1307
1308 if lxml_catalog_element:
1309 id_attr_value = lxml_catalog_element.get("id")
1310 return id_attr_value.split(":")[-1]
1311
1312 catalogs = vca.list_catalogs()
1313 except Exception as ex:
1314 self.logger.error(
1315 'create_vimcatalog(): Creation of catalog "{}" failed with error: {}'.format(
1316 catalog_name, ex
1317 )
1318 )
1319 raise
1320 return self.catalog_exists(catalog_name, catalogs)
1321
1322 # noinspection PyIncorrectDocstring
1323 def upload_ovf(
1324 self,
1325 vca=None,
1326 catalog_name=None,
1327 image_name=None,
1328 media_file_name=None,
1329 description="",
1330 progress=False,
1331 chunk_bytes=128 * 1024,
1332 ):
1333 """
1334 Uploads an OVF file to a vCloud catalog
1335
1336 :param chunk_bytes:
1337 :param progress:
1338 :param description:
1339 :param image_name:
1340 :param vca:
1341 :param catalog_name: (str): The name of the catalog to upload the media.
1342 :param media_file_name: (str): The name of the local media file to upload.
1343 :return: (bool) True if the media file was successfully uploaded, false otherwise.
1344 """
1345 os.path.isfile(media_file_name)
1346 statinfo = os.stat(media_file_name)
1347
1348 # find a catalog entry where we upload the OVF.
1349 # create the vApp Template and check the status; if vCD is able to read the OVF it will respond with the
1350 # appropriate status change.
1351 # if vCD can parse the OVF we upload the VMDK file
1352 try:
1353 for catalog in vca.list_catalogs():
1354 if catalog_name != catalog["name"]:
1355 continue
1356 catalog_href = "{}/api/catalog/{}/action/upload".format(
1357 self.url, catalog["id"]
1358 )
1359 data = """
1360 <UploadVAppTemplateParams name="{}"
1361 xmlns="http://www.vmware.com/vcloud/v1.5"
1362 xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1">
1363 <Description>{} vApp Template</Description>
1364 </UploadVAppTemplateParams>
1365 """.format(
1366 catalog_name, description
1367 )
1368
1369 if self.client:
1370 headers = {
1371 "Accept": "application/*+xml;version=" + API_VERSION,
1372 "x-vcloud-authorization": self.client._session.headers[
1373 "x-vcloud-authorization"
1374 ],
1375 }
1376 headers[
1377 "Content-Type"
1378 ] = "application/vnd.vmware.vcloud.uploadVAppTemplateParams+xml"
1379
1380 response = self.perform_request(
1381 req_type="POST", url=catalog_href, headers=headers, data=data
1382 )
1383
1384 if response.status_code == requests.codes.created:
1385 catalogItem = XmlElementTree.fromstring(response.text)
1386 entity = [
1387 child
1388 for child in catalogItem
1389 if child.get("type")
1390 == "application/vnd.vmware.vcloud.vAppTemplate+xml"
1391 ][0]
1392 href = entity.get("href")
1393 template = href
1394
1395 response = self.perform_request(
1396 req_type="GET", url=href, headers=headers
1397 )
1398
1399 if response.status_code == requests.codes.ok:
1400 headers["Content-Type"] = "Content-Type text/xml"
1401 result = re.search(
1402 'rel="upload:default"\shref="(.*?\/descriptor.ovf)"',
1403 response.text,
1404 )
1405
1406 if result:
1407 transfer_href = result.group(1)
1408
1409 response = self.perform_request(
1410 req_type="PUT",
1411 url=transfer_href,
1412 headers=headers,
1413 data=open(media_file_name, "rb"),
1414 )
1415
1416 if response.status_code != requests.codes.ok:
1417 self.logger.debug(
1418 "Failed create vApp template for catalog name {} and image {}".format(
1419 catalog_name, media_file_name
1420 )
1421 )
1422 return False
1423
1424 # TODO fix this with async block
1425 time.sleep(5)
1426
1427 self.logger.debug(
1428 "vApp template for catalog name {} and image {}".format(
1429 catalog_name, media_file_name
1430 )
1431 )
1432
1433 # uploading VMDK file
1434 # check status of OVF upload and upload remaining files.
1435 response = self.perform_request(
1436 req_type="GET", url=template, headers=headers
1437 )
1438
1439 if response.status_code == requests.codes.ok:
1440 result = re.search(
1441 'rel="upload:default"\s*href="(.*?vmdk)"', response.text
1442 )
1443
1444 if result:
1445 link_href = result.group(1)
1446
1447 # we skip the ovf since it is already uploaded.
1448 if "ovf" in link_href:
1449 continue
1450
1451 # The OVF file and the VMDK must be in the same directory
1452 head, _ = os.path.split(media_file_name)
1453 file_vmdk = head + "/" + link_href.split("/")[-1]
1454
1455 if not os.path.isfile(file_vmdk):
1456 return False
1457
1458 statinfo = os.stat(file_vmdk)
1459 if statinfo.st_size == 0:
1460 return False
1461
1462 hrefvmdk = link_href
1463
1464 if progress:
1465 widgets = [
1466 "Uploading file: ",
1467 Percentage(),
1468 " ",
1469 Bar(),
1470 " ",
1471 ETA(),
1472 " ",
1473 FileTransferSpeed(),
1474 ]
1475 progress_bar = ProgressBar(
1476 widgets=widgets, maxval=statinfo.st_size
1477 ).start()
1478
1479 bytes_transferred = 0
1480 f = open(file_vmdk, "rb")
1481
1482 while bytes_transferred < statinfo.st_size:
1483 my_bytes = f.read(chunk_bytes)
1484 if len(my_bytes) <= chunk_bytes:
1485 headers["Content-Range"] = "bytes {}-{}/{}".format(
1486 bytes_transferred,
1487 len(my_bytes) - 1,
1488 statinfo.st_size,
1489 )
1490 headers["Content-Length"] = str(len(my_bytes))
1491 response = requests.put(
1492 url=hrefvmdk,
1493 headers=headers,
1494 data=my_bytes,
1495 verify=False,
1496 )
1497
1498 if response.status_code == requests.codes.ok:
1499 bytes_transferred += len(my_bytes)
1500 if progress:
1501 progress_bar.update(bytes_transferred)
1502 else:
1503 self.logger.debug(
1504 "file upload failed with error: [{}] {}".format(
1505 response.status_code, response.text
1506 )
1507 )
1508
1509 f.close()
1510
1511 return False
1512
1513 f.close()
1514 if progress:
1515 progress_bar.finish()
1516 time.sleep(10)
1517
1518 return True
1519 else:
1520 self.logger.debug(
1521 "Failed retrieve vApp template for catalog name {} for OVF {}".format(
1522 catalog_name, media_file_name
1523 )
1524 )
1525 return False
1526 except Exception as exp:
1527 self.logger.debug(
1528 "Failed while uploading OVF to catalog {} for OVF file {} with Exception {}".format(
1529 catalog_name, media_file_name, exp
1530 )
1531 )
1532
1533 raise vimconn.VimConnException(
1534 "Failed while uploading OVF to catalog {} for OVF file {} with Exception {}".format(
1535 catalog_name, media_file_name, exp
1536 )
1537 )
1538
1539 self.logger.debug(
1540 "Failed retrieve catalog name {} for OVF file {}".format(
1541 catalog_name, media_file_name
1542 )
1543 )
1544
1545 return False
1546
1547 def upload_vimimage(
1548 self,
1549 vca=None,
1550 catalog_name=None,
1551 media_name=None,
1552 medial_file_name=None,
1553 progress=False,
1554 ):
1555 """Upload media file"""
1556 # TODO add named parameters for readability
1557 return self.upload_ovf(
1558 vca=vca,
1559 catalog_name=catalog_name,
1560 image_name=media_name.split(".")[0],
1561 media_file_name=medial_file_name,
1562 description="medial_file_name",
1563 progress=progress,
1564 )
1565
1566 def validate_uuid4(self, uuid_string=None):
1567 """Method validate correct format of UUID.
1568
1569 Return: true if string represent valid uuid
1570 """
1571 try:
1572 uuid.UUID(uuid_string, version=4)
1573 except ValueError:
1574 return False
1575
1576 return True
1577
1578 def get_catalogid(self, catalog_name=None, catalogs=None):
1579 """Method check catalog and return catalog ID in UUID format.
1580
1581 Args
1582 catalog_name: catalog name as string
1583 catalogs: list of catalogs.
1584
1585 Return: catalogs uuid
1586 """
1587 for catalog in catalogs:
1588 if catalog["name"] == catalog_name:
1589 catalog_id = catalog["id"]
1590 return catalog_id
1591
1592 return None
1593
1594 def get_catalogbyid(self, catalog_uuid=None, catalogs=None):
1595 """Method check catalog and return catalog name lookup done by catalog UUID.
1596
1597 Args
1598 catalog_name: catalog name as string
1599 catalogs: list of catalogs.
1600
1601 Return: catalogs name or None
1602 """
1603 if not self.validate_uuid4(uuid_string=catalog_uuid):
1604 return None
1605
1606 for catalog in catalogs:
1607 catalog_id = catalog.get("id")
1608
1609 if catalog_id == catalog_uuid:
1610 return catalog.get("name")
1611
1612 return None
1613
1614 def get_catalog_obj(self, catalog_uuid=None, catalogs=None):
1615 """Method check catalog and return catalog name lookup done by catalog UUID.
1616
1617 Args
1618 catalog_name: catalog name as string
1619 catalogs: list of catalogs.
1620
1621 Return: catalogs name or None
1622 """
1623 if not self.validate_uuid4(uuid_string=catalog_uuid):
1624 return None
1625
1626 for catalog in catalogs:
1627 catalog_id = catalog.get("id")
1628
1629 if catalog_id == catalog_uuid:
1630 return catalog
1631
1632 return None
1633
1634 def get_image_id_from_path(self, path=None, progress=False):
1635 """Method upload OVF image to vCloud director.
1636
1637 Each OVF image represented as single catalog entry in vcloud director.
1638 The method check for existing catalog entry. The check done by file name without file extension.
1639
1640 if given catalog name already present method will respond with existing catalog uuid otherwise
1641 it will create new catalog entry and upload OVF file to newly created catalog.
1642
1643 If method can't create catalog entry or upload a file it will throw exception.
1644
1645 Method accept boolean flag progress that will output progress bar. It useful method
1646 for standalone upload use case. In case to test large file upload.
1647
1648 Args
1649 path: - valid path to OVF file.
1650 progress - boolean progress bar show progress bar.
1651
1652 Return: if image uploaded correct method will provide image catalog UUID.
1653 """
1654 if not path:
1655 raise vimconn.VimConnException("Image path can't be None.")
1656
1657 if not os.path.isfile(path):
1658 raise vimconn.VimConnException("Can't read file. File not found.")
1659
1660 if not os.access(path, os.R_OK):
1661 raise vimconn.VimConnException(
1662 "Can't read file. Check file permission to read."
1663 )
1664
1665 self.logger.debug("get_image_id_from_path() client requesting {} ".format(path))
1666
1667 _, filename = os.path.split(path)
1668 _, file_extension = os.path.splitext(path)
1669 if file_extension != ".ovf":
1670 self.logger.debug(
1671 "Wrong file extension {} connector support only OVF container.".format(
1672 file_extension
1673 )
1674 )
1675
1676 raise vimconn.VimConnException(
1677 "Wrong container. vCloud director supports only OVF."
1678 )
1679
1680 catalog_name = os.path.splitext(filename)[0]
1681 catalog_md5_name = hashlib.md5(path.encode("utf-8")).hexdigest()
1682 self.logger.debug(
1683 "File name {} Catalog Name {} file path {} "
1684 "vdc catalog name {}".format(filename, catalog_name, path, catalog_md5_name)
1685 )
1686
1687 try:
1688 org, _ = self.get_vdc_details()
1689 catalogs = org.list_catalogs()
1690 except Exception as exp:
1691 self.logger.debug("Failed get catalogs() with Exception {} ".format(exp))
1692
1693 raise vimconn.VimConnException(
1694 "Failed get catalogs() with Exception {} ".format(exp)
1695 )
1696
1697 if len(catalogs) == 0:
1698 self.logger.info(
1699 "Creating a new catalog entry {} in vcloud director".format(
1700 catalog_name
1701 )
1702 )
1703
1704 if self.create_vimcatalog(org, catalog_md5_name) is None:
1705 raise vimconn.VimConnException(
1706 "Failed create new catalog {} ".format(catalog_md5_name)
1707 )
1708
1709 result = self.upload_vimimage(
1710 vca=org,
1711 catalog_name=catalog_md5_name,
1712 media_name=filename,
1713 medial_file_name=path,
1714 progress=progress,
1715 )
1716
1717 if not result:
1718 raise vimconn.VimConnException(
1719 "Failed create vApp template for catalog {} ".format(catalog_name)
1720 )
1721
1722 return self.get_catalogid(catalog_name, catalogs)
1723 else:
1724 for catalog in catalogs:
1725 # search for existing catalog if we find same name we return ID
1726 # TODO optimize this
1727 if catalog["name"] == catalog_md5_name:
1728 self.logger.debug(
1729 "Found existing catalog entry for {} "
1730 "catalog id {}".format(
1731 catalog_name, self.get_catalogid(catalog_md5_name, catalogs)
1732 )
1733 )
1734
1735 return self.get_catalogid(catalog_md5_name, catalogs)
1736
1737 # if we didn't find existing catalog we create a new one and upload image.
1738 self.logger.debug(
1739 "Creating new catalog entry {} - {}".format(catalog_name, catalog_md5_name)
1740 )
1741 if self.create_vimcatalog(org, catalog_md5_name) is None:
1742 raise vimconn.VimConnException(
1743 "Failed create new catalog {} ".format(catalog_md5_name)
1744 )
1745
1746 result = self.upload_vimimage(
1747 vca=org,
1748 catalog_name=catalog_md5_name,
1749 media_name=filename,
1750 medial_file_name=path,
1751 progress=progress,
1752 )
1753 if not result:
1754 raise vimconn.VimConnException(
1755 "Failed create vApp template for catalog {} ".format(catalog_md5_name)
1756 )
1757
1758 return self.get_catalogid(catalog_md5_name, org.list_catalogs())
1759
1760 def get_image_list(self, filter_dict={}):
1761 """Obtain tenant images from VIM
1762 Filter_dict can be:
1763 name: image name
1764 id: image uuid
1765 checksum: image checksum
1766 location: image path
1767 Returns the image list of dictionaries:
1768 [{<the fields at Filter_dict plus some VIM specific>}, ...]
1769 List can be empty
1770 """
1771 try:
1772 org, _ = self.get_vdc_details()
1773 image_list = []
1774 catalogs = org.list_catalogs()
1775
1776 if len(catalogs) == 0:
1777 return image_list
1778 else:
1779 for catalog in catalogs:
1780 catalog_uuid = catalog.get("id")
1781 name = catalog.get("name")
1782 filtered_dict = {}
1783
1784 if filter_dict.get("name") and filter_dict["name"] != name:
1785 continue
1786
1787 if filter_dict.get("id") and filter_dict["id"] != catalog_uuid:
1788 continue
1789
1790 filtered_dict["name"] = name
1791 filtered_dict["id"] = catalog_uuid
1792 image_list.append(filtered_dict)
1793
1794 self.logger.debug(
1795 "List of already created catalog items: {}".format(image_list)
1796 )
1797
1798 return image_list
1799 except Exception as exp:
1800 raise vimconn.VimConnException(
1801 "Exception occured while retriving catalog items {}".format(exp)
1802 )
1803
1804 def get_vappid(self, vdc=None, vapp_name=None):
1805 """Method takes vdc object and vApp name and returns vapp uuid or None
1806
1807 Args:
1808 vdc: The VDC object.
1809 vapp_name: the vApp name identifier
1810
1811 Returns:
1812 The vApp uuid, otherwise None
1813 """
1814 if vdc is None or vapp_name is None:
1815 return None
1816
1817 # UUID has following format https://host/api/vApp/vapp-30da58a3-e7c7-4d09-8f68-d4c8201169cf
1818 try:
1819 refs = [
1820 ref
1821 for ref in vdc.ResourceEntities.ResourceEntity
1822 if ref.name == vapp_name
1823 and ref.type_ == "application/vnd.vmware.vcloud.vApp+xml"
1824 ]
1825
1826 if len(refs) == 1:
1827 return refs[0].href.split("vapp")[1][1:]
1828 except Exception as e:
1829 self.logger.exception(e)
1830 return False
1831
1832 return None
1833
1834 def check_vapp(self, vdc=None, vapp_uuid=None):
1835 """Method Method returns True or False if vapp deployed in vCloud director
1836
1837 Args:
1838 vca: Connector to VCA
1839 vdc: The VDC object.
1840 vappid: vappid is application identifier
1841
1842 Returns:
1843 True if the vApp is deployed
1844 :param vdc:
1845 :param vapp_uuid:
1846 """
1847 try:
1848 refs = [
1849 ref
1850 for ref in vdc.ResourceEntities.ResourceEntity
1851 if ref.type_ == "application/vnd.vmware.vcloud.vApp+xml"
1852 ]
1853
1854 for ref in refs:
1855 vappid = ref.href.split("vapp")[1][1:]
1856 # find the vapp with the requested vapp uuid
1857
1858 if vappid == vapp_uuid:
1859 return True
1860 except Exception as e:
1861 self.logger.exception(e)
1862
1863 return False
1864
1865 return False
1866
1867 def get_namebyvappid(self, vapp_uuid=None):
1868 """Method returns vApp name from vCD and lookup done by vapp_id.
1869
1870 Args:
1871 vapp_uuid: vappid is application identifier
1872
1873 Returns:
1874 The vApp name, otherwise None
1875 """
1876 try:
1877 if self.client and vapp_uuid:
1878 vapp_call = "{}/api/vApp/vapp-{}".format(self.url, vapp_uuid)
1879 headers = {
1880 "Accept": "application/*+xml;version=" + API_VERSION,
1881 "x-vcloud-authorization": self.client._session.headers[
1882 "x-vcloud-authorization"
1883 ],
1884 }
1885
1886 response = self.perform_request(
1887 req_type="GET", url=vapp_call, headers=headers
1888 )
1889
1890 # Retry login if session expired & retry sending request
1891 if response.status_code == 403:
1892 response = self.retry_rest("GET", vapp_call)
1893
1894 tree = XmlElementTree.fromstring(response.text)
1895
1896 return tree.attrib["name"] if "name" in tree.attrib else None
1897 except Exception as e:
1898 self.logger.exception(e)
1899
1900 return None
1901
1902 return None
1903
1904 def new_vminstance(
1905 self,
1906 name=None,
1907 description="",
1908 start=False,
1909 image_id=None,
1910 flavor_id=None,
1911 affinity_group_list=[],
1912 net_list=[],
1913 cloud_config=None,
1914 disk_list=None,
1915 availability_zone_index=None,
1916 availability_zone_list=None,
1917 ):
1918 """Adds a VM instance to VIM
1919 Params:
1920 'start': (boolean) indicates if VM must start or created in pause mode.
1921 'image_id','flavor_id': image and flavor VIM id to use for the VM
1922 'net_list': list of interfaces, each one is a dictionary with:
1923 'name': (optional) name for the interface.
1924 'net_id': VIM network id where this interface must be connected to. Mandatory for type==virtual
1925 'vpci': (optional) virtual vPCI address to assign at the VM. Can be ignored depending on VIM
1926 capabilities
1927 'model': (optional and only have sense for type==virtual) interface model: virtio, e1000, ...
1928 'mac_address': (optional) mac address to assign to this interface
1929 #TODO: CHECK if an optional 'vlan' parameter is needed for VIMs when type is VF and net_id is not
1930 provided, the VLAN tag to be used. In case net_id is provided, the internal network vlan is used
1931 for tagging VF
1932 'type': (mandatory) can be one of:
1933 'virtual', in this case always connected to a network of type 'net_type=bridge'
1934 'PCI-PASSTHROUGH' or 'PF' (passthrough): depending on VIM capabilities it can be connected to a
1935 data/ptp network or it can be created unconnected
1936 'SR-IOV' or 'VF' (SRIOV with VLAN tag): same as PF for network connectivity.
1937 'VFnotShared'(SRIOV without VLAN tag) same as PF for network connectivity. VF where no other VFs
1938 are allocated on the same physical NIC
1939 'bw': (optional) only for PF/VF/VFnotShared. Minimal Bandwidth required for the interface in GBPS
1940 'port_security': (optional) If False it must avoid any traffic filtering at this interface. If missing
1941 or True, it must apply the default VIM behaviour
1942 After execution the method will add the key:
1943 'vim_id': must be filled/added by this method with the VIM identifier generated by the VIM for this
1944 interface. 'net_list' is modified
1945 'cloud_config': (optional) dictionary with:
1946 'key-pairs': (optional) list of strings with the public key to be inserted to the default user
1947 'users': (optional) list of users to be inserted, each item is a dict with:
1948 'name': (mandatory) user name,
1949 'key-pairs': (optional) list of strings with the public key to be inserted to the user
1950 'user-data': (optional) can be a string with the text script to be passed directly to cloud-init,
1951 or a list of strings, each one contains a script to be passed, usually with a MIMEmultipart file
1952 'config-files': (optional). List of files to be transferred. Each item is a dict with:
1953 'dest': (mandatory) string with the destination absolute path
1954 'encoding': (optional, by default text). Can be one of:
1955 'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
1956 'content' (mandatory): string with the content of the file
1957 'permissions': (optional) string with file permissions, typically octal notation '0644'
1958 'owner': (optional) file owner, string with the format 'owner:group'
1959 'boot-data-drive': boolean to indicate if user-data must be passed using a boot drive (hard disk)
1960 'disk_list': (optional) list with additional disks to the VM. Each item is a dict with:
1961 'image_id': (optional). VIM id of an existing image. If not provided an empty disk must be mounted
1962 'size': (mandatory) string with the size of the disk in GB
1963 availability_zone_index: Index of availability_zone_list to use for this VM. None if no AZ is required
1964 availability_zone_list: list of availability zones given by user in the VNFD descriptor. Ignore if
1965 availability_zone_index is None
1966 Returns a tuple with the instance identifier and created_items or raises an exception on error
1967 created_items can be None or a dictionary where this method can include key-values that will be passed to
1968 the method delete_vminstance and action_vminstance. Can be used to store created ports, volumes, etc.
1969 Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
1970 as not present.
1971 """
1972 self.logger.info("Creating new instance for entry {}".format(name))
1973 self.logger.debug(
1974 "desc {} boot {} image_id: {} flavor_id: {} net_list: {} cloud_config {} disk_list {} "
1975 "availability_zone_index {} availability_zone_list {}".format(
1976 description,
1977 start,
1978 image_id,
1979 flavor_id,
1980 net_list,
1981 cloud_config,
1982 disk_list,
1983 availability_zone_index,
1984 availability_zone_list,
1985 )
1986 )
1987
1988 # new vm name = vmname + tenant_id + uuid
1989 new_vm_name = [name, "-", str(uuid.uuid4())]
1990 vmname_andid = "".join(new_vm_name)
1991
1992 for net in net_list:
1993 if net["type"] == "PCI-PASSTHROUGH":
1994 raise vimconn.VimConnNotSupportedException(
1995 "Current vCD version does not support type : {}".format(net["type"])
1996 )
1997
1998 if len(net_list) > 10:
1999 raise vimconn.VimConnNotSupportedException(
2000 "The VM hardware versions 7 and above support upto 10 NICs only"
2001 )
2002
2003 # if vm already deployed we return existing uuid
2004 # we check for presence of VDC, Catalog entry and Flavor.
2005 org, vdc = self.get_vdc_details()
2006 if vdc is None:
2007 raise vimconn.VimConnNotFoundException(
2008 "new_vminstance(): Failed create vApp {}: (Failed retrieve VDC information)".format(
2009 name
2010 )
2011 )
2012
2013 catalogs = org.list_catalogs()
2014 if catalogs is None:
2015 # Retry once, if failed by refreshing token
2016 self.get_token()
2017 org = Org(self.client, resource=self.client.get_org())
2018 catalogs = org.list_catalogs()
2019
2020 if catalogs is None:
2021 raise vimconn.VimConnNotFoundException(
2022 "new_vminstance(): Failed create vApp {}: (Failed retrieve catalogs list)".format(
2023 name
2024 )
2025 )
2026
2027 catalog_hash_name = self.get_catalogbyid(
2028 catalog_uuid=image_id, catalogs=catalogs
2029 )
2030 if catalog_hash_name:
2031 self.logger.info(
2032 "Found catalog entry {} for image id {}".format(
2033 catalog_hash_name, image_id
2034 )
2035 )
2036 else:
2037 raise vimconn.VimConnNotFoundException(
2038 "new_vminstance(): Failed create vApp {}: "
2039 "(Failed retrieve catalog information {})".format(name, image_id)
2040 )
2041
2042 # Set vCPU and Memory based on flavor.
2043 vm_cpus = None
2044 vm_memory = None
2045 vm_disk = None
2046 numas = None
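# Illustrative shape of a cached flavor entry (keys follow the FLAVOR_*_KEY constants):
#   {"ram": 4096, "vcpus": 2, "disk": 40,
#    "extended": {"numas": [{"paired-threads-id": ...}]}}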
2047
2048 if flavor_id is not None:
2049 if flavor_id not in vimconnector.flavorlist:
2050 raise vimconn.VimConnNotFoundException(
2051 "new_vminstance(): Failed create vApp {}: "
2052 "Failed retrieve flavor information "
2053 "flavor id {}".format(name, flavor_id)
2054 )
2055 else:
2056 try:
2057 flavor = vimconnector.flavorlist[flavor_id]
2058 vm_cpus = flavor[FLAVOR_VCPUS_KEY]
2059 vm_memory = flavor[FLAVOR_RAM_KEY]
2060 vm_disk = flavor[FLAVOR_DISK_KEY]
2061 extended = flavor.get("extended", None)
2062
2063 if extended:
2064 numas = extended.get("numas", None)
2065 except Exception as exp:
2066 raise vimconn.VimConnException(
2067 "Corrupted flavor. {}.Exception: {}".format(flavor_id, exp)
2068 )
2069
2070 # image upload creates a template named "<catalog name> Template".
2071 templateName = self.get_catalogbyid(catalog_uuid=image_id, catalogs=catalogs)
2072 # power_on = 'false'
2073 # if start:
2074 # power_on = 'true'
2075
2076 # client must provide at least one entry in net_list; if not, we report an error
2077 # If net type is mgmt, then configure it as primary net & use its NIC index as primary NIC
2078 # If no mgmt net is present, the first net in net_list is considered the primary net.
2079 primary_net = None
2080 primary_netname = None
2081 primary_net_href = None
2082 # network_mode = 'bridged'
2083 if net_list is not None and len(net_list) > 0:
2084 for net in net_list:
2085 if "use" in net and net["use"] == "mgmt" and not primary_net:
2086 primary_net = net
2087
2088 if primary_net is None:
2089 primary_net = net_list[0]
2090
2091 try:
2092 primary_net_id = primary_net["net_id"]
2093 url_list = [self.url, "/api/network/", primary_net_id]
2094 primary_net_href = "".join(url_list)
2095 network_dict = self.get_vcd_network(network_uuid=primary_net_id)
2096
2097 if "name" in network_dict:
2098 primary_netname = network_dict["name"]
2099 except KeyError:
2100 raise vimconn.VimConnException(
2101 "Corrupted flavor. {}".format(primary_net)
2102 )
2103 else:
2104 raise vimconn.VimConnUnexpectedResponse(
2105 "new_vminstance(): Failed network list is empty."
2106 )
2107
2108 # use: 'data', 'bridge', 'mgmt'
2109 # create vApp. Set vcpu and ram based on flavor id.
2110 try:
2111 vdc_obj = VDC(self.client, resource=org.get_vdc(self.tenant_name))
2112 if not vdc_obj:
2113 raise vimconn.VimConnNotFoundException(
2114 "new_vminstance(): Failed to get VDC object"
2115 )
2116
2117 for retry in (1, 2):
2118 items = org.get_catalog_item(catalog_hash_name, catalog_hash_name)
2119 catalog_items = [items.attrib]
2120
2121 if len(catalog_items) == 1:
2122 if self.client:
2123 headers = {
2124 "Accept": "application/*+xml;version=" + API_VERSION,
2125 "x-vcloud-authorization": self.client._session.headers[
2126 "x-vcloud-authorization"
2127 ],
2128 }
2129
2130 response = self.perform_request(
2131 req_type="GET",
2132 url=catalog_items[0].get("href"),
2133 headers=headers,
2134 )
2135 catalogItem = XmlElementTree.fromstring(response.text)
2136 entity = [
2137 child
2138 for child in catalogItem
2139 if child.get("type")
2140 == "application/vnd.vmware.vcloud.vAppTemplate+xml"
2141 ][0]
2142 vapp_tempalte_href = entity.get("href")
2143
2144 response = self.perform_request(
2145 req_type="GET", url=vapp_tempalte_href, headers=headers
2146 )
2147
2148 if response.status_code != requests.codes.ok:
2149 self.logger.debug(
2150 "REST API call {} failed. Return status code {}".format(
2151 vapp_tempalte_href, response.status_code
2152 )
2153 )
2154 else:
2155 result = (response.text).replace("\n", " ")
2156
2157 vapp_template_tree = XmlElementTree.fromstring(response.text)
2158 children_element = [
2159 child for child in vapp_template_tree if "Children" in child.tag
2160 ][0]
2161 vm_element = [child for child in children_element if "Vm" in child.tag][
2162 0
2163 ]
2164 vm_name = vm_element.get("name")
2165 vm_id = vm_element.get("id")
2166 vm_href = vm_element.get("href")
2167
2168 # cpus = re.search('<rasd:Description>Number of Virtual CPUs</.*?>(\d+)</rasd:VirtualQuantity>',
2169 # result).group(1)
2170 memory_mb = re.search(
2171 "<rasd:Description>Memory Size</.*?>(\d+)</rasd:VirtualQuantity>",
2172 result,
2173 ).group(1)
2174 # cores = re.search('<vmw:CoresPerSocket ovf:required.*?>(\d+)</vmw:CoresPerSocket>', result).group(1)
2175
2176 headers[
2177 "Content-Type"
2178 ] = "application/vnd.vmware.vcloud.instantiateVAppTemplateParams+xml"
2179 vdc_id = vdc.get("id").split(":")[-1]
2180 instantiate_vapp_href = (
2181 "{}/api/vdc/{}/action/instantiateVAppTemplate".format(
2182 self.url, vdc_id
2183 )
2184 )
2185
2186 with open(
2187 os.path.join(
2188 os.path.dirname(__file__), "InstantiateVAppTemplateParams.xml"
2189 ),
2190 "r",
2191 ) as f:
2192 template = f.read()
2193
2194 data = template.format(
2195 vmname_andid,
2196 primary_netname,
2197 primary_net_href,
2198 vapp_tempalte_href,
2199 vm_href,
2200 vm_id,
2201 vm_name,
2202 primary_netname,
2203 cpu=vm_cpus,
2204 core=1,
2205 memory=vm_memory,
2206 )
2207
2208 response = self.perform_request(
2209 req_type="POST",
2210 url=instantiate_vapp_href,
2211 headers=headers,
2212 data=data,
2213 )
2214
2215 if response.status_code != 201:
2216 self.logger.error(
2217 "REST call {} failed reason : {}"
2218 "status code : {}".format(
2219 instantiate_vapp_href, response.text, response.status_code
2220 )
2221 )
2222 raise vimconn.VimConnException(
2223 "new_vminstance(): Failed to create"
2224 "vAapp {}".format(vmname_andid)
2225 )
2226 else:
2227 vapptask = self.get_task_from_response(response.text)
2228
2229 if vapptask is None and retry == 1:
2230 self.get_token() # Retry getting token
2231 continue
2232 else:
2233 break
2234
2235 if vapptask is None or vapptask is False:
2236 raise vimconn.VimConnUnexpectedResponse(
2237 "new_vminstance(): failed to create vApp {}".format(vmname_andid)
2238 )
2239
2240 # wait for task to complete
2241 result = self.client.get_task_monitor().wait_for_success(task=vapptask)
2242
2243 if result.get("status") == "success":
2244 self.logger.debug(
2245 "new_vminstance(): Sucessfully created Vapp {}".format(vmname_andid)
2246 )
2247 else:
2248 raise vimconn.VimConnUnexpectedResponse(
2249 "new_vminstance(): failed to create vApp {}".format(vmname_andid)
2250 )
2251 except Exception as exp:
2252 raise vimconn.VimConnUnexpectedResponse(
2253 "new_vminstance(): failed to create vApp {} with Exception:{}".format(
2254 vmname_andid, exp
2255 )
2256 )
2257
2258 # we should have now vapp in undeployed state.
2259 try:
2260 vdc_obj = VDC(self.client, href=vdc.get("href"))
2261 vapp_resource = vdc_obj.get_vapp(vmname_andid)
2262 vapp_uuid = vapp_resource.get("id").split(":")[-1]
2263 vapp = VApp(self.client, resource=vapp_resource)
2264 except Exception as exp:
2265 raise vimconn.VimConnUnexpectedResponse(
2266 "new_vminstance(): Failed to retrieve vApp {} after creation: Exception:{}".format(
2267 vmname_andid, exp
2268 )
2269 )
2270
2271 if vapp_uuid is None:
2272 raise vimconn.VimConnUnexpectedResponse(
2273 "new_vminstance(): Failed to retrieve vApp {} after creation".format(
2274 vmname_andid
2275 )
2276 )
2277
2278 # Add PCI passthrough/SRIOV configurations
2279 pci_devices_info = []
2280 reserve_memory = False
2281
2282 for net in net_list:
2283 if net["type"] == "PF" or net["type"] == "PCI-PASSTHROUGH":
2284 pci_devices_info.append(net)
2285 elif (
2286 net["type"] == "VF"
2287 or net["type"] == "SR-IOV"
2288 or net["type"] == "VFnotShared"
2289 ) and "net_id" in net:
2290 reserve_memory = True
2291
2292 # Add PCI
2293 if len(pci_devices_info) > 0:
2294 self.logger.info(
2295 "Need to add PCI devices {} into VM {}".format(
2296 pci_devices_info, vmname_andid
2297 )
2298 )
2299 PCI_devices_status, _, _ = self.add_pci_devices(
2300 vapp_uuid, pci_devices_info, vmname_andid
2301 )
2302
2303 if PCI_devices_status:
2304 self.logger.info(
2305 "Added PCI devives {} to VM {}".format(
2306 pci_devices_info, vmname_andid
2307 )
2308 )
2309 reserve_memory = True
2310 else:
2311 self.logger.info(
2312 "Fail to add PCI devives {} to VM {}".format(
2313 pci_devices_info, vmname_andid
2314 )
2315 )
2316
2317 # Add serial console - this allows cloud images to boot as if we are running under OpenStack
2318 self.add_serial_device(vapp_uuid)
2319
2320 if vm_disk:
2321 # Assuming there is only one disk in ovf and fast provisioning in organization vDC is disabled
2322 result = self.modify_vm_disk(vapp_uuid, vm_disk)
2323 if result:
2324 self.logger.debug("Modified Disk size of VM {} ".format(vmname_andid))
2325
2326 # Add new or existing disks to vApp
2327 if disk_list:
2328 added_existing_disk = False
2329 for disk in disk_list:
2330 if "device_type" in disk and disk["device_type"] == "cdrom":
2331 image_id = disk["image_id"]
2332 # Adding CD-ROM to VM
2333 # will revisit code once specification ready to support this feature
2334 self.insert_media_to_vm(vapp, image_id)
2335 elif "image_id" in disk and disk["image_id"] is not None:
2336 self.logger.debug(
2337 "Adding existing disk from image {} to vm {} ".format(
2338 disk["image_id"], vapp_uuid
2339 )
2340 )
2341 self.add_existing_disk(
2342 catalogs=catalogs,
2343 image_id=disk["image_id"],
2344 size=disk["size"],
2345 template_name=templateName,
2346 vapp_uuid=vapp_uuid,
2347 )
2348 added_existing_disk = True
2349 else:
2350 # Wait till added existing disk gets reflected into vCD database/API
2351 if added_existing_disk:
2352 time.sleep(5)
2353 added_existing_disk = False
2354 self.add_new_disk(vapp_uuid, disk["size"])
2355
2356 if numas:
2357 # Assigning numa affinity setting
2358 for numa in numas:
2359 if "paired-threads-id" in numa:
2360 paired_threads_id = numa["paired-threads-id"]
2361 self.set_numa_affinity(vapp_uuid, paired_threads_id)
2362
2363 # add NICs & connect to networks in netlist
2364 try:
2365 vdc_obj = VDC(self.client, href=vdc.get("href"))
2366 vapp_resource = vdc_obj.get_vapp(vmname_andid)
2367 vapp = VApp(self.client, resource=vapp_resource)
2368 vapp_id = vapp_resource.get("id").split(":")[-1]
2369
2370 self.logger.info("Removing primary NIC: ")
2371 # First remove all NICs so that NIC properties can be adjusted as needed
2372 self.remove_primary_network_adapter_from_all_vms(vapp)
2373
2374 self.logger.info("Request to connect VM to a network: {}".format(net_list))
2375 primary_nic_index = 0
2376 nicIndex = 0
2377 for net in net_list:
2378 # openmano uses network id in UUID format.
2379 # vCloud Director needs a name, so we do the reverse operation: from the provided UUID we look up a name
2380 # [{'use': 'bridge', 'net_id': '527d4bf7-566a-41e7-a9e7-ca3cdd9cef4f', 'type': 'virtual',
2381 # 'vpci': '0000:00:11.0', 'name': 'eth0'}]
2382
2383 if "net_id" not in net:
2384 continue
2385
2386 # Using net_id as a vim_id i.e. vim interface id, as we do not have a separate vim interface id
2387 # Same will be returned in refresh_vms_status() as vim_interface_id
2388 net["vim_id"] = net[
2389 "net_id"
2390 ] # Provide the same VIM identifier as the VIM network
2391
2392 interface_net_id = net["net_id"]
2393 interface_net_name = self.get_network_name_by_id(
2394 network_uuid=interface_net_id
2395 )
2396 interface_network_mode = net["use"]
2397
2398 if interface_network_mode == "mgmt":
2399 primary_nic_index = nicIndex
2400
2401 """- POOL (A static IP address is allocated automatically from a pool of addresses.)
2402 - DHCP (The IP address is obtained from a DHCP service.)
2403 - MANUAL (The IP address is assigned manually in the IpAddress element.)
2404 - NONE (No IP addressing mode specified.)"""
2405
2406 if primary_netname is not None:
2407 self.logger.debug(
2408 "new_vminstance(): Filtering by net name {}".format(
2409 interface_net_name
2410 )
2411 )
2412 nets = [
2413 n
2414 for n in self.get_network_list()
2415 if n.get("name") == interface_net_name
2416 ]
2417
2418 if len(nets) == 1:
2419 self.logger.info(
2420 "new_vminstance(): Found requested network: {}".format(
2421 nets[0].get("name")
2422 )
2423 )
2424
2425 if interface_net_name != primary_netname:
2426 # connect network to VM - with all DHCP by default
2427 self.logger.info(
2428 "new_vminstance(): Attaching net {} to vapp".format(
2429 interface_net_name
2430 )
2431 )
2432 self.connect_vapp_to_org_vdc_network(
2433 vapp_id, nets[0].get("name")
2434 )
2435
2436 type_list = ("PF", "PCI-PASSTHROUGH", "VFnotShared")
2437 nic_type = "VMXNET3"
2438 if "type" in net and net["type"] not in type_list:
2439 # fetching nic type from vnf
2440 if "model" in net:
2441 if net["model"] is not None:
2442 if (
2443 net["model"].lower() == "paravirt"
2444 or net["model"].lower() == "virtio"
2445 ):
2446 nic_type = "VMXNET3"
2447 else:
2448 nic_type = net["model"]
2449
2450 self.logger.info(
2451 "new_vminstance(): adding network adapter "
2452 "to a network {}".format(nets[0].get("name"))
2453 )
2454 self.add_network_adapter_to_vms(
2455 vapp,
2456 nets[0].get("name"),
2457 primary_nic_index,
2458 nicIndex,
2459 net,
2460 nic_type=nic_type,
2461 )
2462 else:
2463 self.logger.info(
2464 "new_vminstance(): adding network adapter "
2465 "to a network {}".format(nets[0].get("name"))
2466 )
2467
2468 if net["type"] in ["SR-IOV", "VF"]:
2469 nic_type = net["type"]
2470 self.add_network_adapter_to_vms(
2471 vapp,
2472 nets[0].get("name"),
2473 primary_nic_index,
2474 nicIndex,
2475 net,
2476 nic_type=nic_type,
2477 )
2478 nicIndex += 1
2479
2480 # cloud-init for ssh-key injection
2481 if cloud_config:
2482 # Create a catalog which will be carrying the config drive ISO
2483 # This catalog is deleted during vApp deletion. The catalog name carries
2484 # vApp UUID and that's how it gets identified during its deletion.
2485 config_drive_catalog_name = "cfg_drv-" + vapp_uuid
2486 self.logger.info(
2487 'new_vminstance(): Creating catalog "{}" to carry config drive ISO'.format(
2488 config_drive_catalog_name
2489 )
2490 )
2491 config_drive_catalog_id = self.create_vimcatalog(
2492 org, config_drive_catalog_name
2493 )
2494
2495 if config_drive_catalog_id is None:
2496 error_msg = (
2497 "new_vminstance(): Failed to create new catalog '{}' to carry the config drive "
2498 "ISO".format(config_drive_catalog_name)
2499 )
2500 raise Exception(error_msg)
2501
2502 # Create config-drive ISO
2503 _, userdata = self._create_user_data(cloud_config)
2504 # self.logger.debug('new_vminstance(): The userdata for cloud-init: {}'.format(userdata))
2505 iso_path = self.create_config_drive_iso(userdata)
2506 self.logger.debug(
2507 "new_vminstance(): The ISO is successfully created. Path: {}".format(
2508 iso_path
2509 )
2510 )
2511
2512 self.logger.info(
2513 "new_vminstance(): uploading iso to catalog {}".format(
2514 config_drive_catalog_name
2515 )
2516 )
2517 self.upload_iso_to_catalog(config_drive_catalog_id, iso_path)
2518 # Attach the config-drive ISO to the VM
2519 self.logger.info(
2520 "new_vminstance(): Attaching the config-drive ISO to the VM"
2521 )
2522 self.insert_media_to_vm(vapp, config_drive_catalog_id)
2523 shutil.rmtree(os.path.dirname(iso_path), ignore_errors=True)
2524
2525 # If VM has PCI devices or SRIOV reserve memory for VM
2526 if reserve_memory:
2527 self.reserve_memory_for_all_vms(vapp, memory_mb)
2528
2529 self.logger.debug(
2530 "new_vminstance(): starting power on vApp {} ".format(vmname_andid)
2531 )
2532
2533 poweron_task = self.power_on_vapp(vapp_id, vmname_andid)
2534 result = self.client.get_task_monitor().wait_for_success(task=poweron_task)
2535 if result.get("status") == "success":
2536 self.logger.info(
2537 "new_vminstance(): Successfully power on "
2538 "vApp {}".format(vmname_andid)
2539 )
2540 else:
2541 self.logger.error(
2542 "new_vminstance(): failed to power on vApp "
2543 "{}".format(vmname_andid)
2544 )
2545
2546 except Exception as exp:
2547 try:
2548 self.delete_vminstance(vapp_uuid)
2549 except Exception as exp2:
2550 self.logger.error("new_vminstance rollback fail {}".format(exp2))
2551 # it might be the case that a specific mandatory entry in the dict is empty, or some other pyVcloud exception
2552 self.logger.error(
2553 "new_vminstance(): Failed create new vm instance {} with exception {}".format(
2554 name, exp
2555 )
2556 )
2557 raise vimconn.VimConnException(
2558 "new_vminstance(): Failed create new vm instance {} with exception {}".format(
2559 name, exp
2560 )
2561 )
2562 # check if vApp is deployed and, if that is the case, return vApp UUID, otherwise -1
2563 wait_time = 0
2564 vapp_uuid = None
2565 while wait_time <= MAX_WAIT_TIME:
2566 try:
2567 vapp_resource = vdc_obj.get_vapp(vmname_andid)
2568 vapp = VApp(self.client, resource=vapp_resource)
2569 except Exception as exp:
2570 raise vimconn.VimConnUnexpectedResponse(
2571 "new_vminstance(): Failed to retrieve vApp {} after creation: Exception:{}".format(
2572 vmname_andid, exp
2573 )
2574 )
2575
2576 # if vapp and vapp.me.deployed:
2577 if vapp and vapp_resource.get("deployed") == "true":
2578 vapp_uuid = vapp_resource.get("id").split(":")[-1]
2579 break
2580 else:
2581 self.logger.debug(
2582 "new_vminstance(): Wait for vApp {} to deploy".format(name)
2583 )
2584 time.sleep(INTERVAL_TIME)
2585
2586 wait_time += INTERVAL_TIME
2587
2588 # SET Affinity Rule for VM
2589 # Pre-requisites: User has created Host Groups in vCenter with respective Hosts to be used
2590 # While creating VIM account user has to pass the Host Group names in availability_zone list
2591 # "availability_zone" is a part of VIM "config" parameters
2592 # For example, in VIM config: "availability_zone":["HG_170","HG_174","HG_175"]
2593 # Host groups are referred as availability zones
2594 # With following procedure, deployed VM will be added into a VM group.
2595 # Then A VM to Host Affinity rule will be created using the VM group & Host group.
2596 if availability_zone_list:
2597 self.logger.debug(
2598 "Existing Host Groups in VIM {}".format(
2599 self.config.get("availability_zone")
2600 )
2601 )
2602 # Admin access required for creating Affinity rules
2603 client = self.connect_as_admin()
2604
2605 if not client:
2606 raise vimconn.VimConnConnectionException(
2607 "Failed to connect vCD as admin"
2608 )
2609 else:
2610 self.client = client
2611
2612 if self.client:
2613 headers = {
2614 "Accept": "application/*+xml;version=27.0",
2615 "x-vcloud-authorization": self.client._session.headers[
2616 "x-vcloud-authorization"
2617 ],
2618 }
2619
2620 # Step1: Get provider vdc details from organization
2621 pvdc_href = self.get_pvdc_for_org(self.tenant_name, headers)
2622 if pvdc_href is not None:
2623 # Step2: Found required pvdc, now get resource pool information
2624 respool_href = self.get_resource_pool_details(pvdc_href, headers)
2625 if respool_href is None:
2626 # Raise error if respool_href not found
2627 msg = "new_vminstance():Error in finding resource pool details in pvdc {}".format(
2628 pvdc_href
2629 )
2630 self.log_message(msg)
2631
2632 # Step3: Verify requested availability zone(hostGroup) is present in vCD
2633 # get availability Zone
2634 vm_az = self.get_vm_availability_zone(
2635 availability_zone_index, availability_zone_list
2636 )
2637
2638 # check if provided av zone(hostGroup) is present in vCD VIM
2639 status = self.check_availibility_zone(vm_az, respool_href, headers)
2640 if status is False:
2641 msg = (
2642 "new_vminstance(): Error in finding availability zone(Host Group): {} in "
2643 "resource pool {} status: {}"
2644 ).format(vm_az, respool_href, status)
2645 self.log_message(msg)
2646 else:
2647 self.logger.debug(
2648 "new_vminstance(): Availability zone {} found in VIM".format(vm_az)
2649 )
2650
2651 # Step4: Find VM group references to create vm group
2652 vmgrp_href = self.find_vmgroup_reference(respool_href, headers)
2653 if vmgrp_href is None:
2654 msg = "new_vminstance(): No reference to VmGroup found in resource pool"
2655 self.log_message(msg)
2656
2657 # Step5: Create a VmGroup with name az_VmGroup
2658 vmgrp_name = (
2659 vm_az + "_" + name
2660 ) # Formed VM Group name = Host Group name + VM name
2661 status = self.create_vmgroup(vmgrp_name, vmgrp_href, headers)
2662 if status is not True:
2663 msg = "new_vminstance(): Error in creating VM group {}".format(
2664 vmgrp_name
2665 )
2666 self.log_message(msg)
2667
2668 # VM Group url to add vms to vm group
2669 vmgrpname_url = self.url + "/api/admin/extension/vmGroup/name/" + vmgrp_name
2670
2671 # Step6: Add VM to VM Group
2672 # Find VM uuid from vapp_uuid
2673 vm_details = self.get_vapp_details_rest(vapp_uuid)
2674 vm_uuid = vm_details["vmuuid"]
2675
2676 status = self.add_vm_to_vmgroup(vm_uuid, vmgrpname_url, vmgrp_name, headers)
2677 if status is not True:
2678 msg = "new_vminstance(): Error in adding VM to VM group {}".format(
2679 vmgrp_name
2680 )
2681 self.log_message(msg)
2682
2683 # Step7: Create VM to Host affinity rule
2684 addrule_href = self.get_add_rule_reference(respool_href, headers)
2685 if addrule_href is None:
2686 msg = "new_vminstance(): Error in finding href to add rule in resource pool: {}".format(
2687 respool_href
2688 )
2689 self.log_message(msg)
2690
2691 status = self.create_vm_to_host_affinity_rule(
2692 addrule_href, vmgrp_name, vm_az, "Affinity", headers
2693 )
2694 if status is False:
2695 msg = "new_vminstance(): Error in creating affinity rule for VM {} in Host group {}".format(
2696 name, vm_az
2697 )
2698 self.log_message(msg)
2699 else:
2700 self.logger.debug(
2701 "new_vminstance(): Affinity rule created successfully. Added {} in Host group {}".format(
2702 name, vm_az
2703 )
2704 )
2705 # Reset token to a normal user to perform other operations
2706 self.get_token()
2707
2708 if vapp_uuid is not None:
2709 return vapp_uuid, None
2710 else:
2711 raise vimconn.VimConnUnexpectedResponse(
2712 "new_vminstance(): Failed create new vm instance {}".format(name)
2713 )
2714
2715 def create_config_drive_iso(self, user_data):
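# Builds an OpenStack-style config drive; a sketch of the resulting layout:
#   <tmpdir>/openstack/latest/meta_data.json
#   <tmpdir>/openstack/latest/user_data
# which is then packed into an ISO labelled "config-2" with genisoimage.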
2716 tmpdir = tempfile.mkdtemp()
2717 iso_path = os.path.join(tmpdir, "ConfigDrive.iso")
2718 latest_dir = os.path.join(tmpdir, "openstack", "latest")
2719 os.makedirs(latest_dir)
2720 with open(
2721 os.path.join(latest_dir, "meta_data.json"), "w"
2722 ) as meta_file_obj, open(
2723 os.path.join(latest_dir, "user_data"), "w"
2724 ) as userdata_file_obj:
2725 userdata_file_obj.write(user_data)
2726 meta_file_obj.write(
2727 json.dumps(
2728 {
2729 "availability_zone": "nova",
2730 "launch_index": 0,
2731 "name": "ConfigDrive",
2732 "uuid": str(uuid.uuid4()),
2733 }
2734 )
2735 )
2736 genisoimage_cmd = (
2737 "genisoimage -J -r -V config-2 -o {iso_path} {source_dir_path}".format(
2738 iso_path=iso_path, source_dir_path=tmpdir
2739 )
2740 )
2741 self.logger.info(
2742 'create_config_drive_iso(): Creating ISO by running command "{}"'.format(
2743 genisoimage_cmd
2744 )
2745 )
2746
2747 try:
2748 FNULL = open(os.devnull, "w")
2749 subprocess.check_call(genisoimage_cmd, shell=True, stdout=FNULL)
2750 except subprocess.CalledProcessError as e:
2751 shutil.rmtree(tmpdir, ignore_errors=True)
2752 error_msg = "create_config_drive_iso(): Exception while running genisoimage command: {}".format(
2753 e
2754 )
2755 self.logger.error(error_msg)
2756 raise Exception(error_msg)
2757
2758 return iso_path
2759
2760 def upload_iso_to_catalog(self, catalog_id, iso_file_path):
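# Upload flow (as implemented below): POST the Media descriptor to the catalog's
# action/upload link, GET the returned media entity, parse the <Files> upload href,
# PUT the ISO bytes to that href and finally wait for the upload task to succeed.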
2761 if not os.path.isfile(iso_file_path):
2762 error_msg = "upload_iso_to_catalog(): Given iso file is not present. Given path: {}".format(
2763 iso_file_path
2764 )
2765 self.logger.error(error_msg)
2766 raise Exception(error_msg)
2767
2768 iso_file_stat = os.stat(iso_file_path)
2769 xml_media_elem = """<?xml version="1.0" encoding="UTF-8"?>
2770 <Media
2771 xmlns="http://www.vmware.com/vcloud/v1.5"
2772 name="{iso_name}"
2773 size="{iso_size}"
2774 imageType="iso">
2775 <Description>ISO image for config-drive</Description>
2776 </Media>""".format(
2777 iso_name=os.path.basename(iso_file_path), iso_size=iso_file_stat.st_size
2778 )
2779 headers = {
2780 "Accept": "application/*+xml;version=" + API_VERSION,
2781 "x-vcloud-authorization": self.client._session.headers[
2782 "x-vcloud-authorization"
2783 ],
2784 }
2785 headers["Content-Type"] = "application/vnd.vmware.vcloud.media+xml"
2786 catalog_href = self.url + "/api/catalog/" + catalog_id + "/action/upload"
2787 response = self.perform_request(
2788 req_type="POST", url=catalog_href, headers=headers, data=xml_media_elem
2789 )
2790
2791 if response.status_code != 201:
2792 error_msg = "upload_iso_to_catalog(): Failed to POST an action/upload request to {}".format(
2793 catalog_href
2794 )
2795 self.logger.error(error_msg)
2796 raise Exception(error_msg)
2797
2798 catalogItem = XmlElementTree.fromstring(response.text)
2799 entity = [
2800 child
2801 for child in catalogItem
2802 if child.get("type") == "application/vnd.vmware.vcloud.media+xml"
2803 ][0]
2804 entity_href = entity.get("href")
2805
2806 response = self.perform_request(
2807 req_type="GET", url=entity_href, headers=headers
2808 )
2809 if response.status_code != 200:
2810 raise Exception(
2811 "upload_iso_to_catalog(): Failed to GET entity href {}".format(
2812 entity_href
2813 )
2814 )
2815
2816 match = re.search(
2817 r'<Files>\s+?<File.+?href="(.+?)"/>\s+?</File>\s+?</Files>',
2818 response.text,
2819 re.DOTALL,
2820 )
2821 if match:
2822 media_upload_href = match.group(1)
2823 else:
2824 raise Exception(
2825 "Could not parse the upload URL for the media file from the last response"
2826 )
2827 upload_iso_task = self.get_task_from_response(response.text)
2828 headers["Content-Type"] = "application/octet-stream"
2829 response = self.perform_request(
2830 req_type="PUT",
2831 url=media_upload_href,
2832 headers=headers,
2833 data=open(iso_file_path, "rb"),
2834 )
2835
2836 if response.status_code != 200:
2837 raise Exception('PUT request to "{}" failed'.format(media_upload_href))
2838
2839 result = self.client.get_task_monitor().wait_for_success(task=upload_iso_task)
2840 if result.get("status") != "success":
2841 raise Exception(
2842 "The upload iso task failed with status {}".format(result.get("status"))
2843 )
2844
2845 def get_vcd_availibility_zones(self, respool_href, headers):
2846 """Method to find presence of av zone is VIM resource pool
2847
2848 Args:
2849 respool_href - resource pool href
2850 headers - header information
2851
2852 Returns:
2853 vcd_az - list of availability zones present in vCD
2854 """
2855 vcd_az = []
2856 url = respool_href
2857 resp = self.perform_request(req_type="GET", url=respool_href, headers=headers)
2858
2859 if resp.status_code != requests.codes.ok:
2860 self.logger.debug(
2861 "REST API call {} failed. Return status code {}".format(
2862 url, resp.status_code
2863 )
2864 )
2865 else:
2866 # Get the href to hostGroups and find provided hostGroup is present in it
2867 resp_xml = XmlElementTree.fromstring(resp.content)
2868 for child in resp_xml:
2869 if "VMWProviderVdcResourcePool" in child.tag:
2870 for schild in child:
2871 if "Link" in schild.tag:
2872 if (
2873 schild.attrib.get("type")
2874 == "application/vnd.vmware.admin.vmwHostGroupsType+xml"
2875 ):
2876 hostGroup = schild.attrib.get("href")
2877 hg_resp = self.perform_request(
2878 req_type="GET", url=hostGroup, headers=headers
2879 )
2880
2881 if hg_resp.status_code != requests.codes.ok:
2882 self.logger.debug(
2883 "REST API call {} failed. Return status code {}".format(
2884 hostGroup, hg_resp.status_code
2885 )
2886 )
2887 else:
2888 hg_resp_xml = XmlElementTree.fromstring(
2889 hg_resp.content
2890 )
2891 for hostGroup in hg_resp_xml:
2892 if "HostGroup" in hostGroup.tag:
2893 # append host group name to the list
2894 vcd_az.append(hostGroup.attrib.get("name"))
2895
2896 return vcd_az
2897
2898 def set_availability_zones(self):
2899 """
2900 Set vim availability zone
2901 """
2902 vim_availability_zones = None
2903 availability_zone = None
2904
2905 if "availability_zone" in self.config:
2906 vim_availability_zones = self.config.get("availability_zone")
2907
2908 if isinstance(vim_availability_zones, str):
2909 availability_zone = [vim_availability_zones]
2910 elif isinstance(vim_availability_zones, list):
2911 availability_zone = vim_availability_zones
2912 else:
2913 return availability_zone
2914
2915 return availability_zone
2916
2917 def get_vm_availability_zone(self, availability_zone_index, availability_zone_list):
2918 """
2919 Return the availability zone to be used by the created VM.
2920 returns: The VIM availability zone to be used or None
2921 """
2922 if availability_zone_index is None:
2923 if not self.config.get("availability_zone"):
2924 return None
2925 elif isinstance(self.config.get("availability_zone"), str):
2926 return self.config["availability_zone"]
2927 else:
2928 return self.config["availability_zone"][0]
2929
2930 vim_availability_zones = self.availability_zone
2931
2932 # check if VIM offers enough availability zones as described in the VNFD
2933 if vim_availability_zones and len(availability_zone_list) <= len(
2934 vim_availability_zones
2935 ):
2936 # check if all the NFV AZ names match VIM AZ names
2937 match_by_index = False
2938 for av in availability_zone_list:
2939 if av not in vim_availability_zones:
2940 match_by_index = True
2941 break
2942
2943 if match_by_index:
2944 self.logger.debug(
2945 "Required Availability zone or Host Group not found in VIM config"
2946 )
2947 self.logger.debug(
2948 "Input Availability zone list: {}".format(availability_zone_list)
2949 )
2950 self.logger.debug(
2951 "VIM configured Availability zones: {}".format(
2952 vim_availability_zones
2953 )
2954 )
2955 self.logger.debug("VIM Availability zones will be used by index")
2956 return vim_availability_zones[availability_zone_index]
2957 else:
2958 return availability_zone_list[availability_zone_index]
2959 else:
2960 raise vimconn.VimConnConflictException(
2961 "No enough availability zones at VIM for this deployment"
2962 )
2963
2964 def create_vm_to_host_affinity_rule(
2965 self, addrule_href, vmgrpname, hostgrpname, polarity, headers
2966 ):
2967 """Method to create VM to Host Affinity rule in vCD
2968
2969 Args:
2970 addrule_href - href to make a POST request
2971 vmgrpname - name of the VM group created
2972 hostgrpname - name of the host group created earlier
2973 polarity - Affinity or Anti-affinity (default: Affinity)
2974 headers - headers to make REST call
2975
2976 Returns:
2977 True- if rule is created
2978 False- Failed to create rule due to some error
2979
2980 """
2981 task_status = False
2982 rule_name = polarity + "_" + vmgrpname
2983 payload = """<?xml version="1.0" encoding="UTF-8"?>
2984 <vmext:VMWVmHostAffinityRule
2985 xmlns:vmext="http://www.vmware.com/vcloud/extension/v1.5"
2986 xmlns:vcloud="http://www.vmware.com/vcloud/v1.5"
2987 type="application/vnd.vmware.admin.vmwVmHostAffinityRule+xml">
2988 <vcloud:Name>{}</vcloud:Name>
2989 <vcloud:IsEnabled>true</vcloud:IsEnabled>
2990 <vcloud:IsMandatory>true</vcloud:IsMandatory>
2991 <vcloud:Polarity>{}</vcloud:Polarity>
2992 <vmext:HostGroupName>{}</vmext:HostGroupName>
2993 <vmext:VmGroupName>{}</vmext:VmGroupName>
2994 </vmext:VMWVmHostAffinityRule>""".format(
2995 rule_name, polarity, hostgrpname, vmgrpname
2996 )
2997
2998 resp = self.perform_request(
2999 req_type="POST", url=addrule_href, headers=headers, data=payload
3000 )
3001
3002 if resp.status_code != requests.codes.accepted:
3003 self.logger.debug(
3004 "REST API call {} failed. Return status code {}".format(
3005 addrule_href, resp.status_code
3006 )
3007 )
3008 task_status = False
3009
3010 return task_status
3011 else:
3012 affinity_task = self.get_task_from_response(resp.content)
3013 self.logger.debug("affinity_task: {}".format(affinity_task))
3014
3015 if affinity_task is None or affinity_task is False:
3016 raise vimconn.VimConnUnexpectedResponse("failed to find affinity task")
3017 # wait for task to complete
3018 result = self.client.get_task_monitor().wait_for_success(task=affinity_task)
3019
3020 if result.get("status") == "success":
3021 self.logger.debug(
3022 "Successfully created affinity rule {}".format(rule_name)
3023 )
3024 return True
3025 else:
3026 raise vimconn.VimConnUnexpectedResponse(
3027 "failed to create affinity rule {}".format(rule_name)
3028 )
3029
3030 def get_add_rule_reference(self, respool_href, headers):
3031 """This method finds href to add vm to host affinity rule to vCD
3032
3033 Args:
3034 respool_href- href to resource pool
3035 headers- header information to make REST call
3036
3037 Returns:
3038 None - if no valid href to add rule found or
3039 addrule_href - href to add vm to host affinity rule of resource pool
3040 """
3041 addrule_href = None
3042 resp = self.perform_request(req_type="GET", url=respool_href, headers=headers)
3043
3044 if resp.status_code != requests.codes.ok:
3045 self.logger.debug(
3046 "REST API call {} failed. Return status code {}".format(
3047 respool_href, resp.status_code
3048 )
3049 )
3050 else:
3051 resp_xml = XmlElementTree.fromstring(resp.content)
3052 for child in resp_xml:
3053 if "VMWProviderVdcResourcePool" in child.tag:
3054 for schild in child:
3055 if "Link" in schild.tag:
3056 if (
3057 schild.attrib.get("type")
3058 == "application/vnd.vmware.admin.vmwVmHostAffinityRule+xml"
3059 and schild.attrib.get("rel") == "add"
3060 ):
3061 addrule_href = schild.attrib.get("href")
3062 break
3063
3064 return addrule_href
3065
3066 def add_vm_to_vmgroup(self, vm_uuid, vmGroupNameURL, vmGroup_name, headers):
3067 """Method to add deployed VM to newly created VM Group.
3068 This is required to create VM to Host affinity in vCD
3069
3070 Args:
3071 vm_uuid- newly created vm uuid
3072 vmGroupNameURL- URL to VM Group name
3073 vmGroup_name- Name of VM group created
3074 headers- Headers for REST request
3075
3076 Returns:
3077 True- if VM added to VM group successfully
3078 False- if any error is encountered
3079 """
3080 addvm_resp = self.perform_request(
3081 req_type="GET", url=vmGroupNameURL, headers=headers
3082 ) # , data=payload)
3083
3084 if addvm_resp.status_code != requests.codes.ok:
3085 self.logger.debug(
3086 "REST API call to get VM Group Name url {} failed. Return status code {}".format(
3087 vmGroupNameURL, addvm_resp.status_code
3088 )
3089 )
3090 return False
3091 else:
3092 resp_xml = XmlElementTree.fromstring(addvm_resp.content)
3093 for child in resp_xml:
3094 if child.tag.split("}")[1] == "Link":
3095 if child.attrib.get("rel") == "addVms":
3096 addvmtogrpURL = child.attrib.get("href")
3097
3098 # Get vm details
3099 url_list = [self.url, "/api/vApp/vm-", vm_uuid]
3100 vmdetailsURL = "".join(url_list)
3101
3102 resp = self.perform_request(req_type="GET", url=vmdetailsURL, headers=headers)
3103
3104 if resp.status_code != requests.codes.ok:
3105 self.logger.debug(
3106 "REST API call {} failed. Return status code {}".format(
3107 vmdetailsURL, resp.status_code
3108 )
3109 )
3110 return False
3111
3112 # Parse VM details
3113 resp_xml = XmlElementTree.fromstring(resp.content)
3114 if resp_xml.tag.split("}")[1] == "Vm":
3115 vm_id = resp_xml.attrib.get("id")
3116 vm_name = resp_xml.attrib.get("name")
3117 vm_href = resp_xml.attrib.get("href")
3118 # print vm_id, vm_name, vm_href
3119
3120 # Add VM into VMgroup
3121 payload = """<?xml version="1.0" encoding="UTF-8"?>\
3122 <ns2:Vms xmlns:ns2="http://www.vmware.com/vcloud/v1.5" \
3123 xmlns="http://www.vmware.com/vcloud/versions" \
3124 xmlns:ns3="http://schemas.dmtf.org/ovf/envelope/1" \
3125 xmlns:ns4="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData" \
3126 xmlns:ns5="http://schemas.dmtf.org/wbem/wscim/1/common" \
3127 xmlns:ns6="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData" \
3128 xmlns:ns7="http://www.vmware.com/schema/ovf" \
3129 xmlns:ns8="http://schemas.dmtf.org/ovf/environment/1" \
3130 xmlns:ns9="http://www.vmware.com/vcloud/extension/v1.5">\
3131 <ns2:VmReference href="{}" id="{}" name="{}" \
3132 type="application/vnd.vmware.vcloud.vm+xml" />\
3133 </ns2:Vms>""".format(
3134 vm_href, vm_id, vm_name
3135 )
3136
3137 addvmtogrp_resp = self.perform_request(
3138 req_type="POST", url=addvmtogrpURL, headers=headers, data=payload
3139 )
3140
3141 if addvmtogrp_resp.status_code != requests.codes.accepted:
3142 self.logger.debug(
3143 "REST API call {} failed. Return status code {}".format(
3144 addvmtogrpURL, addvmtogrp_resp.status_code
3145 )
3146 )
3147
3148 return False
3149 else:
3150 self.logger.debug(
3151 "Done adding VM {} to VMgroup {}".format(vm_name, vmGroup_name)
3152 )
3153
3154 return True
3155
3156 def create_vmgroup(self, vmgroup_name, vmgroup_href, headers):
3157 """Method to create a VM group in vCD
3158
3159 Args:
3160 vmgroup_name : Name of VM group to be created
3161 vmgroup_href : href for vmgroup
3162 headers- Headers for REST request
3163 """
3164 # POST to add URL with required data
3165 vmgroup_status = False
3166 payload = """<VMWVmGroup xmlns="http://www.vmware.com/vcloud/extension/v1.5" \
3167 xmlns:vcloud_v1.5="http://www.vmware.com/vcloud/v1.5" name="{}">\
3168 <vmCount>1</vmCount>\
3169 </VMWVmGroup>""".format(
3170 vmgroup_name
3171 )
3172 resp = self.perform_request(
3173 req_type="POST", url=vmgroup_href, headers=headers, data=payload
3174 )
3175
3176 if resp.status_code != requests.codes.accepted:
3177 self.logger.debug(
3178 "REST API call {} failed. Return status code {}".format(
3179 vmgroup_href, resp.status_code
3180 )
3181 )
3182
3183 return vmgroup_status
3184 else:
3185 vmgroup_task = self.get_task_from_response(resp.content)
3186 if vmgroup_task is None or vmgroup_task is False:
3187 raise vimconn.VimConnUnexpectedResponse(
3188 "create_vmgroup(): failed to create VM group {}".format(
3189 vmgroup_name
3190 )
3191 )
3192
3193 # wait for task to complete
3194 result = self.client.get_task_monitor().wait_for_success(task=vmgroup_task)
3195
3196 if result.get("status") == "success":
3197 self.logger.debug(
3198 "create_vmgroup(): Successfully created VM group {}".format(
3199 vmgroup_name
3200 )
3201 )
3202 # time.sleep(10)
3203 vmgroup_status = True
3204
3205 return vmgroup_status
3206 else:
3207 raise vimconn.VimConnUnexpectedResponse(
3208 "create_vmgroup(): failed to create VM group {}".format(
3209 vmgroup_name
3210 )
3211 )
3212
3213 def find_vmgroup_reference(self, url, headers):
3214 """Method to create a new VMGroup which is required to add created VM
3215 Args:
3216 url- resource pool href
3217 headers- header information
3218
3219 Returns:
3220 href used to create the VM group, or None if not found
3221 """
3222 # Perform GET on resource pool to find 'add' link to create VMGroup
3223 # https://vcd-ip/api/admin/extension/providervdc/<providervdc id>/resourcePools
3224 vmgrp_href = None
3225 resp = self.perform_request(req_type="GET", url=url, headers=headers)
3226
3227 if resp.status_code != requests.codes.ok:
3228 self.logger.debug(
3229 "REST API call {} failed. Return status code {}".format(
3230 url, resp.status_code
3231 )
3232 )
3233 else:
3234 # Get the href to add vmGroup to vCD
3235 resp_xml = XmlElementTree.fromstring(resp.content)
3236 for child in resp_xml:
3237 if "VMWProviderVdcResourcePool" in child.tag:
3238 for schild in child:
3239 if "Link" in schild.tag:
3240 # Find href with type VMGroup and rel with add
3241 if (
3242 schild.attrib.get("type")
3243 == "application/vnd.vmware.admin.vmwVmGroupType+xml"
3244 and schild.attrib.get("rel") == "add"
3245 ):
3246 vmgrp_href = schild.attrib.get("href")
3247
3248 return vmgrp_href
3249
3250 def check_availibility_zone(self, az, respool_href, headers):
3251 """Method to verify requested av zone is present or not in provided
3252 resource pool
3253
3254 Args:
3255 az - name of hostgroup (availability zone)
3256 respool_href - Resource Pool href
3257 headers - Headers to make REST call
3258 Returns:
3259 az_found - True if the availability zone is found, else False
3260 """
3261 az_found = False
3262 headers["Accept"] = "application/*+xml;version=27.0"
3263 resp = self.perform_request(req_type="GET", url=respool_href, headers=headers)
3264
3265 if resp.status_code != requests.codes.ok:
3266 self.logger.debug(
3267 "REST API call {} failed. Return status code {}".format(
3268 respool_href, resp.status_code
3269 )
3270 )
3271 else:
3272 # Get the href to hostGroups and find provided hostGroup is present in it
3273 resp_xml = XmlElementTree.fromstring(resp.content)
3274
3275 for child in resp_xml:
3276 if "VMWProviderVdcResourcePool" in child.tag:
3277 for schild in child:
3278 if "Link" in schild.tag:
3279 if (
3280 schild.attrib.get("type")
3281 == "application/vnd.vmware.admin.vmwHostGroupsType+xml"
3282 ):
3283 hostGroup_href = schild.attrib.get("href")
3284 hg_resp = self.perform_request(
3285 req_type="GET", url=hostGroup_href, headers=headers
3286 )
3287
3288 if hg_resp.status_code != requests.codes.ok:
3289 self.logger.debug(
3290 "REST API call {} failed. Return status code {}".format(
3291 hostGroup_href, hg_resp.status_code
3292 )
3293 )
3294 else:
3295 hg_resp_xml = XmlElementTree.fromstring(
3296 hg_resp.content
3297 )
3298 for hostGroup in hg_resp_xml:
3299 if "HostGroup" in hostGroup.tag:
3300 if hostGroup.attrib.get("name") == az:
3301 az_found = True
3302 break
3303
3304 return az_found
3305
3306 def get_pvdc_for_org(self, org_vdc, headers):
3307 """This method gets provider vdc references from organisation
3308
3309 Args:
3310 org_vdc - name of the organisation VDC to find pvdc
3311 headers - headers to make REST call
3312
3313 Returns:
3314 None - if no pvdc href found else
3315 pvdc_href - href to pvdc
3316 """
3317 # Get provider VDC references from vCD
3318 pvdc_href = None
3319 # url = '<vcd url>/api/admin/extension/providerVdcReferences'
3320 url_list = [self.url, "/api/admin/extension/providerVdcReferences"]
3321 url = "".join(url_list)
3322
3323 response = self.perform_request(req_type="GET", url=url, headers=headers)
3324 if response.status_code != requests.codes.ok:
3325 self.logger.debug(
3326 "REST API call {} failed. Return status code {}".format(
3327 url, response.status_code
3328 )
3329 )
3330 else:
3331 xmlroot_response = XmlElementTree.fromstring(response.text)
3332 for child in xmlroot_response:
3333 if "ProviderVdcReference" in child.tag:
3334 pvdc_href = child.attrib.get("href")
3335 # Get vdcReferences to find org
3336 pvdc_resp = self.perform_request(
3337 req_type="GET", url=pvdc_href, headers=headers
3338 )
3339
3340 if pvdc_resp.status_code != requests.codes.ok:
3341 raise vimconn.VimConnException(
3342 "REST API call {} failed. "
3343 "Return status code {}".format(url, pvdc_resp.status_code)
3344 )
3345
3346 pvdc_resp_xml = XmlElementTree.fromstring(pvdc_resp.content)
3347 for child in pvdc_resp_xml:
3348 if "Link" in child.tag:
3349 if (
3350 child.attrib.get("type")
3351 == "application/vnd.vmware.admin.vdcReferences+xml"
3352 ):
3353 vdc_href = child.attrib.get("href")
3354
3355 # Check if provided org is present in vdc
3356 vdc_resp = self.perform_request(
3357 req_type="GET", url=vdc_href, headers=headers
3358 )
3359
3360 if vdc_resp.status_code != requests.codes.ok:
3361 raise vimconn.VimConnException(
3362 "REST API call {} failed. "
3363 "Return status code {}".format(
3364 vdc_href, vdc_resp.status_code
3365 )
3366 )
3367 vdc_resp_xml = XmlElementTree.fromstring(
3368 vdc_resp.content
3369 )
3370
3371 for child in vdc_resp_xml:
3372 if "VdcReference" in child.tag:
3373 if child.attrib.get("name") == org_vdc:
3374 return pvdc_href
3375
3376 def get_resource_pool_details(self, pvdc_href, headers):
3377 """Method to get resource pool information.
3378 Host groups are property of resource group.
3379 To get host groups, we need to GET details of resource pool.
3380
3381 Args:
3382 pvdc_href: href to pvdc details
3383 headers: headers
3384
3385 Returns:
3386 respool_href - Returns href link reference to resource pool
3387 """
3388 respool_href = None
3389 resp = self.perform_request(req_type="GET", url=pvdc_href, headers=headers)
3390
3391 if resp.status_code != requests.codes.ok:
3392 self.logger.debug(
3393 "REST API call {} failed. Return status code {}".format(
3394 pvdc_href, resp.status_code
3395 )
3396 )
3397 else:
3398 respool_resp_xml = XmlElementTree.fromstring(resp.content)
3399 for child in respool_resp_xml:
3400 if "Link" in child.tag:
3401 if (
3402 child.attrib.get("type")
3403 == "application/vnd.vmware.admin.vmwProviderVdcResourcePoolSet+xml"
3404 ):
3405 respool_href = child.attrib.get("href")
3406 break
3407
3408 return respool_href
3409
3410 def log_message(self, msg):
3411 """
3412 Method to log error messages related to Affinity rule creation
3413 in new_vminstance & raise Exception
3414 Args :
3415 msg - Error message to be logged
3416
3417 """
3418 # get token to connect vCD as a normal user
3419 self.get_token()
3420 self.logger.debug(msg)
3421
3422 raise vimconn.VimConnException(msg)
3423
3424 # #
3425 # #
3426 # # based on current discussion
3427 # #
3428 # #
3429 # # server:
3430 # created: '2016-09-08T11:51:58'
3431 # description: simple-instance.linux1.1
3432 # flavor: ddc6776e-75a9-11e6-ad5f-0800273e724c
3433 # hostId: e836c036-74e7-11e6-b249-0800273e724c
3434 # image: dde30fe6-75a9-11e6-ad5f-0800273e724c
3435 # status: ACTIVE
3436 # error_msg:
3437 # interfaces: …
3438 #
3439 def get_vminstance(self, vim_vm_uuid=None):
3440 """Returns the VM instance information from VIM"""
3441 self.logger.debug("Client requesting vm instance {} ".format(vim_vm_uuid))
3442
3443 _, vdc = self.get_vdc_details()
3444 if vdc is None:
3445 raise vimconn.VimConnConnectionException(
3446 "Failed to get a reference of VDC for a tenant {}".format(
3447 self.tenant_name
3448 )
3449 )
3450
3451 vm_info_dict = self.get_vapp_details_rest(vapp_uuid=vim_vm_uuid)
3452 if not vm_info_dict:
3453 self.logger.debug(
3454 "get_vminstance(): Failed to get vApp name by UUID {}".format(
3455 vim_vm_uuid
3456 )
3457 )
3458 raise vimconn.VimConnNotFoundException(
3459 "Failed to get vApp name by UUID {}".format(vim_vm_uuid)
3460 )
3461
3462 status_key = vm_info_dict["status"]
3463 error = ""
3464 try:
3465 vm_dict = {
3466 "created": vm_info_dict["created"],
3467 "description": vm_info_dict["name"],
3468 "status": vcdStatusCode2manoFormat[int(status_key)],
3469 "hostId": vm_info_dict["vmuuid"],
3470 "error_msg": error,
3471 "vim_info": yaml.safe_dump(vm_info_dict),
3472 "interfaces": [],
3473 }
3474
3475 if "interfaces" in vm_info_dict:
3476 vm_dict["interfaces"] = vm_info_dict["interfaces"]
3477 else:
3478 vm_dict["interfaces"] = []
3479 except KeyError:
3480 vm_dict = {
3481 "created": "",
3482 "description": "",
3483 "status": vcdStatusCode2manoFormat[int(-1)],
3484 "hostId": vm_info_dict["vmuuid"],
3485 "error_msg": "Inconsistency state",
3486 "vim_info": yaml.safe_dump(vm_info_dict),
3487 "interfaces": [],
3488 }
3489
3490 return vm_dict
3491
3492 def delete_vminstance(self, vm__vim_uuid, created_items=None):
3493 """Method poweroff and remove VM instance from vcloud director network.
3494
3495 Args:
3496 vm__vim_uuid: VM UUID
3497
3498 Returns:
3499 Returns the instance identifier
3500 """
3501 self.logger.debug(
3502 "Client requesting delete vm instance {} ".format(vm__vim_uuid)
3503 )
3504
3505 _, vdc = self.get_vdc_details()
3506 vdc_obj = VDC(self.client, href=vdc.get("href"))
3507 if vdc_obj is None:
3508 self.logger.debug(
3509 "delete_vminstance(): Failed to get a reference of VDC for a tenant {}".format(
3510 self.tenant_name
3511 )
3512 )
3513 raise vimconn.VimConnException(
3514 "delete_vminstance(): Failed to get a reference of VDC for a tenant {}".format(
3515 self.tenant_name
3516 )
3517 )
3518
3519 try:
3520 vapp_name = self.get_namebyvappid(vm__vim_uuid)
3521 if vapp_name is None:
3522 self.logger.debug(
3523 "delete_vminstance(): Failed to get vm by given {} vm uuid".format(
3524 vm__vim_uuid
3525 )
3526 )
3527
3528 return (
3529 -1,
3530 "delete_vminstance(): Failed to get vm by given {} vm uuid".format(
3531 vm__vim_uuid
3532 ),
3533 )
3534
3535 self.logger.info(
3536 "Deleting vApp {} and UUID {}".format(vapp_name, vm__vim_uuid)
3537 )
3538 vapp_resource = vdc_obj.get_vapp(vapp_name)
3539 vapp = VApp(self.client, resource=vapp_resource)
3540
3541 # Delete vApp and wait for status change if task executed and vApp is None.
3542 if vapp:
3543 if vapp_resource.get("deployed") == "true":
3544 self.logger.info("Powering off vApp {}".format(vapp_name))
3545 # Power off vApp
3546 powered_off = False
3547 wait_time = 0
3548
3549 while wait_time <= MAX_WAIT_TIME:
3550 power_off_task = vapp.power_off()
3551 result = self.client.get_task_monitor().wait_for_success(
3552 task=power_off_task
3553 )
3554
3555 if result.get("status") == "success":
3556 powered_off = True
3557 break
3558 else:
3559 self.logger.info(
3560 "Wait for vApp {} to power off".format(vapp_name)
3561 )
3562 time.sleep(INTERVAL_TIME)
3563
3564 wait_time += INTERVAL_TIME
3565
3566 if not powered_off:
3567 self.logger.debug(
3568 "delete_vminstance(): Failed to power off VM instance {} ".format(
3569 vm__vim_uuid
3570 )
3571 )
3572 else:
3573 self.logger.info(
3574 "delete_vminstance(): Powered off VM instance {} ".format(
3575 vm__vim_uuid
3576 )
3577 )
3578
3579 # Undeploy vApp
3580 self.logger.info("Undeploy vApp {}".format(vapp_name))
3581 wait_time = 0
3582 undeployed = False
3583 while wait_time <= MAX_WAIT_TIME:
3584 vapp = VApp(self.client, resource=vapp_resource)
3585 if not vapp:
3586 self.logger.debug(
3587 "delete_vminstance(): Failed to get vm by given {} vm uuid".format(
3588 vm__vim_uuid
3589 )
3590 )
3591
3592 return (
3593 -1,
3594 "delete_vminstance(): Failed to get vm by given {} vm uuid".format(
3595 vm__vim_uuid
3596 ),
3597 )
3598
3599 undeploy_task = vapp.undeploy()
3600 result = self.client.get_task_monitor().wait_for_success(
3601 task=undeploy_task
3602 )
3603
3604 if result.get("status") == "success":
3605 undeployed = True
3606 break
3607 else:
3608 self.logger.debug(
3609 "Wait for vApp {} to undeploy".format(vapp_name)
3610 )
3611 time.sleep(INTERVAL_TIME)
3612
3613 wait_time += INTERVAL_TIME
3614
3615 if not undeployed:
3616 self.logger.debug(
3617 "delete_vminstance(): Failed to undeploy vApp {} ".format(
3618 vm__vim_uuid
3619 )
3620 )
3621
3622 # delete vapp
3623 self.logger.info("Start deletion of vApp {} ".format(vapp_name))
3624 if vapp is not None:
3625 wait_time = 0
3626 result = False
3627
3628 while wait_time <= MAX_WAIT_TIME:
3629 vapp = VApp(self.client, resource=vapp_resource)
3630 if not vapp:
3631 self.logger.debug(
3632 "delete_vminstance(): Failed to get vm by given {} vm uuid".format(
3633 vm__vim_uuid
3634 )
3635 )
3636
3637 return (
3638 -1,
3639 "delete_vminstance(): Failed to get vm by given {} vm uuid".format(
3640 vm__vim_uuid
3641 ),
3642 )
3643
3644 delete_task = vdc_obj.delete_vapp(vapp.name, force=True)
3645 result = self.client.get_task_monitor().wait_for_success(
3646 task=delete_task
3647 )
3648 if result.get("status") == "success":
3649 break
3650 else:
3651 self.logger.debug(
3652 "Wait for vApp {} to delete".format(vapp_name)
3653 )
3654 time.sleep(INTERVAL_TIME)
3655
3656 wait_time += INTERVAL_TIME
3657
3658 if not result or result.get("status") != "success":
3659 self.logger.debug(
3660 "delete_vminstance(): Failed delete uuid {} ".format(
3661 vm__vim_uuid
3662 )
3663 )
3664 else:
3665 self.logger.info(
3666 "Deleted vm instance {} sccessfully".format(vm__vim_uuid)
3667 )
3668 config_drive_catalog_name, config_drive_catalog_id = (
3669 "cfg_drv-" + vm__vim_uuid,
3670 None,
3671 )
3672 catalog_list = self.get_image_list()
3673
3674 try:
3675 config_drive_catalog_id = [
3676 catalog_["id"]
3677 for catalog_ in catalog_list
3678 if catalog_["name"] == config_drive_catalog_name
3679 ][0]
3680 except IndexError:
3681 pass
3682
3683 if config_drive_catalog_id:
3684 self.logger.debug(
3685 "delete_vminstance(): Found a config drive catalog {} matching "
3686 'vapp_name"{}". Deleting it.'.format(
3687 config_drive_catalog_id, vapp_name
3688 )
3689 )
3690 self.delete_image(config_drive_catalog_id)
3691
3692 return vm__vim_uuid
3693 except Exception:
3694 self.logger.debug(traceback.format_exc())
3695
3696 raise vimconn.VimConnException(
3697 "delete_vminstance(): Failed delete vm instance {}".format(vm__vim_uuid)
3698 )
3699
3700 def refresh_vms_status(self, vm_list):
3701 """Get the status of the virtual machines and their interfaces/ports
3702 Params: the list of VM identifiers
3703 Returns a dictionary with:
3704 vm_id: #VIM id of this Virtual Machine
3705 status: #Mandatory. Text with one of:
3706 # DELETED (not found at vim)
3707 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
3708 # OTHER (Vim reported other status not understood)
3709 # ERROR (VIM indicates an ERROR status)
3710 # ACTIVE, PAUSED, SUSPENDED, INACTIVE (not running),
3711 # CREATING (on building process), ERROR
3712 # ACTIVE:NoMgmtIP (Active but none of its interfaces has an IP address)
3713 #
3714 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
3715 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
3716 interfaces:
3717 - vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
3718 mac_address: #Text format XX:XX:XX:XX:XX:XX
3719 vim_net_id: #network id where this interface is connected
3720 vim_interface_id: #interface/port VIM id
3721 ip_address: #null, or text with IPv4, IPv6 address
3722 """
3723 self.logger.debug("Client requesting refresh vm status for {} ".format(vm_list))
3724
3725 _, vdc = self.get_vdc_details()
3726 if vdc is None:
3727 raise vimconn.VimConnException(
3728 "Failed to get a reference of VDC for a tenant {}".format(
3729 self.tenant_name
3730 )
3731 )
3732
3733 vms_dict = {}
3734 nsx_edge_list = []
3735 for vmuuid in vm_list:
3736 vapp_name = self.get_namebyvappid(vmuuid)
3737 if vapp_name is not None:
3738 try:
3739 vm_pci_details = self.get_vm_pci_details(vmuuid)
3740 vdc_obj = VDC(self.client, href=vdc.get("href"))
3741 vapp_resource = vdc_obj.get_vapp(vapp_name)
3742 the_vapp = VApp(self.client, resource=vapp_resource)
3743
3744 vm_details = {}
3745 for vm in the_vapp.get_all_vms():
3746 headers = {
3747 "Accept": "application/*+xml;version=" + API_VERSION,
3748 "x-vcloud-authorization": self.client._session.headers[
3749 "x-vcloud-authorization"
3750 ],
3751 }
3752 response = self.perform_request(
3753 req_type="GET", url=vm.get("href"), headers=headers
3754 )
3755
3756 if response.status_code != 200:
3757 self.logger.error(
3758 "refresh_vms_status : REST call {} failed reason : {}"
3759 "status code : {}".format(
3760 vm.get("href"), response.text, response.status_code
3761 )
3762 )
3763 raise vimconn.VimConnException(
3764 "refresh_vms_status : Failed to get VM details"
3765 )
3766
3767 xmlroot = XmlElementTree.fromstring(response.text)
3768 result = response.text.replace("\n", " ")
3769 hdd_match = re.search(
3770 r'vcloud:capacity="(\d+)"\svcloud:storageProfileOverrideVmDefault=',
3771 result,
3772 )
3773
3774 if hdd_match:
3775 hdd_mb = hdd_match.group(1)
3776 vm_details["hdd_mb"] = int(hdd_mb) if hdd_mb else None
3777
3778 cpus_match = re.search(
3779 "<rasd:Description>Number of Virtual CPUs</.*?>(\d+)</rasd:VirtualQuantity>",
3780 result,
3781 )
3782
3783 if cpus_match:
3784 cpus = cpus_match.group(1)
3785 vm_details["cpus"] = int(cpus) if cpus else None
3786
3787 memory_mb = re.search(
3788 "<rasd:Description>Memory Size</.*?>(\d+)</rasd:VirtualQuantity>",
3789 result,
3790 ).group(1)
3791 vm_details["memory_mb"] = int(memory_mb) if memory_mb else None
3792 vm_details["status"] = vcdStatusCode2manoFormat[
3793 int(xmlroot.get("status"))
3794 ]
3795 vm_details["id"] = xmlroot.get("id")
3796 vm_details["name"] = xmlroot.get("name")
3797 vm_info = [vm_details]
3798
3799 if vm_pci_details:
3800 vm_info[0].update(vm_pci_details)
3801
3802 vm_dict = {
3803 "status": vcdStatusCode2manoFormat[
3804 int(vapp_resource.get("status"))
3805 ],
3806 "error_msg": vcdStatusCode2manoFormat[
3807 int(vapp_resource.get("status"))
3808 ],
3809 "vim_info": yaml.safe_dump(vm_info),
3810 "interfaces": [],
3811 }
3812
3813 # get networks
3814 vm_ip = None
3815 vm_mac = None
3816 networks = re.findall(
3817 "<NetworkConnection needsCustomization=.*?</NetworkConnection>",
3818 result,
3819 )
3820
3821 for network in networks:
3822 mac_s = re.search("<MACAddress>(.*?)</MACAddress>", network)
3823 vm_mac = mac_s.group(1) if mac_s else None
3824 ip_s = re.search("<IpAddress>(.*?)</IpAddress>", network)
3825 vm_ip = ip_s.group(1) if ip_s else None
3826
3827 if vm_ip is None:
3828 if not nsx_edge_list:
3829 nsx_edge_list = self.get_edge_details()
3830 if nsx_edge_list is None:
3831 raise vimconn.VimConnException(
3832 "refresh_vms_status:"
3833 "Failed to get edge details from NSX Manager"
3834 )
3835
3836 if vm_mac is not None:
3837 vm_ip = self.get_ipaddr_from_NSXedge(
3838 nsx_edge_list, vm_mac
3839 )
3840
3841 net_s = re.search('network="(.*?)"', network)
3842 network_name = net_s.group(1) if net_s else None
3843 vm_net_id = self.get_network_id_by_name(network_name)
3844 interface = {
3845 "mac_address": vm_mac,
3846 "vim_net_id": vm_net_id,
3847 "vim_interface_id": vm_net_id,
3848 "ip_address": vm_ip,
3849 }
3850 vm_dict["interfaces"].append(interface)
3851
3852 # add a vm to vm dict
3853 vms_dict.setdefault(vmuuid, vm_dict)
3854 self.logger.debug("refresh_vms_status : vm info {}".format(vm_dict))
3855 except Exception as exp:
3856 self.logger.debug("Error in response {}".format(exp))
3857 self.logger.debug(traceback.format_exc())
3858
3859 return vms_dict
3860
3861 def get_edge_details(self):
3862 """Get the NSX edge list from NSX Manager
3863 Returns list of NSX edges
3864 """
3865 edge_list = []
3866 rheaders = {"Content-Type": "application/xml"}
3867 nsx_api_url = "/api/4.0/edges"
3868
3869 self.logger.debug(
3870 "Get edge details from NSX Manager {} {}".format(
3871 self.nsx_manager, nsx_api_url
3872 )
3873 )
3874
3875 try:
3876 resp = requests.get(
3877 self.nsx_manager + nsx_api_url,
3878 auth=(self.nsx_user, self.nsx_password),
3879 verify=False,
3880 headers=rheaders,
3881 )
3882 if resp.status_code == requests.codes.ok:
3883 paged_Edge_List = XmlElementTree.fromstring(resp.text)
3884 for edge_pages in paged_Edge_List:
3885 if edge_pages.tag == "edgePage":
3886 for edge_summary in edge_pages:
3887 if edge_summary.tag == "pagingInfo":
3888 for element in edge_summary:
3889 if (
3890 element.tag == "totalCount"
3891 and element.text == "0"
3892 ):
3893 raise vimconn.VimConnException(
3894 "get_edge_details: No NSX edges details found: {}".format(
3895 self.nsx_manager
3896 )
3897 )
3898
3899 if edge_summary.tag == "edgeSummary":
3900 for element in edge_summary:
3901 if element.tag == "id":
3902 edge_list.append(element.text)
3903 else:
3904 raise vimconn.VimConnException(
3905 "get_edge_details: No NSX edge details found: {}".format(
3906 self.nsx_manager
3907 )
3908 )
3909
3910 if not edge_list:
3911 raise vimconn.VimConnException(
3912 "get_edge_details: "
3913 "No NSX edge details found: {}".format(self.nsx_manager)
3914 )
3915 else:
3916 self.logger.debug(
3917 "get_edge_details: Found NSX edges {}".format(edge_list)
3918 )
3919
3920 return edge_list
3921 else:
3922 self.logger.debug(
3923 "get_edge_details: "
3924 "Failed to get NSX edge details from NSX Manager: {}".format(
3925 resp.content
3926 )
3927 )
3928
3929 return None
3930
3931 except Exception as exp:
3932 self.logger.debug(
3933 "get_edge_details: "
3934 "Failed to get NSX edge details from NSX Manager: {}".format(exp)
3935 )
3936 raise vimconn.VimConnException(
3937 "get_edge_details: "
3938 "Failed to get NSX edge details from NSX Manager: {}".format(exp)
3939 )
3940
3941 def get_ipaddr_from_NSXedge(self, nsx_edges, mac_address):
3942 """Get IP address details from NSX edges, using the MAC address
3943 PARAMS: nsx_edges : List of NSX edges
3944 mac_address : Find IP address corresponding to this MAC address
3945 Returns: IP address corresponding to the provided MAC address
3946 """
3947 ip_addr = edge_mac_addr = None
3948 rheaders = {"Content-Type": "application/xml"}
3949
3950 self.logger.debug("get_ipaddr_from_NSXedge: Finding IP addr from NSX edge")
3951
3952 try:
3953 for edge in nsx_edges:
3954 nsx_api_url = "/api/4.0/edges/" + edge + "/dhcp/leaseInfo"
3955
3956 resp = requests.get(
3957 self.nsx_manager + nsx_api_url,
3958 auth=(self.nsx_user, self.nsx_password),
3959 verify=False,
3960 headers=rheaders,
3961 )
3962
3963 if resp.status_code == requests.codes.ok:
3964 dhcp_leases = XmlElementTree.fromstring(resp.text)
3965 for child in dhcp_leases:
3966 if child.tag == "dhcpLeaseInfo":
3967 dhcpLeaseInfo = child
3968 for leaseInfo in dhcpLeaseInfo:
3969 for elem in leaseInfo:
3970 if (elem.tag) == "macAddress":
3971 edge_mac_addr = elem.text
3972
3973 if (elem.tag) == "ipAddress":
3974 ip_addr = elem.text
3975
3976 if edge_mac_addr is not None:
3977 if edge_mac_addr == mac_address:
3978 self.logger.debug(
3979 "Found ip addr {} for mac {} at NSX edge {}".format(
3980 ip_addr, mac_address, edge
3981 )
3982 )
3983
3984 return ip_addr
3985 else:
3986 self.logger.debug(
3987 "get_ipaddr_from_NSXedge: "
3988 "Error occurred while getting DHCP lease info from NSX Manager: {}".format(
3989 resp.content
3990 )
3991 )
3992
3993 self.logger.debug(
3994 "get_ipaddr_from_NSXedge: No IP addr found in any NSX edge"
3995 )
3996
3997 return None
3998
3999 except XmlElementTree.ParseError as Err:
4000 self.logger.debug(
4001 "ParseError in response from NSX Manager {}".format(Err.message),
4002 exc_info=True,
4003 )
4004
4005 def action_vminstance(self, vm__vim_uuid=None, action_dict=None, created_items={}):
4006 """Send and action over a VM instance from VIM
4007 Returns the vm_id if the action was successfully sent to the VIM"""
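# Illustrative only: the action_dict is expected to carry one of the keys handled
# below; the values are typically None or VIM-specific parameters, e.g.:
#
#     action_dict = {"start": None}       # power on the vApp
#     action_dict = {"shutdown": None}    # guest shutdown
#     action_dict = {"forceOff": None}    # hard power off (undeploy with powerOff)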
4008
4009 self.logger.debug(
4010 "Received action for vm {} and action dict {}".format(
4011 vm__vim_uuid, action_dict
4012 )
4013 )
4014
4015 if vm__vim_uuid is None or action_dict is None:
4016 raise vimconn.VimConnException("Invalid request. VM id or action is None.")
4017
4018 _, vdc = self.get_vdc_details()
4019 if vdc is None:
4020 raise vimconn.VimConnException(
4021 "Failed to get a reference of VDC for a tenant {}".format(
4022 self.tenant_name
4023 )
4024 )
4025
4026 vapp_name = self.get_namebyvappid(vm__vim_uuid)
4027 if vapp_name is None:
4028 self.logger.debug(
4029 "action_vminstance(): Failed to get vm by given {} vm uuid".format(
4030 vm__vim_uuid
4031 )
4032 )
4033
4034 raise vimconn.VimConnException(
4035 "Failed to get vm by given {} vm uuid".format(vm__vim_uuid)
4036 )
4037 else:
4038 self.logger.info(
4039 "Action_vminstance vApp {} and UUID {}".format(vapp_name, vm__vim_uuid)
4040 )
4041
4042 try:
4043 vdc_obj = VDC(self.client, href=vdc.get("href"))
4044 vapp_resource = vdc_obj.get_vapp(vapp_name)
4045 vapp = VApp(self.client, resource=vapp_resource)
4046
4047 if "start" in action_dict:
4048 self.logger.info(
4049 "action_vminstance: Power on vApp: {}".format(vapp_name)
4050 )
4051 poweron_task = self.power_on_vapp(vm__vim_uuid, vapp_name)
4052 result = self.client.get_task_monitor().wait_for_success(
4053 task=poweron_task
4054 )
4055 self.instance_actions_result("start", result, vapp_name)
4056 elif "rebuild" in action_dict:
4057 self.logger.info(
4058 "action_vminstance: Rebuild vApp: {}".format(vapp_name)
4059 )
4060 rebuild_task = vapp.deploy(power_on=True)
4061 result = self.client.get_task_monitor().wait_for_success(
4062 task=rebuild_task
4063 )
4064 self.instance_actions_result("rebuild", result, vapp_name)
4065 elif "pause" in action_dict:
4066 self.logger.info("action_vminstance: pause vApp: {}".format(vapp_name))
4067 pause_task = vapp.undeploy(action="suspend")
4068 result = self.client.get_task_monitor().wait_for_success(
4069 task=pause_task
4070 )
4071 self.instance_actions_result("pause", result, vapp_name)
4072 elif "resume" in action_dict:
4073 self.logger.info("action_vminstance: resume vApp: {}".format(vapp_name))
4074 poweron_task = self.power_on_vapp(vm__vim_uuid, vapp_name)
4075 result = self.client.get_task_monitor().wait_for_success(
4076 task=poweron_task
4077 )
4078 self.instance_actions_result("resume", result, vapp_name)
4079 elif "shutoff" in action_dict or "shutdown" in action_dict:
4080 action_name, _ = list(action_dict.items())[0]
4081 self.logger.info(
4082 "action_vminstance: {} vApp: {}".format(action_name, vapp_name)
4083 )
4084 shutdown_task = vapp.shutdown()
4085 result = self.client.get_task_monitor().wait_for_success(
4086 task=shutdown_task
4087 )
4088 if action_name == "shutdown":
4089 self.instance_actions_result("shutdown", result, vapp_name)
4090 else:
4091 self.instance_actions_result("shutoff", result, vapp_name)
4092 elif "forceOff" in action_dict:
4093 result = vapp.undeploy(action="powerOff")
4094 self.instance_actions_result("forceOff", result, vapp_name)
4095 elif "reboot" in action_dict:
4096 self.logger.info("action_vminstance: reboot vApp: {}".format(vapp_name))
4097 reboot_task = vapp.reboot()
4098 self.client.get_task_monitor().wait_for_success(task=reboot_task)
4099 else:
4100 raise vimconn.VimConnException(
4101 "action_vminstance: Invalid action {} or action is None.".format(
4102 action_dict
4103 )
4104 )
4105
4106 return vm__vim_uuid
4107 except Exception as exp:
4108 self.logger.debug("action_vminstance: Failed with Exception {}".format(exp))
4109
4110 raise vimconn.VimConnException(
4111 "action_vminstance: Failed with Exception {}".format(exp)
4112 )
4113
4114 def instance_actions_result(self, action, result, vapp_name):
4115 if result.get("status") == "success":
4116 self.logger.info(
4117 "action_vminstance: Sucessfully {} the vApp: {}".format(
4118 action, vapp_name
4119 )
4120 )
4121 else:
4122 self.logger.error(
4123 "action_vminstance: Failed to {} vApp: {}".format(action, vapp_name)
4124 )
4125
4126 def get_vminstance_console(self, vm_id, console_type="novnc"):
4127 """
4128 Get a console for the virtual machine
4129 Params:
4130 vm_id: uuid of the VM
4131 console_type, can be:
4132 "novnc" (by default), "xvpvnc" for VNC types,
4133 "rdp-html5" for RDP types, "spice-html5" for SPICE types
4134 Returns dict with the console parameters:
4135 protocol: ssh, ftp, http, https, ...
4136 server: usually ip address
4137 port: the http, ssh, ... port
4138 suffix: extra text, e.g. the http path and query string
4139 """
4140 console_dict = {}
4141
4142 if console_type is None or console_type == "novnc":
4143 url_rest_call = "{}/api/vApp/vm-{}/screen/action/acquireMksTicket".format(
4144 self.url, vm_id
4145 )
4146 headers = {
4147 "Accept": "application/*+xml;version=" + API_VERSION,
4148 "x-vcloud-authorization": self.client._session.headers[
4149 "x-vcloud-authorization"
4150 ],
4151 }
4152 response = self.perform_request(
4153 req_type="POST", url=url_rest_call, headers=headers
4154 )
4155
4156 if response.status_code == 403:
4157 response = self.retry_rest("GET", url_rest_call)
4158
4159 if response.status_code != 200:
4160 self.logger.error(
4161 "REST call {} failed reason : {}"
4162 "status code : {}".format(
4163 url_rest_call, response.text, response.status_code
4164 )
4165 )
4166 raise vimconn.VimConnException(
4167 "get_vminstance_console : Failed to get " "VM Mks ticket details"
4168 )
4169
4170 s = re.search("<Host>(.*?)</Host>", response.text)
4171 console_dict["server"] = s.group(1) if s else None
4172 s1 = re.search(r"<Port>(\d+)</Port>", response.text)
4173 console_dict["port"] = s1.group(1) if s1 else None
4174 url_rest_call = "{}/api/vApp/vm-{}/screen/action/acquireTicket".format(
4175 self.url, vm_id
4176 )
4177 headers = {
4178 "Accept": "application/*+xml;version=" + API_VERSION,
4179 "x-vcloud-authorization": self.client._session.headers[
4180 "x-vcloud-authorization"
4181 ],
4182 }
4183 response = self.perform_request(
4184 req_type="POST", url=url_rest_call, headers=headers
4185 )
4186
4187 if response.status_code == 403:
4188 response = self.retry_rest("GET", url_rest_call)
4189
4190 if response.status_code != 200:
4191 self.logger.error(
4192 "REST call {} failed reason : {}"
4193 "status code : {}".format(
4194 url_rest_call, response.text, response.status_code
4195 )
4196 )
4197 raise vimconn.VimConnException(
4198 "get_vminstance_console : Failed to get " "VM console details"
4199 )
4200
4201 s = re.search(r">.*?/(vm-\d+.*)</", response.text)
4202 console_dict["suffix"] = s.group(1) if s else None
4203 console_dict["protocol"] = "https"
4204
4205 return console_dict
4206
4207 # NOT USED METHODS in current version
4208
4209 def host_vim2gui(self, host, server_dict):
4210 """Transform host dictionary from VIM format to GUI format,
4211 and append to the server_dict
4212 """
4213 raise vimconn.VimConnNotImplemented("Should have implemented this")
4214
4215 def get_hosts_info(self):
4216 """Get the information of deployed hosts
4217 Returns the hosts content"""
4218 raise vimconn.VimConnNotImplemented("Should have implemented this")
4219
4220 def get_hosts(self, vim_tenant):
4221 """Get the hosts and deployed instances
4222 Returns the hosts content"""
4223 raise vimconn.VimConnNotImplemented("Should have implemented this")
4224
4225 def get_processor_rankings(self):
4226 """Get the processor rankings in the VIM database"""
4227 raise vimconn.VimConnNotImplemented("Should have implemented this")
4228
4229 def new_host(self, host_data):
4230 """Adds a new host to VIM"""
4231 """Returns status code of the VIM response"""
4232 raise vimconn.VimConnNotImplemented("Should have implemented this")
4233
4234 def new_external_port(self, port_data):
4235 """Adds a external port to VIM"""
4236 """Returns the port identifier"""
4237 raise vimconn.VimConnNotImplemented("Should have implemented this")
4238
4239 def new_external_network(self, net_name, net_type):
4240 """Adds a external network to VIM (shared)"""
4241 """Returns the network identifier"""
4242 raise vimconn.VimConnNotImplemented("Should have implemented this")
4243
4244 def connect_port_network(self, port_id, network_id, admin=False):
4245 """Connects a external port to a network"""
4246 """Returns status code of the VIM response"""
4247 raise vimconn.VimConnNotImplemented("Should have implemented this")
4248
4249 def new_vminstancefromJSON(self, vm_data):
4250 """Adds a VM instance to VIM"""
4251 """Returns the instance identifier"""
4252 raise vimconn.VimConnNotImplemented("Should have implemented this")
4253
4254 def get_network_name_by_id(self, network_uuid=None):
4255 """Method gets vcloud director network named based on supplied uuid.
4256
4257 Args:
4258 network_uuid: network_id
4259
4260 Returns:
4261 The network name, or None if not found.
4262 """
4263
4264 if not network_uuid:
4265 return None
4266
4267 try:
4268 org_dict = self.get_org(self.org_uuid)
4269 if "networks" in org_dict:
4270 org_network_dict = org_dict["networks"]
4271
4272 for net_uuid in org_network_dict:
4273 if net_uuid == network_uuid:
4274 return org_network_dict[net_uuid]
4275 except Exception:
4276 self.logger.debug("Exception in get_network_name_by_id")
4277 self.logger.debug(traceback.format_exc())
4278
4279 return None
4280
4281 def get_network_id_by_name(self, network_name=None):
4282 """Method gets vcloud director network uuid based on supplied name.
4283
4284 Args:
4285 network_name: network_name
4286 Returns:
4287 The network uuid (network_id), or None if not found.
4289 """
4290 if not network_name:
4291 self.logger.debug("get_network_id_by_name() : Network name is empty")
4292 return None
4293
4294 try:
4295 org_dict = self.get_org(self.org_uuid)
4296 if org_dict and "networks" in org_dict:
4297 org_network_dict = org_dict["networks"]
4298
4299 for net_uuid, net_name in org_network_dict.items():
4300 if net_name == network_name:
4301 return net_uuid
4302
4303 except KeyError as exp:
4304 self.logger.debug("get_network_id_by_name() : KeyError- {} ".format(exp))
4305
4306 return None
4307
4308 def get_physical_network_by_name(self, physical_network_name):
4309 """
4310 Method returns the uuid of the physical network whose name is passed
4311 Args:
4312 physical_network_name: physical network name
4313 Returns:
4314 UUID of physical_network_name
4315 """
4316 try:
4317 client_as_admin = self.connect_as_admin()
4318
4319 if not client_as_admin:
4320 raise vimconn.VimConnConnectionException("Failed to connect vCD.")
4321
4322 url_list = [self.url, "/api/admin/vdc/", self.tenant_id]
4323 vm_list_rest_call = "".join(url_list)
4324
4325 if client_as_admin._session:
4326 headers = {
4327 "Accept": "application/*+xml;version=" + API_VERSION,
4328 "x-vcloud-authorization": client_as_admin._session.headers[
4329 "x-vcloud-authorization"
4330 ],
4331 }
4332 response = self.perform_request(
4333 req_type="GET", url=vm_list_rest_call, headers=headers
4334 )
4335 provider_network = None
4336 available_network = None
4337 # add_vdc_rest_url = None
4338
4339 if response.status_code != requests.codes.ok:
4340 self.logger.debug(
4341 "REST API call {} failed. Return status code {}".format(
4342 vm_list_rest_call, response.status_code
4343 )
4344 )
4345 return None
4346 else:
4347 try:
4348 vm_list_xmlroot = XmlElementTree.fromstring(response.text)
4349 for child in vm_list_xmlroot:
4350 if child.tag.split("}")[1] == "ProviderVdcReference":
4351 provider_network = child.attrib.get("href")
4352 # application/vnd.vmware.admin.providervdc+xml
4353
4354 if child.tag.split("}")[1] == "Link":
4355 if (
4356 child.attrib.get("type")
4357 == "application/vnd.vmware.vcloud.orgVdcNetwork+xml"
4358 and child.attrib.get("rel") == "add"
4359 ):
4360 child.attrib.get("href")
4361 except Exception:
4362 self.logger.debug(
4363 "Failed parse respond for rest api call {}".format(
4364 vm_list_rest_call
4365 )
4366 )
4367 self.logger.debug("Respond body {}".format(response.text))
4368
4369 return None
4370
4371 # find pvdc provided available network
4372 response = self.perform_request(
4373 req_type="GET", url=provider_network, headers=headers
4374 )
4375
4376 if response.status_code != requests.codes.ok:
4377 self.logger.debug(
4378 "REST API call {} failed. Return status code {}".format(
4379 vm_list_rest_call, response.status_code
4380 )
4381 )
4382
4383 return None
4384
4385 try:
4386 vm_list_xmlroot = XmlElementTree.fromstring(response.text)
4387 for child in vm_list_xmlroot.iter():
4388 if child.tag.split("}")[1] == "AvailableNetworks":
4389 for networks in child.iter():
4390 if (
4391 networks.attrib.get("href") is not None
4392 and networks.attrib.get("name") is not None
4393 ):
4394 if (
4395 networks.attrib.get("name")
4396 == physical_network_name
4397 ):
4398 network_url = networks.attrib.get("href")
4399 available_network = network_url[
4400 network_url.rindex("/") + 1 :
4401 ]
4402 break
4403 except Exception:
4404 return None
4405
4406 return available_network
4407 except Exception as e:
4408 self.logger.error("Error while getting physical network: {}".format(e))
4409
4410 def list_org_action(self):
4411 """
4412 Method leverages vCloud Director and queries the available organizations for a particular user
4413
4414 Args:
4415 vca - is active VCA connection.
4416 vdc_name - is a vdc name that will be used to query vms action
4417
4418 Returns:
4419 The XML response text, or None
4420 """
4421 url_list = [self.url, "/api/org"]
4422 vm_list_rest_call = "".join(url_list)
4423
4424 if self.client._session:
4425 headers = {
4426 "Accept": "application/*+xml;version=" + API_VERSION,
4427 "x-vcloud-authorization": self.client._session.headers[
4428 "x-vcloud-authorization"
4429 ],
4430 }
4431
4432 response = self.perform_request(
4433 req_type="GET", url=vm_list_rest_call, headers=headers
4434 )
4435
4436 if response.status_code == 403:
4437 response = self.retry_rest("GET", vm_list_rest_call)
4438
4439 if response.status_code == requests.codes.ok:
4440 return response.text
4441
4442 return None
4443
4444 def get_org_action(self, org_uuid=None):
4445 """
4446 Method leverages vCloud Director and retrieves the available objects for an organization.
4447
4448 Args:
4449 org_uuid - vCD organization uuid
4450 self.client - is active connection.
4451
4452 Returns:
4453 The XML response text, or None
4454 """
4455
4456 if org_uuid is None:
4457 return None
4458
4459 url_list = [self.url, "/api/org/", org_uuid]
4460 vm_list_rest_call = "".join(url_list)
4461
4462 if self.client._session:
4463 headers = {
4464 "Accept": "application/*+xml;version=" + API_VERSION,
4465 "x-vcloud-authorization": self.client._session.headers[
4466 "x-vcloud-authorization"
4467 ],
4468 }
4469
4470 # response = requests.get(vm_list_rest_call, headers=headers, verify=False)
4471 response = self.perform_request(
4472 req_type="GET", url=vm_list_rest_call, headers=headers
4473 )
4474
4475 if response.status_code == 403:
4476 response = self.retry_rest("GET", vm_list_rest_call)
4477
4478 if response.status_code == requests.codes.ok:
4479 return response.text
4480
4481 return None
4482
4483 def get_org(self, org_uuid=None):
4484 """
4485 Method retrieves available organization in vCloud Director
4486
4487 Args:
4488 org_uuid - is an organization uuid.
4489
4490 Returns:
4491 The return dictionary with the following keys:
4492 "networks" - network list under the org
4493 "catalogs" - catalog list under the org
4494 "vdcs" - vdc list under the org
4495 """
4496
4497 org_dict = {}
4498
4499 if org_uuid is None:
4500 return org_dict
4501
4502 content = self.get_org_action(org_uuid=org_uuid)
4503 try:
4504 vdc_list = {}
4505 network_list = {}
4506 catalog_list = {}
4507 vm_list_xmlroot = XmlElementTree.fromstring(content)
4508 for child in vm_list_xmlroot:
4509 if child.attrib["type"] == "application/vnd.vmware.vcloud.vdc+xml":
4510 vdc_list[child.attrib["href"].split("/")[-1:][0]] = child.attrib[
4511 "name"
4512 ]
4513 org_dict["vdcs"] = vdc_list
4514
4515 if (
4516 child.attrib["type"]
4517 == "application/vnd.vmware.vcloud.orgNetwork+xml"
4518 ):
4519 network_list[
4520 child.attrib["href"].split("/")[-1:][0]
4521 ] = child.attrib["name"]
4522 org_dict["networks"] = network_list
4523
4524 if child.attrib["type"] == "application/vnd.vmware.vcloud.catalog+xml":
4525 catalog_list[
4526 child.attrib["href"].split("/")[-1:][0]
4527 ] = child.attrib["name"]
4528 org_dict["catalogs"] = catalog_list
4529 except Exception:
4530 pass
4531
4532 return org_dict
4533
4534 def get_org_list(self):
4535 """
4536 Method retrieves the available organizations in vCloud Director
4537
4538 Args:
4539 vca - is active VCA connection.
4540
4541 Returns:
4542 The return dictionary, keyed by organization UUID
4543 """
4544 org_dict = {}
4545
4546 content = self.list_org_action()
4547 try:
4548 vm_list_xmlroot = XmlElementTree.fromstring(content)
4549
4550 for vm_xml in vm_list_xmlroot:
4551 if vm_xml.tag.split("}")[1] == "Org":
4552 org_uuid = vm_xml.attrib["href"].split("/")[-1:]
4553 org_dict[org_uuid[0]] = vm_xml.attrib["name"]
4554 except Exception:
4555 pass
4556
4557 return org_dict
4558
4559 def vms_view_action(self, vdc_name=None):
4560 """Method leverages vCloud director vms query call
4561
4562 Args:
4563 vca - is active VCA connection.
4564 vdc_name - is a vdc name that will be used to query vms action
4565
4566 Returns:
4567 The XML response text, or None
4568 """
4569 vca = self.connect()
4570 if vdc_name is None:
4571 return None
4572
4573 url_list = [vca.host, "/api/vms/query"]
4574 vm_list_rest_call = "".join(url_list)
4575
4576 if vca.vcloud_session and vca.vcloud_session.organization:
4577 refs = [
4578 ref
4579 for ref in vca.vcloud_session.organization.Link
4580 if ref.name == vdc_name
4581 and ref.type_ == "application/vnd.vmware.vcloud.vdc+xml"
4582 ]
4583
4584 if len(refs) == 1:
4585 response = self.perform_request(
4586 req_type="GET",
4587 url=vm_list_rest_call,
4588 headers=vca.vcloud_session.get_vcloud_headers(),
4589 verify=vca.verify,
4590 logger=vca.logger,
4591 )
4592
4593 if response.status_code == requests.codes.ok:
4594 return response.text
4595
4596 return None
4597
4598 def get_vapp_list(self, vdc_name=None):
4599 """
4600 Method retrieves the vApp list deployed in vCloud Director and returns a dictionary
4601 containing all vApps deployed for the queried VDC.
4602 The dictionary key is the vApp UUID
4603
4604
4605 Args:
4606 vca - is active VCA connection.
4607 vdc_name - is a vdc name that will be used to query vms action
4608
4609 Returns:
4610 The return dictionary, keyed by vApp UUID
4611 """
4612 vapp_dict = {}
4613
4614 if vdc_name is None:
4615 return vapp_dict
4616
4617 content = self.vms_view_action(vdc_name=vdc_name)
4618 try:
4619 vm_list_xmlroot = XmlElementTree.fromstring(content)
4620 for vm_xml in vm_list_xmlroot:
4621 if vm_xml.tag.split("}")[1] == "VMRecord":
4622 if vm_xml.attrib["isVAppTemplate"] == "true":
4623 rawuuid = vm_xml.attrib["container"].split("/")[-1:]
4624 if "vappTemplate-" in rawuuid[0]:
4625 # vm in format vappTemplate-e63d40e7-4ff5-4c6d-851f-96c1e4da86a5 we remove
4626 # vm and use raw UUID as key
4627 vapp_dict[rawuuid[0][13:]] = vm_xml.attrib
4628 except Exception:
4629 pass
4630
4631 return vapp_dict
4632
4633 def get_vm_list(self, vdc_name=None):
4634 """
4635 Method retrieves the list of VMs deployed in vCloud Director. It returns a dictionary
4636 containing all VMs deployed for the queried VDC.
4637 The dictionary key is the VM UUID
4638
4639
4640 Args:
4641 vca - is active VCA connection.
4642 vdc_name - is a vdc name that will be used to query vms action
4643
4644 Returns:
4645 The return dictionary, keyed by VM UUID
4646 """
4647 vm_dict = {}
4648
4649 if vdc_name is None:
4650 return vm_dict
4651
4652 content = self.vms_view_action(vdc_name=vdc_name)
4653 try:
4654 vm_list_xmlroot = XmlElementTree.fromstring(content)
4655 for vm_xml in vm_list_xmlroot:
4656 if vm_xml.tag.split("}")[1] == "VMRecord":
4657 if vm_xml.attrib["isVAppTemplate"] == "false":
4658 rawuuid = vm_xml.attrib["href"].split("/")[-1:]
4659 if "vm-" in rawuuid[0]:
4660 # vm in format vm-e63d40e7-4ff5-4c6d-851f-96c1e4da86a5 we remove
4661 # vm and use raw UUID as key
4662 vm_dict[rawuuid[0][3:]] = vm_xml.attrib
4663 except Exception:
4664 pass
4665
4666 return vm_dict
4667
4668 def get_vapp(self, vdc_name=None, vapp_name=None, isuuid=False):
4669 """
4670 Method retrieves a VM deployed in vCloud Director and returns its attributes as a
4671 dictionary, looked up by vApp name or UUID within the queried VDC.
4672 The dictionary key is the VM UUID
4673
4674
4675 Args:
4676 vca - is active VCA connection.
4677 vdc_name - is a vdc name that will be used to query vms action
4678
4679 Returns:
4680 The return dictionary, keyed by VM UUID
4681 """
4682 vm_dict = {}
4683 vca = self.connect()
4684
4685 if not vca:
4686 raise vimconn.VimConnConnectionException("self.connect() is failed")
4687
4688 if vdc_name is None:
4689 return vm_dict
4690
4691 content = self.vms_view_action(vdc_name=vdc_name)
4692 try:
4693 vm_list_xmlroot = XmlElementTree.fromstring(content)
4694 for vm_xml in vm_list_xmlroot:
4695 if (
4696 vm_xml.tag.split("}")[1] == "VMRecord"
4697 and vm_xml.attrib["isVAppTemplate"] == "false"
4698 ):
4699 # lookup done by UUID
4700 if isuuid:
4701 if vapp_name in vm_xml.attrib["container"]:
4702 rawuuid = vm_xml.attrib["href"].split("/")[-1:]
4703 if "vm-" in rawuuid[0]:
4704 vm_dict[rawuuid[0][3:]] = vm_xml.attrib
4705 break
4706 # lookup done by Name
4707 else:
4708 if vapp_name in vm_xml.attrib["name"]:
4709 rawuuid = vm_xml.attrib["href"].split("/")[-1:]
4710 if "vm-" in rawuuid[0]:
4711 vm_dict[rawuuid[0][3:]] = vm_xml.attrib
4712 break
4713 except Exception:
4714 pass
4715
4716 return vm_dict
4717
4718 def get_network_action(self, network_uuid=None):
4719 """
4720 Method leverages vCloud Director and queries a network based on its uuid
4721
4722 Args:
4723 vca - is active VCA connection.
4724 network_uuid - is a network uuid
4725
4726 Returns:
4727 The XML response text, or None
4728 """
4729 if network_uuid is None:
4730 return None
4731
4732 url_list = [self.url, "/api/network/", network_uuid]
4733 vm_list_rest_call = "".join(url_list)
4734
4735 if self.client._session:
4736 headers = {
4737 "Accept": "application/*+xml;version=" + API_VERSION,
4738 "x-vcloud-authorization": self.client._session.headers[
4739 "x-vcloud-authorization"
4740 ],
4741 }
4742 response = self.perform_request(
4743 req_type="GET", url=vm_list_rest_call, headers=headers
4744 )
4745
4746 # Retry login if session expired & retry sending request
4747 if response.status_code == 403:
4748 response = self.retry_rest("GET", vm_list_rest_call)
4749
4750 if response.status_code == requests.codes.ok:
4751 return response.text
4752
4753 return None
4754
4755 def get_vcd_network(self, network_uuid=None):
4756 """
4757 Method retrieves available network from vCloud Director
4758
4759 Args:
4760 network_uuid - is VCD network UUID
4761
4762 Each element serialized as key : value pair
4763
4764 Following keys are available for access: network_configuration['Gateway']
4765 <Configuration>
4766 <IpScopes>
4767 <IpScope>
4768 <IsInherited>true</IsInherited>
4769 <Gateway>172.16.252.100</Gateway>
4770 <Netmask>255.255.255.0</Netmask>
4771 <Dns1>172.16.254.201</Dns1>
4772 <Dns2>172.16.254.202</Dns2>
4773 <DnsSuffix>vmwarelab.edu</DnsSuffix>
4774 <IsEnabled>true</IsEnabled>
4775 <IpRanges>
4776 <IpRange>
4777 <StartAddress>172.16.252.1</StartAddress>
4778 <EndAddress>172.16.252.99</EndAddress>
4779 </IpRange>
4780 </IpRanges>
4781 </IpScope>
4782 </IpScopes>
4783 <FenceMode>bridged</FenceMode>
4784
4785 Returns:
4786 The return dictionary and key for each entry vapp UUID
4787 """
4788 network_configuration = {}
4789
4790 if network_uuid is None:
4791 return network_uuid
4792
4793 try:
4794 content = self.get_network_action(network_uuid=network_uuid)
4795 if content is not None:
4796 vm_list_xmlroot = XmlElementTree.fromstring(content)
4797 network_configuration["status"] = vm_list_xmlroot.get("status")
4798 network_configuration["name"] = vm_list_xmlroot.get("name")
4799 network_configuration["uuid"] = vm_list_xmlroot.get("id").split(":")[3]
4800
4801 for child in vm_list_xmlroot:
4802 if child.tag.split("}")[1] == "IsShared":
4803 network_configuration["isShared"] = child.text.strip()
4804
4805 if child.tag.split("}")[1] == "Configuration":
4806 for configuration in child.iter():
4807 tagKey = configuration.tag.split("}")[1].strip()
4808 if tagKey != "":
4809 network_configuration[
4810 tagKey
4811 ] = configuration.text.strip()
4812 except Exception as exp:
4813 self.logger.debug("get_vcd_network: Failed with Exception {}".format(exp))
4814
4815 raise vimconn.VimConnException(
4816 "get_vcd_network: Failed with Exception {}".format(exp)
4817 )
4818
4819 return network_configuration
4820
4821 def delete_network_action(self, network_uuid=None):
4822 """
4823 Method deletes the given network from vCloud Director
4824
4825 Args:
4826 network_uuid - is a network uuid that client wish to delete
4827
4828 Returns:
4829 True if the network was deleted, otherwise False
4830 """
4831 client = self.connect_as_admin()
4832
4833 if not client:
4834 raise vimconn.VimConnConnectionException("Failed to connect vCD as admin")
4835
4836 if network_uuid is None:
4837 return False
4838
4839 url_list = [self.url, "/api/admin/network/", network_uuid]
4840 vm_list_rest_call = "".join(url_list)
4841
4842 if client._session:
4843 headers = {
4844 "Accept": "application/*+xml;version=" + API_VERSION,
4845 "x-vcloud-authorization": client._session.headers[
4846 "x-vcloud-authorization"
4847 ],
4848 }
4849 response = self.perform_request(
4850 req_type="DELETE", url=vm_list_rest_call, headers=headers
4851 )
4852
4853 if response.status_code == 202:
4854 return True
4855
4856 return False
4857
4858 def create_network(
4859 self,
4860 network_name=None,
4861 net_type="bridge",
4862 parent_network_uuid=None,
4863 ip_profile=None,
4864 isshared="true",
4865 ):
4866 """
4867 Method creates a network in vCloud Director
4868
4869 Args:
4870 network_name - is network name to be created.
4871 net_type - can be 'bridge','data','ptp','mgmt'.
4872 ip_profile is a dict containing the IP parameters of the network
4873 isshared - is a boolean
4874 parent_network_uuid - is parent provider vdc network that will be used for mapping.
4875 It is an optional attribute; by default, if no parent network is indicated, the first available will be used.
4876
4877 Returns:
4878 The new network uuid, or None
4879 """
4880 new_network_name = [network_name, "-", str(uuid.uuid4())]
4881 content = self.create_network_rest(
4882 network_name="".join(new_network_name),
4883 ip_profile=ip_profile,
4884 net_type=net_type,
4885 parent_network_uuid=parent_network_uuid,
4886 isshared=isshared,
4887 )
4888
4889 if content is None:
4890 self.logger.debug("Failed create network {}.".format(network_name))
4891
4892 return None
4893
4894 try:
4895 vm_list_xmlroot = XmlElementTree.fromstring(content)
4896 vcd_uuid = vm_list_xmlroot.get("id").split(":")
4897 if len(vcd_uuid) == 4:
4898 self.logger.info(
4899 "Created new network name: {} uuid: {}".format(
4900 network_name, vcd_uuid[3]
4901 )
4902 )
4903
4904 return vcd_uuid[3]
4905 except Exception:
4906 self.logger.debug("Failed create network {}".format(network_name))
4907
4908 return None
4909
4910 def create_network_rest(
4911 self,
4912 network_name=None,
4913 net_type="bridge",
4914 parent_network_uuid=None,
4915 ip_profile=None,
4916 isshared="true",
4917 ):
4918 """
4919 Method creates a network in vCloud Director
4920
4921 Args:
4922 network_name - is network name to be created.
4923 net_type - can be 'bridge','data','ptp','mgmt'.
4924 ip_profile is a dict containing the IP parameters of the network
4925 isshared - is a boolean
4926 parent_network_uuid - is parent provider vdc network that will be used for mapping.
4927 It is an optional attribute; by default, if no parent network is indicated, the first available will be used.
4928
4929 Returns:
4930 The XML response of the network creation, or None
4931 """
4932 client_as_admin = self.connect_as_admin()
4933
4934 if not client_as_admin:
4935 raise vimconn.VimConnConnectionException("Failed to connect vCD.")
4936
4937 if network_name is None:
4938 return None
4939
4940 url_list = [self.url, "/api/admin/vdc/", self.tenant_id]
4941 vm_list_rest_call = "".join(url_list)
4942
4943 if client_as_admin._session:
4944 headers = {
4945 "Accept": "application/*+xml;version=" + API_VERSION,
4946 "x-vcloud-authorization": client_as_admin._session.headers[
4947 "x-vcloud-authorization"
4948 ],
4949 }
4950 response = self.perform_request(
4951 req_type="GET", url=vm_list_rest_call, headers=headers
4952 )
4953 provider_network = None
4954 available_networks = None
4955 add_vdc_rest_url = None
4956
4957 if response.status_code != requests.codes.ok:
4958 self.logger.debug(
4959 "REST API call {} failed. Return status code {}".format(
4960 vm_list_rest_call, response.status_code
4961 )
4962 )
4963
4964 return None
4965 else:
4966 try:
4967 vm_list_xmlroot = XmlElementTree.fromstring(response.text)
4968 for child in vm_list_xmlroot:
4969 if child.tag.split("}")[1] == "ProviderVdcReference":
4970 provider_network = child.attrib.get("href")
4971 # application/vnd.vmware.admin.providervdc+xml
4972
4973 if child.tag.split("}")[1] == "Link":
4974 if (
4975 child.attrib.get("type")
4976 == "application/vnd.vmware.vcloud.orgVdcNetwork+xml"
4977 and child.attrib.get("rel") == "add"
4978 ):
4979 add_vdc_rest_url = child.attrib.get("href")
4980 except Exception:
4981 self.logger.debug(
4982 "Failed parse respond for rest api call {}".format(
4983 vm_list_rest_call
4984 )
4985 )
4986 self.logger.debug("Respond body {}".format(response.text))
4987
4988 return None
4989
4990 # find pvdc provided available network
4991 response = self.perform_request(
4992 req_type="GET", url=provider_network, headers=headers
4993 )
4994
4995 if response.status_code != requests.codes.ok:
4996 self.logger.debug(
4997 "REST API call {} failed. Return status code {}".format(
4998 vm_list_rest_call, response.status_code
4999 )
5000 )
5001
5002 return None
5003
5004 if parent_network_uuid is None:
5005 try:
5006 vm_list_xmlroot = XmlElementTree.fromstring(response.text)
5007 for child in vm_list_xmlroot.iter():
5008 if child.tag.split("}")[1] == "AvailableNetworks":
5009 for networks in child.iter():
5010 # application/vnd.vmware.admin.network+xml
5011 if networks.attrib.get("href") is not None:
5012 available_networks = networks.attrib.get("href")
5013 break
5014 except Exception:
5015 return None
5016
5017 try:
5018 # Configure IP profile of the network
5019 ip_profile = (
5020 ip_profile if ip_profile is not None else DEFAULT_IP_PROFILE
5021 )
5022
5023 if (
5024 "subnet_address" not in ip_profile
5025 or ip_profile["subnet_address"] is None
5026 ):
5027 subnet_rand = random.randint(0, 255)
5028 ip_base = "192.168.{}.".format(subnet_rand)
5029 ip_profile["subnet_address"] = ip_base + "0/24"
5030 else:
5031 ip_base = ip_profile["subnet_address"].rsplit(".", 1)[0] + "."
5032
5033 if (
5034 "gateway_address" not in ip_profile
5035 or ip_profile["gateway_address"] is None
5036 ):
5037 ip_profile["gateway_address"] = ip_base + "1"
5038
5039 if "dhcp_count" not in ip_profile or ip_profile["dhcp_count"] is None:
5040 ip_profile["dhcp_count"] = DEFAULT_IP_PROFILE["dhcp_count"]
5041
5042 if (
5043 "dhcp_enabled" not in ip_profile
5044 or ip_profile["dhcp_enabled"] is None
5045 ):
5046 ip_profile["dhcp_enabled"] = DEFAULT_IP_PROFILE["dhcp_enabled"]
5047
5048 if (
5049 "dhcp_start_address" not in ip_profile
5050 or ip_profile["dhcp_start_address"] is None
5051 ):
5052 ip_profile["dhcp_start_address"] = ip_base + "3"
5053
5054 if "ip_version" not in ip_profile or ip_profile["ip_version"] is None:
5055 ip_profile["ip_version"] = DEFAULT_IP_PROFILE["ip_version"]
5056
5057 if "dns_address" not in ip_profile or ip_profile["dns_address"] is None:
5058 ip_profile["dns_address"] = ip_base + "2"
5059
5060 gateway_address = ip_profile["gateway_address"]
5061 dhcp_count = int(ip_profile["dhcp_count"])
5062 subnet_address = self.convert_cidr_to_netmask(
5063 ip_profile["subnet_address"]
5064 )
5065
5066 if ip_profile["dhcp_enabled"] is True:
5067 dhcp_enabled = "true"
5068 else:
5069 dhcp_enabled = "false"
5070
5071 dhcp_start_address = ip_profile["dhcp_start_address"]
5072
5073 # derive dhcp_end_address from dhcp_start_address & dhcp_count
5074 end_ip_int = int(netaddr.IPAddress(dhcp_start_address))
5075 end_ip_int += dhcp_count - 1
5076 dhcp_end_address = str(netaddr.IPAddress(end_ip_int))
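# Worked example (illustrative): with dhcp_start_address "192.168.10.3" and
# dhcp_count 50, end_ip_int is the start address plus 49, so dhcp_end_address
# becomes "192.168.10.52".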
5077
5078 # ip_version = ip_profile['ip_version']
5079 dns_address = ip_profile["dns_address"]
5080 except KeyError as exp:
5081 self.logger.debug("Create Network REST: Key error {}".format(exp))
5082
5083 raise vimconn.VimConnException(
5084 "Create Network REST: Key error{}".format(exp)
5085 )
5086
5087 # either use client provided UUID or search for a first available
5088 # if both are not defined we return none
5089 if parent_network_uuid is not None:
5090 provider_network = None
5091 available_networks = None
5092 add_vdc_rest_url = None
5093 url_list = [self.url, "/api/admin/vdc/", self.tenant_id, "/networks"]
5094 add_vdc_rest_url = "".join(url_list)
5095 url_list = [self.url, "/api/admin/network/", parent_network_uuid]
5096 available_networks = "".join(url_list)
5097
5098 # Creating all networks as Direct Org VDC type networks.
5099 # Unused in case of Underlay (data/ptp) network interface.
5100 fence_mode = "isolated"
5101 is_inherited = "false"
5102 dns_list = dns_address.split(";")
5103 dns1 = dns_list[0]
5104 dns2_text = ""
5105
5106 if len(dns_list) >= 2:
5107 dns2_text = "\n <Dns2>{}</Dns2>\n".format(
5108 dns_list[1]
5109 )
5110
5111 if net_type == "isolated":
5112 fence_mode = "isolated"
5113 data = """ <OrgVdcNetwork name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5">
5114 <Description>Openmano created</Description>
5115 <Configuration>
5116 <IpScopes>
5117 <IpScope>
5118 <IsInherited>{1:s}</IsInherited>
5119 <Gateway>{2:s}</Gateway>
5120 <Netmask>{3:s}</Netmask>
5121 <Dns1>{4:s}</Dns1>{5:s}
5122 <IsEnabled>{6:s}</IsEnabled>
5123 <IpRanges>
5124 <IpRange>
5125 <StartAddress>{7:s}</StartAddress>
5126 <EndAddress>{8:s}</EndAddress>
5127 </IpRange>
5128 </IpRanges>
5129 </IpScope>
5130 </IpScopes>
5131 <FenceMode>{9:s}</FenceMode>
5132 </Configuration>
5133 <IsShared>{10:s}</IsShared>
5134 </OrgVdcNetwork> """.format(
5135 escape(network_name),
5136 is_inherited,
5137 gateway_address,
5138 subnet_address,
5139 dns1,
5140 dns2_text,
5141 dhcp_enabled,
5142 dhcp_start_address,
5143 dhcp_end_address,
5144 fence_mode,
5145 isshared,
5146 )
5147 else:
5148 fence_mode = "bridged"
5149 data = """ <OrgVdcNetwork name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5">
5150 <Description>Openmano created</Description>
5151 <Configuration>
5152 <IpScopes>
5153 <IpScope>
5154 <IsInherited>{1:s}</IsInherited>
5155 <Gateway>{2:s}</Gateway>
5156 <Netmask>{3:s}</Netmask>
5157 <Dns1>{4:s}</Dns1>{5:s}
5158 <IsEnabled>{6:s}</IsEnabled>
5159 <IpRanges>
5160 <IpRange>
5161 <StartAddress>{7:s}</StartAddress>
5162 <EndAddress>{8:s}</EndAddress>
5163 </IpRange>
5164 </IpRanges>
5165 </IpScope>
5166 </IpScopes>
5167 <ParentNetwork href="{9:s}"/>
5168 <FenceMode>{10:s}</FenceMode>
5169 </Configuration>
5170 <IsShared>{11:s}</IsShared>
5171 </OrgVdcNetwork> """.format(
5172 escape(network_name),
5173 is_inherited,
5174 gateway_address,
5175 subnet_address,
5176 dns1,
5177 dns2_text,
5178 dhcp_enabled,
5179 dhcp_start_address,
5180 dhcp_end_address,
5181 available_networks,
5182 fence_mode,
5183 isshared,
5184 )
5185
5186 headers["Content-Type"] = "application/vnd.vmware.vcloud.orgVdcNetwork+xml"
5187 try:
5188 response = self.perform_request(
5189 req_type="POST", url=add_vdc_rest_url, headers=headers, data=data
5190 )
5191
5192 if response.status_code != 201:
5193 self.logger.debug(
5194 "Create Network POST REST API call failed. "
5195 "Return status code {}, response.text: {}".format(
5196 response.status_code, response.text
5197 )
5198 )
5199 else:
5200 network_task = self.get_task_from_response(response.text)
5201 self.logger.debug(
5202 "Create Network REST : Waiting for Network creation complete"
5203 )
5204 time.sleep(5)
5205 result = self.client.get_task_monitor().wait_for_success(
5206 task=network_task
5207 )
5208
5209 if result.get("status") == "success":
5210 return response.text
5211 else:
5212 self.logger.debug(
5213 "create_network_rest task failed. Network Create response : {}".format(
5214 response.text
5215 )
5216 )
5217 except Exception as exp:
5218 self.logger.debug("create_network_rest : Exception : {} ".format(exp))
5219
5220 return None
5221
5222 def convert_cidr_to_netmask(self, cidr_ip=None):
5223 """
5224 Method converts a CIDR address to a dotted-decimal netmask
5225 Args:
5226 cidr_ip : CIDR IP address
5227 Returns:
5228 netmask : Converted netmask
5229 """
5230 if cidr_ip is not None:
5231 if "/" in cidr_ip:
5232 _, net_bits = cidr_ip.split("/")
5233 netmask = socket.inet_ntoa(
5234 struct.pack(">I", (0xFFFFFFFF << (32 - int(net_bits))) & 0xFFFFFFFF)
5235 )
5236 else:
5237 netmask = cidr_ip
5238
5239 return netmask
5240
5241 return None
5242
5243 def get_provider_rest(self, vca=None):
5244 """
5245 Method gets the provider VDC view from vCloud Director
5246 
5247 Args:
5248 vca - active client connection.
5249 
5250 Returns:
5251 The XML content of the response, or None
5254 """
5255 url_list = [self.url, "/api/admin"]
5256
5257 if vca:
5258 headers = {
5259 "Accept": "application/*+xml;version=" + API_VERSION,
5260 "x-vcloud-authorization": self.client._session.headers[
5261 "x-vcloud-authorization"
5262 ],
5263 }
5264 response = self.perform_request(
5265 req_type="GET", url="".join(url_list), headers=headers
5266 )
5267
5268 if response.status_code == requests.codes.ok:
5269 return response.text
5270
5271 return None
5272
5273 def create_vdc(self, vdc_name=None):
5274 vdc_dict = {}
5275 xml_content = self.create_vdc_from_tmpl_rest(vdc_name=vdc_name)
5276
5277 if xml_content is not None:
5278 try:
5279 task_resp_xmlroot = XmlElementTree.fromstring(xml_content)
5280 for child in task_resp_xmlroot:
5281 if child.tag.split("}")[1] == "Owner":
5282 vdc_id = child.attrib.get("href").split("/")[-1]
5283 vdc_dict[vdc_id] = task_resp_xmlroot.get("href")
5284
5285 return vdc_dict
5286 except Exception:
5287 self.logger.debug("Respond body {}".format(xml_content))
5288
5289 return None
5290
5291 def create_vdc_from_tmpl_rest(self, vdc_name=None):
5292 """
5293 Method creates a VDC in vCloud Director based on a VDC template.
5294 It uses a pre-defined template.
5295
5296 Args:
5297 vdc_name - name of a new vdc.
5298
5299 Returns:
5300 The XML content of the response, or None
5301 """
5302 # prerequisite: at least one VDC template should be available in vCD
5303 self.logger.info("Creating new vdc {}".format(vdc_name))
5304 vca = self.connect_as_admin()
5305
5306 if not vca:
5307 raise vimconn.VimConnConnectionException("Failed to connect vCD")
5308
5309 if vdc_name is None:
5310 return None
5311
5312 url_list = [self.url, "/api/vdcTemplates"]
5313 vm_list_rest_call = "".join(url_list)
5314 headers = {
5315 "Accept": "application/*+xml;version=" + API_VERSION,
5316 "x-vcloud-authorization": vca._session.headers["x-vcloud-authorization"],
5317 }
5318 response = self.perform_request(
5319 req_type="GET", url=vm_list_rest_call, headers=headers
5320 )
5321
5322 # container url to a template
5323 vdc_template_ref = None
5324 try:
5325 vm_list_xmlroot = XmlElementTree.fromstring(response.text)
5326 for child in vm_list_xmlroot:
5327 # application/vnd.vmware.admin.providervdc+xml
5328 # we need to find a template from which we instantiate the VDC
5329 if child.tag.split("}")[1] == "VdcTemplate":
5330 if (
5331 child.attrib.get("type")
5332 == "application/vnd.vmware.admin.vdcTemplate+xml"
5333 ):
5334 vdc_template_ref = child.attrib.get("href")
5335 except Exception:
5336 self.logger.debug(
5337 "Failed parse respond for rest api call {}".format(vm_list_rest_call)
5338 )
5339 self.logger.debug("Respond body {}".format(response.text))
5340
5341 return None
5342
5343 # if we didn't find the required pre-defined template we return None
5344 if vdc_template_ref is None:
5345 return None
5346
5347 try:
5348 # instantiate vdc
5349 url_list = [self.url, "/api/org/", self.org_uuid, "/action/instantiate"]
5350 vm_list_rest_call = "".join(url_list)
5351 data = """<InstantiateVdcTemplateParams name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5">
5352 <Source href="{1:s}"></Source>
5353 <Description>openmano</Description>
5354 </InstantiateVdcTemplateParams>""".format(
5355 vdc_name, vdc_template_ref
5356 )
5357 headers[
5358 "Content-Type"
5359 ] = "application/vnd.vmware.vcloud.instantiateVdcTemplateParams+xml"
5360 response = self.perform_request(
5361 req_type="POST", url=vm_list_rest_call, headers=headers, data=data
5362 )
5363 vdc_task = self.get_task_from_response(response.text)
5364 self.client.get_task_monitor().wait_for_success(task=vdc_task)
5365
5366 # if all is ok we respond with the content, otherwise None by default
5367 if response.status_code >= 200 and response.status_code < 300:
5368 return response.text
5369
5370 return None
5371 except Exception:
5372 self.logger.debug(
5373 "Failed parse respond for rest api call {}".format(vm_list_rest_call)
5374 )
5375 self.logger.debug("Respond body {}".format(response.text))
5376
5377 return None
5378
5379 def create_vdc_rest(self, vdc_name=None):
5380 """
5381 Method creates a VDC in vCloud Director
5382
5383 Args:
5384 vdc_name - vdc name to be created
5385 Returns:
5386 The return response
5387 """
5388 self.logger.info("Creating new vdc {}".format(vdc_name))
5389 vca = self.connect_as_admin()
5390
5391 if not vca:
5392 raise vimconn.VimConnConnectionException("Failed to connect vCD")
5393
5394 if vdc_name is None:
5395 return None
5396
5397 url_list = [self.url, "/api/admin/org/", self.org_uuid]
5398 vm_list_rest_call = "".join(url_list)
5399
5400 if vca._session:
5401 headers = {
5402 "Accept": "application/*+xml;version=" + API_VERSION,
5403 "x-vcloud-authorization": self.client._session.headers[
5404 "x-vcloud-authorization"
5405 ],
5406 }
5407 response = self.perform_request(
5408 req_type="GET", url=vm_list_rest_call, headers=headers
5409 )
5410 provider_vdc_ref = None
5411 add_vdc_rest_url = None
5412 # available_networks = None
5413
5414 if response.status_code != requests.codes.ok:
5415 self.logger.debug(
5416 "REST API call {} failed. Return status code {}".format(
5417 vm_list_rest_call, response.status_code
5418 )
5419 )
5420
5421 return None
5422 else:
5423 try:
5424 vm_list_xmlroot = XmlElementTree.fromstring(response.text)
5425 for child in vm_list_xmlroot:
5426 # application/vnd.vmware.admin.providervdc+xml
5427 if child.tag.split("}")[1] == "Link":
5428 if (
5429 child.attrib.get("type")
5430 == "application/vnd.vmware.admin.createVdcParams+xml"
5431 and child.attrib.get("rel") == "add"
5432 ):
5433 add_vdc_rest_url = child.attrib.get("href")
5434 except Exception:
5435 self.logger.debug(
5436 "Failed parse respond for rest api call {}".format(
5437 vm_list_rest_call
5438 )
5439 )
5440 self.logger.debug("Respond body {}".format(response.text))
5441
5442 return None
5443
5444 response = self.get_provider_rest(vca=vca)
5445 try:
5446 vm_list_xmlroot = XmlElementTree.fromstring(response)
5447 for child in vm_list_xmlroot:
5448 if child.tag.split("}")[1] == "ProviderVdcReferences":
5449 for sub_child in child:
5450 provider_vdc_ref = sub_child.attrib.get("href")
5451 except Exception:
5452 self.logger.debug(
5453 "Failed parse respond for rest api call {}".format(
5454 vm_list_rest_call
5455 )
5456 )
5457 self.logger.debug("Respond body {}".format(response))
5458
5459 return None
5460
5461 if add_vdc_rest_url is not None and provider_vdc_ref is not None:
5462 data = """ <CreateVdcParams name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5"><Description>{1:s}</Description>
5463 <AllocationModel>ReservationPool</AllocationModel>
5464 <ComputeCapacity><Cpu><Units>MHz</Units><Allocated>2048</Allocated><Limit>2048</Limit></Cpu>
5465 <Memory><Units>MB</Units><Allocated>2048</Allocated><Limit>2048</Limit></Memory>
5466 </ComputeCapacity><NicQuota>0</NicQuota><NetworkQuota>100</NetworkQuota>
5467 <VdcStorageProfile><Enabled>true</Enabled><Units>MB</Units><Limit>20480</Limit><Default>true</Default></VdcStorageProfile>
5468 <ProviderVdcReference
5469 name="Main Provider"
5470 href="{2:s}" />
5471 <UsesFastProvisioning>true</UsesFastProvisioning></CreateVdcParams>""".format(
5472 escape(vdc_name), escape(vdc_name), provider_vdc_ref
5473 )
5474 headers[
5475 "Content-Type"
5476 ] = "application/vnd.vmware.admin.createVdcParams+xml"
5477 response = self.perform_request(
5478 req_type="POST",
5479 url=add_vdc_rest_url,
5480 headers=headers,
5481 data=data,
5482 )
5483
5484 # if all is ok we respond with the content, otherwise None by default
5485 if response.status_code == 201:
5486 return response.text
5487
5488 return None
5489
5490 def get_vapp_details_rest(self, vapp_uuid=None, need_admin_access=False):
5491 """
5492 Method retrieves vApp details from vCloud Director
5493
5494 Args:
5495 vapp_uuid - is vapp identifier.
5496
5497 Returns:
5498 A dictionary with the parsed vApp details, or None
5499 """
5500 parsed_respond = {}
5501 vca = None
5502
5503 if need_admin_access:
5504 vca = self.connect_as_admin()
5505 else:
5506 vca = self.client
5507
5508 if not vca:
5509 raise vimconn.VimConnConnectionException("Failed to connect vCD")
5510 if vapp_uuid is None:
5511 return None
5512
5513 url_list = [self.url, "/api/vApp/vapp-", vapp_uuid]
5514 get_vapp_restcall = "".join(url_list)
5515
5516 if vca._session:
5517 headers = {
5518 "Accept": "application/*+xml;version=" + API_VERSION,
5519 "x-vcloud-authorization": vca._session.headers[
5520 "x-vcloud-authorization"
5521 ],
5522 }
5523 response = self.perform_request(
5524 req_type="GET", url=get_vapp_restcall, headers=headers
5525 )
5526
5527 if response.status_code == 403:
5528 if need_admin_access is False:
5529 response = self.retry_rest("GET", get_vapp_restcall)
5530
5531 if response.status_code != requests.codes.ok:
5532 self.logger.debug(
5533 "REST API call {} failed. Return status code {}".format(
5534 get_vapp_restcall, response.status_code
5535 )
5536 )
5537
5538 return parsed_respond
5539
5540 try:
5541 xmlroot_respond = XmlElementTree.fromstring(response.text)
5542 parsed_respond["ovfDescriptorUploaded"] = xmlroot_respond.attrib[
5543 "ovfDescriptorUploaded"
5544 ]
5545 namespaces = {
5546 "vssd": "http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData",
5547 "ovf": "http://schemas.dmtf.org/ovf/envelope/1",
5548 "vmw": "http://www.vmware.com/schema/ovf",
5549 "vm": "http://www.vmware.com/vcloud/v1.5",
5550 "rasd": "http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData",
5551 "vmext": "http://www.vmware.com/vcloud/extension/v1.5",
5552 "xmlns": "http://www.vmware.com/vcloud/v1.5",
5553 }
5554
5555 created_section = xmlroot_respond.find("vm:DateCreated", namespaces)
5556 if created_section is not None:
5557 parsed_respond["created"] = created_section.text
5558
5559 network_section = xmlroot_respond.find(
5560 "vm:NetworkConfigSection/vm:NetworkConfig", namespaces
5561 )
5562 if (
5563 network_section is not None
5564 and "networkName" in network_section.attrib
5565 ):
5566 parsed_respond["networkname"] = network_section.attrib[
5567 "networkName"
5568 ]
5569
5570 ipscopes_section = xmlroot_respond.find(
5571 "vm:NetworkConfigSection/vm:NetworkConfig/vm:Configuration/vm:IpScopes",
5572 namespaces,
5573 )
5574 if ipscopes_section is not None:
5575 for ipscope in ipscopes_section:
5576 for scope in ipscope:
5577 tag_key = scope.tag.split("}")[1]
5578 if tag_key == "IpRanges":
5579 ip_ranges = scope.getchildren()
5580 for ipblock in ip_ranges:
5581 for block in ipblock:
5582 parsed_respond[
5583 block.tag.split("}")[1]
5584 ] = block.text
5585 else:
5586 parsed_respond[tag_key] = scope.text
5587
5588                 # parse the Children section for the remaining attributes
5589 children_section = xmlroot_respond.find("vm:Children/", namespaces)
5590 if children_section is not None:
5591 parsed_respond["name"] = children_section.attrib["name"]
5592 parsed_respond["nestedHypervisorEnabled"] = (
5593 children_section.attrib["nestedHypervisorEnabled"]
5594 if "nestedHypervisorEnabled" in children_section.attrib
5595 else None
5596 )
5597 parsed_respond["deployed"] = children_section.attrib["deployed"]
5598 parsed_respond["status"] = children_section.attrib["status"]
5599 parsed_respond["vmuuid"] = children_section.attrib["id"].split(":")[
5600 -1
5601 ]
5602 network_adapter = children_section.find(
5603 "vm:NetworkConnectionSection", namespaces
5604 )
5605 nic_list = []
5606 for adapters in network_adapter:
5607 adapter_key = adapters.tag.split("}")[1]
5608 if adapter_key == "PrimaryNetworkConnectionIndex":
5609 parsed_respond["primarynetwork"] = adapters.text
5610
5611 if adapter_key == "NetworkConnection":
5612 vnic = {}
5613 if "network" in adapters.attrib:
5614 vnic["network"] = adapters.attrib["network"]
5615 for adapter in adapters:
5616 setting_key = adapter.tag.split("}")[1]
5617 vnic[setting_key] = adapter.text
5618 nic_list.append(vnic)
5619
5620 for link in children_section:
5621 if link.tag.split("}")[1] == "Link" and "rel" in link.attrib:
5622 if link.attrib["rel"] == "screen:acquireTicket":
5623 parsed_respond["acquireTicket"] = link.attrib
5624
5625 if link.attrib["rel"] == "screen:acquireMksTicket":
5626 parsed_respond["acquireMksTicket"] = link.attrib
5627
5628 parsed_respond["interfaces"] = nic_list
5629 vCloud_extension_section = children_section.find(
5630 "xmlns:VCloudExtension", namespaces
5631 )
5632 if vCloud_extension_section is not None:
5633 vm_vcenter_info = {}
5634 vim_info = vCloud_extension_section.find(
5635 "vmext:VmVimInfo", namespaces
5636 )
5637 vmext = vim_info.find("vmext:VmVimObjectRef", namespaces)
5638
5639 if vmext is not None:
5640 vm_vcenter_info["vm_moref_id"] = vmext.find(
5641 "vmext:MoRef", namespaces
5642 ).text
5643
5644 parsed_respond["vm_vcenter_info"] = vm_vcenter_info
5645
5646 virtual_hardware_section = children_section.find(
5647 "ovf:VirtualHardwareSection", namespaces
5648 )
5649 vm_virtual_hardware_info = {}
5650 if virtual_hardware_section is not None:
5651 for item in virtual_hardware_section.iterfind(
5652 "ovf:Item", namespaces
5653 ):
5654 if (
5655 item.find("rasd:Description", namespaces).text
5656 == "Hard disk"
5657 ):
5658 disk_size = item.find(
5659 "rasd:HostResource", namespaces
5660 ).attrib["{" + namespaces["vm"] + "}capacity"]
5661 vm_virtual_hardware_info["disk_size"] = disk_size
5662 break
5663
5664 for link in virtual_hardware_section:
5665 if (
5666 link.tag.split("}")[1] == "Link"
5667 and "rel" in link.attrib
5668 ):
5669 if link.attrib["rel"] == "edit" and link.attrib[
5670 "href"
5671 ].endswith("/disks"):
5672 vm_virtual_hardware_info[
5673 "disk_edit_href"
5674 ] = link.attrib["href"]
5675 break
5676
5677 parsed_respond["vm_virtual_hardware"] = vm_virtual_hardware_info
5678 except Exception as exp:
5679 self.logger.info(
5680                 "Error occurred calling REST API for getting vApp details {}".format(
5681 exp
5682 )
5683 )
5684
5685 return parsed_respond
5686
5687 def acquire_console(self, vm_uuid=None):
5688 if vm_uuid is None:
5689 return None
5690
5691 if self.client._session:
5692 headers = {
5693 "Accept": "application/*+xml;version=" + API_VERSION,
5694 "x-vcloud-authorization": self.client._session.headers[
5695 "x-vcloud-authorization"
5696 ],
5697 }
5698 vm_dict = self.get_vapp_details_rest(vapp_uuid=vm_uuid)
5699 console_dict = vm_dict["acquireTicket"]
5700 console_rest_call = console_dict["href"]
5701
5702 response = self.perform_request(
5703 req_type="POST", url=console_rest_call, headers=headers
5704 )
5705
5706 if response.status_code == 403:
5707 response = self.retry_rest("POST", console_rest_call)
5708
5709 if response.status_code == requests.codes.ok:
5710 return response.text
5711
5712 return None
5713
5714 def modify_vm_disk(self, vapp_uuid, flavor_disk):
5715 """
5716         Method to resize the VM disk to match the flavor
5717
5718         Args:
5719             vapp_uuid - vApp identifier.
5720             flavor_disk - disk size as specified in VNFD (flavor)
5721
5722         Returns:
5723             True if the disk was resized or no resize was needed, otherwise None
5724 """
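        # Hedged usage sketch: with "conn" an initialized vimconnector,
        # "vapp_id" a deployed vApp UUID and a flavor requesting 40 GB,
        # the disk is grown only if the flavor asks for more than the VM has:
        #
        #   if conn.modify_vm_disk(vapp_id, 40):
        #       print("disk resized (or already large enough)")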
5725 status = None
5726 try:
5727             # Flavor disk size is in GB; convert it to MB
5728 flavor_disk = int(flavor_disk) * 1024
5729 vm_details = self.get_vapp_details_rest(vapp_uuid)
5730
5731 if vm_details:
5732 vm_name = vm_details["name"]
5733 self.logger.info("VM: {} flavor_disk :{}".format(vm_name, flavor_disk))
5734
5735 if vm_details and "vm_virtual_hardware" in vm_details:
5736 vm_disk = int(vm_details["vm_virtual_hardware"]["disk_size"])
5737 disk_edit_href = vm_details["vm_virtual_hardware"]["disk_edit_href"]
5738 self.logger.info("VM: {} VM_disk :{}".format(vm_name, vm_disk))
5739
5740 if flavor_disk > vm_disk:
5741 status = self.modify_vm_disk_rest(disk_edit_href, flavor_disk)
5742 self.logger.info(
5743 "Modify disk of VM {} from {} to {} MB".format(
5744 vm_name, vm_disk, flavor_disk
5745 )
5746 )
5747 else:
5748 status = True
5749 self.logger.info("No need to modify disk of VM {}".format(vm_name))
5750
5751 return status
5752 except Exception as exp:
5753             self.logger.info("Error occurred while modifying disk size {}".format(exp))
5754
5755 def modify_vm_disk_rest(self, disk_href, disk_size):
5756 """
5757         Method to modify the VM disk size via the vCD REST API
5758
5759         Args:
5760             disk_href - vCD API URL to GET and PUT disk data
5761             disk_size - disk size as specified in VNFD (flavor)
5762
5763         Returns:
5764             True on success, False if the resize task fails, otherwise None
5765 """
5766 if disk_href is None or disk_size is None:
5767 return None
5768
5769 if self.client._session:
5770 headers = {
5771 "Accept": "application/*+xml;version=" + API_VERSION,
5772 "x-vcloud-authorization": self.client._session.headers[
5773 "x-vcloud-authorization"
5774 ],
5775 }
5776 response = self.perform_request(
5777 req_type="GET", url=disk_href, headers=headers
5778 )
5779
5780 if response.status_code == 403:
5781 response = self.retry_rest("GET", disk_href)
5782
5783 if response.status_code != requests.codes.ok:
5784 self.logger.debug(
5785 "GET REST API call {} failed. Return status code {}".format(
5786 disk_href, response.status_code
5787 )
5788 )
5789
5790 return None
5791
5792 try:
5793 lxmlroot_respond = lxmlElementTree.fromstring(response.content)
5794 namespaces = {
5795 prefix: uri for prefix, uri in lxmlroot_respond.nsmap.items() if prefix
5796 }
5797 namespaces["xmlns"] = "http://www.vmware.com/vcloud/v1.5"
5798
5799 for item in lxmlroot_respond.iterfind("xmlns:Item", namespaces):
5800 if item.find("rasd:Description", namespaces).text == "Hard disk":
5801 disk_item = item.find("rasd:HostResource", namespaces)
5802 if disk_item is not None:
5803 disk_item.attrib["{" + namespaces["xmlns"] + "}capacity"] = str(
5804 disk_size
5805 )
5806 break
5807
5808 data = lxmlElementTree.tostring(
5809 lxmlroot_respond, encoding="utf8", method="xml", xml_declaration=True
5810 )
5811
5812 # Send PUT request to modify disk size
5813 headers[
5814 "Content-Type"
5815 ] = "application/vnd.vmware.vcloud.rasdItemsList+xml; charset=ISO-8859-1"
5816
5817 response = self.perform_request(
5818 req_type="PUT", url=disk_href, headers=headers, data=data
5819 )
5820 if response.status_code == 403:
5821 add_headers = {"Content-Type": headers["Content-Type"]}
5822 response = self.retry_rest("PUT", disk_href, add_headers, data)
5823
5824 if response.status_code != 202:
5825 self.logger.debug(
5826 "PUT REST API call {} failed. Return status code {}".format(
5827 disk_href, response.status_code
5828 )
5829 )
5830 else:
5831 modify_disk_task = self.get_task_from_response(response.text)
5832 result = self.client.get_task_monitor().wait_for_success(
5833 task=modify_disk_task
5834 )
5835 if result.get("status") == "success":
5836 return True
5837 else:
5838 return False
5839
5840 return None
5841 except Exception as exp:
5842 self.logger.info(
5843                 "Error occurred calling REST API for modifying disk size {}".format(exp)
5844 )
5845
5846 return None
5847
5848 def add_serial_device(self, vapp_uuid):
5849 """
5850 Method to attach a serial device to a VM
5851
5852 Args:
5853 vapp_uuid - uuid of vApp/VM
5854
5855 Returns:
5856 """
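        # Hedged usage sketch: requires vCenter access to be configured in the
        # VIM --config (see get_vm_vcenter_info); "conn" and "vapp_id" are
        # assumed names:
        #
        #   conn.add_serial_device(vapp_id)  # exposes a serial port on tcp://:65500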
5857         self.logger.info("Add serial device to vApp {}".format(vapp_uuid))
5858 _, content = self.get_vcenter_content()
5859 vm_moref_id = self.get_vm_moref_id(vapp_uuid)
5860
5861 if vm_moref_id:
5862 try:
5863 host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
5864 self.logger.info(
5865 "VM {} is currently on host {}".format(vm_obj, host_obj)
5866 )
5867 if host_obj and vm_obj:
5868 spec = vim.vm.ConfigSpec()
5869 spec.deviceChange = []
5870 serial_spec = vim.vm.device.VirtualDeviceSpec()
5871 serial_spec.operation = "add"
5872 serial_port = vim.vm.device.VirtualSerialPort()
5873 serial_port.yieldOnPoll = True
5874 backing = serial_port.URIBackingInfo()
5875 backing.serviceURI = "tcp://:65500"
5876 backing.direction = "server"
5877 serial_port.backing = backing
5878 serial_spec.device = serial_port
5879 spec.deviceChange.append(serial_spec)
5880 vm_obj.ReconfigVM_Task(spec=spec)
5881 self.logger.info("Adding serial device to VM {}".format(vm_obj))
5882 except vmodl.MethodFault as error:
5883                 self.logger.error("Error occurred while adding serial device: {}".format(error))
5884
5885 def add_pci_devices(self, vapp_uuid, pci_devices, vmname_andid):
5886 """
5887         Method to attach PCI devices to a VM
5888
5889         Args:
5890             vapp_uuid - uuid of vApp/VM
5891             pci_devices - PCI device information as specified in VNFD (flavor)
5892
5893         Returns:
5894             The status of the add PCI device task, the VM object and
5895             the vCenter connection object
5896 """
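        # Hedged usage sketch: "conn", "vapp_id" and "vm_name_id" are assumed
        # names; only the length of the pci_devices list is used here, so the
        # dict shape below is illustrative only:
        #
        #   ok, vm_obj, vc = conn.add_pci_devices(
        #       vapp_id, [{"type": "PCI-PASSTHROUGH"}], vm_name_id
        #   )
        #   if ok:
        #       print("PCI devices attached to", vm_obj)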
5897 vm_obj = None
5898 self.logger.info(
5899 "Add pci devices {} into vApp {}".format(pci_devices, vapp_uuid)
5900 )
5901 vcenter_conect, content = self.get_vcenter_content()
5902 vm_moref_id = self.get_vm_moref_id(vapp_uuid)
5903
5904 if vm_moref_id:
5905 try:
5906 no_of_pci_devices = len(pci_devices)
5907 if no_of_pci_devices > 0:
5908 # Get VM and its host
5909 host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
5910 self.logger.info(
5911 "VM {} is currently on host {}".format(vm_obj, host_obj)
5912 )
5913
5914 if host_obj and vm_obj:
5915                         # get PCI devices from the host on which the vApp is currently installed
5916 avilable_pci_devices = self.get_pci_devices(
5917 host_obj, no_of_pci_devices
5918 )
5919
5920 if avilable_pci_devices is None:
5921 # find other hosts with active pci devices
5922 (
5923 new_host_obj,
5924 avilable_pci_devices,
5925 ) = self.get_host_and_PCIdevices(content, no_of_pci_devices)
5926
5927 if (
5928 new_host_obj is not None
5929 and avilable_pci_devices is not None
5930 and len(avilable_pci_devices) > 0
5931 ):
5932                                 # Migrate VM to the host where PCI devices are available
5933 self.logger.info(
5934 "Relocate VM {} on new host {}".format(
5935 vm_obj, new_host_obj
5936 )
5937 )
5938
5939 task = self.relocate_vm(new_host_obj, vm_obj)
5940 if task is not None:
5941 result = self.wait_for_vcenter_task(
5942 task, vcenter_conect
5943 )
5944 self.logger.info(
5945 "Migrate VM status: {}".format(result)
5946 )
5947 host_obj = new_host_obj
5948 else:
5949 self.logger.info(
5950                                     "Failed to migrate VM : {}".format(vm_obj)
5951 )
5952 raise vimconn.VimConnNotFoundException(
5953 "Fail to migrate VM : {} to host {}".format(
5954 vmname_andid, new_host_obj
5955 )
5956 )
5957
5958 if (
5959 host_obj is not None
5960 and avilable_pci_devices is not None
5961 and len(avilable_pci_devices) > 0
5962 ):
5963 # Add PCI devices one by one
5964 for pci_device in avilable_pci_devices:
5965 task = self.add_pci_to_vm(host_obj, vm_obj, pci_device)
5966 if task:
5967 status = self.wait_for_vcenter_task(
5968 task, vcenter_conect
5969 )
5970
5971 if status:
5972 self.logger.info(
5973 "Added PCI device {} to VM {}".format(
5974 pci_device, str(vm_obj)
5975 )
5976 )
5977 else:
5978 self.logger.error(
5979 "Fail to add PCI device {} to VM {}".format(
5980 pci_device, str(vm_obj)
5981 )
5982 )
5983
5984 return True, vm_obj, vcenter_conect
5985 else:
5986 self.logger.error(
5987 "Currently there is no host with"
5988                             " {} available PCI devices required for VM {}".format(
5989 no_of_pci_devices, vmname_andid
5990 )
5991 )
5992
5993 raise vimconn.VimConnNotFoundException(
5994 "Currently there is no host with {} "
5995                             "available PCI devices required for VM {}".format(
5996 no_of_pci_devices, vmname_andid
5997 )
5998 )
5999 else:
6000 self.logger.debug(
6001                         "No information about PCI devices {}".format(pci_devices)
6002 )
6003 except vmodl.MethodFault as error:
6004                 self.logger.error("Error occurred while adding PCI devices: {}".format(error))
6005
6006 return None, vm_obj, vcenter_conect
6007
6008 def get_vm_obj(self, content, mob_id):
6009 """
6010         Method to get the vSphere VM object associated with a given MoRef ID
6011         Args:
6012             content - vCenter content object
6013             mob_id - MoRef ID of the VM
6014
6015         Returns:
6016             host object and VM object
6018 """
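        # Hedged usage sketch: "content" would normally come from
        # self.get_vcenter_content() and "moref" from self.get_vm_moref_id():
        #
        #   _, content = self.get_vcenter_content()
        #   host, vm = self.get_vm_obj(content, moref)
        #   if vm:
        #       print(vm.name, "runs on", host.name)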
6019 vm_obj = None
6020 host_obj = None
6021
6022 try:
6023 container = content.viewManager.CreateContainerView(
6024 content.rootFolder, [vim.VirtualMachine], True
6025 )
6026 for vm in container.view:
6027 mobID = vm._GetMoId()
6028
6029 if mobID == mob_id:
6030 vm_obj = vm
6031 host_obj = vm_obj.runtime.host
6032 break
6033 except Exception as exp:
6034 self.logger.error("Error occurred while finding VM object : {}".format(exp))
6035
6036 return host_obj, vm_obj
6037
6038 def get_pci_devices(self, host, need_devices):
6039 """
6040 Method to get the details of pci devices on given host
6041 Args:
6042 host - vSphere host object
6043 need_devices - number of pci devices needed on host
6044
6045 Returns:
6046 array of pci devices
6047 """
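        # Hedged usage sketch: "host" is a vim.HostSystem object, typically
        # obtained via get_vm_obj() or a container view; asking for 2 devices:
        #
        #   free_devs = self.get_pci_devices(host, 2)
        #   if free_devs:
        #       print([dev.id for dev in free_devs])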
6048 all_devices = []
6049 all_device_ids = []
6050 used_devices_ids = []
6051
6052 try:
6053 if host:
6054 pciPassthruInfo = host.config.pciPassthruInfo
6055 pciDevies = host.hardware.pciDevice
6056
6057 for pci_status in pciPassthruInfo:
6058 if pci_status.passthruActive:
6059 for device in pciDevies:
6060 if device.id == pci_status.id:
6061 all_device_ids.append(device.id)
6062 all_devices.append(device)
6063
6064 # check if devices are in use
6065 avalible_devices = all_devices
6066 for vm in host.vm:
6067 if vm.runtime.powerState == vim.VirtualMachinePowerState.poweredOn:
6068 vm_devices = vm.config.hardware.device
6069 for device in vm_devices:
6070 if type(device) is vim.vm.device.VirtualPCIPassthrough:
6071 if device.backing.id in all_device_ids:
6072 for use_device in avalible_devices:
6073 if use_device.id == device.backing.id:
6074 avalible_devices.remove(use_device)
6075
6076 used_devices_ids.append(device.backing.id)
6077 self.logger.debug(
6078                                         "Device {} from devices {} "
6079                                         "is in use".format(device.backing.id, device)
6080 )
6081 if len(avalible_devices) < need_devices:
6082 self.logger.debug(
6083                         "Host {} does not have {} active PCI devices available".format(
6084 host, need_devices
6085 )
6086 )
6087 self.logger.debug(
6088                         "Found only {} devices: {}".format(
6089 len(avalible_devices), avalible_devices
6090 )
6091 )
6092
6093 return None
6094 else:
6095 required_devices = avalible_devices[:need_devices]
6096 self.logger.info(
6097 "Found {} PCI devices on host {} but required only {}".format(
6098 len(avalible_devices), host, need_devices
6099 )
6100 )
6101 self.logger.info(
6102                     "Returning {} devices: {}".format(need_devices, required_devices)
6103 )
6104
6105 return required_devices
6106 except Exception as exp:
6107 self.logger.error(
6108 "Error {} occurred while finding pci devices on host: {}".format(
6109 exp, host
6110 )
6111 )
6112
6113 return None
6114
6115 def get_host_and_PCIdevices(self, content, need_devices):
6116 """
6117         Method to find a host with the required number of available PCI devices
6118
6119         Args:
6120             content - vCenter content object
6121             need_devices - number of PCI devices needed on host
6122
6123         Returns:
6124             host object and array of PCI devices
6125 """
6126 host_obj = None
6127 pci_device_objs = None
6128
6129 try:
6130 if content:
6131 container = content.viewManager.CreateContainerView(
6132 content.rootFolder, [vim.HostSystem], True
6133 )
6134 for host in container.view:
6135 devices = self.get_pci_devices(host, need_devices)
6136
6137 if devices:
6138 host_obj = host
6139 pci_device_objs = devices
6140 break
6141 except Exception as exp:
6142 self.logger.error(
6143 "Error {} occurred while finding pci devices on host: {}".format(
6144 exp, host_obj
6145 )
6146 )
6147
6148 return host_obj, pci_device_objs
6149
6150 def relocate_vm(self, dest_host, vm):
6151 """
6152         Method to relocate a VM to a new host
6153
6154 Args:
6155 dest_host - vSphere host object
6156 vm - vSphere VM object
6157
6158 Returns:
6159 task object
6160 """
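        # Hedged usage sketch: "dest_host" and "vm" are pyVmomi objects such as
        # those returned by get_host_and_PCIdevices() / get_vm_obj():
        #
        #   task = self.relocate_vm(dest_host, vm)
        #   if task:
        #       self.wait_for_vcenter_task(task, "relocate")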
6161 task = None
6162
6163 try:
6164 relocate_spec = vim.vm.RelocateSpec(host=dest_host)
6165 task = vm.Relocate(relocate_spec)
6166 self.logger.info(
6167 "Migrating {} to destination host {}".format(vm, dest_host)
6168 )
6169 except Exception as exp:
6170 self.logger.error(
6171                 "Error occurred while relocating VM {} to new host {}: {}".format(
6172                     vm, dest_host, exp
6173 )
6174 )
6175
6176 return task
6177
6178 def wait_for_vcenter_task(self, task, actionName="job", hideResult=False):
6179 """
6180 Waits and provides updates on a vSphere task
6181 """
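        # Hedged usage sketch: blocks until the vCenter task finishes and logs
        # the outcome; "task" is any pyVmomi Task object:
        #
        #   result = self.wait_for_vcenter_task(task, actionName="reconfigure")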
6182 while task.info.state == vim.TaskInfo.State.running:
6183 time.sleep(2)
6184
6185 if task.info.state == vim.TaskInfo.State.success:
6186 if task.info.result is not None and not hideResult:
6187 self.logger.info(
6188 "{} completed successfully, result: {}".format(
6189 actionName, task.info.result
6190 )
6191 )
6192 else:
6193 self.logger.info("Task {} completed successfully.".format(actionName))
6194 else:
6195 self.logger.error(
6196 "{} did not complete successfully: {} ".format(
6197 actionName, task.info.error
6198 )
6199 )
6200
6201 return task.info.result
6202
6203 def add_pci_to_vm(self, host_object, vm_object, host_pci_dev):
6204 """
6205 Method to add pci device in given VM
6206
6207 Args:
6208 host_object - vSphere host object
6209 vm_object - vSphere VM object
6210 host_pci_dev - host_pci_dev must be one of the devices from the
6211 host_object.hardware.pciDevice list
6212 which is configured as a PCI passthrough device
6213
6214 Returns:
6215 task object
6216 """
6217 task = None
6218
6219 if vm_object and host_object and host_pci_dev:
6220 try:
6221 # Add PCI device to VM
6222 pci_passthroughs = vm_object.environmentBrowser.QueryConfigTarget(
6223 host=None
6224 ).pciPassthrough
6225 systemid_by_pciid = {
6226 item.pciDevice.id: item.systemId for item in pci_passthroughs
6227 }
6228
6229 if host_pci_dev.id not in systemid_by_pciid:
6230 self.logger.error(
6231 "Device {} is not a passthrough device ".format(host_pci_dev)
6232 )
6233 return None
6234
6235 deviceId = hex(host_pci_dev.deviceId % 2**16).lstrip("0x")
6236 backing = vim.VirtualPCIPassthroughDeviceBackingInfo(
6237 deviceId=deviceId,
6238 id=host_pci_dev.id,
6239 systemId=systemid_by_pciid[host_pci_dev.id],
6240 vendorId=host_pci_dev.vendorId,
6241 deviceName=host_pci_dev.deviceName,
6242 )
6243
6244 hba_object = vim.VirtualPCIPassthrough(key=-100, backing=backing)
6245 new_device_config = vim.VirtualDeviceConfigSpec(device=hba_object)
6246 new_device_config.operation = "add"
6247 vmConfigSpec = vim.vm.ConfigSpec()
6248 vmConfigSpec.deviceChange = [new_device_config]
6249 task = vm_object.ReconfigVM_Task(spec=vmConfigSpec)
6250 self.logger.info(
6251 "Adding PCI device {} into VM {} from host {} ".format(
6252 host_pci_dev, vm_object, host_object
6253 )
6254 )
6255 except Exception as exp:
6256 self.logger.error(
6257                     "Error occurred while adding PCI device {} to VM {}: {}".format(
6258 host_pci_dev, vm_object, exp
6259 )
6260 )
6261
6262 return task
6263
6264 def get_vm_vcenter_info(self):
6265 """
6266         Method to get vCenter connection details from the VIM configuration
6267
6268         The details are taken from the --config passed when attaching the
6269         datacenter to the tenant.
6270
6271         Returns:
6272             dict with the vCenter IP, port, user and password
6273 """
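        # Hedged usage sketch: raises VimConnException if any vCenter parameter
        # is missing from the VIM --config:
        #
        #   vc = self.get_vm_vcenter_info()
        #   print(vc["vm_vcenter_ip"], vc["vm_vcenter_port"])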
6274 vm_vcenter_info = {}
6275
6276 if self.vcenter_ip is not None:
6277 vm_vcenter_info["vm_vcenter_ip"] = self.vcenter_ip
6278 else:
6279 raise vimconn.VimConnException(
6280 message="vCenter IP is not provided."
6281 " Please provide vCenter IP while attaching datacenter "
6282 "to tenant in --config"
6283 )
6284
6285 if self.vcenter_port is not None:
6286 vm_vcenter_info["vm_vcenter_port"] = self.vcenter_port
6287 else:
6288 raise vimconn.VimConnException(
6289 message="vCenter port is not provided."
6290 " Please provide vCenter port while attaching datacenter "
6291 "to tenant in --config"
6292 )
6293
6294 if self.vcenter_user is not None:
6295 vm_vcenter_info["vm_vcenter_user"] = self.vcenter_user
6296 else:
6297 raise vimconn.VimConnException(
6298 message="vCenter user is not provided."
6299 " Please provide vCenter user while attaching datacenter "
6300 "to tenant in --config"
6301 )
6302
6303 if self.vcenter_password is not None:
6304 vm_vcenter_info["vm_vcenter_password"] = self.vcenter_password
6305 else:
6306 raise vimconn.VimConnException(
6307 message="vCenter user password is not provided."
6308 " Please provide vCenter user password while attaching datacenter "
6309 "to tenant in --config"
6310 )
6311
6312 return vm_vcenter_info
6313
6314 def get_vm_pci_details(self, vmuuid):
6315 """
6316 Method to get VM PCI device details from vCenter
6317
6318 Args:
6319             vmuuid - uuid of the VM
6320
6321         Returns:
6322             dict of PCI devices attached to the VM
6323
6324 """
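        # Hedged usage sketch: "conn" and "vm_id" are assumed names; the dict
        # maps each passthrough device label to its id and PCI slot, plus the
        # host name and IP:
        #
        #   pci_info = conn.get_vm_pci_details(vm_id)
        #   print(pci_info.get("host_name"), pci_info.get("host_ip"))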
6325 vm_pci_devices_info = {}
6326
6327 try:
6328 _, content = self.get_vcenter_content()
6329 vm_moref_id = self.get_vm_moref_id(vmuuid)
6330 if vm_moref_id:
6331 # Get VM and its host
6332 if content:
6333 host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
6334 if host_obj and vm_obj:
6335 vm_pci_devices_info["host_name"] = host_obj.name
6336 vm_pci_devices_info["host_ip"] = host_obj.config.network.vnic[
6337 0
6338 ].spec.ip.ipAddress
6339
6340 for device in vm_obj.config.hardware.device:
6341 if type(device) == vim.vm.device.VirtualPCIPassthrough:
6342 device_details = {
6343 "devide_id": device.backing.id,
6344 "pciSlotNumber": device.slotInfo.pciSlotNumber,
6345 }
6346 vm_pci_devices_info[
6347 device.deviceInfo.label
6348 ] = device_details
6349 else:
6350 self.logger.error(
6351                         "Cannot connect to vCenter while getting "
6352                         "PCI devices information"
6353 )
6354
6355 return vm_pci_devices_info
6356 except Exception as exp:
6357 self.logger.error(
6358                 "Error occurred while getting VM information for VM : {}".format(exp)
6359 )
6360
6361 raise vimconn.VimConnException(message=exp)
6362
6363 def reserve_memory_for_all_vms(self, vapp, memory_mb):
6364 """
6365 Method to reserve memory for all VMs
6366 Args :
6367 vapp - VApp
6368 memory_mb - Memory in MB
6369 Returns:
6370 None
6371 """
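        # Hedged usage sketch: "vapp_obj" is a pyvcloud VApp instance and the
        # reservation is applied to every VM it contains:
        #
        #   conn.reserve_memory_for_all_vms(vapp_obj, 4096)  # reserve 4 GB per VM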
6372 self.logger.info("Reserve memory for all VMs")
6373
6374 for vms in vapp.get_all_vms():
6375 vm_id = vms.get("id").split(":")[-1]
6376 url_rest_call = "{}/api/vApp/vm-{}/virtualHardwareSection/memory".format(
6377 self.url, vm_id
6378 )
6379 headers = {
6380 "Accept": "application/*+xml;version=" + API_VERSION,
6381 "x-vcloud-authorization": self.client._session.headers[
6382 "x-vcloud-authorization"
6383 ],
6384 }
6385 headers["Content-Type"] = "application/vnd.vmware.vcloud.rasdItem+xml"
6386 response = self.perform_request(
6387 req_type="GET", url=url_rest_call, headers=headers
6388 )
6389
6390 if response.status_code == 403:
6391 response = self.retry_rest("GET", url_rest_call)
6392
6393 if response.status_code != 200:
6394 self.logger.error(
6395                     "REST call {} failed reason : {} "
6396 "status code : {}".format(
6397 url_rest_call, response.text, response.status_code
6398 )
6399 )
6400 raise vimconn.VimConnException(
6401 "reserve_memory_for_all_vms : Failed to get " "memory"
6402 )
6403
6404 bytexml = bytes(bytearray(response.text, encoding="utf-8"))
6405 contentelem = lxmlElementTree.XML(bytexml)
6406 namespaces = {
6407 prefix: uri for prefix, uri in contentelem.nsmap.items() if prefix
6408 }
6409 namespaces["xmlns"] = "http://www.vmware.com/vcloud/v1.5"
6410
6411 # Find the reservation element in the response
6412 memelem_list = contentelem.findall(".//rasd:Reservation", namespaces)
6413 for memelem in memelem_list:
6414 memelem.text = str(memory_mb)
6415
6416 newdata = lxmlElementTree.tostring(contentelem, pretty_print=True)
6417
6418 response = self.perform_request(
6419 req_type="PUT", url=url_rest_call, headers=headers, data=newdata
6420 )
6421
6422 if response.status_code == 403:
6423 add_headers = {"Content-Type": headers["Content-Type"]}
6424 response = self.retry_rest("PUT", url_rest_call, add_headers, newdata)
6425
6426 if response.status_code != 202:
6427 self.logger.error(
6428                     "REST call {} failed reason : {} "
6429 "status code : {} ".format(
6430 url_rest_call, response.text, response.status_code
6431 )
6432 )
6433 raise vimconn.VimConnException(
6434 "reserve_memory_for_all_vms : Failed to update "
6435 "virtual hardware memory section"
6436 )
6437 else:
6438 mem_task = self.get_task_from_response(response.text)
6439 result = self.client.get_task_monitor().wait_for_success(task=mem_task)
6440
6441 if result.get("status") == "success":
6442 self.logger.info(
6443 "reserve_memory_for_all_vms(): VM {} succeeded ".format(vm_id)
6444 )
6445 else:
6446 self.logger.error(
6447 "reserve_memory_for_all_vms(): VM {} failed ".format(vm_id)
6448 )
6449
6450 def connect_vapp_to_org_vdc_network(self, vapp_id, net_name):
6451 """
6452         Configure vApp network config with an org VDC network
6453         Args :
6454             vapp_id - vApp identifier; net_name - name of the org VDC network
6455         Returns:
6456             None
6457 """
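        # Hedged usage sketch: "vapp_id" and the org VDC network name are
        # assumed to exist already:
        #
        #   conn.connect_vapp_to_org_vdc_network(vapp_id, "mgmt-net")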
6458
6459 self.logger.info(
6460 "Connecting vapp {} to org vdc network {}".format(vapp_id, net_name)
6461 )
6462
6463 url_rest_call = "{}/api/vApp/vapp-{}/networkConfigSection/".format(
6464 self.url, vapp_id
6465 )
6466
6467 headers = {
6468 "Accept": "application/*+xml;version=" + API_VERSION,
6469 "x-vcloud-authorization": self.client._session.headers[
6470 "x-vcloud-authorization"
6471 ],
6472 }
6473 response = self.perform_request(
6474 req_type="GET", url=url_rest_call, headers=headers
6475 )
6476
6477 if response.status_code == 403:
6478 response = self.retry_rest("GET", url_rest_call)
6479
6480 if response.status_code != 200:
6481 self.logger.error(
6482                 "REST call {} failed reason : {} "
6483 "status code : {}".format(
6484 url_rest_call, response.text, response.status_code
6485 )
6486 )
6487 raise vimconn.VimConnException(
6488 "connect_vapp_to_org_vdc_network : Failed to get "
6489 "network config section"
6490 )
6491
6492 data = response.text
6493 headers[
6494 "Content-Type"
6495 ] = "application/vnd.vmware.vcloud.networkConfigSection+xml"
6496 net_id = self.get_network_id_by_name(net_name)
6497 if not net_id:
6498 raise vimconn.VimConnException(
6499 "connect_vapp_to_org_vdc_network : Failed to find " "existing network"
6500 )
6501
6502 bytexml = bytes(bytearray(data, encoding="utf-8"))
6503 newelem = lxmlElementTree.XML(bytexml)
6504 namespaces = {prefix: uri for prefix, uri in newelem.nsmap.items() if prefix}
6505 namespaces["xmlns"] = "http://www.vmware.com/vcloud/v1.5"
6506 nwcfglist = newelem.findall(".//xmlns:NetworkConfig", namespaces)
6507
6508 # VCD 9.7 returns an incorrect parentnetwork element. Fix it before PUT operation
6509 parentnetworklist = newelem.findall(".//xmlns:ParentNetwork", namespaces)
6510 if parentnetworklist:
6511 for pn in parentnetworklist:
6512 if "href" not in pn.keys():
6513 id_val = pn.get("id")
6514 href_val = "{}/api/network/{}".format(self.url, id_val)
6515 pn.set("href", href_val)
6516
6517 newstr = """<NetworkConfig networkName="{}">
6518 <Configuration>
6519 <ParentNetwork href="{}/api/network/{}"/>
6520 <FenceMode>bridged</FenceMode>
6521 </Configuration>
6522 </NetworkConfig>
6523 """.format(
6524 net_name, self.url, net_id
6525 )
6526 newcfgelem = lxmlElementTree.fromstring(newstr)
6527 if nwcfglist:
6528 nwcfglist[0].addnext(newcfgelem)
6529
6530 newdata = lxmlElementTree.tostring(newelem, pretty_print=True)
6531
6532 response = self.perform_request(
6533 req_type="PUT", url=url_rest_call, headers=headers, data=newdata
6534 )
6535
6536 if response.status_code == 403:
6537 add_headers = {"Content-Type": headers["Content-Type"]}
6538 response = self.retry_rest("PUT", url_rest_call, add_headers, newdata)
6539
6540 if response.status_code != 202:
6541 self.logger.error(
6542                 "REST call {} failed reason : {} "
6543 "status code : {} ".format(
6544 url_rest_call, response.text, response.status_code
6545 )
6546 )
6547 raise vimconn.VimConnException(
6548 "connect_vapp_to_org_vdc_network : Failed to update "
6549 "network config section"
6550 )
6551 else:
6552 vapp_task = self.get_task_from_response(response.text)
6553 result = self.client.get_task_monitor().wait_for_success(task=vapp_task)
6554 if result.get("status") == "success":
6555 self.logger.info(
6556 "connect_vapp_to_org_vdc_network(): Vapp {} connected to "
6557 "network {}".format(vapp_id, net_name)
6558 )
6559 else:
6560 self.logger.error(
6561 "connect_vapp_to_org_vdc_network(): Vapp {} failed to "
6562 "connect to network {}".format(vapp_id, net_name)
6563 )
6564
6565 def remove_primary_network_adapter_from_all_vms(self, vapp):
6566 """
6567         Method to remove the primary network adapter from all VMs in the vApp
6568 Args :
6569 vapp - VApp
6570 Returns:
6571 None
6572 """
6573 self.logger.info("Removing network adapter from all VMs")
6574
6575 for vms in vapp.get_all_vms():
6576 vm_id = vms.get("id").split(":")[-1]
6577
6578 url_rest_call = "{}/api/vApp/vm-{}/networkConnectionSection/".format(
6579 self.url, vm_id
6580 )
6581
6582 headers = {
6583 "Accept": "application/*+xml;version=" + API_VERSION,
6584 "x-vcloud-authorization": self.client._session.headers[
6585 "x-vcloud-authorization"
6586 ],
6587 }
6588 response = self.perform_request(
6589 req_type="GET", url=url_rest_call, headers=headers
6590 )
6591
6592 if response.status_code == 403:
6593 response = self.retry_rest("GET", url_rest_call)
6594
6595 if response.status_code != 200:
6596 self.logger.error(
6597                     "REST call {} failed reason : {} "
6598 "status code : {}".format(
6599 url_rest_call, response.text, response.status_code
6600 )
6601 )
6602 raise vimconn.VimConnException(
6603 "remove_primary_network_adapter : Failed to get "
6604 "network connection section"
6605 )
6606
6607 data = response.text
6608 data = data.split('<Link rel="edit"')[0]
6609
6610 headers[
6611 "Content-Type"
6612 ] = "application/vnd.vmware.vcloud.networkConnectionSection+xml"
6613
6614 newdata = """<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
6615 <NetworkConnectionSection xmlns="http://www.vmware.com/vcloud/v1.5"
6616 xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1"
6617 xmlns:vssd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData"
6618 xmlns:common="http://schemas.dmtf.org/wbem/wscim/1/common"
6619 xmlns:rasd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData"
6620 xmlns:vmw="http://www.vmware.com/schema/ovf"
6621 xmlns:ovfenv="http://schemas.dmtf.org/ovf/environment/1"
6622 xmlns:vmext="http://www.vmware.com/vcloud/extension/v1.5"
6623 xmlns:ns9="http://www.vmware.com/vcloud/versions"
6624 href="{url}" type="application/vnd.vmware.vcloud.networkConnectionSection+xml"
6625 ovf:required="false">
6626 <ovf:Info>Specifies the available VM network connections</ovf:Info>
6627 <PrimaryNetworkConnectionIndex>0</PrimaryNetworkConnectionIndex>
6628 <Link rel="edit" href="{url}"
6629 type="application/vnd.vmware.vcloud.networkConnectionSection+xml"/>
6630 </NetworkConnectionSection>""".format(
6631 url=url_rest_call
6632 )
6633 response = self.perform_request(
6634 req_type="PUT", url=url_rest_call, headers=headers, data=newdata
6635 )
6636
6637 if response.status_code == 403:
6638 add_headers = {"Content-Type": headers["Content-Type"]}
6639 response = self.retry_rest("PUT", url_rest_call, add_headers, newdata)
6640
6641 if response.status_code != 202:
6642 self.logger.error(
6643                     "REST call {} failed reason : {} "
6644 "status code : {} ".format(
6645 url_rest_call, response.text, response.status_code
6646 )
6647 )
6648 raise vimconn.VimConnException(
6649 "remove_primary_network_adapter : Failed to update "
6650 "network connection section"
6651 )
6652 else:
6653 nic_task = self.get_task_from_response(response.text)
6654 result = self.client.get_task_monitor().wait_for_success(task=nic_task)
6655 if result.get("status") == "success":
6656 self.logger.info(
6657                         "remove_primary_network_adapter(): removed primary network "
6658                         "adapter from VM {}".format(vm_id)
6659 )
6660 else:
6661 self.logger.error(
6662                         "remove_primary_network_adapter(): failed to remove primary "
6663                         "network adapter from VM {}".format(vm_id)
6664 )
6665
6666 def add_network_adapter_to_vms(
6667 self, vapp, network_name, primary_nic_index, nicIndex, net, nic_type=None
6668 ):
6669 """
6670         Method to add a network adapter to each VM in the vApp
6671         Args :
6672             network_name - name of network
6673             primary_nic_index - int value for primary nic index
6674             nicIndex - int value for nic index
6675             nic_type - network adapter model to attach to the VM
6676 Returns:
6677 None
6678 """
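        # Hedged usage sketch: attach NIC 1 of every VM in "vapp_obj" to
        # "data-net" as an SR-IOV adapter with a fixed IP (all values assumed):
        #
        #   conn.add_network_adapter_to_vms(
        #       vapp_obj, "data-net", primary_nic_index=0, nicIndex=1,
        #       net={"ip_address": "10.0.0.10"}, nic_type="SR-IOV",
        #   )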
6679
6680 self.logger.info(
6681 "Add network adapter to VM: network_name {} nicIndex {} nic_type {}".format(
6682 network_name, nicIndex, nic_type
6683 )
6684 )
6685 try:
6686 ip_address = None
6687 floating_ip = False
6688 mac_address = None
6689 if "floating_ip" in net:
6690 floating_ip = net["floating_ip"]
6691
6692 # Stub for ip_address feature
6693 if "ip_address" in net:
6694 ip_address = net["ip_address"]
6695
6696 if "mac_address" in net:
6697 mac_address = net["mac_address"]
6698
6699 if floating_ip:
6700 allocation_mode = "POOL"
6701 elif ip_address:
6702 allocation_mode = "MANUAL"
6703 else:
6704 allocation_mode = "DHCP"
6705
6706 if not nic_type:
6707 for vms in vapp.get_all_vms():
6708 vm_id = vms.get("id").split(":")[-1]
6709
6710 url_rest_call = (
6711 "{}/api/vApp/vm-{}/networkConnectionSection/".format(
6712 self.url, vm_id
6713 )
6714 )
6715
6716 headers = {
6717 "Accept": "application/*+xml;version=" + API_VERSION,
6718 "x-vcloud-authorization": self.client._session.headers[
6719 "x-vcloud-authorization"
6720 ],
6721 }
6722 response = self.perform_request(
6723 req_type="GET", url=url_rest_call, headers=headers
6724 )
6725
6726 if response.status_code == 403:
6727 response = self.retry_rest("GET", url_rest_call)
6728
6729 if response.status_code != 200:
6730 self.logger.error(
6731                             "REST call {} failed reason : {} "
6732 "status code : {}".format(
6733 url_rest_call, response.text, response.status_code
6734 )
6735 )
6736 raise vimconn.VimConnException(
6737 "add_network_adapter_to_vms : Failed to get "
6738 "network connection section"
6739 )
6740
6741 data = response.text
6742 data = data.split('<Link rel="edit"')[0]
6743 if "<PrimaryNetworkConnectionIndex>" not in data:
6744 self.logger.debug("add_network_adapter PrimaryNIC not in data")
6745 item = """<PrimaryNetworkConnectionIndex>{}</PrimaryNetworkConnectionIndex>
6746 <NetworkConnection network="{}">
6747 <NetworkConnectionIndex>{}</NetworkConnectionIndex>
6748 <IsConnected>true</IsConnected>
6749 <IpAddressAllocationMode>{}</IpAddressAllocationMode>
6750 </NetworkConnection>""".format(
6751 primary_nic_index, network_name, nicIndex, allocation_mode
6752 )
6753
6754 # Stub for ip_address feature
6755 if ip_address:
6756 ip_tag = "<IpAddress>{}</IpAddress>".format(ip_address)
6757 item = item.replace(
6758 "</NetworkConnectionIndex>\n",
6759 "</NetworkConnectionIndex>\n{}\n".format(ip_tag),
6760 )
6761
6762 if mac_address:
6763 mac_tag = "<MACAddress>{}</MACAddress>".format(mac_address)
6764 item = item.replace(
6765 "</IsConnected>\n",
6766 "</IsConnected>\n{}\n".format(mac_tag),
6767 )
6768
6769 data = data.replace(
6770 "</ovf:Info>\n",
6771 "</ovf:Info>\n{}\n</NetworkConnectionSection>".format(item),
6772 )
6773 else:
6774 self.logger.debug("add_network_adapter PrimaryNIC in data")
6775 new_item = """<NetworkConnection network="{}">
6776 <NetworkConnectionIndex>{}</NetworkConnectionIndex>
6777 <IsConnected>true</IsConnected>
6778 <IpAddressAllocationMode>{}</IpAddressAllocationMode>
6779 </NetworkConnection>""".format(
6780 network_name, nicIndex, allocation_mode
6781 )
6782
6783 # Stub for ip_address feature
6784 if ip_address:
6785 ip_tag = "<IpAddress>{}</IpAddress>".format(ip_address)
6786 new_item = new_item.replace(
6787 "</NetworkConnectionIndex>\n",
6788 "</NetworkConnectionIndex>\n{}\n".format(ip_tag),
6789 )
6790
6791 if mac_address:
6792 mac_tag = "<MACAddress>{}</MACAddress>".format(mac_address)
6793 new_item = new_item.replace(
6794 "</IsConnected>\n",
6795 "</IsConnected>\n{}\n".format(mac_tag),
6796 )
6797
6798 data = data + new_item + "</NetworkConnectionSection>"
6799
6800 headers[
6801 "Content-Type"
6802 ] = "application/vnd.vmware.vcloud.networkConnectionSection+xml"
6803
6804 response = self.perform_request(
6805 req_type="PUT", url=url_rest_call, headers=headers, data=data
6806 )
6807
6808 if response.status_code == 403:
6809 add_headers = {"Content-Type": headers["Content-Type"]}
6810 response = self.retry_rest(
6811 "PUT", url_rest_call, add_headers, data
6812 )
6813
6814 if response.status_code != 202:
6815 self.logger.error(
6816                             "REST call {} failed reason : {} "
6817 "status code : {} ".format(
6818 url_rest_call, response.text, response.status_code
6819 )
6820 )
6821 raise vimconn.VimConnException(
6822 "add_network_adapter_to_vms : Failed to update "
6823 "network connection section"
6824 )
6825 else:
6826 nic_task = self.get_task_from_response(response.text)
6827 result = self.client.get_task_monitor().wait_for_success(
6828 task=nic_task
6829 )
6830
6831 if result.get("status") == "success":
6832 self.logger.info(
6833                                     "add_network_adapter_to_vms(): VM {} connected to "
6834 "default NIC type".format(vm_id)
6835 )
6836 else:
6837 self.logger.error(
6838 "add_network_adapter_to_vms(): VM {} failed to "
6839 "connect NIC type".format(vm_id)
6840 )
6841 else:
6842 for vms in vapp.get_all_vms():
6843 vm_id = vms.get("id").split(":")[-1]
6844
6845 url_rest_call = (
6846 "{}/api/vApp/vm-{}/networkConnectionSection/".format(
6847 self.url, vm_id
6848 )
6849 )
6850
6851 headers = {
6852 "Accept": "application/*+xml;version=" + API_VERSION,
6853 "x-vcloud-authorization": self.client._session.headers[
6854 "x-vcloud-authorization"
6855 ],
6856 }
6857 response = self.perform_request(
6858 req_type="GET", url=url_rest_call, headers=headers
6859 )
6860
6861 if response.status_code == 403:
6862 response = self.retry_rest("GET", url_rest_call)
6863
6864 if response.status_code != 200:
6865 self.logger.error(
6866                             "REST call {} failed reason : {} "
6867 "status code : {}".format(
6868 url_rest_call, response.text, response.status_code
6869 )
6870 )
6871 raise vimconn.VimConnException(
6872 "add_network_adapter_to_vms : Failed to get "
6873 "network connection section"
6874 )
6875 data = response.text
6876 data = data.split('<Link rel="edit"')[0]
6877 vcd_netadapter_type = nic_type
6878
6879 if nic_type in ["SR-IOV", "VF"]:
6880 vcd_netadapter_type = "SRIOVETHERNETCARD"
6881
6882 if "<PrimaryNetworkConnectionIndex>" not in data:
6883 self.logger.debug(
6884 "add_network_adapter PrimaryNIC not in data nic_type {}".format(
6885 nic_type
6886 )
6887 )
6888 item = """<PrimaryNetworkConnectionIndex>{}</PrimaryNetworkConnectionIndex>
6889 <NetworkConnection network="{}">
6890 <NetworkConnectionIndex>{}</NetworkConnectionIndex>
6891 <IsConnected>true</IsConnected>
6892 <IpAddressAllocationMode>{}</IpAddressAllocationMode>
6893 <NetworkAdapterType>{}</NetworkAdapterType>
6894 </NetworkConnection>""".format(
6895 primary_nic_index,
6896 network_name,
6897 nicIndex,
6898 allocation_mode,
6899 vcd_netadapter_type,
6900 )
6901
6902 # Stub for ip_address feature
6903 if ip_address:
6904 ip_tag = "<IpAddress>{}</IpAddress>".format(ip_address)
6905 item = item.replace(
6906 "</NetworkConnectionIndex>\n",
6907 "</NetworkConnectionIndex>\n{}\n".format(ip_tag),
6908 )
6909
6910 if mac_address:
6911 mac_tag = "<MACAddress>{}</MACAddress>".format(mac_address)
6912 item = item.replace(
6913 "</IsConnected>\n",
6914 "</IsConnected>\n{}\n".format(mac_tag),
6915 )
6916
6917 data = data.replace(
6918 "</ovf:Info>\n",
6919 "</ovf:Info>\n{}\n</NetworkConnectionSection>".format(item),
6920 )
6921 else:
6922 self.logger.debug(
6923 "add_network_adapter PrimaryNIC in data nic_type {}".format(
6924 nic_type
6925 )
6926 )
6927 new_item = """<NetworkConnection network="{}">
6928 <NetworkConnectionIndex>{}</NetworkConnectionIndex>
6929 <IsConnected>true</IsConnected>
6930 <IpAddressAllocationMode>{}</IpAddressAllocationMode>
6931 <NetworkAdapterType>{}</NetworkAdapterType>
6932 </NetworkConnection>""".format(
6933 network_name, nicIndex, allocation_mode, vcd_netadapter_type
6934 )
6935
6936 # Stub for ip_address feature
6937 if ip_address:
6938 ip_tag = "<IpAddress>{}</IpAddress>".format(ip_address)
6939 new_item = new_item.replace(
6940 "</NetworkConnectionIndex>\n",
6941 "</NetworkConnectionIndex>\n{}\n".format(ip_tag),
6942 )
6943
6944 if mac_address:
6945 mac_tag = "<MACAddress>{}</MACAddress>".format(mac_address)
6946 new_item = new_item.replace(
6947 "</IsConnected>\n",
6948 "</IsConnected>\n{}\n".format(mac_tag),
6949 )
6950
6951 data = data + new_item + "</NetworkConnectionSection>"
6952
6953 headers[
6954 "Content-Type"
6955 ] = "application/vnd.vmware.vcloud.networkConnectionSection+xml"
6956
6957 response = self.perform_request(
6958 req_type="PUT", url=url_rest_call, headers=headers, data=data
6959 )
6960
6961 if response.status_code == 403:
6962 add_headers = {"Content-Type": headers["Content-Type"]}
6963 response = self.retry_rest(
6964 "PUT", url_rest_call, add_headers, data
6965 )
6966
6967 if response.status_code != 202:
6968 self.logger.error(
6969                             "REST call {} failed reason : {} "
6970 "status code : {}".format(
6971 url_rest_call, response.text, response.status_code
6972 )
6973 )
6974 raise vimconn.VimConnException(
6975 "add_network_adapter_to_vms : Failed to update "
6976 "network connection section"
6977 )
6978 else:
6979 nic_task = self.get_task_from_response(response.text)
6980 result = self.client.get_task_monitor().wait_for_success(
6981 task=nic_task
6982 )
6983
6984 if result.get("status") == "success":
6985 self.logger.info(
6986 "add_network_adapter_to_vms(): VM {} "
6987                                     "connected to NIC type {}".format(vm_id, nic_type)
6988 )
6989 else:
6990 self.logger.error(
6991 "add_network_adapter_to_vms(): VM {} "
6992 "failed to connect NIC type {}".format(vm_id, nic_type)
6993 )
6994 except Exception as exp:
6995 self.logger.error(
6996 "add_network_adapter_to_vms() : exception occurred "
6997 "while adding Network adapter"
6998 )
6999
7000 raise vimconn.VimConnException(message=exp)
7001
7002 def set_numa_affinity(self, vmuuid, paired_threads_id):
7003 """
7004         Method to assign NUMA affinity in the VM configuration parameters
7005         Args :
7006             vmuuid - vm uuid
7007             paired_threads_id - one or more virtual processor
7008                                 numbers
7009         Returns:
7010             None if the affinity was applied successfully
7011 """
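        # Hedged usage sketch: pin the VM to the NUMA node(s) derived from the
        # paired thread ids computed elsewhere (the "0,1" value is assumed):
        #
        #   conn.set_numa_affinity(vm_id, "0,1")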
7012 try:
7013 vcenter_conect, content = self.get_vcenter_content()
7014 vm_moref_id = self.get_vm_moref_id(vmuuid)
7015 _, vm_obj = self.get_vm_obj(content, vm_moref_id)
7016
7017 if vm_obj:
7018 config_spec = vim.vm.ConfigSpec()
7019 config_spec.extraConfig = []
7020 opt = vim.option.OptionValue()
7021 opt.key = "numa.nodeAffinity"
7022 opt.value = str(paired_threads_id)
7023 config_spec.extraConfig.append(opt)
7024 task = vm_obj.ReconfigVM_Task(config_spec)
7025
7026 if task:
7027 self.wait_for_vcenter_task(task, vcenter_conect)
7028 extra_config = vm_obj.config.extraConfig
7029 flag = False
7030
7031 for opts in extra_config:
7032 if "numa.nodeAffinity" in opts.key:
7033 flag = True
7034 self.logger.info(
7035                             "set_numa_affinity: Successfully assigned NUMA affinity "
7036 "value {} for vm {}".format(opt.value, vm_obj)
7037 )
7038
7039 if flag:
7040 return
7041 else:
7042 self.logger.error("set_numa_affinity: Failed to assign numa affinity")
7043 except Exception as exp:
7044 self.logger.error(
7045 "set_numa_affinity : exception occurred while setting numa affinity "
7046                 "for VM {} : {}".format(vmuuid, exp)
7047 )
7048
7049 raise vimconn.VimConnException(
7050 "set_numa_affinity : Error {} failed to assign numa "
7051 "affinity".format(exp)
7052 )
7053
7054 def cloud_init(self, vapp, cloud_config):
7055 """
7056 Method to inject ssh-key
7057 vapp - vapp object
7058 cloud_config a dictionary with:
7059 'key-pairs': (optional) list of strings with the public key to be inserted to the default user
7060 'users': (optional) list of users to be inserted, each item is a dict with:
7061 'name': (mandatory) user name,
7062 'key-pairs': (optional) list of strings with the public key to be inserted to the user
7063 'user-data': (optional) can be a string with the text script to be passed directly to cloud-init,
7064 or a list of strings, each one contains a script to be passed, usually with a MIMEmultipart file
7065 'config-files': (optional). List of files to be transferred. Each item is a dict with:
7066 'dest': (mandatory) string with the destination absolute path
7067 'encoding': (optional, by default text). Can be one of:
7068 'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
7069 'content' (mandatory): string with the content of the file
7070 'permissions': (optional) string with file permissions, typically octal notation '0644'
7071 'owner': (optional) file owner, string with the format 'owner:group'
7072                 'boot-data-drive': boolean to indicate if user-data must be passed using a boot drive (hard disk)
7073 """
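        # Hedged usage sketch: inject an ssh key for the default user and one
        # extra user (key strings shortened, names assumed):
        #
        #   conn.cloud_init(vapp_obj, {
        #       "key-pairs": ["ssh-rsa AAAA... osm@mgmt"],
        #       "users": [{"name": "osm", "key-pairs": ["ssh-rsa AAAA..."]}],
        #   })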
7074 try:
7075 if not isinstance(cloud_config, dict):
7076 raise Exception(
7077 "cloud_init : parameter cloud_config is not a dictionary"
7078 )
7079 else:
7080 key_pairs = []
7081 userdata = []
7082
7083 if "key-pairs" in cloud_config:
7084 key_pairs = cloud_config["key-pairs"]
7085
7086 if "users" in cloud_config:
7087 userdata = cloud_config["users"]
7088
7089 self.logger.debug("cloud_init : Guest os customization started..")
7090 customize_script = self.format_script(
7091 key_pairs=key_pairs, users_list=userdata
7092 )
7093 customize_script = customize_script.replace("&", "&amp;")
7094 self.guest_customization(vapp, customize_script)
7095 except Exception as exp:
7096 self.logger.error(
7097 "cloud_init : exception occurred while injecting " "ssh-key"
7098 )
7099
7100 raise vimconn.VimConnException(
7101 "cloud_init : Error {} failed to inject " "ssh-key".format(exp)
7102 )
7103
7104 def format_script(self, key_pairs=[], users_list=[]):
7105 bash_script = """#!/bin/sh
7106 echo performing customization tasks with param $1 at `date "+DATE: %Y-%m-%d - TIME: %H:%M:%S"`>> /root/customization.log
7107 if [ "$1" = "precustomization" ];then
7108 echo performing precustomization tasks on `date "+DATE: %Y-%m-%d - TIME: %H:%M:%S"` >> /root/customization.log
7109 """
7110
7111 keys = "\n".join(key_pairs)
7112 if keys:
7113 keys_data = """
7114 if [ ! -d /root/.ssh ];then
7115 mkdir /root/.ssh
7116 chown root:root /root/.ssh
7117 chmod 700 /root/.ssh
7118 touch /root/.ssh/authorized_keys
7119 chown root:root /root/.ssh/authorized_keys
7120 chmod 600 /root/.ssh/authorized_keys
7121 # make centos with selinux happy
7122 which restorecon && restorecon -Rv /root/.ssh
7123 else
7124 touch /root/.ssh/authorized_keys
7125 chown root:root /root/.ssh/authorized_keys
7126 chmod 600 /root/.ssh/authorized_keys
7127 fi
7128 echo '{key}' >> /root/.ssh/authorized_keys
7129 """.format(
7130 key=keys
7131 )
7132
7133 bash_script += keys_data
7134
7135 for user in users_list:
7136 if "name" in user:
7137 user_name = user["name"]
7138
7139 if "key-pairs" in user:
7140 user_keys = "\n".join(user["key-pairs"])
7141 else:
7142 user_keys = None
7143
7144 add_user_name = """
7145 useradd -d /home/{user_name} -m -g users -s /bin/bash {user_name}
7146 """.format(
7147 user_name=user_name
7148 )
7149
7150 bash_script += add_user_name
7151
7152 if user_keys:
7153 user_keys_data = """
7154 mkdir /home/{user_name}/.ssh
7155 chown {user_name}:{user_name} /home/{user_name}/.ssh
7156 chmod 700 /home/{user_name}/.ssh
7157 touch /home/{user_name}/.ssh/authorized_keys
7158 chown {user_name}:{user_name} /home/{user_name}/.ssh/authorized_keys
7159 chmod 600 /home/{user_name}/.ssh/authorized_keys
7160 # make centos with selinux happy
7161 which restorecon && restorecon -Rv /home/{user_name}/.ssh
7162 echo '{user_key}' >> /home/{user_name}/.ssh/authorized_keys
7163 """.format(
7164 user_name=user_name, user_key=user_keys
7165 )
7166 bash_script += user_keys_data
7167
7168 return bash_script + "\n\tfi"
7169
7170 def guest_customization(self, vapp, customize_script):
7171 """
7172 Method to customize guest os
7173 vapp - Vapp object
7174 customize_script - Customize script to be run at first boot of VM.
7175 """
7176 for vm in vapp.get_all_vms():
7177 vm_id = vm.get("id").split(":")[-1]
7178 vm_name = vm.get("name")
7179 vm_name = vm_name.replace("_", "-")
7180
7181 vm_customization_url = (
7182 "{}/api/vApp/vm-{}/guestCustomizationSection/".format(self.url, vm_id)
7183 )
7184 headers = {
7185 "Accept": "application/*+xml;version=" + API_VERSION,
7186 "x-vcloud-authorization": self.client._session.headers[
7187 "x-vcloud-authorization"
7188 ],
7189 }
7190
7191 headers[
7192 "Content-Type"
7193 ] = "application/vnd.vmware.vcloud.guestCustomizationSection+xml"
7194
7195 data = """<GuestCustomizationSection
7196 xmlns="http://www.vmware.com/vcloud/v1.5"
7197 xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1"
7198 ovf:required="false" href="{}"
7199 type="application/vnd.vmware.vcloud.guestCustomizationSection+xml">
7200 <ovf:Info>Specifies Guest OS Customization Settings</ovf:Info>
7201 <Enabled>true</Enabled>
7202 <ChangeSid>false</ChangeSid>
7203 <VirtualMachineId>{}</VirtualMachineId>
7204 <JoinDomainEnabled>false</JoinDomainEnabled>
7205 <UseOrgSettings>false</UseOrgSettings>
7206 <AdminPasswordEnabled>false</AdminPasswordEnabled>
7207 <AdminPasswordAuto>true</AdminPasswordAuto>
7208 <AdminAutoLogonEnabled>false</AdminAutoLogonEnabled>
7209 <AdminAutoLogonCount>0</AdminAutoLogonCount>
7210 <ResetPasswordRequired>false</ResetPasswordRequired>
7211 <CustomizationScript>{}</CustomizationScript>
7212 <ComputerName>{}</ComputerName>
7213 <Link href="{}"
7214 type="application/vnd.vmware.vcloud.guestCustomizationSection+xml" rel="edit"/>
7215 </GuestCustomizationSection>
7216 """.format(
7217 vm_customization_url,
7218 vm_id,
7219 customize_script,
7220 vm_name,
7221 vm_customization_url,
7222 )
7223
7224 response = self.perform_request(
7225 req_type="PUT", url=vm_customization_url, headers=headers, data=data
7226 )
7227 if response.status_code == 202:
7228 guest_task = self.get_task_from_response(response.text)
7229 self.client.get_task_monitor().wait_for_success(task=guest_task)
7230 self.logger.info(
7231 "guest_customization : customized guest os task "
7232 "completed for VM {}".format(vm_name)
7233 )
7234 else:
7235 self.logger.error(
7236                     "guest_customization : task for customized guest os "
7237 "failed for VM {}".format(vm_name)
7238 )
7239
7240 raise vimconn.VimConnException(
7241                     "guest_customization : failed to perform "
7242 "guest os customization on VM {}".format(vm_name)
7243 )
7244
7245 def add_new_disk(self, vapp_uuid, disk_size):
7246 """
7247 Method to create an empty vm disk
7248
7249 Args:
7250 vapp_uuid - is vapp identifier.
7251 disk_size - size of disk to be created in GB
7252
7253 Returns:
7254 None
7255 """
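        # Hedged usage sketch: add a 20 GB empty disk to the vApp identified by
        # "vapp_id" (assumed name); on failure the VM is rolled back:
        #
        #   conn.add_new_disk(vapp_id, 20)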
7256 status = False
7257 vm_details = None
7258 try:
7259 # Disk size in GB, convert it into MB
7260 if disk_size is not None:
7261 disk_size_mb = int(disk_size) * 1024
7262 vm_details = self.get_vapp_details_rest(vapp_uuid)
7263
7264 if vm_details and "vm_virtual_hardware" in vm_details:
7265 self.logger.info(
7266 "Adding disk to VM: {} disk size:{}GB".format(
7267 vm_details["name"], disk_size
7268 )
7269 )
7270 disk_href = vm_details["vm_virtual_hardware"]["disk_edit_href"]
7271 status = self.add_new_disk_rest(disk_href, disk_size_mb)
7272 except Exception as exp:
7273 msg = "Error occurred while creating new disk {}.".format(exp)
7274 self.rollback_newvm(vapp_uuid, msg)
7275
7276 if status:
7277 self.logger.info(
7278 "Added new disk to VM: {} disk size:{}GB".format(
7279 vm_details["name"], disk_size
7280 )
7281 )
7282 else:
7283 # If failed to add disk, delete VM
7284 msg = "add_new_disk: Failed to add new disk to {}".format(
7285 vm_details["name"]
7286 )
7287 self.rollback_newvm(vapp_uuid, msg)
7288
7289 def add_new_disk_rest(self, disk_href, disk_size_mb):
7290 """
7291         Retrieves the vApp Disks section & adds a new empty disk
7292
7293         Args:
7294             disk_href: Disk section href to add the disk
7295 disk_size_mb: Disk size in MB
7296
7297 Returns: Status of add new disk task
7298 """
7299 status = False
7300 if self.client._session:
7301 headers = {
7302 "Accept": "application/*+xml;version=" + API_VERSION,
7303 "x-vcloud-authorization": self.client._session.headers[
7304 "x-vcloud-authorization"
7305 ],
7306 }
7307 response = self.perform_request(
7308 req_type="GET", url=disk_href, headers=headers
7309 )
7310
7311 if response.status_code == 403:
7312 response = self.retry_rest("GET", disk_href)
7313
7314 if response.status_code != requests.codes.ok:
7315 self.logger.error(
7316 "add_new_disk_rest: GET REST API call {} failed. Return status code {}".format(
7317 disk_href, response.status_code
7318 )
7319 )
7320
7321 return status
7322
7323 try:
7324             # Find bus type & max of instance IDs assigned to disks
7325 lxmlroot_respond = lxmlElementTree.fromstring(response.content)
7326 namespaces = {
7327 prefix: uri for prefix, uri in lxmlroot_respond.nsmap.items() if prefix
7328 }
7329 namespaces["xmlns"] = "http://www.vmware.com/vcloud/v1.5"
7330 instance_id = 0
7331
7332 for item in lxmlroot_respond.iterfind("xmlns:Item", namespaces):
7333 if item.find("rasd:Description", namespaces).text == "Hard disk":
7334 inst_id = int(item.find("rasd:InstanceID", namespaces).text)
7335
7336 if inst_id > instance_id:
7337 instance_id = inst_id
7338 disk_item = item.find("rasd:HostResource", namespaces)
7339 bus_subtype = disk_item.attrib[
7340 "{" + namespaces["xmlns"] + "}busSubType"
7341 ]
7342 bus_type = disk_item.attrib[
7343 "{" + namespaces["xmlns"] + "}busType"
7344 ]
7345
7346 instance_id = instance_id + 1
7347 new_item = """<Item>
7348 <rasd:Description>Hard disk</rasd:Description>
7349 <rasd:ElementName>New disk</rasd:ElementName>
7350 <rasd:HostResource
7351 xmlns:vcloud="http://www.vmware.com/vcloud/v1.5"
7352 vcloud:capacity="{}"
7353 vcloud:busSubType="{}"
7354 vcloud:busType="{}"></rasd:HostResource>
7355 <rasd:InstanceID>{}</rasd:InstanceID>
7356 <rasd:ResourceType>17</rasd:ResourceType>
7357 </Item>""".format(
7358 disk_size_mb, bus_subtype, bus_type, instance_id
7359 )
7360
7361 new_data = response.text
7362 # Add new item at the bottom
7363 new_data = new_data.replace(
7364 "</Item>\n</RasdItemsList>",
7365 "</Item>\n{}\n</RasdItemsList>".format(new_item),
7366 )
7367
7368 # Send PUT request to modify virtual hardware section with new disk
7369 headers[
7370 "Content-Type"
7371 ] = "application/vnd.vmware.vcloud.rasdItemsList+xml; charset=ISO-8859-1"
7372
7373 response = self.perform_request(
7374 req_type="PUT", url=disk_href, data=new_data, headers=headers
7375 )
7376
7377 if response.status_code == 403:
7378 add_headers = {"Content-Type": headers["Content-Type"]}
7379 response = self.retry_rest("PUT", disk_href, add_headers, new_data)
7380
7381 if response.status_code != 202:
7382 self.logger.error(
7383 "PUT REST API call {} failed. Return status code {}. response.text:{}".format(
7384 disk_href, response.status_code, response.text
7385 )
7386 )
7387 else:
7388 add_disk_task = self.get_task_from_response(response.text)
7389 result = self.client.get_task_monitor().wait_for_success(
7390 task=add_disk_task
7391 )
7392
7393 if result.get("status") == "success":
7394 status = True
7395 else:
7396 self.logger.error(
7397 "Add new disk REST task failed to add {} MB disk".format(
7398 disk_size_mb
7399 )
7400 )
7401 except Exception as exp:
7402 self.logger.error(
7403 "Error occurred calling rest api for creating new disk {}".format(exp)
7404 )
7405
7406 return status
7407
7408 def add_existing_disk(
7409 self,
7410 catalogs=None,
7411 image_id=None,
7412 size=None,
7413 template_name=None,
7414 vapp_uuid=None,
7415 ):
7416 """
7417 Method to add existing disk to vm
7418 Args :
7419 catalogs - List of VDC catalogs
7420 image_id - Catalog ID
7421 template_name - Name of template in catalog
7422 vapp_uuid - UUID of vApp
7423 Returns:
7424 None
7425 """
7426 disk_info = None
7427 vcenter_conect, content = self.get_vcenter_content()
7428 # find moref-id of vm in image
7429 catalog_vm_info = self.get_vapp_template_details(
7430 catalogs=catalogs,
7431 image_id=image_id,
7432 )
7433
7434 if catalog_vm_info and "vm_vcenter_info" in catalog_vm_info:
7435 if "vm_moref_id" in catalog_vm_info["vm_vcenter_info"]:
7436 catalog_vm_moref_id = catalog_vm_info["vm_vcenter_info"].get(
7437 "vm_moref_id", None
7438 )
7439
7440 if catalog_vm_moref_id:
7441 self.logger.info(
7442 "Moref_id of VM in catalog : {}".format(catalog_vm_moref_id)
7443 )
7444 _, catalog_vm_obj = self.get_vm_obj(content, catalog_vm_moref_id)
7445
7446 if catalog_vm_obj:
7447 # find existing disk
7448 disk_info = self.find_disk(catalog_vm_obj)
7449 else:
7450 exp_msg = "No VM with image id {} found".format(image_id)
7451 self.rollback_newvm(vapp_uuid, exp_msg, exp_type="NotFound")
7452 else:
7453 exp_msg = "No Image found with image ID {} ".format(image_id)
7454 self.rollback_newvm(vapp_uuid, exp_msg, exp_type="NotFound")
7455
7456 if disk_info:
7457 self.logger.info("Existing disk_info : {}".format(disk_info))
7458 # get VM
7459 vm_moref_id = self.get_vm_moref_id(vapp_uuid)
7460 _, vm_obj = self.get_vm_obj(content, vm_moref_id)
7461
7462 if vm_obj:
7463 status = self.add_disk(
7464 vcenter_conect=vcenter_conect,
7465 vm=vm_obj,
7466 disk_info=disk_info,
7467 size=size,
7468 vapp_uuid=vapp_uuid,
7469 )
7470
7471 if status:
7472 self.logger.info(
7473 "Disk from image id {} added to {}".format(
7474 image_id, vm_obj.config.name
7475 )
7476 )
7477 else:
7478 msg = "No disk found with image id {} to add in VM {}".format(
7479 image_id, vm_obj.config.name
7480 )
7481 self.rollback_newvm(vapp_uuid, msg, exp_type="NotFound")
7482
7483 def find_disk(self, vm_obj):
7484 """
7485 Method to find details of existing disk in VM
7486 Args:
7487 vm_obj - vCenter object of VM
7488 Returns:
7489 disk_info : dict of disk details
7490 """
7491 disk_info = {}
7492 if vm_obj:
7493 try:
7494 devices = vm_obj.config.hardware.device
7495
7496 for device in devices:
7497 if type(device) is vim.vm.device.VirtualDisk:
7498 if isinstance(
7499 device.backing,
7500 vim.vm.device.VirtualDisk.FlatVer2BackingInfo,
7501 ) and hasattr(device.backing, "fileName"):
7502 disk_info["full_path"] = device.backing.fileName
7503 disk_info["datastore"] = device.backing.datastore
7504 disk_info["capacityKB"] = device.capacityInKB
7505 break
7506 except Exception as exp:
7507 self.logger.error(
7508 "find_disk() : exception occurred while "
7509 "getting existing disk details :{}".format(exp)
7510 )
7511
7512 return disk_info
7513
7514 def add_disk(
7515 self, vcenter_conect=None, vm=None, size=None, vapp_uuid=None, disk_info={}
7516 ):
7517 """
7518 Method to add existing disk in VM
7519 Args :
7520             vcenter_conect - vCenter connection object
7521 vm - vCenter vm object
7522 disk_info : dict of disk details
7523 Returns:
7524 status : status of add disk task
7525 """
7526 datastore = disk_info["datastore"] if "datastore" in disk_info else None
7527 fullpath = disk_info["full_path"] if "full_path" in disk_info else None
7528 capacityKB = disk_info["capacityKB"] if "capacityKB" in disk_info else None
7529 if size is not None:
7530 # Convert size from GB to KB
7531 sizeKB = int(size) * 1024 * 1024
7532             # compare size of existing disk and user given size. Assign whichever is greater
7533 self.logger.info(
7534 "Add Existing disk : sizeKB {} , capacityKB {}".format(
7535 sizeKB, capacityKB
7536 )
7537 )
7538
7539             if capacityKB is None or sizeKB > capacityKB:
7540 capacityKB = sizeKB
7541
7542 if datastore and fullpath and capacityKB:
7543 try:
7544 spec = vim.vm.ConfigSpec()
7545 # get all disks on a VM, set unit_number to the next available
7546 unit_number = 0
7547 for dev in vm.config.hardware.device:
7548 if hasattr(dev.backing, "fileName"):
7549 unit_number = int(dev.unitNumber) + 1
7550 # unit_number 7 reserved for scsi controller
7551
7552 if unit_number == 7:
7553 unit_number += 1
7554
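                    # Remember the controller of the last virtual disk seen; the new
                    # disk is attached to the same SCSI controller below.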
7555 if isinstance(dev, vim.vm.device.VirtualDisk):
7556 # vim.vm.device.VirtualSCSIController
7557 controller_key = dev.controllerKey
7558
7559 self.logger.info(
7560 "Add Existing disk : unit number {} , controller key {}".format(
7561 unit_number, controller_key
7562 )
7563 )
7564 # add disk here
7565 dev_changes = []
7566 disk_spec = vim.vm.device.VirtualDeviceSpec()
7567 disk_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
7568 disk_spec.device = vim.vm.device.VirtualDisk()
7569 disk_spec.device.backing = (
7570 vim.vm.device.VirtualDisk.FlatVer2BackingInfo()
7571 )
7572 disk_spec.device.backing.thinProvisioned = True
7573 disk_spec.device.backing.diskMode = "persistent"
7574 disk_spec.device.backing.datastore = datastore
7575 disk_spec.device.backing.fileName = fullpath
7576
7577 disk_spec.device.unitNumber = unit_number
7578 disk_spec.device.capacityInKB = capacityKB
7579 disk_spec.device.controllerKey = controller_key
7580 dev_changes.append(disk_spec)
7581 spec.deviceChange = dev_changes
7582 task = vm.ReconfigVM_Task(spec=spec)
7583 status = self.wait_for_vcenter_task(task, vcenter_conect)
7584
7585 return status
7586 except Exception as exp:
7587 exp_msg = (
7588 "add_disk() : exception {} occurred while adding disk "
7589 "{} to vm {}".format(exp, fullpath, vm.config.name)
7590 )
7591 self.rollback_newvm(vapp_uuid, exp_msg)
7592 else:
7593 msg = "add_disk() : Can not add disk to VM with disk info {} ".format(
7594 disk_info
7595 )
7596 self.rollback_newvm(vapp_uuid, msg)
7597
7598 def get_vcenter_content(self):
7599 """
7600 Get the vsphere content object
7601 """
7602 try:
7603 vm_vcenter_info = self.get_vm_vcenter_info()
7604 except Exception as exp:
7605 self.logger.error(
7606                 "Error occurred while getting vCenter information"
7607 " for VM : {}".format(exp)
7608 )
7609
7610 raise vimconn.VimConnException(message=exp)
7611
7612 context = None
7613 if hasattr(ssl, "_create_unverified_context"):
7614 context = ssl._create_unverified_context()
7615
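        # Connect to vCenter; certificate verification is skipped via the unverified SSL context.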
7616 vcenter_conect = SmartConnect(
7617 host=vm_vcenter_info["vm_vcenter_ip"],
7618 user=vm_vcenter_info["vm_vcenter_user"],
7619 pwd=vm_vcenter_info["vm_vcenter_password"],
7620 port=int(vm_vcenter_info["vm_vcenter_port"]),
7621 sslContext=context,
7622 )
7623 atexit.register(Disconnect, vcenter_conect)
7624 content = vcenter_conect.RetrieveContent()
7625
7626 return vcenter_conect, content
7627
7628 def get_vm_moref_id(self, vapp_uuid):
7629 """
7630 Get the moref_id of given VM
7631 """
7632 try:
7633 if vapp_uuid:
7634 vm_details = self.get_vapp_details_rest(
7635 vapp_uuid, need_admin_access=True
7636 )
7637
7638 if vm_details and "vm_vcenter_info" in vm_details:
7639 vm_moref_id = vm_details["vm_vcenter_info"].get("vm_moref_id", None)
7640
7641 return vm_moref_id
7642 except Exception as exp:
7643 self.logger.error(
7644                 "Error occurred while getting VM moref ID for VM : {}".format(exp)
7645 )
7646
7647 return None
7648
7649 def get_vapp_template_details(
7650 self, catalogs=None, image_id=None, template_name=None
7651 ):
7652 """
7653 Method to get vApp template details
7654 Args :
7655 catalogs - list of VDC catalogs
7656 image_id - Catalog ID to find
7657 template_name : template name in catalog
7658 Returns:
7659             parsed_response : dict of vApp template details
7660 """
7661 parsed_response = {}
7662
7663 vca = self.connect_as_admin()
7664 if not vca:
7665 raise vimconn.VimConnConnectionException("Failed to connect vCD")
7666
7667 try:
7668 org, _ = self.get_vdc_details()
7669 catalog = self.get_catalog_obj(image_id, catalogs)
7670 if catalog:
7671 items = org.get_catalog_item(catalog.get("name"), catalog.get("name"))
7672 catalog_items = [items.attrib]
7673
7674 if len(catalog_items) == 1:
7675 headers = {
7676 "Accept": "application/*+xml;version=" + API_VERSION,
7677 "x-vcloud-authorization": vca._session.headers[
7678 "x-vcloud-authorization"
7679 ],
7680 }
7681 response = self.perform_request(
7682 req_type="GET",
7683 url=catalog_items[0].get("href"),
7684 headers=headers,
7685 )
7686 catalogItem = XmlElementTree.fromstring(response.text)
7687 entity = [
7688 child
7689 for child in catalogItem
7690 if child.get("type")
7691 == "application/vnd.vmware.vcloud.vAppTemplate+xml"
7692 ][0]
7693                     vapp_template_href = entity.get("href")
7694 # get vapp details and parse moref id
7695
7696 namespaces = {
7697 "vssd": "http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData",
7698 "ovf": "http://schemas.dmtf.org/ovf/envelope/1",
7699 "vmw": "http://www.vmware.com/schema/ovf",
7700 "vm": "http://www.vmware.com/vcloud/v1.5",
7701 "rasd": "http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData",
7702 "vmext": "http://www.vmware.com/vcloud/extension/v1.5",
7703 "xmlns": "http://www.vmware.com/vcloud/v1.5",
7704 }
7705
7706 if vca._session:
7707 response = self.perform_request(
7708                         req_type="GET", url=vapp_template_href, headers=headers
7709 )
7710
7711 if response.status_code != requests.codes.ok:
7712 self.logger.debug(
7713 "REST API call {} failed. Return status code {}".format(
7714                                 vapp_template_href, response.status_code
7715 )
7716 )
7717 else:
7718 xmlroot_respond = XmlElementTree.fromstring(response.text)
7719 children_section = xmlroot_respond.find(
7720 "vm:Children/", namespaces
7721 )
7722
7723 if children_section is not None:
7724 vCloud_extension_section = children_section.find(
7725 "xmlns:VCloudExtension", namespaces
7726 )
7727
7728 if vCloud_extension_section is not None:
7729 vm_vcenter_info = {}
7730 vim_info = vCloud_extension_section.find(
7731 "vmext:VmVimInfo", namespaces
7732 )
7733 vmext = vim_info.find(
7734 "vmext:VmVimObjectRef", namespaces
7735 )
7736
7737 if vmext is not None:
7738 vm_vcenter_info["vm_moref_id"] = vmext.find(
7739 "vmext:MoRef", namespaces
7740 ).text
7741
7742 parsed_response["vm_vcenter_info"] = vm_vcenter_info
7743 except Exception as exp:
7744 self.logger.info(
7745 "Error occurred calling rest api for getting vApp details {}".format(
7746 exp
7747 )
7748 )
7749
7750 return parsed_response
7751
7752 def rollback_newvm(self, vapp_uuid, msg, exp_type="Genric"):
7753 """
7754 Method to delete vApp
7755 Args :
7756 vapp_uuid - vApp UUID
7757 msg - Error message to be logged
7758 exp_type : Exception type
7759 Returns:
7760 None
7761 """
7762 if vapp_uuid:
7763 self.delete_vminstance(vapp_uuid)
7764 else:
7765 msg = "No vApp ID"
7766
7767 self.logger.error(msg)
7768
7769 if exp_type == "Genric":
7770 raise vimconn.VimConnException(msg)
7771 elif exp_type == "NotFound":
7772 raise vimconn.VimConnNotFoundException(message=msg)
7773
7774 def add_sriov(self, vapp_uuid, sriov_nets, vmname_andid):
7775 """
7776 Method to attach SRIOV adapters to VM
7777
7778 Args:
7779 vapp_uuid - uuid of vApp/VM
7780             sriov_nets - SRIOV devices information as specified in VNFD (flavor)
7781 vmname_andid - vmname
7782
7783 Returns:
7784             The status of the add SRIOV adapter task, vm object and
7785 vcenter_conect object
7786 """
7787 vm_obj = None
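        # SR-IOV adapters are attached at the vCenter level, so connect to vCenter
        # and resolve the VM's managed object reference (moref) first.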
7788 vcenter_conect, content = self.get_vcenter_content()
7789 vm_moref_id = self.get_vm_moref_id(vapp_uuid)
7790
7791 if vm_moref_id:
7792 try:
7793 no_of_sriov_devices = len(sriov_nets)
7794 if no_of_sriov_devices > 0:
7795 # Get VM and its host
7796 host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
7797 self.logger.info(
7798 "VM {} is currently on host {}".format(vm_obj, host_obj)
7799 )
7800
7801 if host_obj and vm_obj:
7802                         # get SRIOV devices from host on which vapp is currently installed
7803 avilable_sriov_devices = self.get_sriov_devices(
7804 host_obj,
7805 no_of_sriov_devices,
7806 )
7807
7808 if len(avilable_sriov_devices) == 0:
7809 # find other hosts with active pci devices
7810 (
7811 new_host_obj,
7812 avilable_sriov_devices,
7813 ) = self.get_host_and_sriov_devices(
7814 content,
7815 no_of_sriov_devices,
7816 )
7817
7818 if (
7819 new_host_obj is not None
7820 and len(avilable_sriov_devices) > 0
7821 ):
7822 # Migrate vm to the host where SRIOV devices are available
7823 self.logger.info(
7824 "Relocate VM {} on new host {}".format(
7825 vm_obj, new_host_obj
7826 )
7827 )
7828 task = self.relocate_vm(new_host_obj, vm_obj)
7829
7830 if task is not None:
7831 result = self.wait_for_vcenter_task(
7832 task, vcenter_conect
7833 )
7834 self.logger.info(
7835 "Migrate VM status: {}".format(result)
7836 )
7837 host_obj = new_host_obj
7838 else:
7839                                 self.logger.error(
7840                                     "Failed to migrate VM : {}".format(vmname_andid)
7841 )
7842
7843 raise vimconn.VimConnNotFoundException(
7844                                     "Failed to migrate VM : {} to host {}".format(
7845 vmname_andid, new_host_obj
7846 )
7847 )
7848
7849 if (
7850 host_obj is not None
7851 and avilable_sriov_devices is not None
7852 and len(avilable_sriov_devices) > 0
7853 ):
7854 # Add SRIOV devices one by one
7855 for sriov_net in sriov_nets:
7856 network_name = sriov_net.get("net_id")
7857 self.create_dvPort_group(network_name)
7858
7859 if (
7860 sriov_net.get("type") == "VF"
7861 or sriov_net.get("type") == "SR-IOV"
7862 ):
7863                                     # add vlan ID, modify portgroup for vlan ID
7864 self.configure_vlanID(
7865 content, vcenter_conect, network_name
7866 )
7867
7868 task = self.add_sriov_to_vm(
7869 content,
7870 vm_obj,
7871 host_obj,
7872 network_name,
7873 avilable_sriov_devices[0],
7874 )
7875
7876 if task:
7877 status = self.wait_for_vcenter_task(
7878 task, vcenter_conect
7879 )
7880
7881 if status:
7882 self.logger.info(
7883 "Added SRIOV {} to VM {}".format(
7884 no_of_sriov_devices, str(vm_obj)
7885 )
7886 )
7887 else:
7888 self.logger.error(
7889                                             "Failed to add SRIOV {} to VM {}".format(
7890 no_of_sriov_devices, str(vm_obj)
7891 )
7892 )
7893
7894 raise vimconn.VimConnUnexpectedResponse(
7895                                             "Failed to add SRIOV adapter to VM {}".format(
7896 str(vm_obj)
7897 )
7898 )
7899
7900 return True, vm_obj, vcenter_conect
7901 else:
7902 self.logger.error(
7903 "Currently there is no host with"
7904                             " {} number of available SRIOV "
7905 "VFs required for VM {}".format(
7906 no_of_sriov_devices, vmname_andid
7907 )
7908 )
7909
7910 raise vimconn.VimConnNotFoundException(
7911 "Currently there is no host with {} "
7912                             "number of available SRIOV devices required for VM {}".format(
7913 no_of_sriov_devices, vmname_andid
7914 )
7915 )
7916 else:
7917 self.logger.debug(
7918                         "No information about SRIOV devices {}".format(sriov_nets)
7919 )
7920 except vmodl.MethodFault as error:
7921                 self.logger.error("Error occurred while adding SRIOV {}".format(error))
7922
7923 return None, vm_obj, vcenter_conect
7924
7925 def get_sriov_devices(self, host, no_of_vfs):
7926 """
7927 Method to get the details of SRIOV devices on given host
7928 Args:
7929 host - vSphere host object
7930 no_of_vfs - number of VFs needed on host
7931
7932 Returns:
7933 array of SRIOV devices
7934 """
7935 sriovInfo = []
7936
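        # Return the first SR-IOV enabled NIC on this host that exposes at least
        # no_of_vfs virtual functions (at most one device is collected).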
7937 if host:
7938 for device in host.config.pciPassthruInfo:
7939 if isinstance(device, vim.host.SriovInfo) and device.sriovActive:
7940 if device.numVirtualFunction >= no_of_vfs:
7941 sriovInfo.append(device)
7942 break
7943
7944 return sriovInfo
7945
7946 def get_host_and_sriov_devices(self, content, no_of_vfs):
7947 """
7948         Method to get the details of SRIOV devices on all hosts
7949
7950 Args:
7951             content - vCenter content object
7952 no_of_vfs - number of pci VFs needed on host
7953
7954 Returns:
7955 array of SRIOV devices and host object
7956 """
7957 host_obj = None
7958 sriov_device_objs = None
7959
7960 try:
7961 if content:
7962 container = content.viewManager.CreateContainerView(
7963 content.rootFolder, [vim.HostSystem], True
7964 )
7965
7966 for host in container.view:
7967 devices = self.get_sriov_devices(host, no_of_vfs)
7968
7969 if devices:
7970 host_obj = host
7971 sriov_device_objs = devices
7972 break
7973 except Exception as exp:
7974 self.logger.error(
7975 "Error {} occurred while finding SRIOV devices on host: {}".format(
7976 exp, host_obj
7977 )
7978 )
7979
7980 return host_obj, sriov_device_objs
7981
7982 def add_sriov_to_vm(self, content, vm_obj, host_obj, network_name, sriov_device):
7983 """
7984 Method to add SRIOV adapter to vm
7985
7986 Args:
7987 host_obj - vSphere host object
7988 vm_obj - vSphere vm object
7989 content - vCenter content object
7990             network_name - name of distributed virtual portgroup
7991 sriov_device - SRIOV device info
7992
7993 Returns:
7994 task object
7995 """
7996 devices = []
7997 vnic_label = "sriov nic"
7998
7999 try:
8000 dvs_portgr = self.get_dvport_group(network_name)
8001 network_name = dvs_portgr.name
8002 nic = vim.vm.device.VirtualDeviceSpec()
8003 # VM device
8004 nic.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
8005 nic.device = vim.vm.device.VirtualSriovEthernetCard()
8006 nic.device.addressType = "assigned"
8007 # nic.device.key = 13016
8008 nic.device.deviceInfo = vim.Description()
8009 nic.device.deviceInfo.label = vnic_label
8010 nic.device.deviceInfo.summary = network_name
8011 nic.device.backing = vim.vm.device.VirtualEthernetCard.NetworkBackingInfo()
8012
8013 nic.device.backing.network = self.get_obj(
8014 content, [vim.Network], network_name
8015 )
8016 nic.device.backing.deviceName = network_name
8017 nic.device.backing.useAutoDetect = False
8018 nic.device.connectable = vim.vm.device.VirtualDevice.ConnectInfo()
8019 nic.device.connectable.startConnected = True
8020 nic.device.connectable.allowGuestControl = True
8021
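            # Bind the SR-IOV NIC backing to the physical function of the selected
            # passthrough device.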
8022 nic.device.sriovBacking = (
8023 vim.vm.device.VirtualSriovEthernetCard.SriovBackingInfo()
8024 )
8025 nic.device.sriovBacking.physicalFunctionBacking = (
8026 vim.vm.device.VirtualPCIPassthrough.DeviceBackingInfo()
8027 )
8028 nic.device.sriovBacking.physicalFunctionBacking.id = sriov_device.id
8029
8030 devices.append(nic)
8031 vmconf = vim.vm.ConfigSpec(deviceChange=devices)
8032 task = vm_obj.ReconfigVM_Task(vmconf)
8033
8034 return task
8035 except Exception as exp:
8036 self.logger.error(
8037 "Error {} occurred while adding SRIOV adapter in VM: {}".format(
8038 exp, vm_obj
8039 )
8040 )
8041
8042 return None
8043
8044 def create_dvPort_group(self, network_name):
8045 """
8046         Method to create distributed virtual portgroup
8047
8048 Args:
8049 network_name - name of network/portgroup
8050
8051 Returns:
8052 portgroup key
8053 """
8054 try:
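            # Append a UUID so the generated port group name is unique on the
            # distributed virtual switch.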
8055 new_network_name = [network_name, "-", str(uuid.uuid4())]
8056 network_name = "".join(new_network_name)
8057 vcenter_conect, content = self.get_vcenter_content()
8058
8059 dv_switch = self.get_obj(
8060 content, [vim.DistributedVirtualSwitch], self.dvs_name
8061 )
8062
8063 if dv_switch:
8064 dv_pg_spec = vim.dvs.DistributedVirtualPortgroup.ConfigSpec()
8065 dv_pg_spec.name = network_name
8066
8067 dv_pg_spec.type = (
8068 vim.dvs.DistributedVirtualPortgroup.PortgroupType.earlyBinding
8069 )
8070 dv_pg_spec.defaultPortConfig = (
8071 vim.dvs.VmwareDistributedVirtualSwitch.VmwarePortConfigPolicy()
8072 )
8073 dv_pg_spec.defaultPortConfig.securityPolicy = (
8074 vim.dvs.VmwareDistributedVirtualSwitch.SecurityPolicy()
8075 )
8076 dv_pg_spec.defaultPortConfig.securityPolicy.allowPromiscuous = (
8077 vim.BoolPolicy(value=False)
8078 )
8079 dv_pg_spec.defaultPortConfig.securityPolicy.forgedTransmits = (
8080 vim.BoolPolicy(value=False)
8081 )
8082 dv_pg_spec.defaultPortConfig.securityPolicy.macChanges = vim.BoolPolicy(
8083 value=False
8084 )
8085
8086 task = dv_switch.AddDVPortgroup_Task([dv_pg_spec])
8087 self.wait_for_vcenter_task(task, vcenter_conect)
8088
8089 dvPort_group = self.get_obj(
8090 content, [vim.dvs.DistributedVirtualPortgroup], network_name
8091 )
8092
8093 if dvPort_group:
8094 self.logger.info(
8095                         "Created distributed virtual port group: {}".format(dvPort_group)
8096 )
8097 return dvPort_group.key
8098 else:
8099 self.logger.debug(
8100                         "No distributed virtual port group found with name {}".format(
8101 network_name
8102 )
8103 )
8104
8105 except Exception as exp:
8106 self.logger.error(
8107                 "Error occurred while creating distributed virtual port group {}"
8108 " : {}".format(network_name, exp)
8109 )
8110
8111 return None
8112
8113 def reconfig_portgroup(self, content, dvPort_group_name, config_info={}):
8114 """
8115         Method to reconfigure distributed virtual portgroup
8116
8117         Args:
8118             dvPort_group_name - name of distributed virtual portgroup
8119             content - vCenter content object
8120             config_info - distributed virtual portgroup configuration
8121
8122 Returns:
8123 task object
8124 """
8125 try:
8126 dvPort_group = self.get_dvport_group(dvPort_group_name)
8127
8128 if dvPort_group:
8129 dv_pg_spec = vim.dvs.DistributedVirtualPortgroup.ConfigSpec()
8130 dv_pg_spec.configVersion = dvPort_group.config.configVersion
8131 dv_pg_spec.defaultPortConfig = (
8132 vim.dvs.VmwareDistributedVirtualSwitch.VmwarePortConfigPolicy()
8133 )
8134
8135 if "vlanID" in config_info:
8136 dv_pg_spec.defaultPortConfig.vlan = (
8137 vim.dvs.VmwareDistributedVirtualSwitch.VlanIdSpec()
8138 )
8139 dv_pg_spec.defaultPortConfig.vlan.vlanId = config_info.get("vlanID")
8140
8141 task = dvPort_group.ReconfigureDVPortgroup_Task(spec=dv_pg_spec)
8142
8143 return task
8144 else:
8145 return None
8146 except Exception as exp:
8147 self.logger.error(
8148                 "Error occurred while reconfiguring distributed virtual port group {}"
8149 " : {}".format(dvPort_group_name, exp)
8150 )
8151
8152 return None
8153
8154 def destroy_dvport_group(self, dvPort_group_name):
8155 """
8156         Method to destroy distributed virtual portgroup
8157
8158         Args:
8159             dvPort_group_name - name of distributed virtual portgroup
8160
8161         Returns:
8162             True if portgroup was deleted successfully, else False
8163 """
8164 vcenter_conect, _ = self.get_vcenter_content()
8165
8166 try:
8167 status = None
8168 dvPort_group = self.get_dvport_group(dvPort_group_name)
8169
8170 if dvPort_group:
8171 task = dvPort_group.Destroy_Task()
8172 status = self.wait_for_vcenter_task(task, vcenter_conect)
8173
8174 return status
8175 except vmodl.MethodFault as exp:
8176 self.logger.error(
8177                 "Caught vmodl fault {} while deleting distributed virtual port group {}".format(
8178 exp, dvPort_group_name
8179 )
8180 )
8181
8182 return None
8183
8184 def get_dvport_group(self, dvPort_group_name):
8185 """
8186         Method to get distributed virtual portgroup
8187
8188         Args:
8189             dvPort_group_name - name of distributed virtual portgroup
8190
8191 Returns:
8192 portgroup object
8193 """
8194 _, content = self.get_vcenter_content()
8195 dvPort_group = None
8196
8197 try:
8198 container = content.viewManager.CreateContainerView(
8199 content.rootFolder, [vim.dvs.DistributedVirtualPortgroup], True
8200 )
8201
8202 for item in container.view:
8203 if item.key == dvPort_group_name:
8204 dvPort_group = item
8205 break
8206
8207 return dvPort_group
8208 except vmodl.MethodFault as exp:
8209 self.logger.error(
8210                 "Caught vmodl fault {} for distributed virtual port group {}".format(
8211 exp, dvPort_group_name
8212 )
8213 )
8214
8215 return None
8216
8217 def get_vlanID_from_dvs_portgr(self, dvPort_group_name):
8218 """
8219         Method to get distributed virtual portgroup vlanID
8220
8221         Args:
8222             dvPort_group_name - name of distributed virtual portgroup
8223
8224 Returns:
8225 vlan ID
8226 """
8227 vlanId = None
8228
8229 try:
8230 dvPort_group = self.get_dvport_group(dvPort_group_name)
8231
8232 if dvPort_group:
8233 vlanId = dvPort_group.config.defaultPortConfig.vlan.vlanId
8234 except vmodl.MethodFault as exp:
8235 self.logger.error(
8236                 "Caught vmodl fault {} for distributed virtual port group {}".format(
8237 exp, dvPort_group_name
8238 )
8239 )
8240
8241 return vlanId
8242
8243 def configure_vlanID(self, content, vcenter_conect, dvPort_group_name):
8244 """
8245         Method to configure vlanID in distributed virtual portgroup
8246
8247         Args:
8248             dvPort_group_name - name of distributed virtual portgroup
8249
8250 Returns:
8251 None
8252 """
8253 vlanID = self.get_vlanID_from_dvs_portgr(dvPort_group_name)
8254
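        # A vlanId of 0 means the port group is still untagged: pick an unused VLAN
        # from the configured range and reconfigure the port group with it.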
8255 if vlanID == 0:
8256 # configure vlanID
8257 vlanID = self.genrate_vlanID(dvPort_group_name)
8258 config = {"vlanID": vlanID}
8259 task = self.reconfig_portgroup(
8260 content, dvPort_group_name, config_info=config
8261 )
8262
8263 if task:
8264 status = self.wait_for_vcenter_task(task, vcenter_conect)
8265
8266 if status:
8267 self.logger.info(
8268 "Reconfigured Port group {} for vlan ID {}".format(
8269 dvPort_group_name, vlanID
8270 )
8271 )
8272 else:
8273 self.logger.error(
8274                     "Failed to reconfigure portgroup {} for vlanID {}".format(
8275 dvPort_group_name, vlanID
8276 )
8277 )
8278
8279 def genrate_vlanID(self, network_name):
8280 """
8281 Method to get unused vlanID
8282 Args:
8283 network_name - name of network/portgroup
8284 Returns:
8285 vlanID
8286 """
8287 vlan_id = None
8288 used_ids = []
8289
8290 if self.config.get("vlanID_range") is None:
8291 raise vimconn.VimConnConflictException(
8292 "You must provide a 'vlanID_range' "
8293                 "in the config before creating an SR-IOV network with vlan tag"
8294 )
8295
8296 if "used_vlanIDs" not in self.persistent_info:
8297 self.persistent_info["used_vlanIDs"] = {}
8298 else:
8299 used_ids = list(self.persistent_info["used_vlanIDs"].values())
8300
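        # Scan each configured "start-end" range and return the first VLAN ID that is
        # not already recorded in persistent_info.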
8301 for vlanID_range in self.config.get("vlanID_range"):
8302 start_vlanid, end_vlanid = vlanID_range.split("-")
8303
8304             if int(start_vlanid) > int(end_vlanid):
8305 raise vimconn.VimConnConflictException(
8306 "Invalid vlan ID range {}".format(vlanID_range)
8307 )
8308
8309 for vid in range(int(start_vlanid), int(end_vlanid) + 1):
8310 if vid not in used_ids:
8311 vlan_id = vid
8312 self.persistent_info["used_vlanIDs"][network_name] = vlan_id
8313 return vlan_id
8314
8315 if vlan_id is None:
8316 raise vimconn.VimConnConflictException("All Vlan IDs are in use")
8317
8318 def get_obj(self, content, vimtype, name):
8319 """
8320 Get the vsphere object associated with a given text name
8321 """
8322 obj = None
8323 container = content.viewManager.CreateContainerView(
8324 content.rootFolder, vimtype, True
8325 )
8326
8327 for item in container.view:
8328 if item.name == name:
8329 obj = item
8330 break
8331
8332 return obj
8333
8334 def insert_media_to_vm(self, vapp, image_id):
8335 """
8336 Method to insert media CD-ROM (ISO image) from catalog to vm.
8337 vapp - vapp object to get vm id
8338         image_id - image id for CD-ROM to be inserted into vm
8339 """
8340 # create connection object
8341 vca = self.connect()
8342 try:
8343 # fetching catalog details
8344 rest_url = "{}/api/catalog/{}".format(self.url, image_id)
8345
8346 if vca._session:
8347 headers = {
8348 "Accept": "application/*+xml;version=" + API_VERSION,
8349 "x-vcloud-authorization": vca._session.headers[
8350 "x-vcloud-authorization"
8351 ],
8352 }
8353 response = self.perform_request(
8354 req_type="GET", url=rest_url, headers=headers
8355 )
8356
8357 if response.status_code != 200:
8358 self.logger.error(
8359                     "REST call {} failed reason : {} "
8360 "status code : {}".format(
8361 rest_url, response.text, response.status_code
8362 )
8363 )
8364
8365 raise vimconn.VimConnException(
8366 "insert_media_to_vm(): Failed to get " "catalog details"
8367 )
8368
8369 # searching iso name and id
8370 iso_name, media_id = self.get_media_details(vca, response.text)
8371
8372 if iso_name and media_id:
8373 data = """<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
8374 <ns6:MediaInsertOrEjectParams
8375 xmlns="http://www.vmware.com/vcloud/versions" xmlns:ns2="http://schemas.dmtf.org/ovf/envelope/1"
8376 xmlns:ns3="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData"
8377 xmlns:ns4="http://schemas.dmtf.org/wbem/wscim/1/common"
8378 xmlns:ns5="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData"
8379 xmlns:ns6="http://www.vmware.com/vcloud/v1.5"
8380 xmlns:ns7="http://www.vmware.com/schema/ovf"
8381 xmlns:ns8="http://schemas.dmtf.org/ovf/environment/1"
8382 xmlns:ns9="http://www.vmware.com/vcloud/extension/v1.5">
8383 <ns6:Media
8384 type="application/vnd.vmware.vcloud.media+xml"
8385 name="{}"
8386 id="urn:vcloud:media:{}"
8387 href="https://{}/api/media/{}"/>
8388 </ns6:MediaInsertOrEjectParams>""".format(
8389 iso_name, media_id, self.url, media_id
8390 )
8391
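                    # Insert the ISO into every VM of the vApp via the vCD insertMedia action.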
8392 for vms in vapp.get_all_vms():
8393 vm_id = vms.get("id").split(":")[-1]
8394
8395 headers[
8396 "Content-Type"
8397 ] = "application/vnd.vmware.vcloud.mediaInsertOrEjectParams+xml"
8398 rest_url = "{}/api/vApp/vm-{}/media/action/insertMedia".format(
8399 self.url, vm_id
8400 )
8401
8402 response = self.perform_request(
8403 req_type="POST", url=rest_url, data=data, headers=headers
8404 )
8405
8406 if response.status_code != 202:
8407 error_msg = (
8408 "insert_media_to_vm() : Failed to insert CD-ROM to vm. Reason {}. "
8409 "Status code {}".format(response.text, response.status_code)
8410 )
8411 self.logger.error(error_msg)
8412
8413 raise vimconn.VimConnException(error_msg)
8414 else:
8415 task = self.get_task_from_response(response.text)
8416 result = self.client.get_task_monitor().wait_for_success(
8417 task=task
8418 )
8419
8420 if result.get("status") == "success":
8421 self.logger.info(
8422                                 "insert_media_to_vm(): Successfully inserted media ISO"
8423 " image to vm {}".format(vm_id)
8424 )
8425 except Exception as exp:
8426 self.logger.error(
8427 "insert_media_to_vm() : exception occurred "
8428                 "while inserting media CD-ROM: {}".format(exp)
8429 )
8430
8431 raise vimconn.VimConnException(message=exp)
8432
8433 def get_media_details(self, vca, content):
8434 """
8435 Method to get catalog item details
8436 vca - connection object
8437 content - Catalog details
8438 Return - Media name, media id
8439 """
8440 cataloghref_list = []
8441 try:
8442 if content:
8443 vm_list_xmlroot = XmlElementTree.fromstring(content)
8444
8445 for child in vm_list_xmlroot.iter():
8446 if "CatalogItem" in child.tag:
8447 cataloghref_list.append(child.attrib.get("href"))
8448
8449 if cataloghref_list is not None:
8450 for href in cataloghref_list:
8451 if href:
8452 headers = {
8453 "Accept": "application/*+xml;version=" + API_VERSION,
8454 "x-vcloud-authorization": vca._session.headers[
8455 "x-vcloud-authorization"
8456 ],
8457 }
8458 response = self.perform_request(
8459 req_type="GET", url=href, headers=headers
8460 )
8461
8462 if response.status_code != 200:
8463 self.logger.error(
8464                                 "REST call {} failed reason : {} "
8465 "status code : {}".format(
8466 href, response.text, response.status_code
8467 )
8468 )
8469
8470 raise vimconn.VimConnException(
8471 "get_media_details : Failed to get "
8472                                 "catalog item details"
8473 )
8474
8475 list_xmlroot = XmlElementTree.fromstring(response.text)
8476
8477 for child in list_xmlroot.iter():
8478 if "Entity" in child.tag:
8479 if "media" in child.attrib.get("href"):
8480 name = child.attrib.get("name")
8481 media_id = (
8482 child.attrib.get("href").split("/").pop()
8483 )
8484
8485 return name, media_id
8486 else:
8487 self.logger.debug("Media name and id not found")
8488
8489 return False, False
8490 except Exception as exp:
8491 self.logger.error(
8492                 "get_media_details : exception occurred while getting media details"
8493 )
8494
8495 raise vimconn.VimConnException(message=exp)
8496
8497 def retry_rest(self, method, url, add_headers=None, data=None):
8498 """Method to get Token & retry respective REST request
8499 Args:
8500             method - REST method - one of 'GET', 'PUT', 'POST' or 'DELETE'
8501 url - request url to be used
8502 add_headers - Additional headers (optional)
8503 data - Request payload data to be passed in request
8504 Returns:
8505 response - Response of request
8506 """
8507 response = None
8508
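        # Callers typically invoke this after a 403 response, which usually means the
        # vCD session token expired; fetch a fresh token and replay the request.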
8509 # Get token
8510 self.get_token()
8511
8512 if self.client._session:
8513 headers = {
8514 "Accept": "application/*+xml;version=" + API_VERSION,
8515 "x-vcloud-authorization": self.client._session.headers[
8516 "x-vcloud-authorization"
8517 ],
8518 }
8519
8520 if add_headers:
8521 headers.update(add_headers)
8522
8523 if method == "GET":
8524 response = self.perform_request(req_type="GET", url=url, headers=headers)
8525 elif method == "PUT":
8526 response = self.perform_request(
8527 req_type="PUT", url=url, headers=headers, data=data
8528 )
8529 elif method == "POST":
8530 response = self.perform_request(
8531 req_type="POST", url=url, headers=headers, data=data
8532 )
8533 elif method == "DELETE":
8534 response = self.perform_request(req_type="DELETE", url=url, headers=headers)
8535
8536 return response
8537
8538 def get_token(self):
8539 """Generate a new token if expired
8540
8541 Returns:
8542             The returned client object can later be used to connect to vCloud Director as admin for the VDC
8543 """
8544 self.client = self.connect()
8545
8546 def get_vdc_details(self):
8547 """Get VDC details using pyVcloud Lib
8548
8549 Returns org and vdc object
8550 """
8551 vdc = None
8552
8553 try:
8554 org = Org(self.client, resource=self.client.get_org())
8555 vdc = org.get_vdc(self.tenant_name)
8556 except Exception as e:
8557 # pyvcloud not giving a specific exception, Refresh nevertheless
8558 self.logger.debug("Received exception {}, refreshing token ".format(str(e)))
8559
8560 # Retry once, if failed by refreshing token
8561 if vdc is None:
8562 self.get_token()
8563 org = Org(self.client, resource=self.client.get_org())
8564 vdc = org.get_vdc(self.tenant_name)
8565
8566 return org, vdc
8567
8568 def perform_request(self, req_type, url, headers=None, data=None):
8569 """Perform the POST/PUT/GET/DELETE request."""
8570 # Log REST request details
8571 self.log_request(req_type, url=url, headers=headers, data=data)
8572 # perform request and return its result
8573
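        # Note: TLS certificate verification is disabled for all vCD REST calls.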
8574 if req_type == "GET":
8575 response = requests.get(url=url, headers=headers, verify=False)
8576 elif req_type == "PUT":
8577 response = requests.put(url=url, headers=headers, data=data, verify=False)
8578 elif req_type == "POST":
8579 response = requests.post(url=url, headers=headers, data=data, verify=False)
8580 elif req_type == "DELETE":
8581 response = requests.delete(url=url, headers=headers, verify=False)
8582
8583 # Log the REST response
8584 self.log_response(response)
8585
8586 return response
8587
8588 def log_request(self, req_type, url=None, headers=None, data=None):
8589 """Logs REST request details"""
8590
8591 if req_type is not None:
8592 self.logger.debug("Request type: {}".format(req_type))
8593
8594 if url is not None:
8595 self.logger.debug("Request url: {}".format(url))
8596
8597 if headers is not None:
8598 for header in headers:
8599 self.logger.debug(
8600 "Request header: {}: {}".format(header, headers[header])
8601 )
8602
8603 if data is not None:
8604 self.logger.debug("Request data: {}".format(data))
8605
8606 def log_response(self, response):
8607 """Logs REST response details"""
8608
8609 self.logger.debug("Response status code: {} ".format(response.status_code))
8610
8611 def get_task_from_response(self, content):
8612 """
8613         content - API response text (response.text)
8614 return task object
8615 """
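        # The response is either a <Task> element itself or an entity whose <Tasks>
        # child contains the task to monitor.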
8616 xmlroot = XmlElementTree.fromstring(content)
8617
8618 if xmlroot.tag.split("}")[1] == "Task":
8619 return xmlroot
8620 else:
8621 for ele in xmlroot:
8622 if ele.tag.split("}")[1] == "Tasks":
8623 task = ele[0]
8624 break
8625
8626 return task
8627
8628 def power_on_vapp(self, vapp_id, vapp_name):
8629 """
8630 vapp_id - vApp uuid
8631         vapp_name - vApp name
8632 return - Task object
8633 """
8634 headers = {
8635 "Accept": "application/*+xml;version=" + API_VERSION,
8636 "x-vcloud-authorization": self.client._session.headers[
8637 "x-vcloud-authorization"
8638 ],
8639 }
8640
8641 poweron_href = "{}/api/vApp/vapp-{}/power/action/powerOn".format(
8642 self.url, vapp_id
8643 )
8644 response = self.perform_request(
8645 req_type="POST", url=poweron_href, headers=headers
8646 )
8647
8648 if response.status_code != 202:
8649 self.logger.error(
8650                 "REST call {} failed reason : {} "
8651 "status code : {} ".format(
8652 poweron_href, response.text, response.status_code
8653 )
8654 )
8655
8656 raise vimconn.VimConnException(
8657 "power_on_vapp() : Failed to power on " "vApp {}".format(vapp_name)
8658 )
8659 else:
8660 poweron_task = self.get_task_from_response(response.text)
8661
8662 return poweron_task