RO-VIM-vmware/osm_rovim_vmware/vimconn_vmware.py
1 # -*- coding: utf-8 -*-
2
3 # #
4 # Copyright 2016-2019 VMware Inc.
5 # This file is part of ETSI OSM
6 # All Rights Reserved.
7 #
8 # Licensed under the Apache License, Version 2.0 (the "License"); you may
9 # not use this file except in compliance with the License. You may obtain
10 # a copy of the License at
11 #
12 # http://www.apache.org/licenses/LICENSE-2.0
13 #
14 # Unless required by applicable law or agreed to in writing, software
15 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
16 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
17 # License for the specific language governing permissions and limitations
18 # under the License.
19 #
20 # For those usages not covered by the Apache License, Version 2.0 please
21 # contact: osslegalrouting@vmware.com
22 # #
23
24 """
25 vimconn_vmware: implementation of the vimconn abstract class to interact with VMware vCloud Director.
26 """
27
28 import atexit
29 import hashlib
30 import json
31 import logging
32 import os
33 import random
34 import re
35 import shutil
36 import socket
37 import ssl
38 import struct
39 import subprocess
40 import tempfile
41 import time
42 import traceback
43 import uuid
44 from xml.etree import ElementTree as XmlElementTree
45 from xml.sax.saxutils import escape
46
47 from lxml import etree as lxmlElementTree
48 import netaddr
49 from osm_ro_plugin import vimconn
50 from progressbar import Bar, ETA, FileTransferSpeed, Percentage, ProgressBar
51 from pyvcloud.vcd.client import BasicLoginCredentials, Client
52 from pyvcloud.vcd.org import Org
53 from pyvcloud.vcd.vapp import VApp
54 from pyvcloud.vcd.vdc import VDC
55 from pyVim.connect import Disconnect, SmartConnect
56 from pyVmomi import vim, vmodl # @UnresolvedImport
57 import requests
58 import yaml
59
60 # global variable for vcd connector type
61 STANDALONE = "standalone"
62
63 # key for flavor dicts
64 FLAVOR_RAM_KEY = "ram"
65 FLAVOR_VCPUS_KEY = "vcpus"
66 FLAVOR_DISK_KEY = "disk"
67 DEFAULT_IP_PROFILE = {"dhcp_count": 50, "dhcp_enabled": True, "ip_version": "IPv4"}
68 # global variable for wait time
69 INTERVAL_TIME = 5
70 MAX_WAIT_TIME = 1800
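# Both values are in seconds: long-running operations are typically polled in
# INTERVAL_TIME (5 s) steps for at most MAX_WAIT_TIME (1800 s, i.e. 30 minutes).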
71
72 API_VERSION = "27.0"
73
74 # -1: "Could not be created",
75 # 0: "Unresolved",
76 # 1: "Resolved",
77 # 2: "Deployed",
78 # 3: "Suspended",
79 # 4: "Powered on",
80 # 5: "Waiting for user input",
81 # 6: "Unknown state",
82 # 7: "Unrecognized state",
83 # 8: "Powered off",
84 # 9: "Inconsistent state",
85 # 10: "Children do not all have the same status",
86 # 11: "Upload initiated, OVF descriptor pending",
87 # 12: "Upload initiated, copying contents",
88 # 13: "Upload initiated , disk contents pending",
89 # 14: "Upload has been quarantined",
90 # 15: "Upload quarantine period has expired"
91
92 # mapping vCD status to MANO
93 vcdStatusCode2manoFormat = {
94 4: "ACTIVE",
95 7: "PAUSED",
96 3: "SUSPENDED",
97 8: "INACTIVE",
98 12: "BUILD",
99 -1: "ERROR",
100 14: "DELETED",
101 }
102
103 # mapping network status to MANO format
104 netStatus2manoFormat = {
105 "ACTIVE": "ACTIVE",
106 "PAUSED": "PAUSED",
107 "INACTIVE": "INACTIVE",
108 "BUILD": "BUILD",
109 "ERROR": "ERROR",
110 "DELETED": "DELETED",
111 }
112
113
114 class vimconnector(vimconn.VimConnector):
115 # dict used to store flavor in memory
116 flavorlist = {}
117
118 def __init__(
119 self,
120 uuid=None,
121 name=None,
122 tenant_id=None,
123 tenant_name=None,
124 url=None,
125 url_admin=None,
126 user=None,
127 passwd=None,
128 log_level=None,
129 config={},
130 persistent_info={},
131 ):
132 """
133 Constructor creates a VMware connector to vCloud director.
134
135 By default the constructor doesn't validate the connection state, so a client can create the object with None arguments.
136 If the client specifies username, password, host and VDC name, the connector initializes the other missing attributes.
137
138 a) It initializes the organization UUID.
139 b) It initializes tenant_id / VDC ID (this information is derived from the tenant name).
140
141 Args:
142 uuid - organization uuid.
143 name - organization name; it must be present in vCloud director.
144 tenant_id - VDC uuid; it must be present in vCloud director.
145 tenant_name - VDC name.
146 url - hostname or IP address of vCloud director.
147 url_admin - same as above.
148 user - user that administers the organization. The caller must make sure that
149 the username has the right privileges.
150
151 password - password for the user.
152
153 The VMware connector also requires PVDC administrative privileges and a separate account.
154 These variables must be passed via the config argument, a dict that contains the keys
155
156 dict['admin_username']
157 dict['admin_password']
158 config - provides NSX and vCenter information.
159
160 Returns:
161 Nothing.
162 """
163
164 vimconn.VimConnector.__init__(
165 self,
166 uuid,
167 name,
168 tenant_id,
169 tenant_name,
170 url,
171 url_admin,
172 user,
173 passwd,
174 log_level,
175 config,
176 )
177
178 self.logger = logging.getLogger("ro.vim.vmware")
179 self.logger.setLevel(10)
180 self.persistent_info = persistent_info
181
182 self.name = name
183 self.id = uuid
184 self.url = url
185 self.url_admin = url_admin
186 self.tenant_id = tenant_id
187 self.tenant_name = tenant_name
188 self.user = user
189 self.passwd = passwd
190 self.config = config
191 self.admin_password = None
192 self.admin_user = None
193 self.org_name = ""
194 self.nsx_manager = None
195 self.nsx_user = None
196 self.nsx_password = None
197 self.availability_zone = None
198
199 # Disable warnings from self-signed certificates.
200 requests.packages.urllib3.disable_warnings()
201
202 if tenant_name is not None:
203 orgnameandtenant = tenant_name.split(":")
204
205 if len(orgnameandtenant) == 2:
206 self.tenant_name = orgnameandtenant[1]
207 self.org_name = orgnameandtenant[0]
208 else:
209 self.tenant_name = tenant_name
210
211 if "orgname" in config:
212 self.org_name = config["orgname"]
213
214 if log_level:
215 self.logger.setLevel(getattr(logging, log_level))
216
217 try:
218 self.admin_user = config["admin_username"]
219 self.admin_password = config["admin_password"]
220 except KeyError:
221 raise vimconn.VimConnException(
222 message="Error admin username or admin password is empty."
223 )
224
225 try:
226 self.nsx_manager = config["nsx_manager"]
227 self.nsx_user = config["nsx_user"]
228 self.nsx_password = config["nsx_password"]
229 except KeyError:
230 raise vimconn.VimConnException(
231 message="Error: nsx manager or nsx user or nsx password is empty in Config"
232 )
233
234 self.vcenter_ip = config.get("vcenter_ip", None)
235 self.vcenter_port = config.get("vcenter_port", None)
236 self.vcenter_user = config.get("vcenter_user", None)
237 self.vcenter_password = config.get("vcenter_password", None)
238
239 # Set availability zone for Affinity rules
240 self.availability_zone = self.set_availability_zones()
241
242 # ############# Stub code for SRIOV #################
243 # try:
244 # self.dvs_name = config['dv_switch_name']
245 # except KeyError:
246 # raise vimconn.VimConnException(message="Error:
246 # distributed virtual switch name is empty in Config")
248 #
249 # self.vlanID_range = config.get("vlanID_range", None)
250
251 self.org_uuid = None
252 self.client = None
253
254 if not url:
255 raise vimconn.VimConnException("url param can not be NoneType")
256
257 if not self.url_admin: # try to use normal url
258 self.url_admin = self.url
259
260 logging.debug(
261 "UUID: {} name: {} tenant_id: {} tenant name {}".format(
262 self.id, self.org_name, self.tenant_id, self.tenant_name
263 )
264 )
265 logging.debug(
266 "vcd url {} vcd username: {} vcd password: {}".format(
267 self.url, self.user, self.passwd
268 )
269 )
270 logging.debug(
271 "vcd admin username {} vcd admin passowrd {}".format(
272 self.admin_user, self.admin_password
273 )
274 )
275
276 # initialize organization
277 if self.user is not None and self.passwd is not None and self.url:
278 self.init_organization()
279
280 def __getitem__(self, index):
281 if index == "name":
282 return self.name
283
284 if index == "tenant_id":
285 return self.tenant_id
286
287 if index == "tenant_name":
288 return self.tenant_name
289 elif index == "id":
290 return self.id
291 elif index == "org_name":
292 return self.org_name
293 elif index == "org_uuid":
294 return self.org_uuid
295 elif index == "user":
296 return self.user
297 elif index == "passwd":
298 return self.passwd
299 elif index == "url":
300 return self.url
301 elif index == "url_admin":
302 return self.url_admin
303 elif index == "config":
304 return self.config
305 else:
306 raise KeyError("Invalid key '{}'".format(index))
307
308 def __setitem__(self, index, value):
309 if index == "name":
310 self.name = value
311
312 if index == "tenant_id":
313 self.tenant_id = value
314
315 if index == "tenant_name":
316 self.tenant_name = value
317 elif index == "id":
318 self.id = value
319 elif index == "org_name":
320 self.org_name = value
321 elif index == "org_uuid":
322 self.org_uuid = value
323 elif index == "user":
324 self.user = value
325 elif index == "passwd":
326 self.passwd = value
327 elif index == "url":
328 self.url = value
329 elif index == "url_admin":
330 self.url_admin = value
331 else:
332 raise KeyError("Invalid key '{}'".format(index))
333
334 def connect_as_admin(self):
335 """Method connect as pvdc admin user to vCloud director.
336 There are certain action that can be done only by provider vdc admin user.
337 Organization creation / provider network creation etc.
338
339 Returns:
340 The return client object that latter can be used to connect to vcloud director as admin for provider vdc
341 """
342 self.logger.debug("Logging into vCD {} as admin.".format(self.org_name))
343
344 try:
345 host = self.url
346 org = "System"
347 client_as_admin = Client(
348 host, verify_ssl_certs=False, api_version=API_VERSION
349 )
350 client_as_admin.set_credentials(
351 BasicLoginCredentials(self.admin_user, org, self.admin_password)
352 )
353 except Exception as e:
354 raise vimconn.VimConnException(
355 "Can't connect to vCloud director as: {} with exception {}".format(
356 self.admin_user, e
357 )
358 )
359
360 return client_as_admin
361
362 def connect(self):
363 """Method connect as normal user to vCloud director.
364
365 Returns:
366 The return client object that latter can be used to connect to vCloud director as admin for VDC
367 """
368 try:
369 self.logger.debug(
370 "Logging into vCD {} as {} to datacenter {}.".format(
371 self.org_name, self.user, self.org_name
372 )
373 )
374 host = self.url
375 client = Client(host, verify_ssl_certs=False, api_version=API_VERSION)
376 client.set_credentials(
377 BasicLoginCredentials(self.user, self.org_name, self.passwd)
378 )
379 except Exception as e:
380 raise vimconn.VimConnConnectionException(
381 "Can't connect to vCloud director org: "
382 "{} as user {} with exception: {}".format(self.org_name, self.user, e)
383 )
384
385 return client
386
387 def init_organization(self):
388 """Method initialize organization UUID and VDC parameters.
389
390 At bare minimum client must provide organization name that present in vCloud director and VDC.
391
392 The VDC - UUID ( tenant_id) will be initialized at the run time if client didn't call constructor.
393 The Org - UUID will be initialized at the run time if data center present in vCloud director.
394
395 Returns:
396 The return vca object that letter can be used to connect to vcloud direct as admin
397 """
398 client = self.connect()
399
400 if not client:
401 raise vimconn.VimConnConnectionException("Failed to connect vCD.")
402
403 self.client = client
404 try:
405 if self.org_uuid is None:
406 org_list = client.get_org_list()
407 for org in org_list.Org:
408 # we set org UUID at the init phase but we can do it only when we have valid credential.
409 if org.get("name") == self.org_name:
410 self.org_uuid = org.get("href").split("/")[-1]
411 self.logger.debug(
412 "Setting organization UUID {}".format(self.org_uuid)
413 )
414 break
415 else:
416 raise vimconn.VimConnException(
417 "Vcloud director organization {} not found".format(
418 self.org_name
419 )
420 )
421
422 # if all went well, request the org details
423 org_details_dict = self.get_org(org_uuid=self.org_uuid)
424
425 # there are two cases when initializing the VDC ID or VDC name at run time
426 # tenant_name provided but no tenant id
427 if (
428 self.tenant_id is None
429 and self.tenant_name is not None
430 and "vdcs" in org_details_dict
431 ):
432 vdcs_dict = org_details_dict["vdcs"]
433 for vdc in vdcs_dict:
434 if vdcs_dict[vdc] == self.tenant_name:
435 self.tenant_id = vdc
436 self.logger.debug(
437 "Setting vdc uuid {} for organization UUID {}".format(
438 self.tenant_id, self.org_name
439 )
440 )
441 break
442 else:
443 raise vimconn.VimConnException(
444 "Tenant name indicated but not present in vcloud director."
445 )
446
447 # case two: we have tenant_id but not tenant_name, so we find and set it.
448 if (
449 self.tenant_id is not None
450 and self.tenant_name is None
451 and "vdcs" in org_details_dict
452 ):
453 vdcs_dict = org_details_dict["vdcs"]
454 for vdc in vdcs_dict:
455 if vdc == self.tenant_id:
456 self.tenant_name = vdcs_dict[vdc]
457 self.logger.debug(
458 "Setting vdc uuid {} for organization UUID {}".format(
459 self.tenant_id, self.org_name
460 )
461 )
462 break
463 else:
464 raise vimconn.VimConnException(
465 "Tenant id indicated but not present in vcloud director"
466 )
467
468 self.logger.debug("Setting organization uuid {}".format(self.org_uuid))
469 except Exception as e:
470 self.logger.debug(
471 "Failed initialize organization UUID for org {}: {}".format(
472 self.org_name, e
473 ),
474 )
475 self.logger.debug(traceback.format_exc())
476 self.org_uuid = None
477
478 def new_tenant(self, tenant_name=None, tenant_description=None):
479 """Method adds a new tenant to VIM with this name.
480 This action requires access to create VDC action in vCloud director.
481
482 Args:
483 tenant_name is tenant_name to be created.
484 tenant_description not used for this call
485
486 Return:
487 returns the tenant identifier in UUID format.
488 If the action fails the method raises vimconn.VimConnException
489 """
490 vdc_task = self.create_vdc(vdc_name=tenant_name)
491 if vdc_task is not None:
492 vdc_uuid, _ = vdc_task.popitem()
493 self.logger.info(
494 "Created new vdc {} and uuid: {}".format(tenant_name, vdc_uuid)
495 )
496
497 return vdc_uuid
498 else:
499 raise vimconn.VimConnException(
500 "Failed create tenant {}".format(tenant_name)
501 )
502
503 def delete_tenant(self, tenant_id=None):
504 """Delete a tenant from VIM
505 Args:
506 tenant_id is tenant_id to be deleted.
507
508 Return:
509 returns the tenant identifier in UUID format.
510 If the action fails the method raises an exception
511 """
512 vca = self.connect_as_admin()
513 if not vca:
514 raise vimconn.VimConnConnectionException("Failed to connect vCD")
515
516 if tenant_id is not None:
517 if vca._session:
518 # Get OrgVDC
519 url_list = [self.url, "/api/vdc/", tenant_id]
520 orgvdc_href = "".join(url_list)
521
522 headers = {
523 "Accept": "application/*+xml;version=" + API_VERSION,
524 "x-vcloud-authorization": vca._session.headers[
525 "x-vcloud-authorization"
526 ],
527 }
528 response = self.perform_request(
529 req_type="GET", url=orgvdc_herf, headers=headers
530 )
531
532 if response.status_code != requests.codes.ok:
533 self.logger.debug(
534 "delete_tenant():GET REST API call {} failed. "
535 "Return status code {}".format(
536 orgvdc_href, response.status_code
537 )
538 )
539
540 raise vimconn.VimConnNotFoundException(
541 "Fail to get tenant {}".format(tenant_id)
542 )
543
544 lxmlroot_respond = lxmlElementTree.fromstring(response.content)
545 namespaces = {
546 prefix: uri
547 for prefix, uri in lxmlroot_respond.nsmap.items()
548 if prefix
549 }
550 namespaces["xmlns"] = "http://www.vmware.com/vcloud/v1.5"
551 vdc_remove_href = lxmlroot_respond.find(
552 "xmlns:Link[@rel='remove']", namespaces
553 ).attrib["href"]
554 vdc_remove_href = vdc_remove_href + "?recursive=true&force=true"
555
556 response = self.perform_request(
557 req_type="DELETE", url=vdc_remove_href, headers=headers
558 )
559
560 if response.status_code == 202:
561 time.sleep(5)
562
563 return tenant_id
564 else:
565 self.logger.debug(
566 "delete_tenant(): DELETE REST API call {} failed. "
567 "Return status code {}".format(
568 vdc_remove_href, response.status_code
569 )
570 )
571
572 raise vimconn.VimConnException(
573 "Fail to delete tenant with ID {}".format(tenant_id)
574 )
575 else:
576 self.logger.debug(
577 "delete_tenant():Incorrect tenant ID {}".format(tenant_id)
578 )
579
580 raise vimconn.VimConnNotFoundException(
581 "Fail to get tenant {}".format(tenant_id)
582 )
583
584 def get_tenant_list(self, filter_dict={}):
585 """Obtain tenants of VIM
586 filter_dict can contain the following keys:
587 name: filter by tenant name
588 id: filter by tenant uuid/id
589 <other VIM specific>
590 Returns the tenant list of dictionaries:
591 [{'name':'<name>, 'id':'<id>, ...}, ...]
592
593 """
594 org_dict = self.get_org(self.org_uuid)
595 vdcs_dict = org_dict["vdcs"]
596
597 vdclist = []
598 try:
599 for k in vdcs_dict:
600 entry = {"name": vdcs_dict[k], "id": k}
601 # if caller didn't specify dictionary we return all tenants.
602
603 if filter_dict is not None and filter_dict:
604 filtered_entry = entry.copy()
605 filtered_dict = set(entry.keys()) - set(filter_dict)
606
607 for unwanted_key in filtered_dict:
608 del entry[unwanted_key]
609
610 if filter_dict == entry:
611 vdclist.append(filtered_entry)
612 else:
613 vdclist.append(entry)
614 except Exception:
615 self.logger.debug("Error in get_tenant_list()")
616 self.logger.debug(traceback.format_exc())
617
618 raise vimconn.VimConnException("Incorrect state. {}")
619
620 return vdclist
621
622 def new_network(
623 self,
624 net_name,
625 net_type,
626 ip_profile=None,
627 shared=False,
628 provider_network_profile=None,
629 ):
630 """Adds a tenant network to VIM
631 Params:
632 'net_name': name of the network
633 'net_type': one of:
634 'bridge': overlay isolated network
635 'data': underlay E-LAN network for Passthrough and SRIOV interfaces
636 'ptp': underlay E-LINE network for Passthrough and SRIOV interfaces.
637 'ip_profile': is a dict containing the IP parameters of the network
638 'ip_version': can be "IPv4" or "IPv6" (Currently only IPv4 is implemented)
639 'subnet_address': ip_prefix_schema, that is X.X.X.X/Y
640 'gateway_address': (Optional) ip_schema, that is X.X.X.X
641 'dns_address': (Optional) comma separated list of ip_schema, e.g. X.X.X.X[,X,X,X,X]
642 'dhcp_enabled': True or False
643 'dhcp_start_address': ip_schema, first IP to grant
644 'dhcp_count': number of IPs to grant.
645 'shared': if this network can be seen/used by other tenants/organizations
646 'provider_network_profile': (optional) contains {segmentation-id: vlan, provider-network: vim_network}
647 Returns a tuple with the network identifier and created_items, or raises an exception on error
648 created_items can be None or a dictionary where this method can include key-values that will be passed to
649 the method delete_network. Can be used to store created segments, created l2gw connections, etc.
650 Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
651 as not present.
652 """
653
654 self.logger.debug(
655 "new_network tenant {} net_type {} ip_profile {} shared {} provider_network_profile {}".format(
656 net_name, net_type, ip_profile, shared, provider_network_profile
657 )
658 )
659 # vlan = None
660 # if provider_network_profile:
661 # vlan = provider_network_profile.get("segmentation-id")
662
663 created_items = {}
664 isshared = "false"
665
666 if shared:
667 isshared = "true"
668
669 # ############# Stub code for SRIOV #################
670 # if net_type == "data" or net_type == "ptp":
671 # if self.config.get('dv_switch_name') == None:
672 # raise vimconn.VimConnConflictException("You must provide 'dv_switch_name' at config value")
673 # network_uuid = self.create_dvPort_group(net_name)
674 parent_network_uuid = None
675
676 if provider_network_profile is not None:
677 for k, v in provider_network_profile.items():
678 if k == "physical_network":
679 parent_network_uuid = self.get_physical_network_by_name(v)
680
681 network_uuid = self.create_network(
682 network_name=net_name,
683 net_type=net_type,
684 ip_profile=ip_profile,
685 isshared=isshared,
686 parent_network_uuid=parent_network_uuid,
687 )
688
689 if network_uuid is not None:
690 return network_uuid, created_items
691 else:
692 raise vimconn.VimConnUnexpectedResponse(
693 "Failed create a new network {}".format(net_name)
694 )
695
696 def get_vcd_network_list(self):
697 """Method available organization for a logged in tenant
698
699 Returns:
700 The return vca object that letter can be used to connect to vcloud direct as admin
701 """
702
703 self.logger.debug(
704 "get_vcd_network_list(): retrieving network list for vcd {}".format(
705 self.tenant_name
706 )
707 )
708
709 if not self.tenant_name:
710 raise vimconn.VimConnConnectionException("Tenant name is empty.")
711
712 _, vdc = self.get_vdc_details()
713 if vdc is None:
714 raise vimconn.VimConnConnectionException(
715 "Can't retrieve information for a VDC {}".format(self.tenant_name)
716 )
717
718 vdc_uuid = vdc.get("id").split(":")[3]
719 if self.client._session:
720 headers = {
721 "Accept": "application/*+xml;version=" + API_VERSION,
722 "x-vcloud-authorization": self.client._session.headers[
723 "x-vcloud-authorization"
724 ],
725 }
726 response = self.perform_request(
727 req_type="GET", url=vdc.get("href"), headers=headers
728 )
729
730 if response.status_code != 200:
731 self.logger.error("Failed to get vdc content")
732 raise vimconn.VimConnNotFoundException("Failed to get vdc content")
733 else:
734 content = XmlElementTree.fromstring(response.text)
735
736 network_list = []
737 try:
738 for item in content:
739 if item.tag.split("}")[-1] == "AvailableNetworks":
740 for net in item:
741 response = self.perform_request(
742 req_type="GET", url=net.get("href"), headers=headers
743 )
744
745 if response.status_code != 200:
746 self.logger.error("Failed to get network content")
747 raise vimconn.VimConnNotFoundException(
748 "Failed to get network content"
749 )
750 else:
751 net_details = XmlElementTree.fromstring(response.text)
752
753 filter_dict = {}
754 net_uuid = net_details.get("id").split(":")
755
756 if len(net_uuid) != 4:
757 continue
758 else:
759 net_uuid = net_uuid[3]
760 # create dict entry
761 self.logger.debug(
762 "get_vcd_network_list(): Adding network {} "
763 "to a list vcd id {} network {}".format(
764 net_uuid, vdc_uuid, net_details.get("name")
765 )
766 )
767 filter_dict["name"] = net_details.get("name")
768 filter_dict["id"] = net_uuid
769
770 if [
771 i.text
772 for i in net_details
773 if i.tag.split("}")[-1] == "IsShared"
774 ][0] == "true":
775 shared = True
776 else:
777 shared = False
778
779 filter_dict["shared"] = shared
780 filter_dict["tenant_id"] = vdc_uuid
781
782 if int(net_details.get("status")) == 1:
783 filter_dict["admin_state_up"] = True
784 else:
785 filter_dict["admin_state_up"] = False
786
787 filter_dict["status"] = "ACTIVE"
788 filter_dict["type"] = "bridge"
789 network_list.append(filter_dict)
790 self.logger.debug(
791 "get_vcd_network_list adding entry {}".format(
792 filter_dict
793 )
794 )
795 except Exception:
796 self.logger.debug("Error in get_vcd_network_list", exc_info=True)
797 pass
798
799 self.logger.debug("get_vcd_network_list returning {}".format(network_list))
800
801 return network_list
802
803 def get_network_list(self, filter_dict={}):
804 """Obtain tenant networks of VIM
805 Filter_dict can be:
806 name: network name OR/AND
807 id: network uuid OR/AND
808 shared: boolean OR/AND
809 tenant_id: tenant OR/AND
810 admin_state_up: boolean
811 status: 'ACTIVE'
812
813 [{key : value , key : value}]
814
815 Returns the network list of dictionaries:
816 [{<the fields at Filter_dict plus some VIM specific>}, ...]
817 List can be empty
818 """
819
820 self.logger.debug(
821 "get_network_list(): retrieving network list for vcd {}".format(
822 self.tenant_name
823 )
824 )
825
826 if not self.tenant_name:
827 raise vimconn.VimConnConnectionException("Tenant name is empty.")
828
829 _, vdc = self.get_vdc_details()
830 if vdc is None:
831 raise vimconn.VimConnConnectionException(
832 "Can't retrieve information for a VDC {}.".format(self.tenant_name)
833 )
834
835 try:
836 vdcid = vdc.get("id").split(":")[3]
837
838 if self.client._session:
839 headers = {
840 "Accept": "application/*+xml;version=" + API_VERSION,
841 "x-vcloud-authorization": self.client._session.headers[
842 "x-vcloud-authorization"
843 ],
844 }
845 response = self.perform_request(
846 req_type="GET", url=vdc.get("href"), headers=headers
847 )
848
849 if response.status_code != 200:
850 self.logger.error("Failed to get vdc content")
851 raise vimconn.VimConnNotFoundException("Failed to get vdc content")
852 else:
853 content = XmlElementTree.fromstring(response.text)
854
855 network_list = []
856 for item in content:
857 if item.tag.split("}")[-1] == "AvailableNetworks":
858 for net in item:
859 response = self.perform_request(
860 req_type="GET", url=net.get("href"), headers=headers
861 )
862
863 if response.status_code != 200:
864 self.logger.error("Failed to get network content")
865 raise vimconn.VimConnNotFoundException(
866 "Failed to get network content"
867 )
868 else:
869 net_details = XmlElementTree.fromstring(response.text)
870
871 filter_entry = {}
872 net_uuid = net_details.get("id").split(":")
873
874 if len(net_uuid) != 4:
875 continue
876 else:
877 net_uuid = net_uuid[3]
878 # create dict entry
879 self.logger.debug(
880 "get_network_list(): Adding net {}"
881 " to a list vcd id {} network {}".format(
882 net_uuid, vdcid, net_details.get("name")
883 )
884 )
885 filter_entry["name"] = net_details.get("name")
886 filter_entry["id"] = net_uuid
887
888 if [
889 i.text
890 for i in net_details
891 if i.tag.split("}")[-1] == "IsShared"
892 ][0] == "true":
893 shared = True
894 else:
895 shared = False
896
897 filter_entry["shared"] = shared
898 filter_entry["tenant_id"] = vdcid
899
900 if int(net_details.get("status")) == 1:
901 filter_entry["admin_state_up"] = True
902 else:
903 filter_entry["admin_state_up"] = False
904
905 filter_entry["status"] = "ACTIVE"
906 filter_entry["type"] = "bridge"
907 filtered_entry = filter_entry.copy()
908
909 if filter_dict is not None and filter_dict:
910 # we remove all the key : value we don't care and match only
911 # respected field
912 filtered_dict = set(filter_entry.keys()) - set(
913 filter_dict
914 )
915
916 for unwanted_key in filtered_dict:
917 del filter_entry[unwanted_key]
918
919 if filter_dict == filter_entry:
920 network_list.append(filtered_entry)
921 else:
922 network_list.append(filtered_entry)
923 except Exception as e:
924 self.logger.debug("Error in get_network_list", exc_info=True)
925
926 if isinstance(e, vimconn.VimConnException):
927 raise
928 else:
929 raise vimconn.VimConnNotFoundException(
930 "Failed : Networks list not found {} ".format(e)
931 )
932
933 self.logger.debug("Returning {}".format(network_list))
934
935 return network_list
936
937 def get_network(self, net_id):
938 """Method obtains network details of net_id VIM network
939 Returns a dict with the fields described in get_network_list plus some VIM-specific ones
940 """
941 try:
942 _, vdc = self.get_vdc_details()
943 vdc_id = vdc.get("id").split(":")[3]
944
945 if self.client._session:
946 headers = {
947 "Accept": "application/*+xml;version=" + API_VERSION,
948 "x-vcloud-authorization": self.client._session.headers[
949 "x-vcloud-authorization"
950 ],
951 }
952 response = self.perform_request(
953 req_type="GET", url=vdc.get("href"), headers=headers
954 )
955
956 if response.status_code != 200:
957 self.logger.error("Failed to get vdc content")
958 raise vimconn.VimConnNotFoundException("Failed to get vdc content")
959 else:
960 content = XmlElementTree.fromstring(response.text)
961
962 filter_dict = {}
963
964 for item in content:
965 if item.tag.split("}")[-1] == "AvailableNetworks":
966 for net in item:
967 response = self.perform_request(
968 req_type="GET", url=net.get("href"), headers=headers
969 )
970
971 if response.status_code != 200:
972 self.logger.error("Failed to get network content")
973 raise vimconn.VimConnNotFoundException(
974 "Failed to get network content"
975 )
976 else:
977 net_details = XmlElementTree.fromstring(response.text)
978
979 vdc_network_id = net_details.get("id").split(":")
980 if len(vdc_network_id) == 4 and vdc_network_id[3] == net_id:
981 filter_dict["name"] = net_details.get("name")
982 filter_dict["id"] = vdc_network_id[3]
983
984 if [
985 i.text
986 for i in net_details
987 if i.tag.split("}")[-1] == "IsShared"
988 ][0] == "true":
989 shared = True
990 else:
991 shared = False
992
993 filter_dict["shared"] = shared
994 filter_dict["tenant_id"] = vdc_id
995
996 if int(net_details.get("status")) == 1:
997 filter_dict["admin_state_up"] = True
998 else:
999 filter_dict["admin_state_up"] = False
1000
1001 filter_dict["status"] = "ACTIVE"
1002 filter_dict["type"] = "bridge"
1003 self.logger.debug("Returning {}".format(filter_dict))
1004
1005 return filter_dict
1006 else:
1007 raise vimconn.VimConnNotFoundException(
1008 "Network {} not found".format(net_id)
1009 )
1010 except Exception as e:
1011 self.logger.debug("Error in get_network")
1012 self.logger.debug(traceback.format_exc())
1013
1014 if isinstance(e, vimconn.VimConnException):
1015 raise
1016 else:
1017 raise vimconn.VimConnNotFoundException(
1018 "Failed : Network not found {} ".format(e)
1019 )
1020
1021 return filter_dict
1022
1023 def delete_network(self, net_id, created_items=None):
1024 """
1025 Removes a tenant network from VIM and its associated elements
1026 :param net_id: VIM identifier of the network, provided by method new_network
1027 :param created_items: dictionary with extra items to be deleted. provided by method new_network
1028 Returns the network identifier or raises an exception upon error or when network is not found
1029 """
1030
1031 # ############# Stub code for SRIOV #################
1032 # dvport_group = self.get_dvport_group(net_id)
1033 # if dvport_group:
1034 # #delete portgroup
1035 # status = self.destroy_dvport_group(net_id)
1036 # if status:
1037 # # Remove vlanID from persistent info
1038 # if net_id in self.persistent_info["used_vlanIDs"]:
1039 # del self.persistent_info["used_vlanIDs"][net_id]
1040 #
1041 # return net_id
1042
1043 vcd_network = self.get_vcd_network(network_uuid=net_id)
1044 if vcd_network is not None and vcd_network:
1045 if self.delete_network_action(network_uuid=net_id):
1046 return net_id
1047 else:
1048 raise vimconn.VimConnNotFoundException(
1049 "Network {} not found".format(net_id)
1050 )
1051
1052 def refresh_nets_status(self, net_list):
1053 """Get the status of the networks
1054 Params: the list of network identifiers
1055 Returns a dictionary with:
1056 net_id: #VIM id of this network
1057 status: #Mandatory. Text with one of:
1058 # DELETED (not found at vim)
1059 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
1060 # OTHER (Vim reported other status not understood)
1061 # ERROR (VIM indicates an ERROR status)
1062 # ACTIVE, INACTIVE, DOWN (admin down),
1063 # BUILD (on building process)
1064 #
1065 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
1066 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
1067
1068 """
1069 dict_entry = {}
1070 try:
1071 for net in net_list:
1072 errormsg = ""
1073 vcd_network = self.get_vcd_network(network_uuid=net)
1074 if vcd_network is not None and vcd_network:
1075 if vcd_network["status"] == "1":
1076 status = "ACTIVE"
1077 else:
1078 status = "DOWN"
1079 else:
1080 status = "DELETED"
1081 errormsg = "Network not found."
1082
1083 dict_entry[net] = {
1084 "status": status,
1085 "error_msg": errormsg,
1086 "vim_info": yaml.safe_dump(vcd_network),
1087 }
1088 except Exception:
1089 self.logger.debug("Error in refresh_nets_status")
1090 self.logger.debug(traceback.format_exc())
1091
1092 return dict_entry
1093
1094 def get_flavor(self, flavor_id):
1095 """Obtain flavor details from the VIM
1096 Returns the flavor dict details {'id':<>, 'name':<>, other vim specific } #TODO to concrete
1097 """
1098 if flavor_id not in vimconnector.flavorlist:
1099 raise vimconn.VimConnNotFoundException("Flavor not found.")
1100
1101 return vimconnector.flavorlist[flavor_id]
1102
1103 def new_flavor(self, flavor_data):
1104 """Adds a tenant flavor to VIM
1105 flavor_data contains a dictionary with information, keys:
1106 name: flavor name
1107 ram: memory (cloud type) in MBytes
1108 vcpus: cpus (cloud type)
1109 extended: EPA parameters
1110 - numas: #items requested in same NUMA
1111 memory: number of 1G huge pages memory
1112 paired-threads|cores|threads: number of paired hyperthreads, complete cores OR individual
1113 threads
1114 interfaces: # passthrough(PT) or SRIOV interfaces attached to this numa
1115 - name: interface name
1116 dedicated: yes|no|yes:sriov; for PT, SRIOV or only one SRIOV for the physical NIC
1117 bandwidth: X Gbps; requested guarantee bandwidth
1118 vpci: requested virtual PCI address
1119 disk: disk size
1120 is_public:
1121 #TODO to concrete
1122 Returns the flavor identifier"""
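# Illustrative sketch (placeholder values, not from the original code) of a flavor_data dict and of how
# the EPA "extended" section overrides ram/vcpus below: with memory=4 and paired-threads=2 the stored
# flavor ends up with ram=4096 MB and vcpus=4.
#
#   flavor_data = {
#       "name": "epa-flavor",
#       "ram": 2048,
#       "vcpus": 2,
#       "disk": 10,
#       "extended": {
#           "numas": [
#               {
#                   "memory": 4,
#                   "paired-threads": 2,
#                   "interfaces": [{"name": "xe0", "dedicated": "yes", "bandwidth": "10 Gbps"}],
#               }
#           ]
#       },
#   }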
1123
1124 # generate a new uuid put to internal dict and return it.
1125 self.logger.debug("Creating new flavor - flavor_data: {}".format(flavor_data))
1126 new_flavor = flavor_data
1127 ram = flavor_data.get(FLAVOR_RAM_KEY, 1024)
1128 cpu = flavor_data.get(FLAVOR_VCPUS_KEY, 1)
1129 disk = flavor_data.get(FLAVOR_DISK_KEY, 0)
1130
1131 if not isinstance(ram, int):
1132 raise vimconn.VimConnException("Non-integer value for ram")
1133 elif not isinstance(cpu, int):
1134 raise vimconn.VimConnException("Non-integer value for cpu")
1135 elif not isinstance(disk, int):
1136 raise vimconn.VimConnException("Non-integer value for disk")
1137
1138 extended_flv = flavor_data.get("extended")
1139 if extended_flv:
1140 numas = extended_flv.get("numas")
1141 if numas:
1142 for numa in numas:
1143 # overwrite ram and vcpus
1144 if "memory" in numa:
1145 ram = numa["memory"] * 1024
1146
1147 if "paired-threads" in numa:
1148 cpu = numa["paired-threads"] * 2
1149 elif "cores" in numa:
1150 cpu = numa["cores"]
1151 elif "threads" in numa:
1152 cpu = numa["threads"]
1153
1154 new_flavor[FLAVOR_RAM_KEY] = ram
1155 new_flavor[FLAVOR_VCPUS_KEY] = cpu
1156 new_flavor[FLAVOR_DISK_KEY] = disk
1157 # generate a new uuid put to internal dict and return it.
1158 flavor_id = uuid.uuid4()
1159 vimconnector.flavorlist[str(flavor_id)] = new_flavor
1160 self.logger.debug("Created flavor - {} : {}".format(flavor_id, new_flavor))
1161
1162 return str(flavor_id)
1163
1164 def delete_flavor(self, flavor_id):
1165 """Deletes a tenant flavor from VIM identify by its id
1166
1167 Returns the used id or raise an exception
1168 """
1169 if flavor_id not in vimconnector.flavorlist:
1170 raise vimconn.VimConnNotFoundException("Flavor not found.")
1171
1172 vimconnector.flavorlist.pop(flavor_id, None)
1173
1174 return flavor_id
1175
1176 def new_image(self, image_dict):
1177 """
1178 Adds a tenant image to VIM
1179 Returns:
1180 200, image-id if the image is created
1181 <0, message if there is an error
1182 """
1183 return self.get_image_id_from_path(image_dict["location"])
1184
1185 def delete_image(self, image_id):
1186 """
1187 Deletes a tenant image from VIM
1188 Args:
1189 image_id is ID of Image to be deleted
1190 Return:
1191 returns the image identifier in UUID format or raises an exception on error
1192 """
1193 conn = self.connect_as_admin()
1194
1195 if not conn:
1196 raise vimconn.VimConnConnectionException("Failed to connect vCD")
1197
1198 # Get Catalog details
1199 url_list = [self.url, "/api/catalog/", image_id]
1200 catalog_href = "".join(url_list)
1201
1202 headers = {
1203 "Accept": "application/*+xml;version=" + API_VERSION,
1204 "x-vcloud-authorization": conn._session.headers["x-vcloud-authorization"],
1205 }
1206
1207 response = self.perform_request(
1208 req_type="GET", url=catalog_herf, headers=headers
1209 )
1210
1211 if response.status_code != requests.codes.ok:
1212 self.logger.debug(
1213 "delete_image():GET REST API call {} failed. "
1214 "Return status code {}".format(catalog_herf, response.status_code)
1215 )
1216
1217 raise vimconn.VimConnNotFoundException(
1218 "Fail to get image {}".format(image_id)
1219 )
1220
1221 lxmlroot_respond = lxmlElementTree.fromstring(response.content)
1222 namespaces = {
1223 prefix: uri for prefix, uri in lxmlroot_respond.nsmap.items() if prefix
1224 }
1225 namespaces["xmlns"] = "http://www.vmware.com/vcloud/v1.5"
1226
1227 catalogItems_section = lxmlroot_respond.find("xmlns:CatalogItems", namespaces)
1228 catalogItems = catalogItems_section.iterfind("xmlns:CatalogItem", namespaces)
1229
1230 for catalogItem in catalogItems:
1231 catalogItem_href = catalogItem.attrib["href"]
1232
1233 response = self.perform_request(
1234 req_type="GET", url=catalogItem_href, headers=headers
1235 )
1236
1237 if response.status_code != requests.codes.ok:
1238 self.logger.debug(
1239 "delete_image():GET REST API call {} failed. "
1240 "Return status code {}".format(catalog_herf, response.status_code)
1241 )
1242 raise vimconn.VimConnNotFoundException(
1243 "Fail to get catalogItem {} for catalog {}".format(
1244 catalogItem, image_id
1245 )
1246 )
1247
1248 lxmlroot_respond = lxmlElementTree.fromstring(response.content)
1249 namespaces = {
1250 prefix: uri for prefix, uri in lxmlroot_respond.nsmap.items() if prefix
1251 }
1252 namespaces["xmlns"] = "http://www.vmware.com/vcloud/v1.5"
1253 catalogitem_remove_href = lxmlroot_respond.find(
1254 "xmlns:Link[@rel='remove']", namespaces
1255 ).attrib["href"]
1256
1257 # Remove catalogItem
1258 response = self.perform_request(
1259 req_type="DELETE", url=catalogitem_remove_href, headers=headers
1260 )
1261
1262 if response.status_code == requests.codes.no_content:
1263 self.logger.debug("Deleted Catalog item {}".format(catalogItem))
1264 else:
1265 raise vimconn.VimConnException(
1266 "Fail to delete Catalog Item {}".format(catalogItem)
1267 )
1268
1269 # Remove catalog
1270 url_list = [self.url, "/api/admin/catalog/", image_id]
1271 catalog_remove_href = "".join(url_list)
1272 response = self.perform_request(
1273 req_type="DELETE", url=catalog_remove_herf, headers=headers
1274 )
1275
1276 if response.status_code == requests.codes.no_content:
1277 self.logger.debug("Deleted Catalog {}".format(image_id))
1278
1279 return image_id
1280 else:
1281 raise vimconn.VimConnException("Fail to delete Catalog {}".format(image_id))
1282
1283 def catalog_exists(self, catalog_name, catalogs):
1284 """
1285
1286 :param catalog_name:
1287 :param catalogs:
1288 :return:
1289 """
1290 for catalog in catalogs:
1291 if catalog["name"] == catalog_name:
1292 return catalog["id"]
1293
1294 def create_vimcatalog(self, vca=None, catalog_name=None):
1295 """Create new catalog entry in vCloud director.
1296
1297 Args
1298 vca: vCloud director.
1299 catalog_name: catalog that the client wishes to create. Note that no validation is done on the name.
1300 The client must make sure to provide a valid string representation.
1301
1302 Returns catalog id if catalog created else None.
1303
1304 """
1305 try:
1306 lxml_catalog_element = vca.create_catalog(catalog_name, catalog_name)
1307
1308 if lxml_catalog_element:
1309 id_attr_value = lxml_catalog_element.get("id")
1310 return id_attr_value.split(":")[-1]
1311
1312 catalogs = vca.list_catalogs()
1313 except Exception as ex:
1314 self.logger.error(
1315 'create_vimcatalog(): Creation of catalog "{}" failed with error: {}'.format(
1316 catalog_name, ex
1317 )
1318 )
1319 raise
1320 return self.catalog_exists(catalog_name, catalogs)
1321
1322 # noinspection PyIncorrectDocstring
1323 def upload_ovf(
1324 self,
1325 vca=None,
1326 catalog_name=None,
1327 image_name=None,
1328 media_file_name=None,
1329 description="",
1330 progress=False,
1331 chunk_bytes=128 * 1024,
1332 ):
1333 """
1334 Uploads an OVF file to a vCloud catalog
1335
1336 :param chunk_bytes:
1337 :param progress:
1338 :param description:
1339 :param image_name:
1340 :param vca:
1341 :param catalog_name: (str): The name of the catalog to upload the media.
1342 :param media_file_name: (str): The name of the local media file to upload.
1343 :return: (bool) True if the media file was successfully uploaded, false otherwise.
1344 """
1345 os.path.isfile(media_file_name)
1346 statinfo = os.stat(media_file_name)
1347
1348 # find the catalog entry to which we upload the OVF.
1349 # create the vApp template and check the status: if vCD is able to read the OVF it will respond with the
1350 # appropriate status change.
1351 # if vCD can parse the OVF we upload the VMDK file
1352 try:
1353 for catalog in vca.list_catalogs():
1354 if catalog_name != catalog["name"]:
1355 continue
1356 catalog_href = "{}/api/catalog/{}/action/upload".format(
1357 self.url, catalog["id"]
1358 )
1359 data = """
1360 <UploadVAppTemplateParams name="{}"
1361 xmlns="http://www.vmware.com/vcloud/v1.5"
1362 xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1">
1363 <Description>{} vApp Template</Description>
1364 </UploadVAppTemplateParams>
1365 """.format(
1366 catalog_name, description
1367 )
1368
1369 if self.client:
1370 headers = {
1371 "Accept": "application/*+xml;version=" + API_VERSION,
1372 "x-vcloud-authorization": self.client._session.headers[
1373 "x-vcloud-authorization"
1374 ],
1375 }
1376 headers[
1377 "Content-Type"
1378 ] = "application/vnd.vmware.vcloud.uploadVAppTemplateParams+xml"
1379
1380 response = self.perform_request(
1381 req_type="POST", url=catalog_href, headers=headers, data=data
1382 )
1383
1384 if response.status_code == requests.codes.created:
1385 catalogItem = XmlElementTree.fromstring(response.text)
1386 entity = [
1387 child
1388 for child in catalogItem
1389 if child.get("type")
1390 == "application/vnd.vmware.vcloud.vAppTemplate+xml"
1391 ][0]
1392 href = entity.get("href")
1393 template = href
1394
1395 response = self.perform_request(
1396 req_type="GET", url=href, headers=headers
1397 )
1398
1399 if response.status_code == requests.codes.ok:
1400 headers["Content-Type"] = "Content-Type text/xml"
1401 result = re.search(
1402 'rel="upload:default"\shref="(.*?\/descriptor.ovf)"',
1403 response.text,
1404 )
1405
1406 if result:
1407 transfer_href = result.group(1)
1408
1409 response = self.perform_request(
1410 req_type="PUT",
1411 url=transfer_href,
1412 headers=headers,
1413 data=open(media_file_name, "rb"),
1414 )
1415
1416 if response.status_code != requests.codes.ok:
1417 self.logger.debug(
1418 "Failed create vApp template for catalog name {} and image {}".format(
1419 catalog_name, media_file_name
1420 )
1421 )
1422 return False
1423
1424 # TODO fix this with an async block
1425 time.sleep(5)
1426
1427 self.logger.debug(
1428 "vApp template for catalog name {} and image {}".format(
1429 catalog_name, media_file_name
1430 )
1431 )
1432
1433 # uploading VMDK file
1434 # check status of OVF upload and upload remaining files.
1435 response = self.perform_request(
1436 req_type="GET", url=template, headers=headers
1437 )
1438
1439 if response.status_code == requests.codes.ok:
1440 result = re.search(
1441 'rel="upload:default"\s*href="(.*?vmdk)"', response.text
1442 )
1443
1444 if result:
1445 link_href = result.group(1)
1446
1447 # we skip the ovf since it is already uploaded.
1448 if "ovf" in link_href:
1449 continue
1450
1451 # The OVF file and the VMDK must be in the same directory
1452 head, _ = os.path.split(media_file_name)
1453 file_vmdk = head + "/" + link_href.split("/")[-1]
1454
1455 if not os.path.isfile(file_vmdk):
1456 return False
1457
1458 statinfo = os.stat(file_vmdk)
1459 if statinfo.st_size == 0:
1460 return False
1461
1462 hrefvmdk = link_href
1463
1464 if progress:
1465 widgets = [
1466 "Uploading file: ",
1467 Percentage(),
1468 " ",
1469 Bar(),
1470 " ",
1471 ETA(),
1472 " ",
1473 FileTransferSpeed(),
1474 ]
1475 progress_bar = ProgressBar(
1476 widgets=widgets, maxval=statinfo.st_size
1477 ).start()
1478
1479 bytes_transferred = 0
1480 f = open(file_vmdk, "rb")
1481
1482 while bytes_transferred < statinfo.st_size:
1483 my_bytes = f.read(chunk_bytes)
1484 if len(my_bytes) <= chunk_bytes:
1485 headers["Content-Range"] = "bytes {}-{}/{}".format(
1486 bytes_transferred,
1487 bytes_transferred + len(my_bytes) - 1,
1488 statinfo.st_size,
1489 )
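# Illustrative arithmetic (placeholder sizes): with the default 128 KiB chunks, a 307200-byte
# (300 KiB) VMDK is intended to be sent as three PUTs with Content-Range headers
# "bytes 0-131071/307200", "bytes 131072-262143/307200" and "bytes 262144-307199/307200".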
1490 headers["Content-Length"] = str(len(my_bytes))
1491 response = requests.put(
1492 url=hrefvmdk,
1493 headers=headers,
1494 data=my_bytes,
1495 verify=False,
1496 )
1497
1498 if response.status_code == requests.codes.ok:
1499 bytes_transferred += len(my_bytes)
1500 if progress:
1501 progress_bar.update(bytes_transferred)
1502 else:
1503 self.logger.debug(
1504 "file upload failed with error: [{}] {}".format(
1505 response.status_code, response.text
1506 )
1507 )
1508
1509 f.close()
1510
1511 return False
1512
1513 f.close()
1514 if progress:
1515 progress_bar.finish()
1516 time.sleep(10)
1517
1518 return True
1519 else:
1520 self.logger.debug(
1521 "Failed retrieve vApp template for catalog name {} for OVF {}".format(
1522 catalog_name, media_file_name
1523 )
1524 )
1525 return False
1526 except Exception as exp:
1527 self.logger.debug(
1528 "Failed while uploading OVF to catalog {} for OVF file {} with Exception {}".format(
1529 catalog_name, media_file_name, exp
1530 )
1531 )
1532
1533 raise vimconn.VimConnException(
1534 "Failed while uploading OVF to catalog {} for OVF file {} with Exception {}".format(
1535 catalog_name, media_file_name, exp
1536 )
1537 )
1538
1539 self.logger.debug(
1540 "Failed retrieve catalog name {} for OVF file {}".format(
1541 catalog_name, media_file_name
1542 )
1543 )
1544
1545 return False
1546
1547 def upload_vimimage(
1548 self,
1549 vca=None,
1550 catalog_name=None,
1551 media_name=None,
1552 medial_file_name=None,
1553 progress=False,
1554 ):
1555 """Upload media file"""
1556 # TODO add named parameters for readability
1557 return self.upload_ovf(
1558 vca=vca,
1559 catalog_name=catalog_name,
1560 image_name=media_name.split(".")[0],
1561 media_file_name=medial_file_name,
1562 description="medial_file_name",
1563 progress=progress,
1564 )
1565
1566 def validate_uuid4(self, uuid_string=None):
1567 """Method validate correct format of UUID.
1568
1569 Return: True if the string represents a valid uuid
1570 """
1571 try:
1572 uuid.UUID(uuid_string, version=4)
1573 except ValueError:
1574 return False
1575
1576 return True
1577
1578 def get_catalogid(self, catalog_name=None, catalogs=None):
1579 """Method check catalog and return catalog ID in UUID format.
1580
1581 Args
1582 catalog_name: catalog name as string
1583 catalogs: list of catalogs.
1584
1585 Return: catalogs uuid
1586 """
1587 for catalog in catalogs:
1588 if catalog["name"] == catalog_name:
1589 catalog_id = catalog["id"]
1590 return catalog_id
1591
1592 return None
1593
1594 def get_catalogbyid(self, catalog_uuid=None, catalogs=None):
1595 """Method check catalog and return catalog name lookup done by catalog UUID.
1596
1597 Args
1598 catalog_name: catalog name as string
1599 catalogs: list of catalogs.
1600
1601 Return: catalogs name or None
1602 """
1603 if not self.validate_uuid4(uuid_string=catalog_uuid):
1604 return None
1605
1606 for catalog in catalogs:
1607 catalog_id = catalog.get("id")
1608
1609 if catalog_id == catalog_uuid:
1610 return catalog.get("name")
1611
1612 return None
1613
1614 def get_catalog_obj(self, catalog_uuid=None, catalogs=None):
1615 """Method check catalog and return catalog name lookup done by catalog UUID.
1616
1617 Args
1618 catalog_name: catalog name as string
1619 catalogs: list of catalogs.
1620
1621 Return: catalogs name or None
1622 """
1623 if not self.validate_uuid4(uuid_string=catalog_uuid):
1624 return None
1625
1626 for catalog in catalogs:
1627 catalog_id = catalog.get("id")
1628
1629 if catalog_id == catalog_uuid:
1630 return catalog
1631
1632 return None
1633
1634 def get_image_id_from_path(self, path=None, progress=False):
1635 """Method upload OVF image to vCloud director.
1636
1637 Each OVF image represented as single catalog entry in vcloud director.
1638 The method check for existing catalog entry. The check done by file name without file extension.
1639
1640 if given catalog name already present method will respond with existing catalog uuid otherwise
1641 it will create new catalog entry and upload OVF file to newly created catalog.
1642
1643 If method can't create catalog entry or upload a file it will throw exception.
1644
1645 Method accept boolean flag progress that will output progress bar. It useful method
1646 for standalone upload use case. In case to test large file upload.
1647
1648 Args
1649 path: - valid path to OVF file.
1650 progress - boolean progress bar show progress bar.
1651
1652 Return: if image uploaded correct method will provide image catalog UUID.
1653 """
1654 if not path:
1655 raise vimconn.VimConnException("Image path can't be None.")
1656
1657 if not os.path.isfile(path):
1658 raise vimconn.VimConnException("Can't read file. File not found.")
1659
1660 if not os.access(path, os.R_OK):
1661 raise vimconn.VimConnException(
1662 "Can't read file. Check file permission to read."
1663 )
1664
1665 self.logger.debug("get_image_id_from_path() client requesting {} ".format(path))
1666
1667 _, filename = os.path.split(path)
1668 _, file_extension = os.path.splitext(path)
1669 if file_extension != ".ovf":
1670 self.logger.debug(
1671 "Wrong file extension {} connector support only OVF container.".format(
1672 file_extension
1673 )
1674 )
1675
1676 raise vimconn.VimConnException(
1677 "Wrong container. vCloud director supports only OVF."
1678 )
1679
1680 catalog_name = os.path.splitext(filename)[0]
1681 catalog_md5_name = hashlib.md5(path.encode("utf-8")).hexdigest()
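# The catalog is keyed by the 32-character hex MD5 digest of the image path, so uploading the same
# path again finds and reuses the existing catalog entry in the lookup further below.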
1682 self.logger.debug(
1683 "File name {} Catalog Name {} file path {} "
1684 "vdc catalog name {}".format(filename, catalog_name, path, catalog_md5_name)
1685 )
1686
1687 try:
1688 org, _ = self.get_vdc_details()
1689 catalogs = org.list_catalogs()
1690 except Exception as exp:
1691 self.logger.debug("Failed get catalogs() with Exception {} ".format(exp))
1692
1693 raise vimconn.VimConnException(
1694 "Failed get catalogs() with Exception {} ".format(exp)
1695 )
1696
1697 if len(catalogs) == 0:
1698 self.logger.info(
1699 "Creating a new catalog entry {} in vcloud director".format(
1700 catalog_name
1701 )
1702 )
1703
1704 if self.create_vimcatalog(org, catalog_md5_name) is None:
1705 raise vimconn.VimConnException(
1706 "Failed create new catalog {} ".format(catalog_md5_name)
1707 )
1708
1709 result = self.upload_vimimage(
1710 vca=org,
1711 catalog_name=catalog_md5_name,
1712 media_name=filename,
1713 medial_file_name=path,
1714 progress=progress,
1715 )
1716
1717 if not result:
1718 raise vimconn.VimConnException(
1719 "Failed create vApp template for catalog {} ".format(catalog_name)
1720 )
1721
1722 return self.get_catalogid(catalog_name, catalogs)
1723 else:
1724 for catalog in catalogs:
1725 # search for existing catalog if we find same name we return ID
1726 # TODO optimize this
1727 if catalog["name"] == catalog_md5_name:
1728 self.logger.debug(
1729 "Found existing catalog entry for {} "
1730 "catalog id {}".format(
1731 catalog_name, self.get_catalogid(catalog_md5_name, catalogs)
1732 )
1733 )
1734
1735 return self.get_catalogid(catalog_md5_name, catalogs)
1736
1737 # if we didn't find existing catalog we create a new one and upload image.
1738 self.logger.debug(
1739 "Creating new catalog entry {} - {}".format(catalog_name, catalog_md5_name)
1740 )
1741 if self.create_vimcatalog(org, catalog_md5_name) is None:
1742 raise vimconn.VimConnException(
1743 "Failed create new catalog {} ".format(catalog_md5_name)
1744 )
1745
1746 result = self.upload_vimimage(
1747 vca=org,
1748 catalog_name=catalog_md5_name,
1749 media_name=filename,
1750 medial_file_name=path,
1751 progress=progress,
1752 )
1753 if not result:
1754 raise vimconn.VimConnException(
1755 "Failed create vApp template for catalog {} ".format(catalog_md5_name)
1756 )
1757
1758 return self.get_catalogid(catalog_md5_name, org.list_catalogs())
1759
1760 def get_image_list(self, filter_dict={}):
1761 """Obtain tenant images from VIM
1762 Filter_dict can be:
1763 name: image name
1764 id: image uuid
1765 checksum: image checksum
1766 location: image path
1767 Returns the image list of dictionaries:
1768 [{<the fields at Filter_dict plus some VIM specific>}, ...]
1769 List can be empty
1770 """
1771 try:
1772 org, _ = self.get_vdc_details()
1773 image_list = []
1774 catalogs = org.list_catalogs()
1775
1776 if len(catalogs) == 0:
1777 return image_list
1778 else:
1779 for catalog in catalogs:
1780 catalog_uuid = catalog.get("id")
1781 name = catalog.get("name")
1782 filtered_dict = {}
1783
1784 if filter_dict.get("name") and filter_dict["name"] != name:
1785 continue
1786
1787 if filter_dict.get("id") and filter_dict["id"] != catalog_uuid:
1788 continue
1789
1790 filtered_dict["name"] = name
1791 filtered_dict["id"] = catalog_uuid
1792 image_list.append(filtered_dict)
1793
1794 self.logger.debug(
1795 "List of already created catalog items: {}".format(image_list)
1796 )
1797
1798 return image_list
1799 except Exception as exp:
1800 raise vimconn.VimConnException(
1801 "Exception occured while retriving catalog items {}".format(exp)
1802 )
1803
1804 def get_vappid(self, vdc=None, vapp_name=None):
1805 """Method takes vdc object and vApp name and returns vapp uuid or None
1806
1807 Args:
1808 vdc: The VDC object.
1809 vapp_name: vApp name identifier
1810
1811 Returns:
1812 The vApp uuid, otherwise None
1813 """
1814 if vdc is None or vapp_name is None:
1815 return None
1816
1817 # UUID has following format https://host/api/vApp/vapp-30da58a3-e7c7-4d09-8f68-d4c8201169cf
1818 try:
1819 refs = [
1820 ref
1821 for ref in vdc.ResourceEntities.ResourceEntity
1822 if ref.name == vapp_name
1823 and ref.type_ == "application/vnd.vmware.vcloud.vApp+xml"
1824 ]
1825
1826 if len(refs) == 1:
1827 return refs[0].href.split("vapp")[1][1:]
1828 except Exception as e:
1829 self.logger.exception(e)
1830 return False
1831
1832 return None
1833
1834 def check_vapp(self, vdc=None, vapp_uuid=None):
1835 """Method Method returns True or False if vapp deployed in vCloud director
1836
1837 Args:
1838 vca: Connector to VCA
1839 vdc: The VDC object.
1840 vappid: vappid is application identifier
1841
1842 Returns:
1843 The return True if vApp deployed
1844 :param vdc:
1845 :param vapp_uuid:
1846 """
1847 try:
1848 refs = [
1849 ref
1850 for ref in vdc.ResourceEntities.ResourceEntity
1851 if ref.type_ == "application/vnd.vmware.vcloud.vApp+xml"
1852 ]
1853
1854 for ref in refs:
1855 vappid = ref.href.split("vapp")[1][1:]
1856 # find vapp with respected vapp uuid
1857
1858 if vappid == vapp_uuid:
1859 return True
1860 except Exception as e:
1861 self.logger.exception(e)
1862
1863 return False
1864
1865 return False
1866
1867 def get_namebyvappid(self, vapp_uuid=None):
1868 """Method returns vApp name from vCD and lookup done by vapp_id.
1869
1870 Args:
1871 vapp_uuid: vappid is application identifier
1872
1873 Returns:
1874 The return vApp name otherwise None
1875 """
1876 try:
1877 if self.client and vapp_uuid:
1878 vapp_call = "{}/api/vApp/vapp-{}".format(self.url, vapp_uuid)
1879 headers = {
1880 "Accept": "application/*+xml;version=" + API_VERSION,
1881 "x-vcloud-authorization": self.client._session.headers[
1882 "x-vcloud-authorization"
1883 ],
1884 }
1885
1886 response = self.perform_request(
1887 req_type="GET", url=vapp_call, headers=headers
1888 )
1889
1890 # Retry login if session expired & retry sending request
1891 if response.status_code == 403:
1892 response = self.retry_rest("GET", vapp_call)
1893
1894 tree = XmlElementTree.fromstring(response.text)
1895
1896 return tree.attrib["name"] if "name" in tree.attrib else None
1897 except Exception as e:
1898 self.logger.exception(e)
1899
1900 return None
1901
1902 return None
1903
1904 def new_vminstance(
1905 self,
1906 name=None,
1907 description="",
1908 start=False,
1909 image_id=None,
1910 flavor_id=None,
1911 affinity_group_list=[],
1912 net_list=[],
1913 cloud_config=None,
1914 disk_list=None,
1915 availability_zone_index=None,
1916 availability_zone_list=None,
1917 ):
1918 """Adds a VM instance to VIM
1919 Params:
1920 'start': (boolean) indicates if VM must start or created in pause mode.
1921 'image_id','flavor_id': image and flavor VIM id to use for the VM
1922 'net_list': list of interfaces, each one is a dictionary with:
1923 'name': (optional) name for the interface.
1924                 'net_id': VIM network id where this interface must be connected to. Mandatory for type==virtual
1925                 'vpci': (optional) virtual vPCI address to assign to the VM. Can be ignored depending on VIM
1926                     capabilities
1927                 'model': (optional and only makes sense for type==virtual) interface model: virtio, e1000, ...
1928 'mac_address': (optional) mac address to assign to this interface
1929             #TODO: CHECK if an optional 'vlan' parameter is needed for VIMs when type is VF and net_id is not
1930 provided, the VLAN tag to be used. In case net_id is provided, the internal network vlan is used
1931 for tagging VF
1932 'type': (mandatory) can be one of:
1933 'virtual', in this case always connected to a network of type 'net_type=bridge'
1934 'PCI-PASSTHROUGH' or 'PF' (passthrough): depending on VIM capabilities it can be connected to a
1935                     data/ptp network or it can be created unconnected
1936 'SR-IOV' or 'VF' (SRIOV with VLAN tag): same as PF for network connectivity.
1937 'VFnotShared'(SRIOV without VLAN tag) same as PF for network connectivity. VF where no other VFs
1938 are allocated on the same physical NIC
1939 'bw': (optional) only for PF/VF/VFnotShared. Minimal Bandwidth required for the interface in GBPS
1940 'port_security': (optional) If False it must avoid any traffic filtering at this interface. If missing
1941 or True, it must apply the default VIM behaviour
1942 After execution the method will add the key:
1943 'vim_id': must be filled/added by this method with the VIM identifier generated by the VIM for this
1944 interface. 'net_list' is modified
1945 'cloud_config': (optional) dictionary with:
1946 'key-pairs': (optional) list of strings with the public key to be inserted to the default user
1947 'users': (optional) list of users to be inserted, each item is a dict with:
1948 'name': (mandatory) user name,
1949 'key-pairs': (optional) list of strings with the public key to be inserted to the user
1950 'user-data': (optional) can be a string with the text script to be passed directly to cloud-init,
1951 or a list of strings, each one contains a script to be passed, usually with a MIMEmultipart file
1952 'config-files': (optional). List of files to be transferred. Each item is a dict with:
1953 'dest': (mandatory) string with the destination absolute path
1954 'encoding': (optional, by default text). Can be one of:
1955 'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
1956 'content' (mandatory): string with the content of the file
1957 'permissions': (optional) string with file permissions, typically octal notation '0644'
1958 'owner': (optional) file owner, string with the format 'owner:group'
1959 'boot-data-drive': boolean to indicate if user-data must be passed using a boot drive (hard disk)
1960 'disk_list': (optional) list with additional disks to the VM. Each item is a dict with:
1961 'image_id': (optional). VIM id of an existing image. If not provided an empty disk must be mounted
1962 'size': (mandatory) string with the size of the disk in GB
1963             availability_zone_index: Index of availability_zone_list to use for this VM. None if no availability zone is required
1964 availability_zone_list: list of availability zones given by user in the VNFD descriptor. Ignore if
1965 availability_zone_index is None
1966 Returns a tuple with the instance identifier and created_items or raises an exception on error
1967 created_items can be None or a dictionary where this method can include key-values that will be passed to
1968 the method delete_vminstance and action_vminstance. Can be used to store created ports, volumes, etc.
1969 Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
1970 as not present.
1971 """
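        # Summary of the flow implemented below: resolve VDC, catalog entry and flavor,
        # instantiate the vApp template, strip the template's primary NICs and re-add one
        # NIC per entry in net_list, optionally build and attach a config-drive ISO from
        # cloud_config, reserve memory when PCI/SR-IOV devices are used, power on the vApp
        # and, if availability zones are configured, create a VM-to-Host affinity rule.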
1972 self.logger.info("Creating new instance for entry {}".format(name))
1973 self.logger.debug(
1974 "desc {} boot {} image_id: {} flavor_id: {} net_list: {} cloud_config {} disk_list {} "
1975 "availability_zone_index {} availability_zone_list {}".format(
1976 description,
1977 start,
1978 image_id,
1979 flavor_id,
1980 net_list,
1981 cloud_config,
1982 disk_list,
1983 availability_zone_index,
1984 availability_zone_list,
1985 )
1986 )
1987
1988         # new vm name = vm name + "-" + uuid
1989 new_vm_name = [name, "-", str(uuid.uuid4())]
1990 vmname_andid = "".join(new_vm_name)
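        # illustrative example: name "myvnf" -> vmname_andid "myvnf-30da58a3-e7c7-4d09-8f68-d4c8201169cf"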
1991
1992 for net in net_list:
1993 if net["type"] == "PCI-PASSTHROUGH":
1994 raise vimconn.VimConnNotSupportedException(
1995 "Current vCD version does not support type : {}".format(net["type"])
1996 )
1997
1998 if len(net_list) > 10:
1999 raise vimconn.VimConnNotSupportedException(
2000                 "The VM hardware versions 7 and above support up to 10 NICs only"
2001 )
2002
2003 # if vm already deployed we return existing uuid
2004 # we check for presence of VDC, Catalog entry and Flavor.
2005 org, vdc = self.get_vdc_details()
2006 if vdc is None:
2007 raise vimconn.VimConnNotFoundException(
2008                 "new_vminstance(): Failed to create vApp {}: (Failed to retrieve VDC information)".format(
2009 name
2010 )
2011 )
2012
2013 catalogs = org.list_catalogs()
2014 if catalogs is None:
2015 # Retry once, if failed by refreshing token
2016 self.get_token()
2017 org = Org(self.client, resource=self.client.get_org())
2018 catalogs = org.list_catalogs()
2019
2020 if catalogs is None:
2021 raise vimconn.VimConnNotFoundException(
2022                     "new_vminstance(): Failed to create vApp {}: (Failed to retrieve catalogs list)".format(
2023 name
2024 )
2025 )
2026
2027 catalog_hash_name = self.get_catalogbyid(
2028 catalog_uuid=image_id, catalogs=catalogs
2029 )
2030 if catalog_hash_name:
2031 self.logger.info(
2032 "Found catalog entry {} for image id {}".format(
2033 catalog_hash_name, image_id
2034 )
2035 )
2036 else:
2037 raise vimconn.VimConnNotFoundException(
2038                 "new_vminstance(): Failed to create vApp {}: "
2039                 "(Failed to retrieve catalog information {})".format(name, image_id)
2040 )
2041
2042 # Set vCPU and Memory based on flavor.
2043 vm_cpus = None
2044 vm_memory = None
2045 vm_disk = None
2046 numas = None
2047
2048 if flavor_id is not None:
2049 if flavor_id not in vimconnector.flavorlist:
2050 raise vimconn.VimConnNotFoundException(
2051                     "new_vminstance(): Failed to create vApp {}: "
2052                     "Failed to retrieve flavor information "
2053 "flavor id {}".format(name, flavor_id)
2054 )
2055 else:
2056 try:
2057 flavor = vimconnector.flavorlist[flavor_id]
2058 vm_cpus = flavor[FLAVOR_VCPUS_KEY]
2059 vm_memory = flavor[FLAVOR_RAM_KEY]
2060 vm_disk = flavor[FLAVOR_DISK_KEY]
2061 extended = flavor.get("extended", None)
2062
2063 if extended:
2064 numas = extended.get("numas", None)
2065 except Exception as exp:
2066 raise vimconn.VimConnException(
2067                             "Corrupted flavor {}. Exception: {}".format(flavor_id, exp)
2068 )
2069
2070 # image upload creates template name as catalog name space Template.
2071 templateName = self.get_catalogbyid(catalog_uuid=image_id, catalogs=catalogs)
2072 # power_on = 'false'
2073 # if start:
2074 # power_on = 'true'
2075
2076         # client must provide at least one entry in net_list, otherwise we report an error
2077         # If net type is mgmt, then configure it as primary net & use its NIC index as primary NIC
2078         # If no mgmt net, then the first net in net_list is considered the primary net.
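        # illustrative example: with
        #   net_list = [{"name": "eth0", "net_id": "<uuid>", "use": "data"},
        #               {"name": "eth1", "net_id": "<uuid>", "use": "mgmt"}]
        # the second entry ("mgmt") is selected as the primary net.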
2079 primary_net = None
2080 primary_netname = None
2081 primary_net_href = None
2082 # network_mode = 'bridged'
2083 if net_list is not None and len(net_list) > 0:
2084 for net in net_list:
2085 if "use" in net and net["use"] == "mgmt" and not primary_net:
2086 primary_net = net
2087
2088 if primary_net is None:
2089 primary_net = net_list[0]
2090
2091 try:
2092 primary_net_id = primary_net["net_id"]
2093 url_list = [self.url, "/api/network/", primary_net_id]
2094 primary_net_href = "".join(url_list)
2095 network_dict = self.get_vcd_network(network_uuid=primary_net_id)
2096
2097 if "name" in network_dict:
2098 primary_netname = network_dict["name"]
2099 except KeyError:
2100 raise vimconn.VimConnException(
2101                     "Corrupted primary net {}: missing 'net_id'".format(primary_net)
2102 )
2103 else:
2104 raise vimconn.VimConnUnexpectedResponse(
2105 "new_vminstance(): Failed network list is empty."
2106 )
2107
2108 # use: 'data', 'bridge', 'mgmt'
2109 # create vApp. Set vcpu and ram based on flavor id.
2110 try:
2111 vdc_obj = VDC(self.client, resource=org.get_vdc(self.tenant_name))
2112 if not vdc_obj:
2113 raise vimconn.VimConnNotFoundException(
2114 "new_vminstance(): Failed to get VDC object"
2115 )
2116
2117 for retry in (1, 2):
2118 items = org.get_catalog_item(catalog_hash_name, catalog_hash_name)
2119 catalog_items = [items.attrib]
2120
2121 if len(catalog_items) == 1:
2122 if self.client:
2123 headers = {
2124 "Accept": "application/*+xml;version=" + API_VERSION,
2125 "x-vcloud-authorization": self.client._session.headers[
2126 "x-vcloud-authorization"
2127 ],
2128 }
2129
2130 response = self.perform_request(
2131 req_type="GET",
2132 url=catalog_items[0].get("href"),
2133 headers=headers,
2134 )
2135 catalogItem = XmlElementTree.fromstring(response.text)
2136 entity = [
2137 child
2138 for child in catalogItem
2139 if child.get("type")
2140 == "application/vnd.vmware.vcloud.vAppTemplate+xml"
2141 ][0]
2142 vapp_tempalte_href = entity.get("href")
2143
2144 response = self.perform_request(
2145 req_type="GET", url=vapp_tempalte_href, headers=headers
2146 )
2147
2148 if response.status_code != requests.codes.ok:
2149 self.logger.debug(
2150 "REST API call {} failed. Return status code {}".format(
2151 vapp_tempalte_href, response.status_code
2152 )
2153 )
2154 else:
2155 result = (response.text).replace("\n", " ")
2156
2157 vapp_template_tree = XmlElementTree.fromstring(response.text)
2158 children_element = [
2159 child for child in vapp_template_tree if "Children" in child.tag
2160 ][0]
2161 vm_element = [child for child in children_element if "Vm" in child.tag][
2162 0
2163 ]
2164 vm_name = vm_element.get("name")
2165 vm_id = vm_element.get("id")
2166 vm_href = vm_element.get("href")
2167
2168 # cpus = re.search('<rasd:Description>Number of Virtual CPUs</.*?>(\d+)</rasd:VirtualQuantity>',
2169 # result).group(1)
2170 memory_mb = re.search(
2171                 r"<rasd:Description>Memory Size</.*?>(\d+)</rasd:VirtualQuantity>",
2172 result,
2173 ).group(1)
2174 # cores = re.search('<vmw:CoresPerSocket ovf:required.*?>(\d+)</vmw:CoresPerSocket>', result).group(1)
2175
2176 headers[
2177 "Content-Type"
2178 ] = "application/vnd.vmware.vcloud.instantiateVAppTemplateParams+xml"
2179 vdc_id = vdc.get("id").split(":")[-1]
2180 instantiate_vapp_href = (
2181 "{}/api/vdc/{}/action/instantiateVAppTemplate".format(
2182 self.url, vdc_id
2183 )
2184 )
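                # e.g. (illustrative) https://<vcd-host>/api/vdc/<vdc-uuid>/action/instantiateVAppTemplate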
2185
2186 with open(
2187 os.path.join(
2188 os.path.dirname(__file__), "InstantiateVAppTemplateParams.xml"
2189 ),
2190 "r",
2191 ) as f:
2192 template = f.read()
2193
2194 data = template.format(
2195 vmname_andid,
2196 primary_netname,
2197 primary_net_href,
2198 vapp_tempalte_href,
2199 vm_href,
2200 vm_id,
2201 vm_name,
2202 primary_netname,
2203 cpu=vm_cpus,
2204 core=1,
2205 memory=vm_memory,
2206 )
2207
2208 response = self.perform_request(
2209 req_type="POST",
2210 url=instantiate_vapp_href,
2211 headers=headers,
2212 data=data,
2213 )
2214
2215 if response.status_code != 201:
2216 self.logger.error(
2217                     "REST call {} failed reason : {} "
2218                     "status code : {}".format(
2219 instantiate_vapp_href, response.text, response.status_code
2220 )
2221 )
2222 raise vimconn.VimConnException(
2223                         "new_vminstance(): Failed to create "
2224                         "vApp {}".format(vmname_andid)
2225 )
2226 else:
2227 vapptask = self.get_task_from_response(response.text)
2228
2229 if vapptask is None and retry == 1:
2230 self.get_token() # Retry getting token
2231 continue
2232 else:
2233 break
2234
2235 if vapptask is None or vapptask is False:
2236 raise vimconn.VimConnUnexpectedResponse(
2237 "new_vminstance(): failed to create vApp {}".format(vmname_andid)
2238 )
2239
2240 # wait for task to complete
2241 result = self.client.get_task_monitor().wait_for_success(task=vapptask)
2242
2243 if result.get("status") == "success":
2244 self.logger.debug(
2245                 "new_vminstance(): Successfully created vApp {}".format(vmname_andid)
2246 )
2247 else:
2248 raise vimconn.VimConnUnexpectedResponse(
2249 "new_vminstance(): failed to create vApp {}".format(vmname_andid)
2250 )
2251 except Exception as exp:
2252 raise vimconn.VimConnUnexpectedResponse(
2253 "new_vminstance(): failed to create vApp {} with Exception:{}".format(
2254 vmname_andid, exp
2255 )
2256 )
2257
2258         # we should now have the vApp in undeployed state.
2259 try:
2260 vdc_obj = VDC(self.client, href=vdc.get("href"))
2261 vapp_resource = vdc_obj.get_vapp(vmname_andid)
2262 vapp_uuid = vapp_resource.get("id").split(":")[-1]
2263 vapp = VApp(self.client, resource=vapp_resource)
2264 except Exception as exp:
2265 raise vimconn.VimConnUnexpectedResponse(
2266 "new_vminstance(): Failed to retrieve vApp {} after creation: Exception:{}".format(
2267 vmname_andid, exp
2268 )
2269 )
2270
2271 if vapp_uuid is None:
2272 raise vimconn.VimConnUnexpectedResponse(
2273 "new_vminstance(): Failed to retrieve vApp {} after creation".format(
2274 vmname_andid
2275 )
2276 )
2277
2278         # Add PCI passthrough/SRIOV configurations
2279 pci_devices_info = []
2280 reserve_memory = False
2281
2282 for net in net_list:
2283 if net["type"] == "PF" or net["type"] == "PCI-PASSTHROUGH":
2284 pci_devices_info.append(net)
2285 elif (
2286 net["type"] == "VF"
2287 or net["type"] == "SR-IOV"
2288 or net["type"] == "VFnotShared"
2289 ) and "net_id" in net:
2290 reserve_memory = True
2291
2292 # Add PCI
2293 if len(pci_devices_info) > 0:
2294 self.logger.info(
2295 "Need to add PCI devices {} into VM {}".format(
2296 pci_devices_info, vmname_andid
2297 )
2298 )
2299 PCI_devices_status, _, _ = self.add_pci_devices(
2300 vapp_uuid, pci_devices_info, vmname_andid
2301 )
2302
2303 if PCI_devices_status:
2304 self.logger.info(
2305                     "Added PCI devices {} to VM {}".format(
2306 pci_devices_info, vmname_andid
2307 )
2308 )
2309 reserve_memory = True
2310 else:
2311 self.logger.info(
2312                     "Failed to add PCI devices {} to VM {}".format(
2313 pci_devices_info, vmname_andid
2314 )
2315 )
2316
2317 # Add serial console - this allows cloud images to boot as if we are running under OpenStack
2318 self.add_serial_device(vapp_uuid)
2319
2320 if vm_disk:
2321 # Assuming there is only one disk in ovf and fast provisioning in organization vDC is disabled
2322 result = self.modify_vm_disk(vapp_uuid, vm_disk)
2323 if result:
2324 self.logger.debug("Modified Disk size of VM {} ".format(vmname_andid))
2325
2326 # Add new or existing disks to vApp
2327 if disk_list:
2328 added_existing_disk = False
2329 for disk in disk_list:
2330 if "device_type" in disk and disk["device_type"] == "cdrom":
2331 image_id = disk["image_id"]
2332 # Adding CD-ROM to VM
2333 # will revisit code once specification ready to support this feature
2334 self.insert_media_to_vm(vapp, image_id)
2335 elif "image_id" in disk and disk["image_id"] is not None:
2336 self.logger.debug(
2337 "Adding existing disk from image {} to vm {} ".format(
2338 disk["image_id"], vapp_uuid
2339 )
2340 )
2341 self.add_existing_disk(
2342 catalogs=catalogs,
2343 image_id=disk["image_id"],
2344 size=disk["size"],
2345 template_name=templateName,
2346 vapp_uuid=vapp_uuid,
2347 )
2348 added_existing_disk = True
2349 else:
2350 # Wait till added existing disk gets reflected into vCD database/API
2351 if added_existing_disk:
2352 time.sleep(5)
2353 added_existing_disk = False
2354 self.add_new_disk(vapp_uuid, disk["size"])
2355
2356 if numas:
2357 # Assigning numa affinity setting
2358 for numa in numas:
2359 if "paired-threads-id" in numa:
2360 paired_threads_id = numa["paired-threads-id"]
2361 self.set_numa_affinity(vapp_uuid, paired_threads_id)
2362
2363 # add NICs & connect to networks in netlist
2364 try:
2365 vdc_obj = VDC(self.client, href=vdc.get("href"))
2366 vapp_resource = vdc_obj.get_vapp(vmname_andid)
2367 vapp = VApp(self.client, resource=vapp_resource)
2368 vapp_id = vapp_resource.get("id").split(":")[-1]
2369
2370 self.logger.info("Removing primary NIC: ")
2371 # First remove all NICs so that NIC properties can be adjusted as needed
2372 self.remove_primary_network_adapter_from_all_vms(vapp)
2373
2374 self.logger.info("Request to connect VM to a network: {}".format(net_list))
2375 primary_nic_index = 0
2376 nicIndex = 0
2377 for net in net_list:
2378 # openmano uses network id in UUID format.
2379                 # vCloud Director needs a name, so we do the reverse operation: from the provided UUID we look up the name
2380 # [{'use': 'bridge', 'net_id': '527d4bf7-566a-41e7-a9e7-ca3cdd9cef4f', 'type': 'virtual',
2381 # 'vpci': '0000:00:11.0', 'name': 'eth0'}]
2382
2383 if "net_id" not in net:
2384 continue
2385
2386                 # Using net_id as vim_id, i.e. vim interface id, as we do not have a separate vim interface id
2387 # Same will be returned in refresh_vms_status() as vim_interface_id
2388 net["vim_id"] = net[
2389 "net_id"
2390 ] # Provide the same VIM identifier as the VIM network
2391
2392 interface_net_id = net["net_id"]
2393 interface_net_name = self.get_network_name_by_id(
2394 network_uuid=interface_net_id
2395 )
2396 interface_network_mode = net["use"]
2397
2398 if interface_network_mode == "mgmt":
2399 primary_nic_index = nicIndex
2400
2401 """- POOL (A static IP address is allocated automatically from a pool of addresses.)
2402 - DHCP (The IP address is obtained from a DHCP service.)
2403 - MANUAL (The IP address is assigned manually in the IpAddress element.)
2404 - NONE (No IP addressing mode specified.)"""
2405
2406 if primary_netname is not None:
2407 self.logger.debug(
2408 "new_vminstance(): Filtering by net name {}".format(
2409 interface_net_name
2410 )
2411 )
2412 nets = [
2413 n
2414 for n in self.get_network_list()
2415 if n.get("name") == interface_net_name
2416 ]
2417
2418 if len(nets) == 1:
2419 self.logger.info(
2420 "new_vminstance(): Found requested network: {}".format(
2421 nets[0].get("name")
2422 )
2423 )
2424
2425 if interface_net_name != primary_netname:
2426 # connect network to VM - with all DHCP by default
2427 self.logger.info(
2428 "new_vminstance(): Attaching net {} to vapp".format(
2429 interface_net_name
2430 )
2431 )
2432 self.connect_vapp_to_org_vdc_network(
2433 vapp_id, nets[0].get("name")
2434 )
2435
2436 type_list = ("PF", "PCI-PASSTHROUGH", "VFnotShared")
2437 nic_type = "VMXNET3"
2438 if "type" in net and net["type"] not in type_list:
2439 # fetching nic type from vnf
2440 if "model" in net:
2441 if net["model"] is not None:
2442 if (
2443 net["model"].lower() == "paravirt"
2444 or net["model"].lower() == "virtio"
2445 ):
2446 nic_type = "VMXNET3"
2447 else:
2448 nic_type = net["model"]
2449
2450 self.logger.info(
2451 "new_vminstance(): adding network adapter "
2452 "to a network {}".format(nets[0].get("name"))
2453 )
2454 self.add_network_adapter_to_vms(
2455 vapp,
2456 nets[0].get("name"),
2457 primary_nic_index,
2458 nicIndex,
2459 net,
2460 nic_type=nic_type,
2461 )
2462 else:
2463 self.logger.info(
2464 "new_vminstance(): adding network adapter "
2465 "to a network {}".format(nets[0].get("name"))
2466 )
2467
2468 if net["type"] in ["SR-IOV", "VF"]:
2469 nic_type = net["type"]
2470 self.add_network_adapter_to_vms(
2471 vapp,
2472 nets[0].get("name"),
2473 primary_nic_index,
2474 nicIndex,
2475 net,
2476 nic_type=nic_type,
2477 )
2478 nicIndex += 1
2479
2480 # cloud-init for ssh-key injection
2481 if cloud_config:
2482 # Create a catalog which will be carrying the config drive ISO
2483 # This catalog is deleted during vApp deletion. The catalog name carries
2484             # vApp UUID and that is how it gets identified during its deletion.
2485 config_drive_catalog_name = "cfg_drv-" + vapp_uuid
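            # illustrative example: vapp_uuid "30da58a3-..." -> catalog name "cfg_drv-30da58a3-..."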
2486 self.logger.info(
2487 'new_vminstance(): Creating catalog "{}" to carry config drive ISO'.format(
2488 config_drive_catalog_name
2489 )
2490 )
2491 config_drive_catalog_id = self.create_vimcatalog(
2492 org, config_drive_catalog_name
2493 )
2494
2495 if config_drive_catalog_id is None:
2496 error_msg = (
2497 "new_vminstance(): Failed to create new catalog '{}' to carry the config drive "
2498 "ISO".format(config_drive_catalog_name)
2499 )
2500 raise Exception(error_msg)
2501
2502 # Create config-drive ISO
2503 _, userdata = self._create_user_data(cloud_config)
2504 # self.logger.debug('new_vminstance(): The userdata for cloud-init: {}'.format(userdata))
2505 iso_path = self.create_config_drive_iso(userdata)
2506 self.logger.debug(
2507 "new_vminstance(): The ISO is successfully created. Path: {}".format(
2508 iso_path
2509 )
2510 )
2511
2512 self.logger.info(
2513 "new_vminstance(): uploading iso to catalog {}".format(
2514 config_drive_catalog_name
2515 )
2516 )
2517 self.upload_iso_to_catalog(config_drive_catalog_id, iso_path)
2518 # Attach the config-drive ISO to the VM
2519 self.logger.info(
2520 "new_vminstance(): Attaching the config-drive ISO to the VM"
2521 )
2522 self.insert_media_to_vm(vapp, config_drive_catalog_id)
2523 shutil.rmtree(os.path.dirname(iso_path), ignore_errors=True)
2524
2525 # If VM has PCI devices or SRIOV reserve memory for VM
2526 if reserve_memory:
2527 self.reserve_memory_for_all_vms(vapp, memory_mb)
2528
2529 self.logger.debug(
2530 "new_vminstance(): starting power on vApp {} ".format(vmname_andid)
2531 )
2532
2533 poweron_task = self.power_on_vapp(vapp_id, vmname_andid)
2534 result = self.client.get_task_monitor().wait_for_success(task=poweron_task)
2535 if result.get("status") == "success":
2536 self.logger.info(
2537 "new_vminstance(): Successfully power on "
2538 "vApp {}".format(vmname_andid)
2539 )
2540 else:
2541 self.logger.error(
2542 "new_vminstance(): failed to power on vApp "
2543 "{}".format(vmname_andid)
2544 )
2545
2546 except Exception as exp:
2547 try:
2548 self.delete_vminstance(vapp_uuid)
2549 except Exception as exp2:
2550 self.logger.error("new_vminstance rollback fail {}".format(exp2))
2551             # it might be the case that a specific mandatory entry in the dict is empty, or some other pyVcloud exception
2552 self.logger.error(
2553 "new_vminstance(): Failed create new vm instance {} with exception {}".format(
2554 name, exp
2555 )
2556 )
2557 raise vimconn.VimConnException(
2558 "new_vminstance(): Failed create new vm instance {} with exception {}".format(
2559 name, exp
2560 )
2561 )
2562         # check if the vApp is deployed and, if that is the case, return the vApp UUID; otherwise raise
2563 wait_time = 0
2564 vapp_uuid = None
2565 while wait_time <= MAX_WAIT_TIME:
2566 try:
2567 vapp_resource = vdc_obj.get_vapp(vmname_andid)
2568 vapp = VApp(self.client, resource=vapp_resource)
2569 except Exception as exp:
2570 raise vimconn.VimConnUnexpectedResponse(
2571 "new_vminstance(): Failed to retrieve vApp {} after creation: Exception:{}".format(
2572 vmname_andid, exp
2573 )
2574 )
2575
2576 # if vapp and vapp.me.deployed:
2577 if vapp and vapp_resource.get("deployed") == "true":
2578 vapp_uuid = vapp_resource.get("id").split(":")[-1]
2579 break
2580 else:
2581 self.logger.debug(
2582 "new_vminstance(): Wait for vApp {} to deploy".format(name)
2583 )
2584 time.sleep(INTERVAL_TIME)
2585
2586 wait_time += INTERVAL_TIME
2587
2588 # SET Affinity Rule for VM
2589         # Pre-requisites: User has created Host Groups in vCenter with the respective Hosts to be used
2590 # While creating VIM account user has to pass the Host Group names in availability_zone list
2591 # "availability_zone" is a part of VIM "config" parameters
2592 # For example, in VIM config: "availability_zone":["HG_170","HG_174","HG_175"]
2593         # Host groups are referred to as availability zones
2594         # With the following procedure, the deployed VM will be added into a VM group.
2595         # Then a VM-to-Host affinity rule will be created using the VM group & Host group.
2596 if availability_zone_list:
2597 self.logger.debug(
2598 "Existing Host Groups in VIM {}".format(
2599 self.config.get("availability_zone")
2600 )
2601 )
2602 # Admin access required for creating Affinity rules
2603 client = self.connect_as_admin()
2604
2605 if not client:
2606 raise vimconn.VimConnConnectionException(
2607 "Failed to connect vCD as admin"
2608 )
2609 else:
2610 self.client = client
2611
2612 if self.client:
2613 headers = {
2614 "Accept": "application/*+xml;version=27.0",
2615 "x-vcloud-authorization": self.client._session.headers[
2616 "x-vcloud-authorization"
2617 ],
2618 }
2619
2620 # Step1: Get provider vdc details from organization
2621 pvdc_href = self.get_pvdc_for_org(self.tenant_name, headers)
2622 if pvdc_href is not None:
2623 # Step2: Found required pvdc, now get resource pool information
2624 respool_href = self.get_resource_pool_details(pvdc_href, headers)
2625 if respool_href is None:
2626 # Raise error if respool_href not found
2627 msg = "new_vminstance():Error in finding resource pool details in pvdc {}".format(
2628 pvdc_href
2629 )
2630 self.log_message(msg)
2631
2632 # Step3: Verify requested availability zone(hostGroup) is present in vCD
2633 # get availability Zone
2634 vm_az = self.get_vm_availability_zone(
2635 availability_zone_index, availability_zone_list
2636 )
2637
2638 # check if provided av zone(hostGroup) is present in vCD VIM
2639 status = self.check_availibility_zone(vm_az, respool_href, headers)
2640 if status is False:
2641 msg = (
2642 "new_vminstance(): Error in finding availability zone(Host Group): {} in "
2643 "resource pool {} status: {}"
2644 ).format(vm_az, respool_href, status)
2645 self.log_message(msg)
2646 else:
2647 self.logger.debug(
2648 "new_vminstance(): Availability zone {} found in VIM".format(vm_az)
2649 )
2650
2651 # Step4: Find VM group references to create vm group
2652 vmgrp_href = self.find_vmgroup_reference(respool_href, headers)
2653 if vmgrp_href is None:
2654 msg = "new_vminstance(): No reference to VmGroup found in resource pool"
2655 self.log_message(msg)
2656
2657 # Step5: Create a VmGroup with name az_VmGroup
2658 vmgrp_name = (
2659 vm_az + "_" + name
2660 ) # Formed VM Group name = Host Group name + VM name
2661 status = self.create_vmgroup(vmgrp_name, vmgrp_href, headers)
2662 if status is not True:
2663 msg = "new_vminstance(): Error in creating VM group {}".format(
2664 vmgrp_name
2665 )
2666 self.log_message(msg)
2667
2668 # VM Group url to add vms to vm group
2669 vmgrpname_url = self.url + "/api/admin/extension/vmGroup/name/" + vmgrp_name
2670
2671 # Step6: Add VM to VM Group
2672 # Find VM uuid from vapp_uuid
2673 vm_details = self.get_vapp_details_rest(vapp_uuid)
2674 vm_uuid = vm_details["vmuuid"]
2675
2676 status = self.add_vm_to_vmgroup(vm_uuid, vmgrpname_url, vmgrp_name, headers)
2677 if status is not True:
2678 msg = "new_vminstance(): Error in adding VM to VM group {}".format(
2679 vmgrp_name
2680 )
2681 self.log_message(msg)
2682
2683 # Step7: Create VM to Host affinity rule
2684 addrule_href = self.get_add_rule_reference(respool_href, headers)
2685 if addrule_href is None:
2686 msg = "new_vminstance(): Error in finding href to add rule in resource pool: {}".format(
2687 respool_href
2688 )
2689 self.log_message(msg)
2690
2691 status = self.create_vm_to_host_affinity_rule(
2692 addrule_href, vmgrp_name, vm_az, "Affinity", headers
2693 )
2694 if status is False:
2695 msg = "new_vminstance(): Error in creating affinity rule for VM {} in Host group {}".format(
2696 name, vm_az
2697 )
2698 self.log_message(msg)
2699 else:
2700 self.logger.debug(
2701 "new_vminstance(): Affinity rule created successfully. Added {} in Host group {}".format(
2702 name, vm_az
2703 )
2704 )
2705 # Reset token to a normal user to perform other operations
2706 self.get_token()
2707
2708 if vapp_uuid is not None:
2709 return vapp_uuid, None
2710 else:
2711 raise vimconn.VimConnUnexpectedResponse(
2712 "new_vminstance(): Failed create new vm instance {}".format(name)
2713 )
2714
2715 def create_config_drive_iso(self, user_data):
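        """Build an OpenStack-style config-drive ISO carrying the given user_data.

        A temporary directory is populated with the layout used below:
            openstack/latest/meta_data.json
            openstack/latest/user_data
        and packed with genisoimage using the volume label "config-2", which is the
        label cloud-init expects for its ConfigDrive datasource.

        Returns the path to the generated ISO. The temporary directory is removed
        here only on failure; on success the caller removes it after the upload.
        """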
2716 tmpdir = tempfile.mkdtemp()
2717 iso_path = os.path.join(tmpdir, "ConfigDrive.iso")
2718 latest_dir = os.path.join(tmpdir, "openstack", "latest")
2719 os.makedirs(latest_dir)
2720 with open(
2721 os.path.join(latest_dir, "meta_data.json"), "w"
2722 ) as meta_file_obj, open(
2723 os.path.join(latest_dir, "user_data"), "w"
2724 ) as userdata_file_obj:
2725 userdata_file_obj.write(user_data)
2726 meta_file_obj.write(
2727 json.dumps(
2728 {
2729 "availability_zone": "nova",
2730 "launch_index": 0,
2731 "name": "ConfigDrive",
2732 "uuid": str(uuid.uuid4()),
2733 }
2734 )
2735 )
2736 genisoimage_cmd = (
2737 "genisoimage -J -r -V config-2 -o {iso_path} {source_dir_path}".format(
2738 iso_path=iso_path, source_dir_path=tmpdir
2739 )
2740 )
2741 self.logger.info(
2742 'create_config_drive_iso(): Creating ISO by running command "{}"'.format(
2743 genisoimage_cmd
2744 )
2745 )
2746
2747 try:
2748 FNULL = open(os.devnull, "w")
2749 subprocess.check_call(genisoimage_cmd, shell=True, stdout=FNULL)
2750 except subprocess.CalledProcessError as e:
2751 shutil.rmtree(tmpdir, ignore_errors=True)
2752 error_msg = "create_config_drive_iso(): Exception while running genisoimage command: {}".format(
2753 e
2754 )
2755 self.logger.error(error_msg)
2756 raise Exception(error_msg)
2757
2758 return iso_path
2759
2760 def upload_iso_to_catalog(self, catalog_id, iso_file_path):
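        """Upload the given ISO file into the vCD catalog identified by catalog_id.

        Flow, as implemented below: POST a Media descriptor to the catalog's
        action/upload link, GET the created media entity to locate the file upload
        URL, PUT the ISO contents to that URL, then wait for the upload task to
        complete. Raises an Exception on any failure.
        """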
2761 if not os.path.isfile(iso_file_path):
2762 error_msg = "upload_iso_to_catalog(): Given iso file is not present. Given path: {}".format(
2763 iso_file_path
2764 )
2765 self.logger.error(error_msg)
2766 raise Exception(error_msg)
2767
2768 iso_file_stat = os.stat(iso_file_path)
2769 xml_media_elem = """<?xml version="1.0" encoding="UTF-8"?>
2770 <Media
2771 xmlns="http://www.vmware.com/vcloud/v1.5"
2772 name="{iso_name}"
2773 size="{iso_size}"
2774 imageType="iso">
2775 <Description>ISO image for config-drive</Description>
2776 </Media>""".format(
2777 iso_name=os.path.basename(iso_file_path), iso_size=iso_file_stat.st_size
2778 )
2779 headers = {
2780 "Accept": "application/*+xml;version=" + API_VERSION,
2781 "x-vcloud-authorization": self.client._session.headers[
2782 "x-vcloud-authorization"
2783 ],
2784 }
2785 headers["Content-Type"] = "application/vnd.vmware.vcloud.media+xml"
2786 catalog_href = self.url + "/api/catalog/" + catalog_id + "/action/upload"
2787 response = self.perform_request(
2788 req_type="POST", url=catalog_href, headers=headers, data=xml_media_elem
2789 )
2790
2791 if response.status_code != 201:
2792 error_msg = "upload_iso_to_catalog(): Failed to POST an action/upload request to {}".format(
2793 catalog_href
2794 )
2795 self.logger.error(error_msg)
2796 raise Exception(error_msg)
2797
2798 catalogItem = XmlElementTree.fromstring(response.text)
2799 entity = [
2800 child
2801 for child in catalogItem
2802 if child.get("type") == "application/vnd.vmware.vcloud.media+xml"
2803 ][0]
2804 entity_href = entity.get("href")
2805
2806 response = self.perform_request(
2807 req_type="GET", url=entity_href, headers=headers
2808 )
2809 if response.status_code != 200:
2810 raise Exception(
2811 "upload_iso_to_catalog(): Failed to GET entity href {}".format(
2812 entity_href
2813 )
2814 )
2815
2816 match = re.search(
2817 r'<Files>\s+?<File.+?href="(.+?)"/>\s+?</File>\s+?</Files>',
2818 response.text,
2819 re.DOTALL,
2820 )
2821 if match:
2822 media_upload_href = match.group(1)
2823 else:
2824 raise Exception(
2825 "Could not parse the upload URL for the media file from the last response"
2826 )
2827 upload_iso_task = self.get_task_from_response(response.text)
2828 headers["Content-Type"] = "application/octet-stream"
2829 response = self.perform_request(
2830 req_type="PUT",
2831 url=media_upload_href,
2832 headers=headers,
2833 data=open(iso_file_path, "rb"),
2834 )
2835
2836 if response.status_code != 200:
2837 raise Exception('PUT request to "{}" failed'.format(media_upload_href))
2838
2839 result = self.client.get_task_monitor().wait_for_success(task=upload_iso_task)
2840 if result.get("status") != "success":
2841 raise Exception(
2842 "The upload iso task failed with status {}".format(result.get("status"))
2843 )
2844
2845 def get_vcd_availibility_zones(self, respool_href, headers):
2846         """Method to find the presence of availability zones in the VIM resource pool
2847
2848 Args:
2849 respool_href - resource pool href
2850 headers - header information
2851
2852 Returns:
2853             vcd_az - list of availability zones present in vCD
2854 """
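        # illustrative example: with host groups "HG_170" and "HG_174" defined under the
        # provider VDC resource pool, this returns ["HG_170", "HG_174"]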
2855 vcd_az = []
2856 url = respool_href
2857 resp = self.perform_request(req_type="GET", url=respool_href, headers=headers)
2858
2859 if resp.status_code != requests.codes.ok:
2860 self.logger.debug(
2861 "REST API call {} failed. Return status code {}".format(
2862 url, resp.status_code
2863 )
2864 )
2865 else:
2866 # Get the href to hostGroups and find provided hostGroup is present in it
2867 resp_xml = XmlElementTree.fromstring(resp.content)
2868 for child in resp_xml:
2869 if "VMWProviderVdcResourcePool" in child.tag:
2870 for schild in child:
2871 if "Link" in schild.tag:
2872 if (
2873 schild.attrib.get("type")
2874 == "application/vnd.vmware.admin.vmwHostGroupsType+xml"
2875 ):
2876 hostGroup = schild.attrib.get("href")
2877 hg_resp = self.perform_request(
2878 req_type="GET", url=hostGroup, headers=headers
2879 )
2880
2881 if hg_resp.status_code != requests.codes.ok:
2882 self.logger.debug(
2883 "REST API call {} failed. Return status code {}".format(
2884 hostGroup, hg_resp.status_code
2885 )
2886 )
2887 else:
2888 hg_resp_xml = XmlElementTree.fromstring(
2889 hg_resp.content
2890 )
2891 for hostGroup in hg_resp_xml:
2892 if "HostGroup" in hostGroup.tag:
2893 # append host group name to the list
2894 vcd_az.append(hostGroup.attrib.get("name"))
2895
2896 return vcd_az
2897
2898 def set_availability_zones(self):
2899 """
2900 Set vim availability zone
2901 """
2902 vim_availability_zones = None
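        # The VIM config key "availability_zone" may hold a single host group name
        # (e.g. "HG_170") or a list of names (e.g. ["HG_170", "HG_174"]); normalize
        # it to a list in both cases.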
2903 availability_zone = None
2904
2905 if "availability_zone" in self.config:
2906 vim_availability_zones = self.config.get("availability_zone")
2907
2908 if isinstance(vim_availability_zones, str):
2909 availability_zone = [vim_availability_zones]
2910 elif isinstance(vim_availability_zones, list):
2911 availability_zone = vim_availability_zones
2912 else:
2913 return availability_zone
2914
2915 return availability_zone
2916
2917 def get_vm_availability_zone(self, availability_zone_index, availability_zone_list):
2918 """
2919 Return the availability zone to be used by the created VM.
2920 returns: The VIM availability zone to be used or None
2921 """
2922 if availability_zone_index is None:
2923 if not self.config.get("availability_zone"):
2924 return None
2925 elif isinstance(self.config.get("availability_zone"), str):
2926 return self.config["availability_zone"]
2927 else:
2928 return self.config["availability_zone"][0]
2929
2930 vim_availability_zones = self.availability_zone
2931
2932         # check if VIM offers enough availability zones as described in the VNFD
2933 if vim_availability_zones and len(availability_zone_list) <= len(
2934 vim_availability_zones
2935 ):
2936 # check if all the names of NFV AV match VIM AV names
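            # If any zone name from the VNFD is not present in the VIM list, fall back to
            # selecting the VIM availability zone by position (index) instead of by name.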
2937 match_by_index = False
2938 for av in availability_zone_list:
2939 if av not in vim_availability_zones:
2940 match_by_index = True
2941 break
2942
2943 if match_by_index:
2944 self.logger.debug(
2945 "Required Availability zone or Host Group not found in VIM config"
2946 )
2947 self.logger.debug(
2948 "Input Availability zone list: {}".format(availability_zone_list)
2949 )
2950 self.logger.debug(
2951 "VIM configured Availability zones: {}".format(
2952 vim_availability_zones
2953 )
2954 )
2955 self.logger.debug("VIM Availability zones will be used by index")
2956 return vim_availability_zones[availability_zone_index]
2957 else:
2958 return availability_zone_list[availability_zone_index]
2959 else:
2960 raise vimconn.VimConnConflictException(
2961                 "Not enough availability zones at VIM for this deployment"
2962 )
2963
2964 def create_vm_to_host_affinity_rule(
2965 self, addrule_href, vmgrpname, hostgrpname, polarity, headers
2966 ):
2967 """Method to create VM to Host Affinity rule in vCD
2968
2969 Args:
2970 addrule_href - href to make a POST request
2971 vmgrpname - name of the VM group created
2972             hostgrpname - name of the host group created earlier
2973 polarity - Affinity or Anti-affinity (default: Affinity)
2974 headers - headers to make REST call
2975
2976 Returns:
2977 True- if rule is created
2978 False- Failed to create rule due to some error
2979
2980 """
2981 task_status = False
2982 rule_name = polarity + "_" + vmgrpname
2983 payload = """<?xml version="1.0" encoding="UTF-8"?>
2984 <vmext:VMWVmHostAffinityRule
2985 xmlns:vmext="http://www.vmware.com/vcloud/extension/v1.5"
2986 xmlns:vcloud="http://www.vmware.com/vcloud/v1.5"
2987 type="application/vnd.vmware.admin.vmwVmHostAffinityRule+xml">
2988 <vcloud:Name>{}</vcloud:Name>
2989 <vcloud:IsEnabled>true</vcloud:IsEnabled>
2990 <vcloud:IsMandatory>true</vcloud:IsMandatory>
2991 <vcloud:Polarity>{}</vcloud:Polarity>
2992 <vmext:HostGroupName>{}</vmext:HostGroupName>
2993 <vmext:VmGroupName>{}</vmext:VmGroupName>
2994 </vmext:VMWVmHostAffinityRule>""".format(
2995 rule_name, polarity, hostgrpname, vmgrpname
2996 )
2997
2998 resp = self.perform_request(
2999 req_type="POST", url=addrule_href, headers=headers, data=payload
3000 )
3001
3002 if resp.status_code != requests.codes.accepted:
3003 self.logger.debug(
3004 "REST API call {} failed. Return status code {}".format(
3005 addrule_href, resp.status_code
3006 )
3007 )
3008 task_status = False
3009
3010 return task_status
3011 else:
3012 affinity_task = self.get_task_from_response(resp.content)
3013 self.logger.debug("affinity_task: {}".format(affinity_task))
3014
3015 if affinity_task is None or affinity_task is False:
3016 raise vimconn.VimConnUnexpectedResponse("failed to find affinity task")
3017 # wait for task to complete
3018 result = self.client.get_task_monitor().wait_for_success(task=affinity_task)
3019
3020 if result.get("status") == "success":
3021 self.logger.debug(
3022 "Successfully created affinity rule {}".format(rule_name)
3023 )
3024 return True
3025 else:
3026 raise vimconn.VimConnUnexpectedResponse(
3027 "failed to create affinity rule {}".format(rule_name)
3028 )
3029
3030 def get_add_rule_reference(self, respool_href, headers):
3031         """This method finds the href used to add a VM-to-Host affinity rule in vCD
3032
3033 Args:
3034 respool_href- href to resource pool
3035 headers- header information to make REST call
3036
3037 Returns:
3038 None - if no valid href to add rule found or
3039 addrule_href - href to add vm to host affinity rule of resource pool
3040 """
3041 addrule_href = None
3042 resp = self.perform_request(req_type="GET", url=respool_href, headers=headers)
3043
3044 if resp.status_code != requests.codes.ok:
3045 self.logger.debug(
3046 "REST API call {} failed. Return status code {}".format(
3047 respool_href, resp.status_code
3048 )
3049 )
3050 else:
3051 resp_xml = XmlElementTree.fromstring(resp.content)
3052 for child in resp_xml:
3053 if "VMWProviderVdcResourcePool" in child.tag:
3054 for schild in child:
3055 if "Link" in schild.tag:
3056 if (
3057 schild.attrib.get("type")
3058 == "application/vnd.vmware.admin.vmwVmHostAffinityRule+xml"
3059 and schild.attrib.get("rel") == "add"
3060 ):
3061 addrule_href = schild.attrib.get("href")
3062 break
3063
3064 return addrule_href
3065
3066 def add_vm_to_vmgroup(self, vm_uuid, vmGroupNameURL, vmGroup_name, headers):
3067 """Method to add deployed VM to newly created VM Group.
3068 This is required to create VM to Host affinity in vCD
3069
3070 Args:
3071 vm_uuid- newly created vm uuid
3072 vmGroupNameURL- URL to VM Group name
3073 vmGroup_name- Name of VM group created
3074 headers- Headers for REST request
3075
3076 Returns:
3077 True- if VM added to VM group successfully
3078             False- if any error is encountered
3079 """
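        # Flow, as implemented below: GET the VM group to find its "addVms" link, GET the
        # VM details by uuid, then POST a VmReference payload to the addVms href and check
        # that the request is accepted (202).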
3080 addvm_resp = self.perform_request(
3081 req_type="GET", url=vmGroupNameURL, headers=headers
3082 ) # , data=payload)
3083
3084 if addvm_resp.status_code != requests.codes.ok:
3085 self.logger.debug(
3086 "REST API call to get VM Group Name url {} failed. Return status code {}".format(
3087 vmGroupNameURL, addvm_resp.status_code
3088 )
3089 )
3090 return False
3091 else:
3092 resp_xml = XmlElementTree.fromstring(addvm_resp.content)
3093 for child in resp_xml:
3094 if child.tag.split("}")[1] == "Link":
3095 if child.attrib.get("rel") == "addVms":
3096 addvmtogrpURL = child.attrib.get("href")
3097
3098 # Get vm details
3099 url_list = [self.url, "/api/vApp/vm-", vm_uuid]
3100 vmdetailsURL = "".join(url_list)
3101
3102 resp = self.perform_request(req_type="GET", url=vmdetailsURL, headers=headers)
3103
3104 if resp.status_code != requests.codes.ok:
3105 self.logger.debug(
3106 "REST API call {} failed. Return status code {}".format(
3107 vmdetailsURL, resp.status_code
3108 )
3109 )
3110 return False
3111
3112 # Parse VM details
3113 resp_xml = XmlElementTree.fromstring(resp.content)
3114 if resp_xml.tag.split("}")[1] == "Vm":
3115 vm_id = resp_xml.attrib.get("id")
3116 vm_name = resp_xml.attrib.get("name")
3117 vm_href = resp_xml.attrib.get("href")
3118 # print vm_id, vm_name, vm_href
3119
3120 # Add VM into VMgroup
3121 payload = """<?xml version="1.0" encoding="UTF-8"?>\
3122 <ns2:Vms xmlns:ns2="http://www.vmware.com/vcloud/v1.5" \
3123 xmlns="http://www.vmware.com/vcloud/versions" \
3124 xmlns:ns3="http://schemas.dmtf.org/ovf/envelope/1" \
3125 xmlns:ns4="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData" \
3126 xmlns:ns5="http://schemas.dmtf.org/wbem/wscim/1/common" \
3127 xmlns:ns6="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData" \
3128 xmlns:ns7="http://www.vmware.com/schema/ovf" \
3129 xmlns:ns8="http://schemas.dmtf.org/ovf/environment/1" \
3130 xmlns:ns9="http://www.vmware.com/vcloud/extension/v1.5">\
3131 <ns2:VmReference href="{}" id="{}" name="{}" \
3132 type="application/vnd.vmware.vcloud.vm+xml" />\
3133 </ns2:Vms>""".format(
3134 vm_href, vm_id, vm_name
3135 )
3136
3137 addvmtogrp_resp = self.perform_request(
3138 req_type="POST", url=addvmtogrpURL, headers=headers, data=payload
3139 )
3140
3141 if addvmtogrp_resp.status_code != requests.codes.accepted:
3142 self.logger.debug(
3143 "REST API call {} failed. Return status code {}".format(
3144 addvmtogrpURL, addvmtogrp_resp.status_code
3145 )
3146 )
3147
3148 return False
3149 else:
3150 self.logger.debug(
3151 "Done adding VM {} to VMgroup {}".format(vm_name, vmGroup_name)
3152 )
3153
3154 return True
3155
3156 def create_vmgroup(self, vmgroup_name, vmgroup_href, headers):
3157 """Method to create a VM group in vCD
3158
3159 Args:
3160 vmgroup_name : Name of VM group to be created
3161 vmgroup_href : href for vmgroup
3162 headers- Headers for REST request
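
        Returns:
            True if the VM group is created successfully, False if the creation
            request is not accepted; raises VimConnUnexpectedResponse if the
            creation task fails.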
3163 """
3164 # POST to add URL with required data
3165 vmgroup_status = False
3166 payload = """<VMWVmGroup xmlns="http://www.vmware.com/vcloud/extension/v1.5" \
3167 xmlns:vcloud_v1.5="http://www.vmware.com/vcloud/v1.5" name="{}">\
3168 <vmCount>1</vmCount>\
3169 </VMWVmGroup>""".format(
3170 vmgroup_name
3171 )
3172 resp = self.perform_request(
3173 req_type="POST", url=vmgroup_href, headers=headers, data=payload
3174 )
3175
3176 if resp.status_code != requests.codes.accepted:
3177 self.logger.debug(
3178 "REST API call {} failed. Return status code {}".format(
3179 vmgroup_href, resp.status_code
3180 )
3181 )
3182
3183 return vmgroup_status
3184 else:
3185 vmgroup_task = self.get_task_from_response(resp.content)
3186 if vmgroup_task is None or vmgroup_task is False:
3187 raise vimconn.VimConnUnexpectedResponse(
3188 "create_vmgroup(): failed to create VM group {}".format(
3189 vmgroup_name
3190 )
3191 )
3192
3193 # wait for task to complete
3194 result = self.client.get_task_monitor().wait_for_success(task=vmgroup_task)
3195
3196 if result.get("status") == "success":
3197 self.logger.debug(
3198 "create_vmgroup(): Successfully created VM group {}".format(
3199 vmgroup_name
3200 )
3201 )
3202 # time.sleep(10)
3203 vmgroup_status = True
3204
3205 return vmgroup_status
3206 else:
3207 raise vimconn.VimConnUnexpectedResponse(
3208 "create_vmgroup(): failed to create VM group {}".format(
3209 vmgroup_name
3210 )
3211 )
3212
3213 def find_vmgroup_reference(self, url, headers):
3214         """Method to find the href needed to create a new VMGroup, which is required to add the created VM
3215 Args:
3216 url- resource pool href
3217 headers- header information
3218
3219 Returns:
3220             vmgrp_href - href used to create a VM group, or None if not found
3221 """
3222 # Perform GET on resource pool to find 'add' link to create VMGroup
3223 # https://vcd-ip/api/admin/extension/providervdc/<providervdc id>/resourcePools
3224 vmgrp_href = None
3225 resp = self.perform_request(req_type="GET", url=url, headers=headers)
3226
3227 if resp.status_code != requests.codes.ok:
3228 self.logger.debug(
3229 "REST API call {} failed. Return status code {}".format(
3230 url, resp.status_code
3231 )
3232 )
3233 else:
3234 # Get the href to add vmGroup to vCD
3235 resp_xml = XmlElementTree.fromstring(resp.content)
3236 for child in resp_xml:
3237 if "VMWProviderVdcResourcePool" in child.tag:
3238 for schild in child:
3239 if "Link" in schild.tag:
3240 # Find href with type VMGroup and rel with add
3241 if (
3242 schild.attrib.get("type")
3243 == "application/vnd.vmware.admin.vmwVmGroupType+xml"
3244 and schild.attrib.get("rel") == "add"
3245 ):
3246 vmgrp_href = schild.attrib.get("href")
3247
3248 return vmgrp_href
3249
3250 def check_availibility_zone(self, az, respool_href, headers):
3251         """Method to verify whether the requested availability zone is present in the
3252         provided resource pool
3253
3254 Args:
3255             az - name of hostgroup (availability_zone)
3256 respool_href - Resource Pool href
3257 headers - Headers to make REST call
3258 Returns:
3259             az_found - True if the availability_zone is found, else False
3260 """
3261 az_found = False
3262 headers["Accept"] = "application/*+xml;version=27.0"
3263 resp = self.perform_request(req_type="GET", url=respool_href, headers=headers)
3264
3265 if resp.status_code != requests.codes.ok:
3266 self.logger.debug(
3267 "REST API call {} failed. Return status code {}".format(
3268 respool_href, resp.status_code
3269 )
3270 )
3271 else:
3272 # Get the href to hostGroups and find provided hostGroup is present in it
3273 resp_xml = XmlElementTree.fromstring(resp.content)
3274
3275 for child in resp_xml:
3276 if "VMWProviderVdcResourcePool" in child.tag:
3277 for schild in child:
3278 if "Link" in schild.tag:
3279 if (
3280 schild.attrib.get("type")
3281 == "application/vnd.vmware.admin.vmwHostGroupsType+xml"
3282 ):
3283 hostGroup_href = schild.attrib.get("href")
3284 hg_resp = self.perform_request(
3285 req_type="GET", url=hostGroup_href, headers=headers
3286 )
3287
3288 if hg_resp.status_code != requests.codes.ok:
3289 self.logger.debug(
3290 "REST API call {} failed. Return status code {}".format(
3291 hostGroup_href, hg_resp.status_code
3292 )
3293 )
3294 else:
3295 hg_resp_xml = XmlElementTree.fromstring(
3296 hg_resp.content
3297 )
3298 for hostGroup in hg_resp_xml:
3299 if "HostGroup" in hostGroup.tag:
3300 if hostGroup.attrib.get("name") == az:
3301 az_found = True
3302 break
3303
3304 return az_found
3305
3306 def get_pvdc_for_org(self, org_vdc, headers):
3307 """This method gets provider vdc references from organisation
3308
3309 Args:
3310 org_vdc - name of the organisation VDC to find pvdc
3311 headers - headers to make REST call
3312
3313 Returns:
3314 None - if no pvdc href found else
3315 pvdc_href - href to pvdc
3316 """
3317 # Get provider VDC references from vCD
3318 pvdc_href = None
3319 # url = '<vcd url>/api/admin/extension/providerVdcReferences'
3320 url_list = [self.url, "/api/admin/extension/providerVdcReferences"]
3321 url = "".join(url_list)
3322
3323 response = self.perform_request(req_type="GET", url=url, headers=headers)
3324 if response.status_code != requests.codes.ok:
3325 self.logger.debug(
3326 "REST API call {} failed. Return status code {}".format(
3327 url, response.status_code
3328 )
3329 )
3330 else:
3331 xmlroot_response = XmlElementTree.fromstring(response.text)
3332 for child in xmlroot_response:
3333 if "ProviderVdcReference" in child.tag:
3334 pvdc_href = child.attrib.get("href")
3335 # Get vdcReferences to find org
3336 pvdc_resp = self.perform_request(
3337 req_type="GET", url=pvdc_href, headers=headers
3338 )
3339
3340 if pvdc_resp.status_code != requests.codes.ok:
3341 raise vimconn.VimConnException(
3342 "REST API call {} failed. "
3343 "Return status code {}".format(url, pvdc_resp.status_code)
3344 )
3345
3346 pvdc_resp_xml = XmlElementTree.fromstring(pvdc_resp.content)
3347 for child in pvdc_resp_xml:
3348 if "Link" in child.tag:
3349 if (
3350 child.attrib.get("type")
3351 == "application/vnd.vmware.admin.vdcReferences+xml"
3352 ):
3353 vdc_href = child.attrib.get("href")
3354
3355 # Check if provided org is present in vdc
3356 vdc_resp = self.perform_request(
3357 req_type="GET", url=vdc_href, headers=headers
3358 )
3359
3360 if vdc_resp.status_code != requests.codes.ok:
3361 raise vimconn.VimConnException(
3362 "REST API call {} failed. "
3363 "Return status code {}".format(
3364 url, vdc_resp.status_code
3365 )
3366 )
3367 vdc_resp_xml = XmlElementTree.fromstring(
3368 vdc_resp.content
3369 )
3370
3371 for child in vdc_resp_xml:
3372 if "VdcReference" in child.tag:
3373 if child.attrib.get("name") == org_vdc:
3374 return pvdc_href
3375
3376 def get_resource_pool_details(self, pvdc_href, headers):
3377 """Method to get resource pool information.
3378         Host groups are a property of the resource pool.
3379 To get host groups, we need to GET details of resource pool.
3380
3381 Args:
3382 pvdc_href: href to pvdc details
3383 headers: headers
3384
3385 Returns:
3386 respool_href - Returns href link reference to resource pool
3387 """
3388 respool_href = None
3389 resp = self.perform_request(req_type="GET", url=pvdc_href, headers=headers)
3390
3391 if resp.status_code != requests.codes.ok:
3392 self.logger.debug(
3393 "REST API call {} failed. Return status code {}".format(
3394 pvdc_href, resp.status_code
3395 )
3396 )
3397 else:
3398 respool_resp_xml = XmlElementTree.fromstring(resp.content)
3399 for child in respool_resp_xml:
3400 if "Link" in child.tag:
3401 if (
3402 child.attrib.get("type")
3403 == "application/vnd.vmware.admin.vmwProviderVdcResourcePoolSet+xml"
3404 ):
3405 respool_href = child.attrib.get("href")
3406 break
3407
3408 return respool_href
3409
3410 def log_message(self, msg):
3411 """
3412 Method to log error messages related to Affinity rule creation
3413 in new_vminstance & raise Exception
3414 Args :
3415 msg - Error message to be logged
3416
3417 """
3418 # get token to connect vCD as a normal user
3419 self.get_token()
3420 self.logger.debug(msg)
3421
3422 raise vimconn.VimConnException(msg)
3423
3424 # #
3425 # #
3426 # # based on current discussion
3427 # #
3428 # #
3429 # # server:
3430 # created: '2016-09-08T11:51:58'
3431 # description: simple-instance.linux1.1
3432 # flavor: ddc6776e-75a9-11e6-ad5f-0800273e724c
3433 # hostId: e836c036-74e7-11e6-b249-0800273e724c
3434 # image: dde30fe6-75a9-11e6-ad5f-0800273e724c
3435 # status: ACTIVE
3436 # error_msg:
3437 # interfaces: …
3438 #
3439 def get_vminstance(self, vim_vm_uuid=None):
3440 """Returns the VM instance information from VIM"""
3441 self.logger.debug("Client requesting vm instance {} ".format(vim_vm_uuid))
3442
3443 _, vdc = self.get_vdc_details()
3444 if vdc is None:
3445 raise vimconn.VimConnConnectionException(
3446 "Failed to get a reference of VDC for a tenant {}".format(
3447 self.tenant_name
3448 )
3449 )
3450
3451 vm_info_dict = self.get_vapp_details_rest(vapp_uuid=vim_vm_uuid)
3452 if not vm_info_dict:
3453 self.logger.debug(
3454 "get_vminstance(): Failed to get vApp name by UUID {}".format(
3455 vim_vm_uuid
3456 )
3457 )
3458 raise vimconn.VimConnNotFoundException(
3459 "Failed to get vApp name by UUID {}".format(vim_vm_uuid)
3460 )
3461
3462 status_key = vm_info_dict["status"]
3463 error = ""
3464 try:
3465 vm_dict = {
3466 "created": vm_info_dict["created"],
3467 "description": vm_info_dict["name"],
3468 "status": vcdStatusCode2manoFormat[int(status_key)],
3469 "hostId": vm_info_dict["vmuuid"],
3470 "error_msg": error,
3471 "vim_info": yaml.safe_dump(vm_info_dict),
3472 "interfaces": [],
3473 }
3474
3475 if "interfaces" in vm_info_dict:
3476 vm_dict["interfaces"] = vm_info_dict["interfaces"]
3477 else:
3478 vm_dict["interfaces"] = []
3479 except KeyError:
3480 vm_dict = {
3481 "created": "",
3482 "description": "",
3483 "status": vcdStatusCode2manoFormat[int(-1)],
3484 "hostId": vm_info_dict["vmuuid"],
3485 "error_msg": "Inconsistency state",
3486 "vim_info": yaml.safe_dump(vm_info_dict),
3487 "interfaces": [],
3488 }
3489
3490 return vm_dict
3491
3492 def delete_vminstance(self, vm_id, created_items=None, volumes_to_hold=None):
3493         """Method powers off and removes the VM instance from the vCloud Director network.
3494
3495 Args:
3496 vm_id: VM UUID
3497
3498 Returns:
3499             The instance identifier
3500 """
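        # Flow, as implemented below: power off the vApp if it is deployed, undeploy it,
        # delete it from the VDC and finally remove the associated "cfg_drv-<vm_id>"
        # config-drive catalog, if one exists.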
3501 self.logger.debug("Client requesting delete vm instance {} ".format(vm_id))
3502
3503 _, vdc = self.get_vdc_details()
3504 vdc_obj = VDC(self.client, href=vdc.get("href"))
3505 if vdc_obj is None:
3506 self.logger.debug(
3507 "delete_vminstance(): Failed to get a reference of VDC for a tenant {}".format(
3508 self.tenant_name
3509 )
3510 )
3511 raise vimconn.VimConnException(
3512 "delete_vminstance(): Failed to get a reference of VDC for a tenant {}".format(
3513 self.tenant_name
3514 )
3515 )
3516
3517 try:
3518 vapp_name = self.get_namebyvappid(vm_id)
3519 if vapp_name is None:
3520 self.logger.debug(
3521 "delete_vminstance(): Failed to get vm by given {} vm uuid".format(
3522 vm_id
3523 )
3524 )
3525
3526 return (
3527 -1,
3528 "delete_vminstance(): Failed to get vm by given {} vm uuid".format(
3529 vm_id
3530 ),
3531 )
3532
3533 self.logger.info("Deleting vApp {} and UUID {}".format(vapp_name, vm_id))
3534 vapp_resource = vdc_obj.get_vapp(vapp_name)
3535 vapp = VApp(self.client, resource=vapp_resource)
3536
3537 # Delete vApp and wait for status change if task executed and vApp is None.
3538 if vapp:
3539 if vapp_resource.get("deployed") == "true":
3540 self.logger.info("Powering off vApp {}".format(vapp_name))
3541 # Power off vApp
3542 powered_off = False
3543 wait_time = 0
3544
3545 while wait_time <= MAX_WAIT_TIME:
3546 power_off_task = vapp.power_off()
3547 result = self.client.get_task_monitor().wait_for_success(
3548 task=power_off_task
3549 )
3550
3551 if result.get("status") == "success":
3552 powered_off = True
3553 break
3554 else:
3555 self.logger.info(
3556 "Wait for vApp {} to power off".format(vapp_name)
3557 )
3558 time.sleep(INTERVAL_TIME)
3559
3560 wait_time += INTERVAL_TIME
3561
3562 if not powered_off:
3563 self.logger.debug(
3564 "delete_vminstance(): Failed to power off VM instance {} ".format(
3565 vm_id
3566 )
3567 )
3568 else:
3569 self.logger.info(
3570 "delete_vminstance(): Powered off VM instance {} ".format(
3571 vm_id
3572 )
3573 )
3574
3575 # Undeploy vApp
3576 self.logger.info("Undeploy vApp {}".format(vapp_name))
3577 wait_time = 0
3578 undeployed = False
3579 while wait_time <= MAX_WAIT_TIME:
3580 vapp = VApp(self.client, resource=vapp_resource)
3581 if not vapp:
3582 self.logger.debug(
3583 "delete_vminstance(): Failed to get vm by given {} vm uuid".format(
3584 vm_id
3585 )
3586 )
3587
3588 return (
3589 -1,
3590 "delete_vminstance(): Failed to get vm by given {} vm uuid".format(
3591 vm_id
3592 ),
3593 )
3594
3595 undeploy_task = vapp.undeploy()
3596 result = self.client.get_task_monitor().wait_for_success(
3597 task=undeploy_task
3598 )
3599
3600 if result.get("status") == "success":
3601 undeployed = True
3602 break
3603 else:
3604 self.logger.debug(
3605 "Wait for vApp {} to undeploy".format(vapp_name)
3606 )
3607 time.sleep(INTERVAL_TIME)
3608
3609 wait_time += INTERVAL_TIME
3610
3611 if not undeployed:
3612 self.logger.debug(
3613 "delete_vminstance(): Failed to undeploy vApp {} ".format(
3614 vm_id
3615 )
3616 )
3617
3618 # delete vapp
3619 self.logger.info("Start deletion of vApp {} ".format(vapp_name))
3620 if vapp is not None:
3621 wait_time = 0
3622 result = False
3623
3624 while wait_time <= MAX_WAIT_TIME:
3625 vapp = VApp(self.client, resource=vapp_resource)
3626 if not vapp:
3627 self.logger.debug(
3628 "delete_vminstance(): Failed to get vm by given {} vm uuid".format(
3629 vm_id
3630 )
3631 )
3632
3633 return (
3634 -1,
3635 "delete_vminstance(): Failed to get vm by given {} vm uuid".format(
3636 vm_id
3637 ),
3638 )
3639
3640 delete_task = vdc_obj.delete_vapp(vapp.name, force=True)
3641 result = self.client.get_task_monitor().wait_for_success(
3642 task=delete_task
3643 )
3644 if result.get("status") == "success":
3645 break
3646 else:
3647 self.logger.debug(
3648 "Wait for vApp {} to delete".format(vapp_name)
3649 )
3650 time.sleep(INTERVAL_TIME)
3651
3652 wait_time += INTERVAL_TIME
3653
3654 if result is None:
3655 self.logger.debug(
3656 "delete_vminstance(): Failed delete uuid {} ".format(vm_id)
3657 )
3658 else:
3659 self.logger.info(
3660 "Deleted vm instance {} successfully".format(vm_id)
3661 )
3662 config_drive_catalog_name, config_drive_catalog_id = (
3663 "cfg_drv-" + vm_id,
3664 None,
3665 )
3666 catalog_list = self.get_image_list()
3667
3668 try:
3669 config_drive_catalog_id = [
3670 catalog_["id"]
3671 for catalog_ in catalog_list
3672 if catalog_["name"] == config_drive_catalog_name
3673 ][0]
3674 except IndexError:
3675 pass
3676
3677 if config_drive_catalog_id:
3678 self.logger.debug(
3679 "delete_vminstance(): Found a config drive catalog {} matching "
3680 'vapp_name"{}". Deleting it.'.format(
3681 config_drive_catalog_id, vapp_name
3682 )
3683 )
3684 self.delete_image(config_drive_catalog_id)
3685
3686 return vm_id
3687 except Exception:
3688 self.logger.debug(traceback.format_exc())
3689
3690 raise vimconn.VimConnException(
3691 "delete_vminstance(): Failed delete vm instance {}".format(vm_id)
3692 )
3693
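# Illustrative usage sketch (not part of the module; assumes the standard vimconn
# signature and an already authenticated vimconnector instance "conn"):
#
#     deleted_id = conn.delete_vminstance("<vm-uuid>")
#     # On success the same vm uuid is returned; a matching "cfg_drv-<vm-uuid>"
#     # config-drive catalog, if present, is deleted as well.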
3694 def refresh_vms_status(self, vm_list):
3695 """Get the status of the virtual machines and their interfaces/ports
3696 Params: the list of VM identifiers
3697 Returns a dictionary with:
3698 vm_id: #VIM id of this Virtual Machine
3699 status: #Mandatory. Text with one of:
3700 # DELETED (not found at vim)
3701 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
3702 # OTHER (Vim reported other status not understood)
3703 # ERROR (VIM indicates an ERROR status)
3704 # ACTIVE, PAUSED, SUSPENDED, INACTIVE (not running),
3705 # CREATING (on building process), ERROR
3706 # ACTIVE:NoMgmtIP (Active but none of its interfaces has an IP address)
3707 #
3708 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
3709 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
3710 interfaces:
3711 - vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
3712 mac_address: #Text format XX:XX:XX:XX:XX:XX
3713 vim_net_id: #network id where this interface is connected
3714 vim_interface_id: #interface/port VIM id
3715 ip_address: #null, or text with IPv4, IPv6 address
3716 """
3717 self.logger.debug("Client requesting refresh vm status for {} ".format(vm_list))
3718
3719 _, vdc = self.get_vdc_details()
3720 if vdc is None:
3721 raise vimconn.VimConnException(
3722 "Failed to get a reference of VDC for a tenant {}".format(
3723 self.tenant_name
3724 )
3725 )
3726
3727 vms_dict = {}
3728 nsx_edge_list = []
3729 for vmuuid in vm_list:
3730 vapp_name = self.get_namebyvappid(vmuuid)
3731 if vapp_name is not None:
3732 try:
3733 vm_pci_details = self.get_vm_pci_details(vmuuid)
3734 vdc_obj = VDC(self.client, href=vdc.get("href"))
3735 vapp_resource = vdc_obj.get_vapp(vapp_name)
3736 the_vapp = VApp(self.client, resource=vapp_resource)
3737
3738 vm_details = {}
3739 for vm in the_vapp.get_all_vms():
3740 headers = {
3741 "Accept": "application/*+xml;version=" + API_VERSION,
3742 "x-vcloud-authorization": self.client._session.headers[
3743 "x-vcloud-authorization"
3744 ],
3745 }
3746 response = self.perform_request(
3747 req_type="GET", url=vm.get("href"), headers=headers
3748 )
3749
3750 if response.status_code != 200:
3751 self.logger.error(
3752 "refresh_vms_status : REST call {} failed reason : {}"
3753 "status code : {}".format(
3754 vm.get("href"), response.text, response.status_code
3755 )
3756 )
3757 raise vimconn.VimConnException(
3758 "refresh_vms_status : Failed to get VM details"
3759 )
3760
3761 xmlroot = XmlElementTree.fromstring(response.text)
3762 result = response.text.replace("\n", " ")
3763 hdd_match = re.search(
3764 r'vcloud:capacity="(\d+)"\svcloud:storageProfileOverrideVmDefault=',
3765 result,
3766 )
3767
3768 if hdd_match:
3769 hdd_mb = hdd_match.group(1)
3770 vm_details["hdd_mb"] = int(hdd_mb) if hdd_mb else None
3771
3772 cpus_match = re.search(
3773 "<rasd:Description>Number of Virtual CPUs</.*?>(\d+)</rasd:VirtualQuantity>",
3774 result,
3775 )
3776
3777 if cpus_match:
3778 cpus = cpus_match.group(1)
3779 vm_details["cpus"] = int(cpus) if cpus else None
3780
3781 memory_mb = re.search(
3782 "<rasd:Description>Memory Size</.*?>(\d+)</rasd:VirtualQuantity>",
3783 result,
3784 ).group(1)
3785 vm_details["memory_mb"] = int(memory_mb) if memory_mb else None
3786 vm_details["status"] = vcdStatusCode2manoFormat[
3787 int(xmlroot.get("status"))
3788 ]
3789 vm_details["id"] = xmlroot.get("id")
3790 vm_details["name"] = xmlroot.get("name")
3791 vm_info = [vm_details]
3792
3793 if vm_pci_details:
3794 vm_info[0].update(vm_pci_details)
3795
3796 vm_dict = {
3797 "status": vcdStatusCode2manoFormat[
3798 int(vapp_resource.get("status"))
3799 ],
3800 "error_msg": vcdStatusCode2manoFormat[
3801 int(vapp_resource.get("status"))
3802 ],
3803 "vim_info": yaml.safe_dump(vm_info),
3804 "interfaces": [],
3805 }
3806
3807 # get networks
3808 vm_ip = None
3809 vm_mac = None
3810 networks = re.findall(
3811 "<NetworkConnection needsCustomization=.*?</NetworkConnection>",
3812 result,
3813 )
3814
3815 for network in networks:
3816 mac_s = re.search("<MACAddress>(.*?)</MACAddress>", network)
3817 vm_mac = mac_s.group(1) if mac_s else None
3818 ip_s = re.search("<IpAddress>(.*?)</IpAddress>", network)
3819 vm_ip = ip_s.group(1) if ip_s else None
3820
3821 if vm_ip is None:
3822 if not nsx_edge_list:
3823 nsx_edge_list = self.get_edge_details()
3824 if nsx_edge_list is None:
3825 raise vimconn.VimConnException(
3826 "refresh_vms_status:"
3827 "Failed to get edge details from NSX Manager"
3828 )
3829
3830 if vm_mac is not None:
3831 vm_ip = self.get_ipaddr_from_NSXedge(
3832 nsx_edge_list, vm_mac
3833 )
3834
3835 net_s = re.search('network="(.*?)"', network)
3836 network_name = net_s.group(1) if net_s else None
3837 vm_net_id = self.get_network_id_by_name(network_name)
3838 interface = {
3839 "mac_address": vm_mac,
3840 "vim_net_id": vm_net_id,
3841 "vim_interface_id": vm_net_id,
3842 "ip_address": vm_ip,
3843 }
3844 vm_dict["interfaces"].append(interface)
3845
3846 # add a vm to vm dict
3847 vms_dict.setdefault(vmuuid, vm_dict)
3848 self.logger.debug("refresh_vms_status : vm info {}".format(vm_dict))
3849 except Exception as exp:
3850 self.logger.debug("Error in response {}".format(exp))
3851 self.logger.debug(traceback.format_exc())
3852
3853 return vms_dict
3854
3855 def get_edge_details(self):
3856 """Get the NSX edge list from NSX Manager
3857 Returns list of NSX edges
3858 """
3859 edge_list = []
3860 rheaders = {"Content-Type": "application/xml"}
3861 nsx_api_url = "/api/4.0/edges"
3862
3863 self.logger.debug(
3864 "Get edge details from NSX Manager {} {}".format(
3865 self.nsx_manager, nsx_api_url
3866 )
3867 )
3868
3869 try:
3870 resp = requests.get(
3871 self.nsx_manager + nsx_api_url,
3872 auth=(self.nsx_user, self.nsx_password),
3873 verify=False,
3874 headers=rheaders,
3875 )
3876 if resp.status_code == requests.codes.ok:
3877 paged_Edge_List = XmlElementTree.fromstring(resp.text)
3878 for edge_pages in paged_Edge_List:
3879 if edge_pages.tag == "edgePage":
3880 for edge_summary in edge_pages:
3881 if edge_summary.tag == "pagingInfo":
3882 for element in edge_summary:
3883 if (
3884 element.tag == "totalCount"
3885 and element.text == "0"
3886 ):
3887 raise vimconn.VimConnException(
3888 "get_edge_details: No NSX edges details found: {}".format(
3889 self.nsx_manager
3890 )
3891 )
3892
3893 if edge_summary.tag == "edgeSummary":
3894 for element in edge_summary:
3895 if element.tag == "id":
3896 edge_list.append(element.text)
3897 else:
3898 raise vimconn.VimConnException(
3899 "get_edge_details: No NSX edge details found: {}".format(
3900 self.nsx_manager
3901 )
3902 )
3903
3904 if not edge_list:
3905 raise vimconn.VimConnException(
3906 "get_edge_details: "
3907 "No NSX edge details found: {}".format(self.nsx_manager)
3908 )
3909 else:
3910 self.logger.debug(
3911 "get_edge_details: Found NSX edges {}".format(edge_list)
3912 )
3913
3914 return edge_list
3915 else:
3916 self.logger.debug(
3917 "get_edge_details: "
3918 "Failed to get NSX edge details from NSX Manager: {}".format(
3919 resp.content
3920 )
3921 )
3922
3923 return None
3924
3925 except Exception as exp:
3926 self.logger.debug(
3927 "get_edge_details: "
3928 "Failed to get NSX edge details from NSX Manager: {}".format(exp)
3929 )
3930 raise vimconn.VimConnException(
3931 "get_edge_details: "
3932 "Failed to get NSX edge details from NSX Manager: {}".format(exp)
3933 )
3934
3935 def get_ipaddr_from_NSXedge(self, nsx_edges, mac_address):
3936 """Get IP address details from NSX edges, using the MAC address
3937 PARAMS: nsx_edges : List of NSX edges
3938 mac_address : Find IP address corresponding to this MAC address
3939 Returns: IP address corresponding to the provided MAC address
3940 """
3941 ip_addr = edge_mac_addr = None  # pre-initialise so the checks below cannot hit an undefined name
3942 rheaders = {"Content-Type": "application/xml"}
3943
3944 self.logger.debug("get_ipaddr_from_NSXedge: Finding IP addr from NSX edge")
3945
3946 try:
3947 for edge in nsx_edges:
3948 nsx_api_url = "/api/4.0/edges/" + edge + "/dhcp/leaseInfo"
3949
3950 resp = requests.get(
3951 self.nsx_manager + nsx_api_url,
3952 auth=(self.nsx_user, self.nsx_password),
3953 verify=False,
3954 headers=rheaders,
3955 )
3956
3957 if resp.status_code == requests.codes.ok:
3958 dhcp_leases = XmlElementTree.fromstring(resp.text)
3959 for child in dhcp_leases:
3960 if child.tag == "dhcpLeaseInfo":
3961 dhcpLeaseInfo = child
3962 for leaseInfo in dhcpLeaseInfo:
3963 for elem in leaseInfo:
3964 if (elem.tag) == "macAddress":
3965 edge_mac_addr = elem.text
3966
3967 if (elem.tag) == "ipAddress":
3968 ip_addr = elem.text
3969
3970 if edge_mac_addr is not None:
3971 if edge_mac_addr == mac_address:
3972 self.logger.debug(
3973 "Found ip addr {} for mac {} at NSX edge {}".format(
3974 ip_addr, mac_address, edge
3975 )
3976 )
3977
3978 return ip_addr
3979 else:
3980 self.logger.debug(
3981 "get_ipaddr_from_NSXedge: "
3982 "Error occurred while getting DHCP lease info from NSX Manager: {}".format(
3983 resp.content
3984 )
3985 )
3986
3987 self.logger.debug(
3988 "get_ipaddr_from_NSXedge: No IP addr found in any NSX edge"
3989 )
3990
3991 return None
3992
3993 except XmlElementTree.ParseError as Err:
3994 self.logger.debug(
3995 "ParseError in response from NSX Manager {}".format(Err.message),
3996 exc_info=True,
3997 )
3998
3999 def action_vminstance(self, vm_id=None, action_dict=None, created_items={}):
4000 """Send and action over a VM instance from VIM
4001 Returns the vm_id if the action was successfully sent to the VIM"""
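# The action_dict keys handled below are: "start", "rebuild", "pause", "resume",
# "shutdown", "shutoff", "forceOff" and "reboot"; the values are not used.
# Illustrative call (assumes an authenticated connector instance "conn"):
#
#     conn.action_vminstance("<vm-uuid>", {"reboot": None})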
4002
4003 self.logger.debug(
4004 "Received action for vm {} and action dict {}".format(vm_id, action_dict)
4005 )
4006
4007 if vm_id is None or action_dict is None:
4008 raise vimconn.VimConnException("Invalid request. VM id or action is None.")
4009
4010 _, vdc = self.get_vdc_details()
4011 if vdc is None:
4012 raise vimconn.VimConnException(
4013 "Failed to get a reference of VDC for a tenant {}".format(
4014 self.tenant_name
4015 )
4016 )
4017
4018 vapp_name = self.get_namebyvappid(vm_id)
4019 if vapp_name is None:
4020 self.logger.debug(
4021 "action_vminstance(): Failed to get vm by given {} vm uuid".format(
4022 vm_id
4023 )
4024 )
4025
4026 raise vimconn.VimConnException(
4027 "Failed to get vm by given {} vm uuid".format(vm_id)
4028 )
4029 else:
4030 self.logger.info(
4031 "Action_vminstance vApp {} and UUID {}".format(vapp_name, vm_id)
4032 )
4033
4034 try:
4035 vdc_obj = VDC(self.client, href=vdc.get("href"))
4036 vapp_resource = vdc_obj.get_vapp(vapp_name)
4037 vapp = VApp(self.client, resource=vapp_resource)
4038
4039 if "start" in action_dict:
4040 self.logger.info(
4041 "action_vminstance: Power on vApp: {}".format(vapp_name)
4042 )
4043 poweron_task = self.power_on_vapp(vm_id, vapp_name)
4044 result = self.client.get_task_monitor().wait_for_success(
4045 task=poweron_task
4046 )
4047 self.instance_actions_result("start", result, vapp_name)
4048 elif "rebuild" in action_dict:
4049 self.logger.info(
4050 "action_vminstance: Rebuild vApp: {}".format(vapp_name)
4051 )
4052 rebuild_task = vapp.deploy(power_on=True)
4053 result = self.client.get_task_monitor().wait_for_success(
4054 task=rebuild_task
4055 )
4056 self.instance_actions_result("rebuild", result, vapp_name)
4057 elif "pause" in action_dict:
4058 self.logger.info("action_vminstance: pause vApp: {}".format(vapp_name))
4059 pause_task = vapp.undeploy(action="suspend")
4060 result = self.client.get_task_monitor().wait_for_success(
4061 task=pause_task
4062 )
4063 self.instance_actions_result("pause", result, vapp_name)
4064 elif "resume" in action_dict:
4065 self.logger.info("action_vminstance: resume vApp: {}".format(vapp_name))
4066 poweron_task = self.power_on_vapp(vm_id, vapp_name)
4067 result = self.client.get_task_monitor().wait_for_success(
4068 task=poweron_task
4069 )
4070 self.instance_actions_result("resume", result, vapp_name)
4071 elif "shutoff" in action_dict or "shutdown" in action_dict:
4072 action_name, _ = list(action_dict.items())[0]
4073 self.logger.info(
4074 "action_vminstance: {} vApp: {}".format(action_name, vapp_name)
4075 )
4076 shutdown_task = vapp.shutdown()
4077 result = self.client.get_task_monitor().wait_for_success(
4078 task=shutdown_task
4079 )
4080 if action_name == "shutdown":
4081 self.instance_actions_result("shutdown", result, vapp_name)
4082 else:
4083 self.instance_actions_result("shutoff", result, vapp_name)
4084 elif "forceOff" in action_dict:
4085 result = vapp.undeploy(action="powerOff")
4086 self.instance_actions_result("forceOff", result, vapp_name)
4087 elif "reboot" in action_dict:
4088 self.logger.info("action_vminstance: reboot vApp: {}".format(vapp_name))
4089 reboot_task = vapp.reboot()
4090 self.client.get_task_monitor().wait_for_success(task=reboot_task)
4091 else:
4092 raise vimconn.VimConnException(
4093 "action_vminstance: Invalid action {} or action is None.".format(
4094 action_dict
4095 )
4096 )
4097
4098 return vm_id
4099 except Exception as exp:
4100 self.logger.debug("action_vminstance: Failed with Exception {}".format(exp))
4101
4102 raise vimconn.VimConnException(
4103 "action_vminstance: Failed with Exception {}".format(exp)
4104 )
4105
4106 def instance_actions_result(self, action, result, vapp_name):
4107 if result.get("status") == "success":
4108 self.logger.info(
4109 "action_vminstance: Sucessfully {} the vApp: {}".format(
4110 action, vapp_name
4111 )
4112 )
4113 else:
4114 self.logger.error(
4115 "action_vminstance: Failed to {} vApp: {}".format(action, vapp_name)
4116 )
4117
4118 def get_vminstance_console(self, vm_id, console_type="novnc"):
4119 """
4120 Get a console for the virtual machine
4121 Params:
4122 vm_id: uuid of the VM
4123 console_type, can be:
4124 "novnc" (by default), "xvpvnc" for VNC types,
4125 "rdp-html5" for RDP types, "spice-html5" for SPICE types
4126 Returns dict with the console parameters:
4127 protocol: ssh, ftp, http, https, ...
4128 server: usually ip address
4129 port: the http, ssh, ... port
4130 suffix: extra text, e.g. the http path and query string
4131 """
4132 console_dict = {}
4133
4134 if console_type is None or console_type == "novnc":
4135 url_rest_call = "{}/api/vApp/vm-{}/screen/action/acquireMksTicket".format(
4136 self.url, vm_id
4137 )
4138 headers = {
4139 "Accept": "application/*+xml;version=" + API_VERSION,
4140 "x-vcloud-authorization": self.client._session.headers[
4141 "x-vcloud-authorization"
4142 ],
4143 }
4144 response = self.perform_request(
4145 req_type="POST", url=url_rest_call, headers=headers
4146 )
4147
4148 if response.status_code == 403:
4149 response = self.retry_rest("GET", url_rest_call)
4150
4151 if response.status_code != 200:
4152 self.logger.error(
4153 "REST call {} failed reason : {}"
4154 "status code : {}".format(
4155 url_rest_call, response.text, response.status_code
4156 )
4157 )
4158 raise vimconn.VimConnException(
4159 "get_vminstance_console : Failed to get " "VM Mks ticket details"
4160 )
4161
4162 s = re.search("<Host>(.*?)</Host>", response.text)
4163 console_dict["server"] = s.group(1) if s else None
4164 s1 = re.search(r"<Port>(\d+)</Port>", response.text)
4165 console_dict["port"] = s1.group(1) if s1 else None
4166 url_rest_call = "{}/api/vApp/vm-{}/screen/action/acquireTicket".format(
4167 self.url, vm_id
4168 )
4169 headers = {
4170 "Accept": "application/*+xml;version=" + API_VERSION,
4171 "x-vcloud-authorization": self.client._session.headers[
4172 "x-vcloud-authorization"
4173 ],
4174 }
4175 response = self.perform_request(
4176 req_type="POST", url=url_rest_call, headers=headers
4177 )
4178
4179 if response.status_code == 403:
4180 response = self.retry_rest("GET", url_rest_call)
4181
4182 if response.status_code != 200:
4183 self.logger.error(
4184 "REST call {} failed reason : {}"
4185 "status code : {}".format(
4186 url_rest_call, response.text, response.status_code
4187 )
4188 )
4189 raise vimconn.VimConnException(
4190 "get_vminstance_console : Failed to get " "VM console details"
4191 )
4192
4193 s = re.search(r">.*?/(vm-\d+.*)</", response.text)
4194 console_dict["suffix"] = s.group(1) if s else None
4195 console_dict["protocol"] = "https"
4196
4197 return console_dict
4198
4199 # NOT USED METHODS in current version
4200
4201 def host_vim2gui(self, host, server_dict):
4202 """Transform host dictionary from VIM format to GUI format,
4203 and append to the server_dict
4204 """
4205 raise vimconn.VimConnNotImplemented("Should have implemented this")
4206
4207 def get_hosts_info(self):
4208 """Get the information of deployed hosts
4209 Returns the hosts content"""
4210 raise vimconn.VimConnNotImplemented("Should have implemented this")
4211
4212 def get_hosts(self, vim_tenant):
4213 """Get the hosts and deployed instances
4214 Returns the hosts content"""
4215 raise vimconn.VimConnNotImplemented("Should have implemented this")
4216
4217 def get_processor_rankings(self):
4218 """Get the processor rankings in the VIM database"""
4219 raise vimconn.VimConnNotImplemented("Should have implemented this")
4220
4221 def new_host(self, host_data):
4222 """Adds a new host to VIM"""
4223 """Returns status code of the VIM response"""
4224 raise vimconn.VimConnNotImplemented("Should have implemented this")
4225
4226 def new_external_port(self, port_data):
4227 """Adds a external port to VIM"""
4228 """Returns the port identifier"""
4229 raise vimconn.VimConnNotImplemented("Should have implemented this")
4230
4231 def new_external_network(self, net_name, net_type):
4232 """Adds a external network to VIM (shared)"""
4233 """Returns the network identifier"""
4234 raise vimconn.VimConnNotImplemented("Should have implemented this")
4235
4236 def connect_port_network(self, port_id, network_id, admin=False):
4237 """Connects a external port to a network"""
4238 """Returns status code of the VIM response"""
4239 raise vimconn.VimConnNotImplemented("Should have implemented this")
4240
4241 def new_vminstancefromJSON(self, vm_data):
4242 """Adds a VM instance to VIM"""
4243 """Returns the instance identifier"""
4244 raise vimconn.VimConnNotImplemented("Should have implemented this")
4245
4246 def get_network_name_by_id(self, network_uuid=None):
4247 """Method gets vcloud director network named based on supplied uuid.
4248
4249 Args:
4250 network_uuid: network_id
4251
4252 Returns:
4253 The network name, or None if not found.
4254 """
4255
4256 if not network_uuid:
4257 return None
4258
4259 try:
4260 org_dict = self.get_org(self.org_uuid)
4261 if "networks" in org_dict:
4262 org_network_dict = org_dict["networks"]
4263
4264 for net_uuid in org_network_dict:
4265 if net_uuid == network_uuid:
4266 return org_network_dict[net_uuid]
4267 except Exception:
4268 self.logger.debug("Exception in get_network_name_by_id")
4269 self.logger.debug(traceback.format_exc())
4270
4271 return None
4272
4273 def get_network_id_by_name(self, network_name=None):
4274 """Method gets vcloud director network uuid based on supplied name.
4275
4276 Args:
4277 network_name: network_name
4278 Returns:
4279 The network uuid, or None if not found.
4281 """
4282 if not network_name:
4283 self.logger.debug("get_network_id_by_name() : Network name is empty")
4284 return None
4285
4286 try:
4287 org_dict = self.get_org(self.org_uuid)
4288 if org_dict and "networks" in org_dict:
4289 org_network_dict = org_dict["networks"]
4290
4291 for net_uuid, net_name in org_network_dict.items():
4292 if net_name == network_name:
4293 return net_uuid
4294
4295 except KeyError as exp:
4296 self.logger.debug("get_network_id_by_name() : KeyError- {} ".format(exp))
4297
4298 return None
4299
4300 def get_physical_network_by_name(self, physical_network_name):
4301 """
4302 Method returns the uuid of the physical network whose name is passed
4303 Args:
4304 physical_network_name: physical network name
4305 Returns:
4306 UUID of physical_network_name
4307 """
4308 try:
4309 client_as_admin = self.connect_as_admin()
4310
4311 if not client_as_admin:
4312 raise vimconn.VimConnConnectionException("Failed to connect vCD.")
4313
4314 url_list = [self.url, "/api/admin/vdc/", self.tenant_id]
4315 vm_list_rest_call = "".join(url_list)
4316
4317 if client_as_admin._session:
4318 headers = {
4319 "Accept": "application/*+xml;version=" + API_VERSION,
4320 "x-vcloud-authorization": client_as_admin._session.headers[
4321 "x-vcloud-authorization"
4322 ],
4323 }
4324 response = self.perform_request(
4325 req_type="GET", url=vm_list_rest_call, headers=headers
4326 )
4327 provider_network = None
4328 available_network = None
4329 # add_vdc_rest_url = None
4330
4331 if response.status_code != requests.codes.ok:
4332 self.logger.debug(
4333 "REST API call {} failed. Return status code {}".format(
4334 vm_list_rest_call, response.status_code
4335 )
4336 )
4337 return None
4338 else:
4339 try:
4340 vm_list_xmlroot = XmlElementTree.fromstring(response.text)
4341 for child in vm_list_xmlroot:
4342 if child.tag.split("}")[1] == "ProviderVdcReference":
4343 provider_network = child.attrib.get("href")
4344 # application/vnd.vmware.admin.providervdc+xml
4345
4346 if child.tag.split("}")[1] == "Link":
4347 if (
4348 child.attrib.get("type")
4349 == "application/vnd.vmware.vcloud.orgVdcNetwork+xml"
4350 and child.attrib.get("rel") == "add"
4351 ):
4352 child.attrib.get("href")
4353 except Exception:
4354 self.logger.debug(
4355 "Failed parse respond for rest api call {}".format(
4356 vm_list_rest_call
4357 )
4358 )
4359 self.logger.debug("Respond body {}".format(response.text))
4360
4361 return None
4362
4363 # find pvdc provided available network
4364 response = self.perform_request(
4365 req_type="GET", url=provider_network, headers=headers
4366 )
4367
4368 if response.status_code != requests.codes.ok:
4369 self.logger.debug(
4370 "REST API call {} failed. Return status code {}".format(
4371 vm_list_rest_call, response.status_code
4372 )
4373 )
4374
4375 return None
4376
4377 try:
4378 vm_list_xmlroot = XmlElementTree.fromstring(response.text)
4379 for child in vm_list_xmlroot.iter():
4380 if child.tag.split("}")[1] == "AvailableNetworks":
4381 for networks in child.iter():
4382 if (
4383 networks.attrib.get("href") is not None
4384 and networks.attrib.get("name") is not None
4385 ):
4386 if (
4387 networks.attrib.get("name")
4388 == physical_network_name
4389 ):
4390 network_url = networks.attrib.get("href")
4391 available_network = network_url[
4392 network_url.rindex("/") + 1 :
4393 ]
4394 break
4395 except Exception:
4396 return None
4397
4398 return available_network
4399 except Exception as e:
4400 self.logger.error("Error while getting physical network: {}".format(e))
4401
4402 def list_org_action(self):
4403 """
4404 Method leverages vCloud director and queries the available organizations for a particular user
4405
4406 Args:
4407 vca - is active VCA connection.
4408 vdc_name - is a vdc name that will be used to query vms action
4409
4410 Returns:
4411 The returned XML response
4412 """
4413 url_list = [self.url, "/api/org"]
4414 vm_list_rest_call = "".join(url_list)
4415
4416 if self.client._session:
4417 headers = {
4418 "Accept": "application/*+xml;version=" + API_VERSION,
4419 "x-vcloud-authorization": self.client._session.headers[
4420 "x-vcloud-authorization"
4421 ],
4422 }
4423
4424 response = self.perform_request(
4425 req_type="GET", url=vm_list_rest_call, headers=headers
4426 )
4427
4428 if response.status_code == 403:
4429 response = self.retry_rest("GET", vm_list_rest_call)
4430
4431 if response.status_code == requests.codes.ok:
4432 return response.text
4433
4434 return None
4435
4436 def get_org_action(self, org_uuid=None):
4437 """
4438 Method leverages vCloud director and retrieves the available objects for an organization.
4439
4440 Args:
4441 org_uuid - vCD organization uuid
4442 self.client - is active connection.
4443
4444 Returns:
4445 The returned XML response
4446 """
4447
4448 if org_uuid is None:
4449 return None
4450
4451 url_list = [self.url, "/api/org/", org_uuid]
4452 vm_list_rest_call = "".join(url_list)
4453
4454 if self.client._session:
4455 headers = {
4456 "Accept": "application/*+xml;version=" + API_VERSION,
4457 "x-vcloud-authorization": self.client._session.headers[
4458 "x-vcloud-authorization"
4459 ],
4460 }
4461
4462 # response = requests.get(vm_list_rest_call, headers=headers, verify=False)
4463 response = self.perform_request(
4464 req_type="GET", url=vm_list_rest_call, headers=headers
4465 )
4466
4467 if response.status_code == 403:
4468 response = self.retry_rest("GET", vm_list_rest_call)
4469
4470 if response.status_code == requests.codes.ok:
4471 return response.text
4472
4473 return None
4474
4475 def get_org(self, org_uuid=None):
4476 """
4477 Method retrieves available organization in vCloud Director
4478
4479 Args:
4480 org_uuid - is an organization uuid.
4481
4482 Returns:
4483 The return dictionary with the following keys:
4484 "networks" - network list under the org
4485 "catalogs" - catalog list under the org
4486 "vdcs" - vdc list under the org
4487 """
4488
4489 org_dict = {}
4490
4491 if org_uuid is None:
4492 return org_dict
4493
4494 content = self.get_org_action(org_uuid=org_uuid)
4495 try:
4496 vdc_list = {}
4497 network_list = {}
4498 catalog_list = {}
4499 vm_list_xmlroot = XmlElementTree.fromstring(content)
4500 for child in vm_list_xmlroot:
4501 if child.attrib["type"] == "application/vnd.vmware.vcloud.vdc+xml":
4502 vdc_list[child.attrib["href"].split("/")[-1:][0]] = child.attrib[
4503 "name"
4504 ]
4505 org_dict["vdcs"] = vdc_list
4506
4507 if (
4508 child.attrib["type"]
4509 == "application/vnd.vmware.vcloud.orgNetwork+xml"
4510 ):
4511 network_list[
4512 child.attrib["href"].split("/")[-1:][0]
4513 ] = child.attrib["name"]
4514 org_dict["networks"] = network_list
4515
4516 if child.attrib["type"] == "application/vnd.vmware.vcloud.catalog+xml":
4517 catalog_list[
4518 child.attrib["href"].split("/")[-1:][0]
4519 ] = child.attrib["name"]
4520 org_dict["catalogs"] = catalog_list
4521 except Exception:
4522 pass
4523
4524 return org_dict
4525
4526 def get_org_list(self):
4527 """
4528 Method retrieves the available organizations in vCloud Director
4529
4530 Args:
4531 vca - is active VCA connection.
4532
4533 Returns:
4534 The return dictionary, keyed by organization UUID
4535 """
4536 org_dict = {}
4537
4538 content = self.list_org_action()
4539 try:
4540 vm_list_xmlroot = XmlElementTree.fromstring(content)
4541
4542 for vm_xml in vm_list_xmlroot:
4543 if vm_xml.tag.split("}")[1] == "Org":
4544 org_uuid = vm_xml.attrib["href"].split("/")[-1:]
4545 org_dict[org_uuid[0]] = vm_xml.attrib["name"]
4546 except Exception:
4547 pass
4548
4549 return org_dict
4550
4551 def vms_view_action(self, vdc_name=None):
4552 """Method leverages vCloud director vms query call
4553
4554 Args:
4555 vca - is active VCA connection.
4556 vdc_name - is a vdc name that will be used to query vms action
4557
4558 Returns:
4559 The returned XML response
4560 """
4561 vca = self.connect()
4562 if vdc_name is None:
4563 return None
4564
4565 url_list = [vca.host, "/api/vms/query"]
4566 vm_list_rest_call = "".join(url_list)
4567
4568 if not (not vca.vcloud_session or not vca.vcloud_session.organization):
4569 refs = [
4570 ref
4571 for ref in vca.vcloud_session.organization.Link
4572 if ref.name == vdc_name
4573 and ref.type_ == "application/vnd.vmware.vcloud.vdc+xml"
4574 ]
4575
4576 if len(refs) == 1:
4577 response = self.perform_request(
4578 req_type="GET",
4579 url=vm_list_rest_call,
4580 headers=vca.vcloud_session.get_vcloud_headers(),
4581 verify=vca.verify,
4582 logger=vca.logger,
4583 )
4584
4585 if response.status_code == requests.codes.ok:
4586 return response.text
4587
4588 return None
4589
4590 def get_vapp_list(self, vdc_name=None):
4591 """
4592 Method retrieves the list of vApps deployed in vCloud director and returns a dictionary
4593 containing all vApps deployed for the queried VDC.
4594 The key for a dictionary is vApp UUID
4595
4596
4597 Args:
4598 vca - is active VCA connection.
4599 vdc_name - is a vdc name that will be used to query vms action
4600
4601 Returns:
4602 The return dictionary and key for each entry vapp UUID
4603 """
4604 vapp_dict = {}
4605
4606 if vdc_name is None:
4607 return vapp_dict
4608
4609 content = self.vms_view_action(vdc_name=vdc_name)
4610 try:
4611 vm_list_xmlroot = XmlElementTree.fromstring(content)
4612 for vm_xml in vm_list_xmlroot:
4613 if vm_xml.tag.split("}")[1] == "VMRecord":
4614 if vm_xml.attrib["isVAppTemplate"] == "true":
4615 rawuuid = vm_xml.attrib["container"].split("/")[-1:]
4616 if "vappTemplate-" in rawuuid[0]:
4617 # container in format vappTemplate-e63d40e7-4ff5-4c6d-851f-96c1e4da86a5; we remove
4618 # the "vappTemplate-" prefix and use the raw UUID as key
4619 vapp_dict[rawuuid[0][13:]] = vm_xml.attrib
4620 except Exception:
4621 pass
4622
4623 return vapp_dict
4624
4625 def get_vm_list(self, vdc_name=None):
4626 """
4627 Method retrieves the list of VMs deployed in vCloud director. It returns a dictionary
4628 containing all VMs deployed for the queried VDC.
4629 The key for a dictionary is VM UUID
4630
4631
4632 Args:
4633 vca - is active VCA connection.
4634 vdc_name - is a vdc name that will be used to query vms action
4635
4636 Returns:
4637 The return dictionary, keyed by VM UUID
4638 """
4639 vm_dict = {}
4640
4641 if vdc_name is None:
4642 return vm_dict
4643
4644 content = self.vms_view_action(vdc_name=vdc_name)
4645 try:
4646 vm_list_xmlroot = XmlElementTree.fromstring(content)
4647 for vm_xml in vm_list_xmlroot:
4648 if vm_xml.tag.split("}")[1] == "VMRecord":
4649 if vm_xml.attrib["isVAppTemplate"] == "false":
4650 rawuuid = vm_xml.attrib["href"].split("/")[-1:]
4651 if "vm-" in rawuuid[0]:
4652 # vm in format vm-e63d40e7-4ff5-4c6d-851f-96c1e4da86a5; we remove
4653 # the "vm-" prefix and use the raw UUID as key
4654 vm_dict[rawuuid[0][3:]] = vm_xml.attrib
4655 except Exception:
4656 pass
4657
4658 return vm_dict
4659
4660 def get_vapp(self, vdc_name=None, vapp_name=None, isuuid=False):
4661 """
4662 Method retrieves a VM deployed in vCloud director. It returns the VM attributes as a dictionary
4663 containing all matching VMs deployed for the queried VDC.
4664 The key for a dictionary is VM UUID
4665
4666
4667 Args:
4668 vca - is active VCA connection.
4669 vdc_name - is a vdc name that will be used to query vms action
4670
4671 Returns:
4672 The return dictionary and key for each entry vapp UUID
4673 """
4674 vm_dict = {}
4675 vca = self.connect()
4676
4677 if not vca:
4678 raise vimconn.VimConnConnectionException("self.connect() is failed")
4679
4680 if vdc_name is None:
4681 return vm_dict
4682
4683 content = self.vms_view_action(vdc_name=vdc_name)
4684 try:
4685 vm_list_xmlroot = XmlElementTree.fromstring(content)
4686 for vm_xml in vm_list_xmlroot:
4687 if (
4688 vm_xml.tag.split("}")[1] == "VMRecord"
4689 and vm_xml.attrib["isVAppTemplate"] == "false"
4690 ):
4691 # lookup done by UUID
4692 if isuuid:
4693 if vapp_name in vm_xml.attrib["container"]:
4694 rawuuid = vm_xml.attrib["href"].split("/")[-1:]
4695 if "vm-" in rawuuid[0]:
4696 vm_dict[rawuuid[0][3:]] = vm_xml.attrib
4697 break
4698 # lookup done by Name
4699 else:
4700 if vapp_name in vm_xml.attrib["name"]:
4701 rawuuid = vm_xml.attrib["href"].split("/")[-1:]
4702 if "vm-" in rawuuid[0]:
4703 vm_dict[rawuuid[0][3:]] = vm_xml.attrib
4704 break
4705 except Exception:
4706 pass
4707
4708 return vm_dict
4709
4710 def get_network_action(self, network_uuid=None):
4711 """
4712 Method leverages vCloud director and queries a network based on its network uuid
4713
4714 Args:
4715 vca - is active VCA connection.
4716 network_uuid - is a network uuid
4717
4718 Returns:
4719 The returned XML response
4720 """
4721 if network_uuid is None:
4722 return None
4723
4724 url_list = [self.url, "/api/network/", network_uuid]
4725 vm_list_rest_call = "".join(url_list)
4726
4727 if self.client._session:
4728 headers = {
4729 "Accept": "application/*+xml;version=" + API_VERSION,
4730 "x-vcloud-authorization": self.client._session.headers[
4731 "x-vcloud-authorization"
4732 ],
4733 }
4734 response = self.perform_request(
4735 req_type="GET", url=vm_list_rest_call, headers=headers
4736 )
4737
4738 # Retry login if session expired & retry sending request
4739 if response.status_code == 403:
4740 response = self.retry_rest("GET", vm_list_rest_call)
4741
4742 if response.status_code == requests.codes.ok:
4743 return response.text
4744
4745 return None
4746
4747 def get_vcd_network(self, network_uuid=None):
4748 """
4749 Method retrieves available network from vCloud Director
4750
4751 Args:
4752 network_uuid - is VCD network UUID
4753
4754 Each element serialized as key : value pair
4755
4756 Following keys available for access, e.g. network_configuration['Gateway']
4757 <Configuration>
4758 <IpScopes>
4759 <IpScope>
4760 <IsInherited>true</IsInherited>
4761 <Gateway>172.16.252.100</Gateway>
4762 <Netmask>255.255.255.0</Netmask>
4763 <Dns1>172.16.254.201</Dns1>
4764 <Dns2>172.16.254.202</Dns2>
4765 <DnsSuffix>vmwarelab.edu</DnsSuffix>
4766 <IsEnabled>true</IsEnabled>
4767 <IpRanges>
4768 <IpRange>
4769 <StartAddress>172.16.252.1</StartAddress>
4770 <EndAddress>172.16.252.99</EndAddress>
4771 </IpRange>
4772 </IpRanges>
4773 </IpScope>
4774 </IpScopes>
4775 <FenceMode>bridged</FenceMode>
4776
4777 Returns:
4778 The return dictionary and key for each entry vapp UUID
4779 """
4780 network_configuration = {}
4781
4782 if network_uuid is None:
4783 return network_uuid
4784
4785 try:
4786 content = self.get_network_action(network_uuid=network_uuid)
4787 if content is not None:
4788 vm_list_xmlroot = XmlElementTree.fromstring(content)
4789 network_configuration["status"] = vm_list_xmlroot.get("status")
4790 network_configuration["name"] = vm_list_xmlroot.get("name")
4791 network_configuration["uuid"] = vm_list_xmlroot.get("id").split(":")[3]
4792
4793 for child in vm_list_xmlroot:
4794 if child.tag.split("}")[1] == "IsShared":
4795 network_configuration["isShared"] = child.text.strip()
4796
4797 if child.tag.split("}")[1] == "Configuration":
4798 for configuration in child.iter():
4799 tagKey = configuration.tag.split("}")[1].strip()
4800 if tagKey != "":
4801 network_configuration[
4802 tagKey
4803 ] = configuration.text.strip()
4804 except Exception as exp:
4805 self.logger.debug("get_vcd_network: Failed with Exception {}".format(exp))
4806
4807 raise vimconn.VimConnException(
4808 "get_vcd_network: Failed with Exception {}".format(exp)
4809 )
4810
4811 return network_configuration
4812
4813 def delete_network_action(self, network_uuid=None):
4814 """
4815 Method deletes the given network from vCloud director
4816
4817 Args:
4818 network_uuid - is a network uuid that the client wishes to delete
4819
4820 Returns:
4821 True if the deletion was accepted (HTTP 202), otherwise False
4822 """
4823 client = self.connect_as_admin()
4824
4825 if not client:
4826 raise vimconn.VimConnConnectionException("Failed to connect vCD as admin")
4827
4828 if network_uuid is None:
4829 return False
4830
4831 url_list = [self.url, "/api/admin/network/", network_uuid]
4832 vm_list_rest_call = "".join(url_list)
4833
4834 if client._session:
4835 headers = {
4836 "Accept": "application/*+xml;version=" + API_VERSION,
4837 "x-vcloud-authorization": client._session.headers[
4838 "x-vcloud-authorization"
4839 ],
4840 }
4841 response = self.perform_request(
4842 req_type="DELETE", url=vm_list_rest_call, headers=headers
4843 )
4844
4845 if response.status_code == 202:
4846 return True
4847
4848 return False
4849
4850 def create_network(
4851 self,
4852 network_name=None,
4853 net_type="bridge",
4854 parent_network_uuid=None,
4855 ip_profile=None,
4856 isshared="true",
4857 ):
4858 """
4859 Method creates a network in vCloud director
4860
4861 Args:
4862 network_name - is network name to be created.
4863 net_type - can be 'bridge','data','ptp','mgmt'.
4864 ip_profile is a dict containing the IP parameters of the network
4865 isshared - is a boolean
4866 parent_network_uuid - is parent provider vdc network that will be used for mapping.
4867 It is an optional attribute. By default, if no parent network is indicated, the first available one will be used.
4868
4869 Returns:
4870 The new network uuid, or None
4871 """
4872 new_network_name = [network_name, "-", str(uuid.uuid4())]
4873 content = self.create_network_rest(
4874 network_name="".join(new_network_name),
4875 ip_profile=ip_profile,
4876 net_type=net_type,
4877 parent_network_uuid=parent_network_uuid,
4878 isshared=isshared,
4879 )
4880
4881 if content is None:
4882 self.logger.debug("Failed create network {}.".format(network_name))
4883
4884 return None
4885
4886 try:
4887 vm_list_xmlroot = XmlElementTree.fromstring(content)
4888 vcd_uuid = vm_list_xmlroot.get("id").split(":")
4889 if len(vcd_uuid) == 4:
4890 self.logger.info(
4891 "Created new network name: {} uuid: {}".format(
4892 network_name, vcd_uuid[3]
4893 )
4894 )
4895
4896 return vcd_uuid[3]
4897 except Exception:
4898 self.logger.debug("Failed create network {}".format(network_name))
4899
4900 return None
4901
4902 def create_network_rest(
4903 self,
4904 network_name=None,
4905 net_type="bridge",
4906 parent_network_uuid=None,
4907 ip_profile=None,
4908 isshared="true",
4909 ):
4910 """
4911 Method creates a network in vCloud director
4912
4913 Args:
4914 network_name - is network name to be created.
4915 net_type - can be 'bridge','data','ptp','mgmt'.
4916 ip_profile is a dict containing the IP parameters of the network
4917 isshared - is a boolean
4918 parent_network_uuid - is parent provider vdc network that will be used for mapping.
4919 It is an optional attribute. By default, if no parent network is indicated, the first available one will be used.
4920
4921 Returns:
4922 The XML response text of the created network, or None
4923 """
4924 client_as_admin = self.connect_as_admin()
4925
4926 if not client_as_admin:
4927 raise vimconn.VimConnConnectionException("Failed to connect vCD.")
4928
4929 if network_name is None:
4930 return None
4931
4932 url_list = [self.url, "/api/admin/vdc/", self.tenant_id]
4933 vm_list_rest_call = "".join(url_list)
4934
4935 if client_as_admin._session:
4936 headers = {
4937 "Accept": "application/*+xml;version=" + API_VERSION,
4938 "x-vcloud-authorization": client_as_admin._session.headers[
4939 "x-vcloud-authorization"
4940 ],
4941 }
4942 response = self.perform_request(
4943 req_type="GET", url=vm_list_rest_call, headers=headers
4944 )
4945 provider_network = None
4946 available_networks = None
4947 add_vdc_rest_url = None
4948
4949 if response.status_code != requests.codes.ok:
4950 self.logger.debug(
4951 "REST API call {} failed. Return status code {}".format(
4952 vm_list_rest_call, response.status_code
4953 )
4954 )
4955
4956 return None
4957 else:
4958 try:
4959 vm_list_xmlroot = XmlElementTree.fromstring(response.text)
4960 for child in vm_list_xmlroot:
4961 if child.tag.split("}")[1] == "ProviderVdcReference":
4962 provider_network = child.attrib.get("href")
4963 # application/vnd.vmware.admin.providervdc+xml
4964
4965 if child.tag.split("}")[1] == "Link":
4966 if (
4967 child.attrib.get("type")
4968 == "application/vnd.vmware.vcloud.orgVdcNetwork+xml"
4969 and child.attrib.get("rel") == "add"
4970 ):
4971 add_vdc_rest_url = child.attrib.get("href")
4972 except Exception:
4973 self.logger.debug(
4974 "Failed parse respond for rest api call {}".format(
4975 vm_list_rest_call
4976 )
4977 )
4978 self.logger.debug("Respond body {}".format(response.text))
4979
4980 return None
4981
4982 # find pvdc provided available network
4983 response = self.perform_request(
4984 req_type="GET", url=provider_network, headers=headers
4985 )
4986
4987 if response.status_code != requests.codes.ok:
4988 self.logger.debug(
4989 "REST API call {} failed. Return status code {}".format(
4990 vm_list_rest_call, response.status_code
4991 )
4992 )
4993
4994 return None
4995
4996 if parent_network_uuid is None:
4997 try:
4998 vm_list_xmlroot = XmlElementTree.fromstring(response.text)
4999 for child in vm_list_xmlroot.iter():
5000 if child.tag.split("}")[1] == "AvailableNetworks":
5001 for networks in child.iter():
5002 # application/vnd.vmware.admin.network+xml
5003 if networks.attrib.get("href") is not None:
5004 available_networks = networks.attrib.get("href")
5005 break
5006 except Exception:
5007 return None
5008
5009 try:
5010 # Configure IP profile of the network
5011 ip_profile = (
5012 ip_profile if ip_profile is not None else DEFAULT_IP_PROFILE
5013 )
5014
5015 if (
5016 "subnet_address" not in ip_profile
5017 or ip_profile["subnet_address"] is None
5018 ):
5019 subnet_rand = random.randint(0, 255)
5020 ip_base = "192.168.{}.".format(subnet_rand)
5021 ip_profile["subnet_address"] = ip_base + "0/24"
5022 else:
5023 ip_base = ip_profile["subnet_address"].rsplit(".", 1)[0] + "."
5024
5025 if (
5026 "gateway_address" not in ip_profile
5027 or ip_profile["gateway_address"] is None
5028 ):
5029 ip_profile["gateway_address"] = ip_base + "1"
5030
5031 if "dhcp_count" not in ip_profile or ip_profile["dhcp_count"] is None:
5032 ip_profile["dhcp_count"] = DEFAULT_IP_PROFILE["dhcp_count"]
5033
5034 if (
5035 "dhcp_enabled" not in ip_profile
5036 or ip_profile["dhcp_enabled"] is None
5037 ):
5038 ip_profile["dhcp_enabled"] = DEFAULT_IP_PROFILE["dhcp_enabled"]
5039
5040 if (
5041 "dhcp_start_address" not in ip_profile
5042 or ip_profile["dhcp_start_address"] is None
5043 ):
5044 ip_profile["dhcp_start_address"] = ip_base + "3"
5045
5046 if "ip_version" not in ip_profile or ip_profile["ip_version"] is None:
5047 ip_profile["ip_version"] = DEFAULT_IP_PROFILE["ip_version"]
5048
5049 if "dns_address" not in ip_profile or ip_profile["dns_address"] is None:
5050 ip_profile["dns_address"] = ip_base + "2"
5051
5052 gateway_address = ip_profile["gateway_address"]
5053 dhcp_count = int(ip_profile["dhcp_count"])
5054 subnet_address = self.convert_cidr_to_netmask(
5055 ip_profile["subnet_address"]
5056 )
5057
5058 if ip_profile["dhcp_enabled"] is True:
5059 dhcp_enabled = "true"
5060 else:
5061 dhcp_enabled = "false"
5062
5063 dhcp_start_address = ip_profile["dhcp_start_address"]
5064
5065 # derive dhcp_end_address from dhcp_start_address & dhcp_count
5066 end_ip_int = int(netaddr.IPAddress(dhcp_start_address))
5067 end_ip_int += dhcp_count - 1
5068 dhcp_end_address = str(netaddr.IPAddress(end_ip_int))
5069
5070 # ip_version = ip_profile['ip_version']
5071 dns_address = ip_profile["dns_address"]
5072 except KeyError as exp:
5073 self.logger.debug("Create Network REST: Key error {}".format(exp))
5074
5075 raise vimconn.VimConnException(
5076 "Create Network REST: Key error{}".format(exp)
5077 )
5078
5079 # either use client provided UUID or search for a first available
5080 # if both are not defined we return none
5081 if parent_network_uuid is not None:
5082 provider_network = None
5083 available_networks = None
5084 add_vdc_rest_url = None
5085 url_list = [self.url, "/api/admin/vdc/", self.tenant_id, "/networks"]
5086 add_vdc_rest_url = "".join(url_list)
5087 url_list = [self.url, "/api/admin/network/", parent_network_uuid]
5088 available_networks = "".join(url_list)
5089
5090 # Creating all networks as Direct Org VDC type networks.
5091 # Unused in case of Underlay (data/ptp) network interface.
5092 fence_mode = "isolated"
5093 is_inherited = "false"
5094 dns_list = dns_address.split(";")
5095 dns1 = dns_list[0]
5096 dns2_text = ""
5097
5098 if len(dns_list) >= 2:
5099 dns2_text = "\n <Dns2>{}</Dns2>\n".format(
5100 dns_list[1]
5101 )
5102
5103 if net_type == "isolated":
5104 fence_mode = "isolated"
5105 data = """ <OrgVdcNetwork name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5">
5106 <Description>Openmano created</Description>
5107 <Configuration>
5108 <IpScopes>
5109 <IpScope>
5110 <IsInherited>{1:s}</IsInherited>
5111 <Gateway>{2:s}</Gateway>
5112 <Netmask>{3:s}</Netmask>
5113 <Dns1>{4:s}</Dns1>{5:s}
5114 <IsEnabled>{6:s}</IsEnabled>
5115 <IpRanges>
5116 <IpRange>
5117 <StartAddress>{7:s}</StartAddress>
5118 <EndAddress>{8:s}</EndAddress>
5119 </IpRange>
5120 </IpRanges>
5121 </IpScope>
5122 </IpScopes>
5123 <FenceMode>{9:s}</FenceMode>
5124 </Configuration>
5125 <IsShared>{10:s}</IsShared>
5126 </OrgVdcNetwork> """.format(
5127 escape(network_name),
5128 is_inherited,
5129 gateway_address,
5130 subnet_address,
5131 dns1,
5132 dns2_text,
5133 dhcp_enabled,
5134 dhcp_start_address,
5135 dhcp_end_address,
5136 fence_mode,
5137 isshared,
5138 )
5139 else:
5140 fence_mode = "bridged"
5141 data = """ <OrgVdcNetwork name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5">
5142 <Description>Openmano created</Description>
5143 <Configuration>
5144 <IpScopes>
5145 <IpScope>
5146 <IsInherited>{1:s}</IsInherited>
5147 <Gateway>{2:s}</Gateway>
5148 <Netmask>{3:s}</Netmask>
5149 <Dns1>{4:s}</Dns1>{5:s}
5150 <IsEnabled>{6:s}</IsEnabled>
5151 <IpRanges>
5152 <IpRange>
5153 <StartAddress>{7:s}</StartAddress>
5154 <EndAddress>{8:s}</EndAddress>
5155 </IpRange>
5156 </IpRanges>
5157 </IpScope>
5158 </IpScopes>
5159 <ParentNetwork href="{9:s}"/>
5160 <FenceMode>{10:s}</FenceMode>
5161 </Configuration>
5162 <IsShared>{11:s}</IsShared>
5163 </OrgVdcNetwork> """.format(
5164 escape(network_name),
5165 is_inherited,
5166 gateway_address,
5167 subnet_address,
5168 dns1,
5169 dns2_text,
5170 dhcp_enabled,
5171 dhcp_start_address,
5172 dhcp_end_address,
5173 available_networks,
5174 fence_mode,
5175 isshared,
5176 )
5177
5178 headers["Content-Type"] = "application/vnd.vmware.vcloud.orgVdcNetwork+xml"
5179 try:
5180 response = self.perform_request(
5181 req_type="POST", url=add_vdc_rest_url, headers=headers, data=data
5182 )
5183
5184 if response.status_code != 201:
5185 self.logger.debug(
5186 "Create Network POST REST API call failed. "
5187 "Return status code {}, response.text: {}".format(
5188 response.status_code, response.text
5189 )
5190 )
5191 else:
5192 network_task = self.get_task_from_response(response.text)
5193 self.logger.debug(
5194 "Create Network REST : Waiting for Network creation complete"
5195 )
5196 time.sleep(5)
5197 result = self.client.get_task_monitor().wait_for_success(
5198 task=network_task
5199 )
5200
5201 if result.get("status") == "success":
5202 return response.text
5203 else:
5204 self.logger.debug(
5205 "create_network_rest task failed. Network Create response : {}".format(
5206 response.text
5207 )
5208 )
5209 except Exception as exp:
5210 self.logger.debug("create_network_rest : Exception : {} ".format(exp))
5211
5212 return None
5213
5214 def convert_cidr_to_netmask(self, cidr_ip=None):
5215 """
5216 Method converts a CIDR prefix length to a dotted-quad netmask
5217 Args:
5218 cidr_ip : CIDR IP address
5219 Returns:
5220 netmask : Converted netmask
5221 """
5222 if cidr_ip is not None:
5223 if "/" in cidr_ip:
5224 _, net_bits = cidr_ip.split("/")
5225 netmask = socket.inet_ntoa(
5226 struct.pack(">I", (0xFFFFFFFF << (32 - int(net_bits))) & 0xFFFFFFFF)
5227 )
5228 else:
5229 netmask = cidr_ip
5230
5231 return netmask
5232
5233 return None
5234
5235 def get_provider_rest(self, vca=None):
5236 """
5237 Method gets provider vdc view from vcloud director
5238
5239 Args:
5240 vca - is an active VCA connection.
5243
5244 Returns:
5245 The returned XML content of the response, or None
5246 """
5247 url_list = [self.url, "/api/admin"]
5248
5249 if vca:
5250 headers = {
5251 "Accept": "application/*+xml;version=" + API_VERSION,
5252 "x-vcloud-authorization": self.client._session.headers[
5253 "x-vcloud-authorization"
5254 ],
5255 }
5256 response = self.perform_request(
5257 req_type="GET", url="".join(url_list), headers=headers
5258 )
5259
5260 if response.status_code == requests.codes.ok:
5261 return response.text
5262
5263 return None
5264
5265 def create_vdc(self, vdc_name=None):
5266 vdc_dict = {}
5267 xml_content = self.create_vdc_from_tmpl_rest(vdc_name=vdc_name)
5268
5269 if xml_content is not None:
5270 try:
5271 task_resp_xmlroot = XmlElementTree.fromstring(xml_content)
5272 for child in task_resp_xmlroot:
5273 if child.tag.split("}")[1] == "Owner":
5274 vdc_id = child.attrib.get("href").split("/")[-1]
5275 vdc_dict[vdc_id] = task_resp_xmlroot.get("href")
5276
5277 return vdc_dict
5278 except Exception:
5279 self.logger.debug("Respond body {}".format(xml_content))
5280
5281 return None
5282
5283 def create_vdc_from_tmpl_rest(self, vdc_name=None):
5284 """
5285 Method creates a vdc in vCloud director based on a VDC template.
5286 It uses a pre-defined template.
5287
5288 Args:
5289 vdc_name - name of a new vdc.
5290
5291 Returns:
5292 The returned XML content of the response, or None
5293 """
5294 # prerequisite: at least one vdc template should be available in vCD
5295 self.logger.info("Creating new vdc {}".format(vdc_name))
5296 vca = self.connect_as_admin()
5297
5298 if not vca:
5299 raise vimconn.VimConnConnectionException("Failed to connect vCD")
5300
5301 if vdc_name is None:
5302 return None
5303
5304 url_list = [self.url, "/api/vdcTemplates"]
5305 vm_list_rest_call = "".join(url_list)
5306 headers = {
5307 "Accept": "application/*+xml;version=" + API_VERSION,
5308 "x-vcloud-authorization": vca._session.headers["x-vcloud-authorization"],
5309 }
5310 response = self.perform_request(
5311 req_type="GET", url=vm_list_rest_call, headers=headers
5312 )
5313
5314 # container url to a template
5315 vdc_template_ref = None
5316 try:
5317 vm_list_xmlroot = XmlElementTree.fromstring(response.text)
5318 for child in vm_list_xmlroot:
5319 # application/vnd.vmware.admin.providervdc+xml
5320 # we need to find a template from which we instantiate the VDC
5321 if child.tag.split("}")[1] == "VdcTemplate":
5322 if (
5323 child.attrib.get("type")
5324 == "application/vnd.vmware.admin.vdcTemplate+xml"
5325 ):
5326 vdc_template_ref = child.attrib.get("href")
5327 except Exception:
5328 self.logger.debug(
5329 "Failed parse respond for rest api call {}".format(vm_list_rest_call)
5330 )
5331 self.logger.debug("Respond body {}".format(response.text))
5332
5333 return None
5334
5335 # if we didn't find the required pre-defined template we return None
5336 if vdc_template_ref is None:
5337 return None
5338
5339 try:
5340 # instantiate vdc
5341 url_list = [self.url, "/api/org/", self.org_uuid, "/action/instantiate"]
5342 vm_list_rest_call = "".join(url_list)
5343 data = """<InstantiateVdcTemplateParams name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5">
5344 <Source href="{1:s}"></Source>
5345 <Description>openmano</Description>
5346 </InstantiateVdcTemplateParams>""".format(
5347 vdc_name, vdc_template_ref
5348 )
5349 headers[
5350 "Content-Type"
5351 ] = "application/vnd.vmware.vcloud.instantiateVdcTemplateParams+xml"
5352 response = self.perform_request(
5353 req_type="POST", url=vm_list_rest_call, headers=headers, data=data
5354 )
5355 vdc_task = self.get_task_from_response(response.text)
5356 self.client.get_task_monitor().wait_for_success(task=vdc_task)
5357
5358 # if all is ok we respond with the content, otherwise None by default
5359 if response.status_code >= 200 and response.status_code < 300:
5360 return response.text
5361
5362 return None
5363 except Exception:
5364 self.logger.debug(
5365 "Failed parse respond for rest api call {}".format(vm_list_rest_call)
5366 )
5367 self.logger.debug("Respond body {}".format(response.text))
5368
5369 return None
5370
5371 def create_vdc_rest(self, vdc_name=None):
5372 """
5373 Method creates a VDC in vCloud director
5374
5375 Args:
5376 vdc_name - vdc name to be created
5377 Returns:
5378 The return response
5379 """
5380 self.logger.info("Creating new vdc {}".format(vdc_name))
5381 vca = self.connect_as_admin()
5382
5383 if not vca:
5384 raise vimconn.VimConnConnectionException("Failed to connect vCD")
5385
5386 if vdc_name is None:
5387 return None
5388
5389 url_list = [self.url, "/api/admin/org/", self.org_uuid]
5390 vm_list_rest_call = "".join(url_list)
5391
5392 if vca._session:
5393 headers = {
5394 "Accept": "application/*+xml;version=" + API_VERSION,
5395 "x-vcloud-authorization": self.client._session.headers[
5396 "x-vcloud-authorization"
5397 ],
5398 }
5399 response = self.perform_request(
5400 req_type="GET", url=vm_list_rest_call, headers=headers
5401 )
5402 provider_vdc_ref = None
5403 add_vdc_rest_url = None
5404 # available_networks = None
5405
5406 if response.status_code != requests.codes.ok:
5407 self.logger.debug(
5408 "REST API call {} failed. Return status code {}".format(
5409 vm_list_rest_call, response.status_code
5410 )
5411 )
5412
5413 return None
5414 else:
5415 try:
5416 vm_list_xmlroot = XmlElementTree.fromstring(response.text)
5417 for child in vm_list_xmlroot:
5418 # application/vnd.vmware.admin.providervdc+xml
5419 if child.tag.split("}")[1] == "Link":
5420 if (
5421 child.attrib.get("type")
5422 == "application/vnd.vmware.admin.createVdcParams+xml"
5423 and child.attrib.get("rel") == "add"
5424 ):
5425 add_vdc_rest_url = child.attrib.get("href")
5426 except Exception:
5427 self.logger.debug(
5428 "Failed parse respond for rest api call {}".format(
5429 vm_list_rest_call
5430 )
5431 )
5432 self.logger.debug("Respond body {}".format(response.text))
5433
5434 return None
5435
5436 response = self.get_provider_rest(vca=vca)
5437 try:
5438 vm_list_xmlroot = XmlElementTree.fromstring(response)
5439 for child in vm_list_xmlroot:
5440 if child.tag.split("}")[1] == "ProviderVdcReferences":
5441 for sub_child in child:
5442 provider_vdc_ref = sub_child.attrib.get("href")
5443 except Exception:
5444 self.logger.debug(
5445 "Failed parse respond for rest api call {}".format(
5446 vm_list_rest_call
5447 )
5448 )
5449 self.logger.debug("Respond body {}".format(response))
5450
5451 return None
5452
5453 if add_vdc_rest_url is not None and provider_vdc_ref is not None:
5454 data = """ <CreateVdcParams name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5"><Description>{1:s}</Description>
5455 <AllocationModel>ReservationPool</AllocationModel>
5456 <ComputeCapacity><Cpu><Units>MHz</Units><Allocated>2048</Allocated><Limit>2048</Limit></Cpu>
5457 <Memory><Units>MB</Units><Allocated>2048</Allocated><Limit>2048</Limit></Memory>
5458 </ComputeCapacity><NicQuota>0</NicQuota><NetworkQuota>100</NetworkQuota>
5459 <VdcStorageProfile><Enabled>true</Enabled><Units>MB</Units><Limit>20480</Limit><Default>true</Default></VdcStorageProfile>
5460 <ProviderVdcReference
5461 name="Main Provider"
5462 href="{2:s}" />
5463 <UsesFastProvisioning>true</UsesFastProvisioning></CreateVdcParams>""".format(
5464 escape(vdc_name), escape(vdc_name), provider_vdc_ref
5465 )
5466 headers[
5467 "Content-Type"
5468 ] = "application/vnd.vmware.admin.createVdcParams+xml"
5469 response = self.perform_request(
5470 req_type="POST",
5471 url=add_vdc_rest_url,
5472 headers=headers,
5473 data=data,
5474 )
5475
5476 # if all is ok we respond with the content, otherwise None by default
5477 if response.status_code == 201:
5478 return response.text
5479
5480 return None
5481
5482 def get_vapp_details_rest(self, vapp_uuid=None, need_admin_access=False):
5483 """
5484 Method retrieves vApp details from vCloud director
5485
5486 Args:
5487 vapp_uuid - is vapp identifier.
5488
5489 Returns:
5490 The parsed vApp details as a dictionary, or None
5491 """
5492 parsed_respond = {}
5493 vca = None
5494
5495 if need_admin_access:
5496 vca = self.connect_as_admin()
5497 else:
5498 vca = self.client
5499
5500 if not vca:
5501 raise vimconn.VimConnConnectionException("Failed to connect vCD")
5502 if vapp_uuid is None:
5503 return None
5504
5505 url_list = [self.url, "/api/vApp/vapp-", vapp_uuid]
5506 get_vapp_restcall = "".join(url_list)
5507
5508 if vca._session:
5509 headers = {
5510 "Accept": "application/*+xml;version=" + API_VERSION,
5511 "x-vcloud-authorization": vca._session.headers[
5512 "x-vcloud-authorization"
5513 ],
5514 }
5515 response = self.perform_request(
5516 req_type="GET", url=get_vapp_restcall, headers=headers
5517 )
5518
5519 if response.status_code == 403:
5520 if need_admin_access is False:
5521 response = self.retry_rest("GET", get_vapp_restcall)
5522
5523 if response.status_code != requests.codes.ok:
5524 self.logger.debug(
5525 "REST API call {} failed. Return status code {}".format(
5526 get_vapp_restcall, response.status_code
5527 )
5528 )
5529
5530 return parsed_respond
5531
5532 try:
5533 xmlroot_respond = XmlElementTree.fromstring(response.text)
5534 parsed_respond["ovfDescriptorUploaded"] = xmlroot_respond.attrib[
5535 "ovfDescriptorUploaded"
5536 ]
5537 namespaces = {
5538 "vssd": "http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData",
5539 "ovf": "http://schemas.dmtf.org/ovf/envelope/1",
5540 "vmw": "http://www.vmware.com/schema/ovf",
5541 "vm": "http://www.vmware.com/vcloud/v1.5",
5542 "rasd": "http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData",
5543 "vmext": "http://www.vmware.com/vcloud/extension/v1.5",
5544 "xmlns": "http://www.vmware.com/vcloud/v1.5",
5545 }
5546
5547 created_section = xmlroot_respond.find("vm:DateCreated", namespaces)
5548 if created_section is not None:
5549 parsed_respond["created"] = created_section.text
5550
5551 network_section = xmlroot_respond.find(
5552 "vm:NetworkConfigSection/vm:NetworkConfig", namespaces
5553 )
5554 if (
5555 network_section is not None
5556 and "networkName" in network_section.attrib
5557 ):
5558 parsed_respond["networkname"] = network_section.attrib[
5559 "networkName"
5560 ]
5561
5562 ipscopes_section = xmlroot_respond.find(
5563 "vm:NetworkConfigSection/vm:NetworkConfig/vm:Configuration/vm:IpScopes",
5564 namespaces,
5565 )
5566 if ipscopes_section is not None:
5567 for ipscope in ipscopes_section:
5568 for scope in ipscope:
5569 tag_key = scope.tag.split("}")[1]
5570 if tag_key == "IpRanges":
5571                                     ip_ranges = list(scope)
5572 for ipblock in ip_ranges:
5573 for block in ipblock:
5574 parsed_respond[
5575 block.tag.split("}")[1]
5576 ] = block.text
5577 else:
5578 parsed_respond[tag_key] = scope.text
5579
5580 # parse children section for other attrib
5581 children_section = xmlroot_respond.find("vm:Children/", namespaces)
5582 if children_section is not None:
5583 parsed_respond["name"] = children_section.attrib["name"]
5584 parsed_respond["nestedHypervisorEnabled"] = (
5585 children_section.attrib["nestedHypervisorEnabled"]
5586 if "nestedHypervisorEnabled" in children_section.attrib
5587 else None
5588 )
5589 parsed_respond["deployed"] = children_section.attrib["deployed"]
5590 parsed_respond["status"] = children_section.attrib["status"]
5591 parsed_respond["vmuuid"] = children_section.attrib["id"].split(":")[
5592 -1
5593 ]
5594 network_adapter = children_section.find(
5595 "vm:NetworkConnectionSection", namespaces
5596 )
5597 nic_list = []
5598 for adapters in network_adapter:
5599 adapter_key = adapters.tag.split("}")[1]
5600 if adapter_key == "PrimaryNetworkConnectionIndex":
5601 parsed_respond["primarynetwork"] = adapters.text
5602
5603 if adapter_key == "NetworkConnection":
5604 vnic = {}
5605 if "network" in adapters.attrib:
5606 vnic["network"] = adapters.attrib["network"]
5607 for adapter in adapters:
5608 setting_key = adapter.tag.split("}")[1]
5609 vnic[setting_key] = adapter.text
5610 nic_list.append(vnic)
5611
5612 for link in children_section:
5613 if link.tag.split("}")[1] == "Link" and "rel" in link.attrib:
5614 if link.attrib["rel"] == "screen:acquireTicket":
5615 parsed_respond["acquireTicket"] = link.attrib
5616
5617 if link.attrib["rel"] == "screen:acquireMksTicket":
5618 parsed_respond["acquireMksTicket"] = link.attrib
5619
5620 parsed_respond["interfaces"] = nic_list
5621 vCloud_extension_section = children_section.find(
5622 "xmlns:VCloudExtension", namespaces
5623 )
5624 if vCloud_extension_section is not None:
5625 vm_vcenter_info = {}
5626 vim_info = vCloud_extension_section.find(
5627 "vmext:VmVimInfo", namespaces
5628 )
5629 vmext = vim_info.find("vmext:VmVimObjectRef", namespaces)
5630
5631 if vmext is not None:
5632 vm_vcenter_info["vm_moref_id"] = vmext.find(
5633 "vmext:MoRef", namespaces
5634 ).text
5635
5636 parsed_respond["vm_vcenter_info"] = vm_vcenter_info
5637
5638 virtual_hardware_section = children_section.find(
5639 "ovf:VirtualHardwareSection", namespaces
5640 )
5641 vm_virtual_hardware_info = {}
5642 if virtual_hardware_section is not None:
5643 for item in virtual_hardware_section.iterfind(
5644 "ovf:Item", namespaces
5645 ):
5646 if (
5647 item.find("rasd:Description", namespaces).text
5648 == "Hard disk"
5649 ):
5650 disk_size = item.find(
5651 "rasd:HostResource", namespaces
5652 ).attrib["{" + namespaces["vm"] + "}capacity"]
5653 vm_virtual_hardware_info["disk_size"] = disk_size
5654 break
5655
5656 for link in virtual_hardware_section:
5657 if (
5658 link.tag.split("}")[1] == "Link"
5659 and "rel" in link.attrib
5660 ):
5661 if link.attrib["rel"] == "edit" and link.attrib[
5662 "href"
5663 ].endswith("/disks"):
5664 vm_virtual_hardware_info[
5665 "disk_edit_href"
5666 ] = link.attrib["href"]
5667 break
5668
5669 parsed_respond["vm_virtual_hardware"] = vm_virtual_hardware_info
5670 except Exception as exp:
5671 self.logger.info(
5672                 "Error occurred calling REST API to get vApp details: {}".format(
5673 exp
5674 )
5675 )
5676
5677 return parsed_respond
5678
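    # Minimal usage sketch, assuming `vim_conn` is an authenticated vimconnector
    # instance and `vapp_uuid` identifies an existing vApp; the parsed dict
    # returned by get_vapp_details_rest() could be consumed like this:
    #
    #     details = vim_conn.get_vapp_details_rest(vapp_uuid=vapp_uuid)
    #     if details and "vm_virtual_hardware" in details:
    #         print(details.get("vmuuid"), details["vm_virtual_hardware"].get("disk_size"))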
5679 def acquire_console(self, vm_uuid=None):
5680 if vm_uuid is None:
5681 return None
5682
5683 if self.client._session:
5684 headers = {
5685 "Accept": "application/*+xml;version=" + API_VERSION,
5686 "x-vcloud-authorization": self.client._session.headers[
5687 "x-vcloud-authorization"
5688 ],
5689 }
5690 vm_dict = self.get_vapp_details_rest(vapp_uuid=vm_uuid)
5691 console_dict = vm_dict["acquireTicket"]
5692 console_rest_call = console_dict["href"]
5693
5694 response = self.perform_request(
5695 req_type="POST", url=console_rest_call, headers=headers
5696 )
5697
5698 if response.status_code == 403:
5699 response = self.retry_rest("POST", console_rest_call)
5700
5701 if response.status_code == requests.codes.ok:
5702 return response.text
5703
5704 return None
5705
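    # Minimal sketch of requesting a console ticket, assuming `vim_conn` is an
    # authenticated vimconnector instance and `vm_uuid` an existing vApp uuid:
    #
    #     ticket_xml = vim_conn.acquire_console(vm_uuid=vm_uuid)
    #     if ticket_xml is None:
    #         print("could not acquire console ticket")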
5706 def modify_vm_disk(self, vapp_uuid, flavor_disk):
5707 """
5708         Method to modify the VM disk size according to the flavor.
5709
5710 Args:
5711 vapp_uuid - is vapp identifier.
5712 flavor_disk - disk size as specified in VNFD (flavor)
5713
5714 Returns:
5715             True/False status of the disk modification, or None on error.
5716 """
5717 status = None
5718 try:
5719             # Flavor disk is in GB; convert it into MB
5720 flavor_disk = int(flavor_disk) * 1024
5721 vm_details = self.get_vapp_details_rest(vapp_uuid)
5722
5723 if vm_details:
5724 vm_name = vm_details["name"]
5725 self.logger.info("VM: {} flavor_disk :{}".format(vm_name, flavor_disk))
5726
5727 if vm_details and "vm_virtual_hardware" in vm_details:
5728 vm_disk = int(vm_details["vm_virtual_hardware"]["disk_size"])
5729 disk_edit_href = vm_details["vm_virtual_hardware"]["disk_edit_href"]
5730 self.logger.info("VM: {} VM_disk :{}".format(vm_name, vm_disk))
5731
5732 if flavor_disk > vm_disk:
5733 status = self.modify_vm_disk_rest(disk_edit_href, flavor_disk)
5734 self.logger.info(
5735 "Modify disk of VM {} from {} to {} MB".format(
5736 vm_name, vm_disk, flavor_disk
5737 )
5738 )
5739 else:
5740 status = True
5741 self.logger.info("No need to modify disk of VM {}".format(vm_name))
5742
5743 return status
5744 except Exception as exp:
5745             self.logger.info("Error occurred while modifying disk size {}".format(exp))
5746
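    # Minimal sketch, assuming `vim_conn` is an authenticated vimconnector
    # instance: modify_vm_disk() expects the flavor disk size in GB and only
    # grows the disk when the flavor is larger than the current one, e.g.:
    #
    #     ok = vim_conn.modify_vm_disk(vapp_uuid, flavor_disk=40)  # 40 GB
    #     if not ok:
    #         print("disk was not resized")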
5747 def modify_vm_disk_rest(self, disk_href, disk_size):
5748 """
5749         Method to modify the VM disk size via the vCD REST API.
5750
5751 Args:
5752 disk_href - vCD API URL to GET and PUT disk data
5753 disk_size - disk size as specified in VNFD (flavor)
5754
5755 Returns:
5756             True on success, False on task failure, or None on error.
5757 """
5758 if disk_href is None or disk_size is None:
5759 return None
5760
5761 if self.client._session:
5762 headers = {
5763 "Accept": "application/*+xml;version=" + API_VERSION,
5764 "x-vcloud-authorization": self.client._session.headers[
5765 "x-vcloud-authorization"
5766 ],
5767 }
5768 response = self.perform_request(
5769 req_type="GET", url=disk_href, headers=headers
5770 )
5771
5772 if response.status_code == 403:
5773 response = self.retry_rest("GET", disk_href)
5774
5775 if response.status_code != requests.codes.ok:
5776 self.logger.debug(
5777 "GET REST API call {} failed. Return status code {}".format(
5778 disk_href, response.status_code
5779 )
5780 )
5781
5782 return None
5783
5784 try:
5785 lxmlroot_respond = lxmlElementTree.fromstring(response.content)
5786 namespaces = {
5787 prefix: uri for prefix, uri in lxmlroot_respond.nsmap.items() if prefix
5788 }
5789 namespaces["xmlns"] = "http://www.vmware.com/vcloud/v1.5"
5790
5791 for item in lxmlroot_respond.iterfind("xmlns:Item", namespaces):
5792 if item.find("rasd:Description", namespaces).text == "Hard disk":
5793 disk_item = item.find("rasd:HostResource", namespaces)
5794 if disk_item is not None:
5795 disk_item.attrib["{" + namespaces["xmlns"] + "}capacity"] = str(
5796 disk_size
5797 )
5798 break
5799
5800 data = lxmlElementTree.tostring(
5801 lxmlroot_respond, encoding="utf8", method="xml", xml_declaration=True
5802 )
5803
5804 # Send PUT request to modify disk size
5805 headers[
5806 "Content-Type"
5807 ] = "application/vnd.vmware.vcloud.rasdItemsList+xml; charset=ISO-8859-1"
5808
5809 response = self.perform_request(
5810 req_type="PUT", url=disk_href, headers=headers, data=data
5811 )
5812 if response.status_code == 403:
5813 add_headers = {"Content-Type": headers["Content-Type"]}
5814 response = self.retry_rest("PUT", disk_href, add_headers, data)
5815
5816 if response.status_code != 202:
5817 self.logger.debug(
5818 "PUT REST API call {} failed. Return status code {}".format(
5819 disk_href, response.status_code
5820 )
5821 )
5822 else:
5823 modify_disk_task = self.get_task_from_response(response.text)
5824 result = self.client.get_task_monitor().wait_for_success(
5825 task=modify_disk_task
5826 )
5827 if result.get("status") == "success":
5828 return True
5829 else:
5830 return False
5831
5832 return None
5833 except Exception as exp:
5834 self.logger.info(
5835                 "Error occurred calling REST API while modifying disk size: {}".format(exp)
5836 )
5837
5838 return None
5839
5840 def add_serial_device(self, vapp_uuid):
5841 """
5842 Method to attach a serial device to a VM
5843
5844 Args:
5845 vapp_uuid - uuid of vApp/VM
5846
5847 Returns:
5848 """
5849         self.logger.info("Add serial device into vApp {}".format(vapp_uuid))
5850 _, content = self.get_vcenter_content()
5851 vm_moref_id = self.get_vm_moref_id(vapp_uuid)
5852
5853 if vm_moref_id:
5854 try:
5855 host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
5856 self.logger.info(
5857 "VM {} is currently on host {}".format(vm_obj, host_obj)
5858 )
5859 if host_obj and vm_obj:
5860 spec = vim.vm.ConfigSpec()
5861 spec.deviceChange = []
5862 serial_spec = vim.vm.device.VirtualDeviceSpec()
5863 serial_spec.operation = "add"
5864 serial_port = vim.vm.device.VirtualSerialPort()
5865 serial_port.yieldOnPoll = True
5866 backing = serial_port.URIBackingInfo()
5867 backing.serviceURI = "tcp://:65500"
5868 backing.direction = "server"
5869 serial_port.backing = backing
5870 serial_spec.device = serial_port
5871 spec.deviceChange.append(serial_spec)
5872 vm_obj.ReconfigVM_Task(spec=spec)
5873 self.logger.info("Adding serial device to VM {}".format(vm_obj))
5874 except vmodl.MethodFault as error:
5875                 self.logger.error("Error occurred while adding serial device: {}".format(error))
5876
5877 def add_pci_devices(self, vapp_uuid, pci_devices, vmname_andid):
5878 """
5879 Method to attach pci devices to VM
5880
5881 Args:
5882 vapp_uuid - uuid of vApp/VM
5883             pci_devices - pci devices information as specified in VNFD (flavor)
5884
5885 Returns:
5886             The status of the add-PCI-device task, the vm object and
5887             the vcenter_conect object
5888 """
5889 vm_obj = None
5890 self.logger.info(
5891 "Add pci devices {} into vApp {}".format(pci_devices, vapp_uuid)
5892 )
5893 vcenter_conect, content = self.get_vcenter_content()
5894 vm_moref_id = self.get_vm_moref_id(vapp_uuid)
5895
5896 if vm_moref_id:
5897 try:
5898 no_of_pci_devices = len(pci_devices)
5899 if no_of_pci_devices > 0:
5900 # Get VM and its host
5901 host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
5902 self.logger.info(
5903 "VM {} is currently on host {}".format(vm_obj, host_obj)
5904 )
5905
5906 if host_obj and vm_obj:
5907                     # get PCI devices from the host on which the vApp is currently installed
5908 avilable_pci_devices = self.get_pci_devices(
5909 host_obj, no_of_pci_devices
5910 )
5911
5912 if avilable_pci_devices is None:
5913 # find other hosts with active pci devices
5914 (
5915 new_host_obj,
5916 avilable_pci_devices,
5917 ) = self.get_host_and_PCIdevices(content, no_of_pci_devices)
5918
5919 if (
5920 new_host_obj is not None
5921 and avilable_pci_devices is not None
5922 and len(avilable_pci_devices) > 0
5923 ):
5924                             # Migrate VM to the host where PCI devices are available
5925 self.logger.info(
5926 "Relocate VM {} on new host {}".format(
5927 vm_obj, new_host_obj
5928 )
5929 )
5930
5931 task = self.relocate_vm(new_host_obj, vm_obj)
5932 if task is not None:
5933 result = self.wait_for_vcenter_task(
5934 task, vcenter_conect
5935 )
5936 self.logger.info(
5937 "Migrate VM status: {}".format(result)
5938 )
5939 host_obj = new_host_obj
5940 else:
5941 self.logger.info(
5942                                     "Failed to migrate VM {}: relocation task not created".format(vmname_andid)
5943 )
5944 raise vimconn.VimConnNotFoundException(
5945 "Fail to migrate VM : {} to host {}".format(
5946 vmname_andid, new_host_obj
5947 )
5948 )
5949
5950 if (
5951 host_obj is not None
5952 and avilable_pci_devices is not None
5953 and len(avilable_pci_devices) > 0
5954 ):
5955 # Add PCI devices one by one
5956 for pci_device in avilable_pci_devices:
5957 task = self.add_pci_to_vm(host_obj, vm_obj, pci_device)
5958 if task:
5959 status = self.wait_for_vcenter_task(
5960 task, vcenter_conect
5961 )
5962
5963 if status:
5964 self.logger.info(
5965 "Added PCI device {} to VM {}".format(
5966 pci_device, str(vm_obj)
5967 )
5968 )
5969 else:
5970 self.logger.error(
5971 "Fail to add PCI device {} to VM {}".format(
5972 pci_device, str(vm_obj)
5973 )
5974 )
5975
5976 return True, vm_obj, vcenter_conect
5977 else:
5978 self.logger.error(
5979 "Currently there is no host with"
5980                             " {} available PCI devices required for VM {}".format(
5981 no_of_pci_devices, vmname_andid
5982 )
5983 )
5984
5985 raise vimconn.VimConnNotFoundException(
5986 "Currently there is no host with {} "
5987                             "available PCI devices required for VM {}".format(
5988 no_of_pci_devices, vmname_andid
5989 )
5990 )
5991 else:
5992 self.logger.debug(
5993                         "No information about PCI devices {}".format(pci_devices)
5994 )
5995 except vmodl.MethodFault as error:
5996                 self.logger.error("Error occurred while adding PCI devices: {}".format(error))
5997
5998 return None, vm_obj, vcenter_conect
5999
6000 def get_vm_obj(self, content, mob_id):
6001 """
6002         Method to get the vSphere VM object associated with a given moref ID
6003 Args:
6004             content - vCenter content object
6005             mob_id - moref id of the VM
6007
6008 Returns:
6009 VM and host object
6010 """
6011 vm_obj = None
6012 host_obj = None
6013
6014 try:
6015 container = content.viewManager.CreateContainerView(
6016 content.rootFolder, [vim.VirtualMachine], True
6017 )
6018 for vm in container.view:
6019 mobID = vm._GetMoId()
6020
6021 if mobID == mob_id:
6022 vm_obj = vm
6023 host_obj = vm_obj.runtime.host
6024 break
6025 except Exception as exp:
6026 self.logger.error("Error occurred while finding VM object : {}".format(exp))
6027
6028 return host_obj, vm_obj
6029
6030 def get_pci_devices(self, host, need_devices):
6031 """
6032 Method to get the details of pci devices on given host
6033 Args:
6034 host - vSphere host object
6035 need_devices - number of pci devices needed on host
6036
6037 Returns:
6038 array of pci devices
6039 """
6040 all_devices = []
6041 all_device_ids = []
6042 used_devices_ids = []
6043
6044 try:
6045 if host:
6046 pciPassthruInfo = host.config.pciPassthruInfo
6047 pciDevies = host.hardware.pciDevice
6048
6049 for pci_status in pciPassthruInfo:
6050 if pci_status.passthruActive:
6051 for device in pciDevies:
6052 if device.id == pci_status.id:
6053 all_device_ids.append(device.id)
6054 all_devices.append(device)
6055
6056 # check if devices are in use
6057 avalible_devices = all_devices
6058 for vm in host.vm:
6059 if vm.runtime.powerState == vim.VirtualMachinePowerState.poweredOn:
6060 vm_devices = vm.config.hardware.device
6061 for device in vm_devices:
6062 if type(device) is vim.vm.device.VirtualPCIPassthrough:
6063 if device.backing.id in all_device_ids:
6064 for use_device in avalible_devices:
6065 if use_device.id == device.backing.id:
6066 avalible_devices.remove(use_device)
6067
6068 used_devices_ids.append(device.backing.id)
6069 self.logger.debug(
6070                                         "Device {} from devices {} "
6071 "is in use".format(device.backing.id, device)
6072 )
6073 if len(avalible_devices) < need_devices:
6074 self.logger.debug(
6075                     "Host {} does not have {} active devices".format(
6076 host, need_devices
6077 )
6078 )
6079 self.logger.debug(
6080 "found only {} devices {}".format(
6081 len(avalible_devices), avalible_devices
6082 )
6083 )
6084
6085 return None
6086 else:
6087 required_devices = avalible_devices[:need_devices]
6088 self.logger.info(
6089 "Found {} PCI devices on host {} but required only {}".format(
6090 len(avalible_devices), host, need_devices
6091 )
6092 )
6093 self.logger.info(
6094                     "Returning {} devices as {}".format(need_devices, required_devices)
6095 )
6096
6097 return required_devices
6098 except Exception as exp:
6099 self.logger.error(
6100 "Error {} occurred while finding pci devices on host: {}".format(
6101 exp, host
6102 )
6103 )
6104
6105 return None
6106
6107 def get_host_and_PCIdevices(self, content, need_devices):
6108 """
6109         Method to get the details of PCI devices on all hosts
6110
6111 Args:
6112             content - vCenter content object
6113 need_devices - number of pci devices needed on host
6114
6115 Returns:
6116 array of pci devices and host object
6117 """
6118 host_obj = None
6119 pci_device_objs = None
6120
6121 try:
6122 if content:
6123 container = content.viewManager.CreateContainerView(
6124 content.rootFolder, [vim.HostSystem], True
6125 )
6126 for host in container.view:
6127 devices = self.get_pci_devices(host, need_devices)
6128
6129 if devices:
6130 host_obj = host
6131 pci_device_objs = devices
6132 break
6133 except Exception as exp:
6134 self.logger.error(
6135 "Error {} occurred while finding pci devices on host: {}".format(
6136 exp, host_obj
6137 )
6138 )
6139
6140 return host_obj, pci_device_objs
6141
6142 def relocate_vm(self, dest_host, vm):
6143 """
6144         Method to relocate a VM to a new host
6145
6146 Args:
6147 dest_host - vSphere host object
6148 vm - vSphere VM object
6149
6150 Returns:
6151 task object
6152 """
6153 task = None
6154
6155 try:
6156 relocate_spec = vim.vm.RelocateSpec(host=dest_host)
6157 task = vm.Relocate(relocate_spec)
6158 self.logger.info(
6159 "Migrating {} to destination host {}".format(vm, dest_host)
6160 )
6161 except Exception as exp:
6162 self.logger.error(
6163                 "Error occurred while relocating VM {} to new host {}: {}".format(
6164                     vm, dest_host, exp
6165 )
6166 )
6167
6168 return task
6169
6170 def wait_for_vcenter_task(self, task, actionName="job", hideResult=False):
6171 """
6172 Waits and provides updates on a vSphere task
6173 """
6174 while task.info.state == vim.TaskInfo.State.running:
6175 time.sleep(2)
6176
6177 if task.info.state == vim.TaskInfo.State.success:
6178 if task.info.result is not None and not hideResult:
6179 self.logger.info(
6180 "{} completed successfully, result: {}".format(
6181 actionName, task.info.result
6182 )
6183 )
6184 else:
6185 self.logger.info("Task {} completed successfully.".format(actionName))
6186 else:
6187 self.logger.error(
6188 "{} did not complete successfully: {} ".format(
6189 actionName, task.info.error
6190 )
6191 )
6192
6193 return task.info.result
6194
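    # Minimal sketch of the relocate-and-wait pattern used above, assuming
    # `vim_conn` is a vimconnector instance and `vm_obj`/`new_host_obj` are
    # pyVmomi objects obtained from get_vm_obj()/get_host_and_PCIdevices():
    #
    #     task = vim_conn.relocate_vm(new_host_obj, vm_obj)
    #     if task is not None:
    #         result = vim_conn.wait_for_vcenter_task(task, actionName="relocate")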
6195 def add_pci_to_vm(self, host_object, vm_object, host_pci_dev):
6196 """
6197 Method to add pci device in given VM
6198
6199 Args:
6200 host_object - vSphere host object
6201 vm_object - vSphere VM object
6202 host_pci_dev - host_pci_dev must be one of the devices from the
6203 host_object.hardware.pciDevice list
6204 which is configured as a PCI passthrough device
6205
6206 Returns:
6207 task object
6208 """
6209 task = None
6210
6211 if vm_object and host_object and host_pci_dev:
6212 try:
6213 # Add PCI device to VM
6214 pci_passthroughs = vm_object.environmentBrowser.QueryConfigTarget(
6215 host=None
6216 ).pciPassthrough
6217 systemid_by_pciid = {
6218 item.pciDevice.id: item.systemId for item in pci_passthroughs
6219 }
6220
6221 if host_pci_dev.id not in systemid_by_pciid:
6222 self.logger.error(
6223 "Device {} is not a passthrough device ".format(host_pci_dev)
6224 )
6225 return None
6226
6227 deviceId = hex(host_pci_dev.deviceId % 2**16).lstrip("0x")
6228 backing = vim.VirtualPCIPassthroughDeviceBackingInfo(
6229 deviceId=deviceId,
6230 id=host_pci_dev.id,
6231 systemId=systemid_by_pciid[host_pci_dev.id],
6232 vendorId=host_pci_dev.vendorId,
6233 deviceName=host_pci_dev.deviceName,
6234 )
6235
6236 hba_object = vim.VirtualPCIPassthrough(key=-100, backing=backing)
6237 new_device_config = vim.VirtualDeviceConfigSpec(device=hba_object)
6238 new_device_config.operation = "add"
6239 vmConfigSpec = vim.vm.ConfigSpec()
6240 vmConfigSpec.deviceChange = [new_device_config]
6241 task = vm_object.ReconfigVM_Task(spec=vmConfigSpec)
6242 self.logger.info(
6243 "Adding PCI device {} into VM {} from host {} ".format(
6244 host_pci_dev, vm_object, host_object
6245 )
6246 )
6247 except Exception as exp:
6248 self.logger.error(
6249                     "Error occurred while adding pci device {} to VM {}: {}".format(
6250 host_pci_dev, vm_object, exp
6251 )
6252 )
6253
6254 return task
6255
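    # Minimal sketch of the PCI passthrough flow implemented above, assuming
    # `vim_conn` is a vimconnector instance with vCenter credentials configured
    # and `vapp_uuid` an existing vApp uuid:
    #
    #     _, content = vim_conn.get_vcenter_content()
    #     host_obj, pci_devs = vim_conn.get_host_and_PCIdevices(content, need_devices=1)
    #     if host_obj and pci_devs:
    #         _, vm_obj = vim_conn.get_vm_obj(content, vim_conn.get_vm_moref_id(vapp_uuid))
    #         task = vim_conn.add_pci_to_vm(host_obj, vm_obj, pci_devs[0])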
6256 def get_vm_vcenter_info(self):
6257 """
6258         Method to get vCenter connection details
6259
6260         Args:
6261             None
6262
6263 Returns:
6264             A dict with vCenter connection details (ip, port, user, password)
6265 """
6266 vm_vcenter_info = {}
6267
6268 if self.vcenter_ip is not None:
6269 vm_vcenter_info["vm_vcenter_ip"] = self.vcenter_ip
6270 else:
6271 raise vimconn.VimConnException(
6272 message="vCenter IP is not provided."
6273 " Please provide vCenter IP while attaching datacenter "
6274 "to tenant in --config"
6275 )
6276
6277 if self.vcenter_port is not None:
6278 vm_vcenter_info["vm_vcenter_port"] = self.vcenter_port
6279 else:
6280 raise vimconn.VimConnException(
6281 message="vCenter port is not provided."
6282 " Please provide vCenter port while attaching datacenter "
6283 "to tenant in --config"
6284 )
6285
6286 if self.vcenter_user is not None:
6287 vm_vcenter_info["vm_vcenter_user"] = self.vcenter_user
6288 else:
6289 raise vimconn.VimConnException(
6290 message="vCenter user is not provided."
6291 " Please provide vCenter user while attaching datacenter "
6292 "to tenant in --config"
6293 )
6294
6295 if self.vcenter_password is not None:
6296 vm_vcenter_info["vm_vcenter_password"] = self.vcenter_password
6297 else:
6298 raise vimconn.VimConnException(
6299 message="vCenter user password is not provided."
6300 " Please provide vCenter user password while attaching datacenter "
6301 "to tenant in --config"
6302 )
6303
6304 return vm_vcenter_info
6305
6306 def get_vm_pci_details(self, vmuuid):
6307 """
6308 Method to get VM PCI device details from vCenter
6309
6310 Args:
6311             vmuuid - uuid of the vApp/VM
6312
6313 Returns:
6314             dict of PCI devices attached to VM
6315
6316 """
6317 vm_pci_devices_info = {}
6318
6319 try:
6320 _, content = self.get_vcenter_content()
6321 vm_moref_id = self.get_vm_moref_id(vmuuid)
6322 if vm_moref_id:
6323 # Get VM and its host
6324 if content:
6325 host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
6326 if host_obj and vm_obj:
6327 vm_pci_devices_info["host_name"] = host_obj.name
6328 vm_pci_devices_info["host_ip"] = host_obj.config.network.vnic[
6329 0
6330 ].spec.ip.ipAddress
6331
6332 for device in vm_obj.config.hardware.device:
6333 if type(device) == vim.vm.device.VirtualPCIPassthrough:
6334 device_details = {
6335 "devide_id": device.backing.id,
6336 "pciSlotNumber": device.slotInfo.pciSlotNumber,
6337 }
6338 vm_pci_devices_info[
6339 device.deviceInfo.label
6340 ] = device_details
6341 else:
6342 self.logger.error(
6343                     "Cannot connect to vCenter while getting "
6344                     "PCI devices information"
6345 )
6346
6347 return vm_pci_devices_info
6348 except Exception as exp:
6349 self.logger.error(
6350             "Error occurred while getting VM information for VM: {}".format(exp)
6351 )
6352
6353 raise vimconn.VimConnException(message=exp)
6354
6355 def reserve_memory_for_all_vms(self, vapp, memory_mb):
6356 """
6357 Method to reserve memory for all VMs
6358 Args :
6359 vapp - VApp
6360 memory_mb - Memory in MB
6361 Returns:
6362 None
6363 """
6364 self.logger.info("Reserve memory for all VMs")
6365
6366 for vms in vapp.get_all_vms():
6367 vm_id = vms.get("id").split(":")[-1]
6368 url_rest_call = "{}/api/vApp/vm-{}/virtualHardwareSection/memory".format(
6369 self.url, vm_id
6370 )
6371 headers = {
6372 "Accept": "application/*+xml;version=" + API_VERSION,
6373 "x-vcloud-authorization": self.client._session.headers[
6374 "x-vcloud-authorization"
6375 ],
6376 }
6377 headers["Content-Type"] = "application/vnd.vmware.vcloud.rasdItem+xml"
6378 response = self.perform_request(
6379 req_type="GET", url=url_rest_call, headers=headers
6380 )
6381
6382 if response.status_code == 403:
6383 response = self.retry_rest("GET", url_rest_call)
6384
6385 if response.status_code != 200:
6386 self.logger.error(
6387                     "REST call {} failed, reason: {}, "
6388                     "status code: {}".format(
6389 url_rest_call, response.text, response.status_code
6390 )
6391 )
6392 raise vimconn.VimConnException(
6393 "reserve_memory_for_all_vms : Failed to get " "memory"
6394 )
6395
6396 bytexml = bytes(bytearray(response.text, encoding="utf-8"))
6397 contentelem = lxmlElementTree.XML(bytexml)
6398 namespaces = {
6399 prefix: uri for prefix, uri in contentelem.nsmap.items() if prefix
6400 }
6401 namespaces["xmlns"] = "http://www.vmware.com/vcloud/v1.5"
6402
6403 # Find the reservation element in the response
6404 memelem_list = contentelem.findall(".//rasd:Reservation", namespaces)
6405 for memelem in memelem_list:
6406 memelem.text = str(memory_mb)
6407
6408 newdata = lxmlElementTree.tostring(contentelem, pretty_print=True)
6409
6410 response = self.perform_request(
6411 req_type="PUT", url=url_rest_call, headers=headers, data=newdata
6412 )
6413
6414 if response.status_code == 403:
6415 add_headers = {"Content-Type": headers["Content-Type"]}
6416 response = self.retry_rest("PUT", url_rest_call, add_headers, newdata)
6417
6418 if response.status_code != 202:
6419 self.logger.error(
6420                     "REST call {} failed, reason: {}, "
6421                     "status code: {} ".format(
6422 url_rest_call, response.text, response.status_code
6423 )
6424 )
6425 raise vimconn.VimConnException(
6426 "reserve_memory_for_all_vms : Failed to update "
6427 "virtual hardware memory section"
6428 )
6429 else:
6430 mem_task = self.get_task_from_response(response.text)
6431 result = self.client.get_task_monitor().wait_for_success(task=mem_task)
6432
6433 if result.get("status") == "success":
6434 self.logger.info(
6435 "reserve_memory_for_all_vms(): VM {} succeeded ".format(vm_id)
6436 )
6437 else:
6438 self.logger.error(
6439 "reserve_memory_for_all_vms(): VM {} failed ".format(vm_id)
6440 )
6441
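    # Minimal sketch, assuming `vim_conn` is a vimconnector instance and `vapp`
    # is a pyvcloud VApp object obtained elsewhere: reserve 4 GB for every VM
    # of the vApp (the value is written into the RASD Reservation element):
    #
    #     vim_conn.reserve_memory_for_all_vms(vapp, memory_mb=4096)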
6442 def connect_vapp_to_org_vdc_network(self, vapp_id, net_name):
6443 """
6444 Configure VApp network config with org vdc network
6445 Args :
6446             vapp_id - id of the vApp, net_name - name of the org VDC network
6447 Returns:
6448 None
6449 """
6450
6451 self.logger.info(
6452 "Connecting vapp {} to org vdc network {}".format(vapp_id, net_name)
6453 )
6454
6455 url_rest_call = "{}/api/vApp/vapp-{}/networkConfigSection/".format(
6456 self.url, vapp_id
6457 )
6458
6459 headers = {
6460 "Accept": "application/*+xml;version=" + API_VERSION,
6461 "x-vcloud-authorization": self.client._session.headers[
6462 "x-vcloud-authorization"
6463 ],
6464 }
6465 response = self.perform_request(
6466 req_type="GET", url=url_rest_call, headers=headers
6467 )
6468
6469 if response.status_code == 403:
6470 response = self.retry_rest("GET", url_rest_call)
6471
6472 if response.status_code != 200:
6473 self.logger.error(
6474             "REST call {} failed, reason: {}, "
6475             "status code: {}".format(
6476 url_rest_call, response.text, response.status_code
6477 )
6478 )
6479 raise vimconn.VimConnException(
6480 "connect_vapp_to_org_vdc_network : Failed to get "
6481 "network config section"
6482 )
6483
6484 data = response.text
6485 headers[
6486 "Content-Type"
6487 ] = "application/vnd.vmware.vcloud.networkConfigSection+xml"
6488 net_id = self.get_network_id_by_name(net_name)
6489 if not net_id:
6490 raise vimconn.VimConnException(
6491 "connect_vapp_to_org_vdc_network : Failed to find " "existing network"
6492 )
6493
6494 bytexml = bytes(bytearray(data, encoding="utf-8"))
6495 newelem = lxmlElementTree.XML(bytexml)
6496 namespaces = {prefix: uri for prefix, uri in newelem.nsmap.items() if prefix}
6497 namespaces["xmlns"] = "http://www.vmware.com/vcloud/v1.5"
6498 nwcfglist = newelem.findall(".//xmlns:NetworkConfig", namespaces)
6499
6500 # VCD 9.7 returns an incorrect parentnetwork element. Fix it before PUT operation
6501 parentnetworklist = newelem.findall(".//xmlns:ParentNetwork", namespaces)
6502 if parentnetworklist:
6503 for pn in parentnetworklist:
6504 if "href" not in pn.keys():
6505 id_val = pn.get("id")
6506 href_val = "{}/api/network/{}".format(self.url, id_val)
6507 pn.set("href", href_val)
6508
6509 newstr = """<NetworkConfig networkName="{}">
6510 <Configuration>
6511 <ParentNetwork href="{}/api/network/{}"/>
6512 <FenceMode>bridged</FenceMode>
6513 </Configuration>
6514 </NetworkConfig>
6515 """.format(
6516 net_name, self.url, net_id
6517 )
6518 newcfgelem = lxmlElementTree.fromstring(newstr)
6519 if nwcfglist:
6520 nwcfglist[0].addnext(newcfgelem)
6521
6522 newdata = lxmlElementTree.tostring(newelem, pretty_print=True)
6523
6524 response = self.perform_request(
6525 req_type="PUT", url=url_rest_call, headers=headers, data=newdata
6526 )
6527
6528 if response.status_code == 403:
6529 add_headers = {"Content-Type": headers["Content-Type"]}
6530 response = self.retry_rest("PUT", url_rest_call, add_headers, newdata)
6531
6532 if response.status_code != 202:
6533 self.logger.error(
6534                 "REST call {} failed, reason: {}, "
6535                 "status code: {} ".format(
6536 url_rest_call, response.text, response.status_code
6537 )
6538 )
6539 raise vimconn.VimConnException(
6540 "connect_vapp_to_org_vdc_network : Failed to update "
6541 "network config section"
6542 )
6543 else:
6544 vapp_task = self.get_task_from_response(response.text)
6545 result = self.client.get_task_monitor().wait_for_success(task=vapp_task)
6546 if result.get("status") == "success":
6547 self.logger.info(
6548 "connect_vapp_to_org_vdc_network(): Vapp {} connected to "
6549 "network {}".format(vapp_id, net_name)
6550 )
6551 else:
6552 self.logger.error(
6553 "connect_vapp_to_org_vdc_network(): Vapp {} failed to "
6554 "connect to network {}".format(vapp_id, net_name)
6555 )
6556
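    # Minimal sketch, assuming `vim_conn` is a vimconnector instance, `vapp_id`
    # is the vApp uuid and "mgmt-net" is an existing org VDC network name:
    #
    #     vim_conn.connect_vapp_to_org_vdc_network(vapp_id, "mgmt-net")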
6557 def remove_primary_network_adapter_from_all_vms(self, vapp):
6558 """
6559         Method to remove the primary network adapter from all VMs of a vApp
6560 Args :
6561 vapp - VApp
6562 Returns:
6563 None
6564 """
6565         self.logger.info("Removing primary network adapter from all VMs")
6566
6567 for vms in vapp.get_all_vms():
6568 vm_id = vms.get("id").split(":")[-1]
6569
6570 url_rest_call = "{}/api/vApp/vm-{}/networkConnectionSection/".format(
6571 self.url, vm_id
6572 )
6573
6574 headers = {
6575 "Accept": "application/*+xml;version=" + API_VERSION,
6576 "x-vcloud-authorization": self.client._session.headers[
6577 "x-vcloud-authorization"
6578 ],
6579 }
6580 response = self.perform_request(
6581 req_type="GET", url=url_rest_call, headers=headers
6582 )
6583
6584 if response.status_code == 403:
6585 response = self.retry_rest("GET", url_rest_call)
6586
6587 if response.status_code != 200:
6588 self.logger.error(
6589                     "REST call {} failed, reason: {}, "
6590                     "status code: {}".format(
6591 url_rest_call, response.text, response.status_code
6592 )
6593 )
6594 raise vimconn.VimConnException(
6595 "remove_primary_network_adapter : Failed to get "
6596 "network connection section"
6597 )
6598
6599 data = response.text
6600 data = data.split('<Link rel="edit"')[0]
6601
6602 headers[
6603 "Content-Type"
6604 ] = "application/vnd.vmware.vcloud.networkConnectionSection+xml"
6605
6606 newdata = """<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
6607 <NetworkConnectionSection xmlns="http://www.vmware.com/vcloud/v1.5"
6608 xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1"
6609 xmlns:vssd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData"
6610 xmlns:common="http://schemas.dmtf.org/wbem/wscim/1/common"
6611 xmlns:rasd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData"
6612 xmlns:vmw="http://www.vmware.com/schema/ovf"
6613 xmlns:ovfenv="http://schemas.dmtf.org/ovf/environment/1"
6614 xmlns:vmext="http://www.vmware.com/vcloud/extension/v1.5"
6615 xmlns:ns9="http://www.vmware.com/vcloud/versions"
6616 href="{url}" type="application/vnd.vmware.vcloud.networkConnectionSection+xml"
6617 ovf:required="false">
6618 <ovf:Info>Specifies the available VM network connections</ovf:Info>
6619 <PrimaryNetworkConnectionIndex>0</PrimaryNetworkConnectionIndex>
6620 <Link rel="edit" href="{url}"
6621 type="application/vnd.vmware.vcloud.networkConnectionSection+xml"/>
6622 </NetworkConnectionSection>""".format(
6623 url=url_rest_call
6624 )
6625 response = self.perform_request(
6626 req_type="PUT", url=url_rest_call, headers=headers, data=newdata
6627 )
6628
6629 if response.status_code == 403:
6630 add_headers = {"Content-Type": headers["Content-Type"]}
6631 response = self.retry_rest("PUT", url_rest_call, add_headers, newdata)
6632
6633 if response.status_code != 202:
6634 self.logger.error(
6635                     "REST call {} failed, reason: {}, "
6636                     "status code: {} ".format(
6637 url_rest_call, response.text, response.status_code
6638 )
6639 )
6640 raise vimconn.VimConnException(
6641 "remove_primary_network_adapter : Failed to update "
6642 "network connection section"
6643 )
6644 else:
6645 nic_task = self.get_task_from_response(response.text)
6646 result = self.client.get_task_monitor().wait_for_success(task=nic_task)
6647 if result.get("status") == "success":
6648 self.logger.info(
6649                         "remove_primary_network_adapter(): removed primary "
6650                         "network adapter from VM {}".format(vm_id)
6651 )
6652 else:
6653 self.logger.error(
6654                         "remove_primary_network_adapter(): failed to remove "
6655                         "primary network adapter from VM {}".format(vm_id)
6656 )
6657
6658 def add_network_adapter_to_vms(
6659 self, vapp, network_name, primary_nic_index, nicIndex, net, nic_type=None
6660 ):
6661 """
6662         Method to add a network adapter to all VMs of a vApp
6663 Args :
6664 network_name - name of network
6665 primary_nic_index - int value for primary nic index
6666 nicIndex - int value for nic index
6667             nic_type - network adapter model to attach to the vm
6668 Returns:
6669 None
6670 """
6671
6672 self.logger.info(
6673 "Add network adapter to VM: network_name {} nicIndex {} nic_type {}".format(
6674 network_name, nicIndex, nic_type
6675 )
6676 )
6677 try:
6678 ip_address = None
6679 floating_ip = False
6680 mac_address = None
6681 if "floating_ip" in net:
6682 floating_ip = net["floating_ip"]
6683
6684 # Stub for ip_address feature
6685 if "ip_address" in net:
6686 ip_address = net["ip_address"]
6687
6688 if "mac_address" in net:
6689 mac_address = net["mac_address"]
6690
6691 if floating_ip:
6692 allocation_mode = "POOL"
6693 elif ip_address:
6694 allocation_mode = "MANUAL"
6695 else:
6696 allocation_mode = "DHCP"
6697
6698 if not nic_type:
6699 for vms in vapp.get_all_vms():
6700 vm_id = vms.get("id").split(":")[-1]
6701
6702 url_rest_call = (
6703 "{}/api/vApp/vm-{}/networkConnectionSection/".format(
6704 self.url, vm_id
6705 )
6706 )
6707
6708 headers = {
6709 "Accept": "application/*+xml;version=" + API_VERSION,
6710 "x-vcloud-authorization": self.client._session.headers[
6711 "x-vcloud-authorization"
6712 ],
6713 }
6714 response = self.perform_request(
6715 req_type="GET", url=url_rest_call, headers=headers
6716 )
6717
6718 if response.status_code == 403:
6719 response = self.retry_rest("GET", url_rest_call)
6720
6721 if response.status_code != 200:
6722 self.logger.error(
6723                             "REST call {} failed, reason: {}, "
6724                             "status code: {}".format(
6725 url_rest_call, response.text, response.status_code
6726 )
6727 )
6728 raise vimconn.VimConnException(
6729 "add_network_adapter_to_vms : Failed to get "
6730 "network connection section"
6731 )
6732
6733 data = response.text
6734 data = data.split('<Link rel="edit"')[0]
6735 if "<PrimaryNetworkConnectionIndex>" not in data:
6736 self.logger.debug("add_network_adapter PrimaryNIC not in data")
6737 item = """<PrimaryNetworkConnectionIndex>{}</PrimaryNetworkConnectionIndex>
6738 <NetworkConnection network="{}">
6739 <NetworkConnectionIndex>{}</NetworkConnectionIndex>
6740 <IsConnected>true</IsConnected>
6741 <IpAddressAllocationMode>{}</IpAddressAllocationMode>
6742 </NetworkConnection>""".format(
6743 primary_nic_index, network_name, nicIndex, allocation_mode
6744 )
6745
6746 # Stub for ip_address feature
6747 if ip_address:
6748 ip_tag = "<IpAddress>{}</IpAddress>".format(ip_address)
6749 item = item.replace(
6750 "</NetworkConnectionIndex>\n",
6751 "</NetworkConnectionIndex>\n{}\n".format(ip_tag),
6752 )
6753
6754 if mac_address:
6755 mac_tag = "<MACAddress>{}</MACAddress>".format(mac_address)
6756 item = item.replace(
6757 "</IsConnected>\n",
6758 "</IsConnected>\n{}\n".format(mac_tag),
6759 )
6760
6761 data = data.replace(
6762 "</ovf:Info>\n",
6763 "</ovf:Info>\n{}\n</NetworkConnectionSection>".format(item),
6764 )
6765 else:
6766 self.logger.debug("add_network_adapter PrimaryNIC in data")
6767 new_item = """<NetworkConnection network="{}">
6768 <NetworkConnectionIndex>{}</NetworkConnectionIndex>
6769 <IsConnected>true</IsConnected>
6770 <IpAddressAllocationMode>{}</IpAddressAllocationMode>
6771 </NetworkConnection>""".format(
6772 network_name, nicIndex, allocation_mode
6773 )
6774
6775 # Stub for ip_address feature
6776 if ip_address:
6777 ip_tag = "<IpAddress>{}</IpAddress>".format(ip_address)
6778 new_item = new_item.replace(
6779 "</NetworkConnectionIndex>\n",
6780 "</NetworkConnectionIndex>\n{}\n".format(ip_tag),
6781 )
6782
6783 if mac_address:
6784 mac_tag = "<MACAddress>{}</MACAddress>".format(mac_address)
6785 new_item = new_item.replace(
6786 "</IsConnected>\n",
6787 "</IsConnected>\n{}\n".format(mac_tag),
6788 )
6789
6790 data = data + new_item + "</NetworkConnectionSection>"
6791
6792 headers[
6793 "Content-Type"
6794 ] = "application/vnd.vmware.vcloud.networkConnectionSection+xml"
6795
6796 response = self.perform_request(
6797 req_type="PUT", url=url_rest_call, headers=headers, data=data
6798 )
6799
6800 if response.status_code == 403:
6801 add_headers = {"Content-Type": headers["Content-Type"]}
6802 response = self.retry_rest(
6803 "PUT", url_rest_call, add_headers, data
6804 )
6805
6806 if response.status_code != 202:
6807 self.logger.error(
6808                             "REST call {} failed, reason: {}, "
6809                             "status code: {} ".format(
6810 url_rest_call, response.text, response.status_code
6811 )
6812 )
6813 raise vimconn.VimConnException(
6814 "add_network_adapter_to_vms : Failed to update "
6815 "network connection section"
6816 )
6817 else:
6818 nic_task = self.get_task_from_response(response.text)
6819 result = self.client.get_task_monitor().wait_for_success(
6820 task=nic_task
6821 )
6822
6823 if result.get("status") == "success":
6824 self.logger.info(
6825                                 "add_network_adapter_to_vms(): VM {} connected to "
6826 "default NIC type".format(vm_id)
6827 )
6828 else:
6829 self.logger.error(
6830 "add_network_adapter_to_vms(): VM {} failed to "
6831 "connect NIC type".format(vm_id)
6832 )
6833 else:
6834 for vms in vapp.get_all_vms():
6835 vm_id = vms.get("id").split(":")[-1]
6836
6837 url_rest_call = (
6838 "{}/api/vApp/vm-{}/networkConnectionSection/".format(
6839 self.url, vm_id
6840 )
6841 )
6842
6843 headers = {
6844 "Accept": "application/*+xml;version=" + API_VERSION,
6845 "x-vcloud-authorization": self.client._session.headers[
6846 "x-vcloud-authorization"
6847 ],
6848 }
6849 response = self.perform_request(
6850 req_type="GET", url=url_rest_call, headers=headers
6851 )
6852
6853 if response.status_code == 403:
6854 response = self.retry_rest("GET", url_rest_call)
6855
6856 if response.status_code != 200:
6857 self.logger.error(
6858                             "REST call {} failed, reason: {}, "
6859                             "status code: {}".format(
6860 url_rest_call, response.text, response.status_code
6861 )
6862 )
6863 raise vimconn.VimConnException(
6864 "add_network_adapter_to_vms : Failed to get "
6865 "network connection section"
6866 )
6867 data = response.text
6868 data = data.split('<Link rel="edit"')[0]
6869 vcd_netadapter_type = nic_type
6870
6871 if nic_type in ["SR-IOV", "VF"]:
6872 vcd_netadapter_type = "SRIOVETHERNETCARD"
6873
6874 if "<PrimaryNetworkConnectionIndex>" not in data:
6875 self.logger.debug(
6876 "add_network_adapter PrimaryNIC not in data nic_type {}".format(
6877 nic_type
6878 )
6879 )
6880 item = """<PrimaryNetworkConnectionIndex>{}</PrimaryNetworkConnectionIndex>
6881 <NetworkConnection network="{}">
6882 <NetworkConnectionIndex>{}</NetworkConnectionIndex>
6883 <IsConnected>true</IsConnected>
6884 <IpAddressAllocationMode>{}</IpAddressAllocationMode>
6885 <NetworkAdapterType>{}</NetworkAdapterType>
6886 </NetworkConnection>""".format(
6887 primary_nic_index,
6888 network_name,
6889 nicIndex,
6890 allocation_mode,
6891 vcd_netadapter_type,
6892 )
6893
6894 # Stub for ip_address feature
6895 if ip_address:
6896 ip_tag = "<IpAddress>{}</IpAddress>".format(ip_address)
6897 item = item.replace(
6898 "</NetworkConnectionIndex>\n",
6899 "</NetworkConnectionIndex>\n{}\n".format(ip_tag),
6900 )
6901
6902 if mac_address:
6903 mac_tag = "<MACAddress>{}</MACAddress>".format(mac_address)
6904 item = item.replace(
6905 "</IsConnected>\n",
6906 "</IsConnected>\n{}\n".format(mac_tag),
6907 )
6908
6909 data = data.replace(
6910 "</ovf:Info>\n",
6911 "</ovf:Info>\n{}\n</NetworkConnectionSection>".format(item),
6912 )
6913 else:
6914 self.logger.debug(
6915 "add_network_adapter PrimaryNIC in data nic_type {}".format(
6916 nic_type
6917 )
6918 )
6919 new_item = """<NetworkConnection network="{}">
6920 <NetworkConnectionIndex>{}</NetworkConnectionIndex>
6921 <IsConnected>true</IsConnected>
6922 <IpAddressAllocationMode>{}</IpAddressAllocationMode>
6923 <NetworkAdapterType>{}</NetworkAdapterType>
6924 </NetworkConnection>""".format(
6925 network_name, nicIndex, allocation_mode, vcd_netadapter_type
6926 )
6927
6928 # Stub for ip_address feature
6929 if ip_address:
6930 ip_tag = "<IpAddress>{}</IpAddress>".format(ip_address)
6931 new_item = new_item.replace(
6932 "</NetworkConnectionIndex>\n",
6933 "</NetworkConnectionIndex>\n{}\n".format(ip_tag),
6934 )
6935
6936 if mac_address:
6937 mac_tag = "<MACAddress>{}</MACAddress>".format(mac_address)
6938 new_item = new_item.replace(
6939 "</IsConnected>\n",
6940 "</IsConnected>\n{}\n".format(mac_tag),
6941 )
6942
6943 data = data + new_item + "</NetworkConnectionSection>"
6944
6945 headers[
6946 "Content-Type"
6947 ] = "application/vnd.vmware.vcloud.networkConnectionSection+xml"
6948
6949 response = self.perform_request(
6950 req_type="PUT", url=url_rest_call, headers=headers, data=data
6951 )
6952
6953 if response.status_code == 403:
6954 add_headers = {"Content-Type": headers["Content-Type"]}
6955 response = self.retry_rest(
6956 "PUT", url_rest_call, add_headers, data
6957 )
6958
6959 if response.status_code != 202:
6960 self.logger.error(
6961                             "REST call {} failed, reason: {}, "
6962                             "status code: {}".format(
6963 url_rest_call, response.text, response.status_code
6964 )
6965 )
6966 raise vimconn.VimConnException(
6967 "add_network_adapter_to_vms : Failed to update "
6968 "network connection section"
6969 )
6970 else:
6971 nic_task = self.get_task_from_response(response.text)
6972 result = self.client.get_task_monitor().wait_for_success(
6973 task=nic_task
6974 )
6975
6976 if result.get("status") == "success":
6977 self.logger.info(
6978 "add_network_adapter_to_vms(): VM {} "
6979                             "connected to NIC type {}".format(vm_id, nic_type)
6980 )
6981 else:
6982 self.logger.error(
6983 "add_network_adapter_to_vms(): VM {} "
6984 "failed to connect NIC type {}".format(vm_id, nic_type)
6985 )
6986 except Exception as exp:
6987 self.logger.error(
6988 "add_network_adapter_to_vms() : exception occurred "
6989 "while adding Network adapter"
6990 )
6991
6992 raise vimconn.VimConnException(message=exp)
6993
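    # Minimal sketch of attaching an SR-IOV NIC, assuming `vim_conn` is a
    # vimconnector instance and `vapp` a pyvcloud VApp object. The allocation
    # mode is derived from the `net` dict as implemented above (POOL for
    # floating_ip, MANUAL for a fixed ip_address, DHCP otherwise):
    #
    #     net = {"floating_ip": False, "ip_address": "10.0.0.10"}
    #     vim_conn.add_network_adapter_to_vms(
    #         vapp, "data-net", primary_nic_index=0, nicIndex=1, net=net, nic_type="SR-IOV"
    #     )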
6994 def set_numa_affinity(self, vmuuid, paired_threads_id):
6995 """
6996         Method to assign NUMA affinity in the VM configuration parameters
6997 Args :
6998 vmuuid - vm uuid
6999 paired_threads_id - one or more virtual processor
7000 numbers
7001 Returns:
7002             None on success; raises VimConnException on failure
7003 """
7004 try:
7005 vcenter_conect, content = self.get_vcenter_content()
7006 vm_moref_id = self.get_vm_moref_id(vmuuid)
7007 _, vm_obj = self.get_vm_obj(content, vm_moref_id)
7008
7009 if vm_obj:
7010 config_spec = vim.vm.ConfigSpec()
7011 config_spec.extraConfig = []
7012 opt = vim.option.OptionValue()
7013 opt.key = "numa.nodeAffinity"
7014 opt.value = str(paired_threads_id)
7015 config_spec.extraConfig.append(opt)
7016 task = vm_obj.ReconfigVM_Task(config_spec)
7017
7018 if task:
7019 self.wait_for_vcenter_task(task, vcenter_conect)
7020 extra_config = vm_obj.config.extraConfig
7021 flag = False
7022
7023 for opts in extra_config:
7024 if "numa.nodeAffinity" in opts.key:
7025 flag = True
7026 self.logger.info(
7027                             "set_numa_affinity: Successfully assigned numa affinity "
7028 "value {} for vm {}".format(opt.value, vm_obj)
7029 )
7030
7031 if flag:
7032 return
7033 else:
7034 self.logger.error("set_numa_affinity: Failed to assign numa affinity")
7035 except Exception as exp:
7036 self.logger.error(
7037 "set_numa_affinity : exception occurred while setting numa affinity "
7038                 "for VM {} (moref {})".format(vm_obj, vm_moref_id)
7039 )
7040
7041 raise vimconn.VimConnException(
7042 "set_numa_affinity : Error {} failed to assign numa "
7043 "affinity".format(exp)
7044 )
7045
7046 def cloud_init(self, vapp, cloud_config):
7047 """
7048 Method to inject ssh-key
7049 vapp - vapp object
7050 cloud_config a dictionary with:
7051 'key-pairs': (optional) list of strings with the public key to be inserted to the default user
7052 'users': (optional) list of users to be inserted, each item is a dict with:
7053 'name': (mandatory) user name,
7054 'key-pairs': (optional) list of strings with the public key to be inserted to the user
7055 'user-data': (optional) can be a string with the text script to be passed directly to cloud-init,
7056 or a list of strings, each one contains a script to be passed, usually with a MIMEmultipart file
7057 'config-files': (optional). List of files to be transferred. Each item is a dict with:
7058 'dest': (mandatory) string with the destination absolute path
7059 'encoding': (optional, by default text). Can be one of:
7060 'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
7061 'content' (mandatory): string with the content of the file
7062 'permissions': (optional) string with file permissions, typically octal notation '0644'
7063 'owner': (optional) file owner, string with the format 'owner:group'
7064             'boot-data-drive': boolean to indicate if user-data must be passed using a boot drive (hard disk)
7065 """
7066 try:
7067 if not isinstance(cloud_config, dict):
7068 raise Exception(
7069 "cloud_init : parameter cloud_config is not a dictionary"
7070 )
7071 else:
7072 key_pairs = []
7073 userdata = []
7074
7075 if "key-pairs" in cloud_config:
7076 key_pairs = cloud_config["key-pairs"]
7077
7078 if "users" in cloud_config:
7079 userdata = cloud_config["users"]
7080
7081 self.logger.debug("cloud_init : Guest os customization started..")
7082 customize_script = self.format_script(
7083 key_pairs=key_pairs, users_list=userdata
7084 )
7085 customize_script = customize_script.replace("&", "&amp;")
7086 self.guest_customization(vapp, customize_script)
7087 except Exception as exp:
7088 self.logger.error(
7089 "cloud_init : exception occurred while injecting " "ssh-key"
7090 )
7091
7092 raise vimconn.VimConnException(
7093 "cloud_init : Error {} failed to inject " "ssh-key".format(exp)
7094 )
7095
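    # Minimal sketch of the cloud_config structure described above, assuming
    # `vim_conn` is a vimconnector instance and `vapp` a pyvcloud VApp object:
    #
    #     cloud_config = {
    #         "key-pairs": ["ssh-rsa AAAA... operator@osm"],
    #         "users": [{"name": "osm", "key-pairs": ["ssh-rsa AAAA... osm@host"]}],
    #     }
    #     vim_conn.cloud_init(vapp, cloud_config)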
7096 def format_script(self, key_pairs=[], users_list=[]):
7097 bash_script = """#!/bin/sh
7098 echo performing customization tasks with param $1 at `date "+DATE: %Y-%m-%d - TIME: %H:%M:%S"`>> /root/customization.log
7099 if [ "$1" = "precustomization" ];then
7100 echo performing precustomization tasks on `date "+DATE: %Y-%m-%d - TIME: %H:%M:%S"` >> /root/customization.log
7101 """
7102
7103 keys = "\n".join(key_pairs)
7104 if keys:
7105 keys_data = """
7106 if [ ! -d /root/.ssh ];then
7107 mkdir /root/.ssh
7108 chown root:root /root/.ssh
7109 chmod 700 /root/.ssh
7110 touch /root/.ssh/authorized_keys
7111 chown root:root /root/.ssh/authorized_keys
7112 chmod 600 /root/.ssh/authorized_keys
7113 # make centos with selinux happy
7114 which restorecon && restorecon -Rv /root/.ssh
7115 else
7116 touch /root/.ssh/authorized_keys
7117 chown root:root /root/.ssh/authorized_keys
7118 chmod 600 /root/.ssh/authorized_keys
7119 fi
7120 echo '{key}' >> /root/.ssh/authorized_keys
7121 """.format(
7122 key=keys
7123 )
7124
7125 bash_script += keys_data
7126
7127 for user in users_list:
7128 if "name" in user:
7129 user_name = user["name"]
7130
7131 if "key-pairs" in user:
7132 user_keys = "\n".join(user["key-pairs"])
7133 else:
7134 user_keys = None
7135
7136 add_user_name = """
7137 useradd -d /home/{user_name} -m -g users -s /bin/bash {user_name}
7138 """.format(
7139 user_name=user_name
7140 )
7141
7142 bash_script += add_user_name
7143
7144 if user_keys:
7145 user_keys_data = """
7146 mkdir /home/{user_name}/.ssh
7147 chown {user_name}:{user_name} /home/{user_name}/.ssh
7148 chmod 700 /home/{user_name}/.ssh
7149 touch /home/{user_name}/.ssh/authorized_keys
7150 chown {user_name}:{user_name} /home/{user_name}/.ssh/authorized_keys
7151 chmod 600 /home/{user_name}/.ssh/authorized_keys
7152 # make centos with selinux happy
7153 which restorecon && restorecon -Rv /home/{user_name}/.ssh
7154 echo '{user_key}' >> /home/{user_name}/.ssh/authorized_keys
7155 """.format(
7156 user_name=user_name, user_key=user_keys
7157 )
7158 bash_script += user_keys_data
7159
7160 return bash_script + "\n\tfi"
7161
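    # Minimal sketch, assuming the same inputs as cloud_init(): format_script()
    # returns a pre-customization shell script that injects the given keys and
    # creates the listed users:
    #
    #     script = vim_conn.format_script(
    #         key_pairs=["ssh-rsa AAAA... operator@osm"],
    #         users_list=[{"name": "osm", "key-pairs": ["ssh-rsa AAAA..."]}],
    #     )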
7162 def guest_customization(self, vapp, customize_script):
7163 """
7164 Method to customize guest os
7165 vapp - Vapp object
7166 customize_script - Customize script to be run at first boot of VM.
7167 """
7168 for vm in vapp.get_all_vms():
7169 vm_id = vm.get("id").split(":")[-1]
7170 vm_name = vm.get("name")
7171 vm_name = vm_name.replace("_", "-")
7172
7173 vm_customization_url = (
7174 "{}/api/vApp/vm-{}/guestCustomizationSection/".format(self.url, vm_id)
7175 )
7176 headers = {
7177 "Accept": "application/*+xml;version=" + API_VERSION,
7178 "x-vcloud-authorization": self.client._session.headers[
7179 "x-vcloud-authorization"
7180 ],
7181 }
7182
7183 headers[
7184 "Content-Type"
7185 ] = "application/vnd.vmware.vcloud.guestCustomizationSection+xml"
7186
7187 data = """<GuestCustomizationSection
7188 xmlns="http://www.vmware.com/vcloud/v1.5"
7189 xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1"
7190 ovf:required="false" href="{}"
7191 type="application/vnd.vmware.vcloud.guestCustomizationSection+xml">
7192 <ovf:Info>Specifies Guest OS Customization Settings</ovf:Info>
7193 <Enabled>true</Enabled>
7194 <ChangeSid>false</ChangeSid>
7195 <VirtualMachineId>{}</VirtualMachineId>
7196 <JoinDomainEnabled>false</JoinDomainEnabled>
7197 <UseOrgSettings>false</UseOrgSettings>
7198 <AdminPasswordEnabled>false</AdminPasswordEnabled>
7199 <AdminPasswordAuto>true</AdminPasswordAuto>
7200 <AdminAutoLogonEnabled>false</AdminAutoLogonEnabled>
7201 <AdminAutoLogonCount>0</AdminAutoLogonCount>
7202 <ResetPasswordRequired>false</ResetPasswordRequired>
7203 <CustomizationScript>{}</CustomizationScript>
7204 <ComputerName>{}</ComputerName>
7205 <Link href="{}"
7206 type="application/vnd.vmware.vcloud.guestCustomizationSection+xml" rel="edit"/>
7207 </GuestCustomizationSection>
7208 """.format(
7209 vm_customization_url,
7210 vm_id,
7211 customize_script,
7212 vm_name,
7213 vm_customization_url,
7214 )
7215
7216 response = self.perform_request(
7217 req_type="PUT", url=vm_customization_url, headers=headers, data=data
7218 )
7219 if response.status_code == 202:
7220 guest_task = self.get_task_from_response(response.text)
7221 self.client.get_task_monitor().wait_for_success(task=guest_task)
7222 self.logger.info(
7223 "guest_customization : customized guest os task "
7224 "completed for VM {}".format(vm_name)
7225 )
7226 else:
7227 self.logger.error(
7228                     "guest_customization : guest os customization task "
7229                     "failed for VM {}".format(vm_name)
7230 )
7231
7232 raise vimconn.VimConnException(
7233                     "guest_customization : failed to perform "
7234 "guest os customization on VM {}".format(vm_name)
7235 )
7236
7237 def add_new_disk(self, vapp_uuid, disk_size):
7238 """
7239 Method to create an empty vm disk
7240
7241 Args:
7242 vapp_uuid - is vapp identifier.
7243 disk_size - size of disk to be created in GB
7244
7245 Returns:
7246 None
7247 """
7248 status = False
7249 vm_details = None
7250 try:
7251 # Disk size in GB, convert it into MB
7252 if disk_size is not None:
7253 disk_size_mb = int(disk_size) * 1024
7254 vm_details = self.get_vapp_details_rest(vapp_uuid)
7255
7256 if vm_details and "vm_virtual_hardware" in vm_details:
7257 self.logger.info(
7258 "Adding disk to VM: {} disk size:{}GB".format(
7259 vm_details["name"], disk_size
7260 )
7261 )
7262 disk_href = vm_details["vm_virtual_hardware"]["disk_edit_href"]
7263 status = self.add_new_disk_rest(disk_href, disk_size_mb)
7264 except Exception as exp:
7265 msg = "Error occurred while creating new disk {}.".format(exp)
7266 self.rollback_newvm(vapp_uuid, msg)
7267
7268 if status:
7269 self.logger.info(
7270 "Added new disk to VM: {} disk size:{}GB".format(
7271 vm_details["name"], disk_size
7272 )
7273 )
7274 else:
7275 # If failed to add disk, delete VM
7276 msg = "add_new_disk: Failed to add new disk to {}".format(
7277 vm_details["name"]
7278 )
7279 self.rollback_newvm(vapp_uuid, msg)
7280
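    # Minimal sketch, assuming `vim_conn` is a vimconnector instance: add a new
    # empty 10 GB disk to the vApp (add_new_disk() converts GB to MB before the
    # REST call and rolls the VM back on failure):
    #
    #     vim_conn.add_new_disk(vapp_uuid, disk_size=10)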
7281 def add_new_disk_rest(self, disk_href, disk_size_mb):
7282 """
7283         Retrieves the vApp disks section & adds a new empty disk
7284
7285 Args:
7286             disk_href: Disk section href to add the disk
7287 disk_size_mb: Disk size in MB
7288
7289 Returns: Status of add new disk task
7290 """
7291 status = False
7292 if self.client._session:
7293 headers = {
7294 "Accept": "application/*+xml;version=" + API_VERSION,
7295 "x-vcloud-authorization": self.client._session.headers[
7296 "x-vcloud-authorization"
7297 ],
7298 }
7299 response = self.perform_request(
7300 req_type="GET", url=disk_href, headers=headers
7301 )
7302
7303 if response.status_code == 403:
7304 response = self.retry_rest("GET", disk_href)
7305
7306 if response.status_code != requests.codes.ok:
7307 self.logger.error(
7308 "add_new_disk_rest: GET REST API call {} failed. Return status code {}".format(
7309 disk_href, response.status_code
7310 )
7311 )
7312
7313 return status
7314
7315 try:
7316             # Find bus type & max of instance IDs assigned to disks
7317 lxmlroot_respond = lxmlElementTree.fromstring(response.content)
7318 namespaces = {
7319 prefix: uri for prefix, uri in lxmlroot_respond.nsmap.items() if prefix
7320 }
7321 namespaces["xmlns"] = "http://www.vmware.com/vcloud/v1.5"
7322 instance_id = 0
7323
7324 for item in lxmlroot_respond.iterfind("xmlns:Item", namespaces):
7325 if item.find("rasd:Description", namespaces).text == "Hard disk":
7326 inst_id = int(item.find("rasd:InstanceID", namespaces).text)
7327
7328 if inst_id > instance_id:
7329 instance_id = inst_id
7330 disk_item = item.find("rasd:HostResource", namespaces)
7331 bus_subtype = disk_item.attrib[
7332 "{" + namespaces["xmlns"] + "}busSubType"
7333 ]
7334 bus_type = disk_item.attrib[
7335 "{" + namespaces["xmlns"] + "}busType"
7336 ]
7337
7338 instance_id = instance_id + 1
7339 new_item = """<Item>
7340 <rasd:Description>Hard disk</rasd:Description>
7341 <rasd:ElementName>New disk</rasd:ElementName>
7342 <rasd:HostResource
7343 xmlns:vcloud="http://www.vmware.com/vcloud/v1.5"
7344 vcloud:capacity="{}"
7345 vcloud:busSubType="{}"
7346 vcloud:busType="{}"></rasd:HostResource>
7347 <rasd:InstanceID>{}</rasd:InstanceID>
7348 <rasd:ResourceType>17</rasd:ResourceType>
7349 </Item>""".format(
7350 disk_size_mb, bus_subtype, bus_type, instance_id
7351 )
7352
7353 new_data = response.text
7354 # Add new item at the bottom
7355 new_data = new_data.replace(
7356 "</Item>\n</RasdItemsList>",
7357 "</Item>\n{}\n</RasdItemsList>".format(new_item),
7358 )
7359
7360 # Send PUT request to modify virtual hardware section with new disk
7361 headers[
7362 "Content-Type"
7363 ] = "application/vnd.vmware.vcloud.rasdItemsList+xml; charset=ISO-8859-1"
7364
7365 response = self.perform_request(
7366 req_type="PUT", url=disk_href, data=new_data, headers=headers
7367 )
7368
7369 if response.status_code == 403:
7370 add_headers = {"Content-Type": headers["Content-Type"]}
7371 response = self.retry_rest("PUT", disk_href, add_headers, new_data)
7372
7373 if response.status_code != 202:
7374 self.logger.error(
7375 "PUT REST API call {} failed. Return status code {}. response.text:{}".format(
7376 disk_href, response.status_code, response.text
7377 )
7378 )
7379 else:
7380 add_disk_task = self.get_task_from_response(response.text)
7381 result = self.client.get_task_monitor().wait_for_success(
7382 task=add_disk_task
7383 )
7384
7385 if result.get("status") == "success":
7386 status = True
7387 else:
7388 self.logger.error(
7389 "Add new disk REST task failed to add {} MB disk".format(
7390 disk_size_mb
7391 )
7392 )
7393 except Exception as exp:
7394 self.logger.error(
7395 "Error occurred calling rest api for creating new disk {}".format(exp)
7396 )
7397
7398 return status
7399
7400 def add_existing_disk(
7401 self,
7402 catalogs=None,
7403 image_id=None,
7404 size=None,
7405 template_name=None,
7406 vapp_uuid=None,
7407 ):
7408 """
7409 Method to add existing disk to vm
7410 Args :
7411 catalogs - List of VDC catalogs
7412 image_id - Catalog ID
7413 template_name - Name of template in catalog
7414 vapp_uuid - UUID of vApp
7415 Returns:
7416 None
7417 """
7418 disk_info = None
7419 vcenter_conect, content = self.get_vcenter_content()
7420 # find moref-id of vm in image
7421 catalog_vm_info = self.get_vapp_template_details(
7422 catalogs=catalogs,
7423 image_id=image_id,
7424 )
7425
7426 if catalog_vm_info and "vm_vcenter_info" in catalog_vm_info:
7427 if "vm_moref_id" in catalog_vm_info["vm_vcenter_info"]:
7428 catalog_vm_moref_id = catalog_vm_info["vm_vcenter_info"].get(
7429 "vm_moref_id", None
7430 )
7431
7432 if catalog_vm_moref_id:
7433 self.logger.info(
7434 "Moref_id of VM in catalog : {}".format(catalog_vm_moref_id)
7435 )
7436 _, catalog_vm_obj = self.get_vm_obj(content, catalog_vm_moref_id)
7437
7438 if catalog_vm_obj:
7439 # find existing disk
7440 disk_info = self.find_disk(catalog_vm_obj)
7441 else:
7442 exp_msg = "No VM with image id {} found".format(image_id)
7443 self.rollback_newvm(vapp_uuid, exp_msg, exp_type="NotFound")
7444 else:
7445 exp_msg = "No Image found with image ID {} ".format(image_id)
7446 self.rollback_newvm(vapp_uuid, exp_msg, exp_type="NotFound")
7447
7448 if disk_info:
7449 self.logger.info("Existing disk_info : {}".format(disk_info))
7450 # get VM
7451 vm_moref_id = self.get_vm_moref_id(vapp_uuid)
7452 _, vm_obj = self.get_vm_obj(content, vm_moref_id)
7453
7454 if vm_obj:
7455 status = self.add_disk(
7456 vcenter_conect=vcenter_conect,
7457 vm=vm_obj,
7458 disk_info=disk_info,
7459 size=size,
7460 vapp_uuid=vapp_uuid,
7461 )
7462
7463 if status:
7464 self.logger.info(
7465 "Disk from image id {} added to {}".format(
7466 image_id, vm_obj.config.name
7467 )
7468 )
7469 else:
7470 msg = "No disk found with image id {} to add in VM {}".format(
7471 image_id, vm_obj.config.name
7472 )
7473 self.rollback_newvm(vapp_uuid, msg, exp_type="NotFound")
7474
7475 def find_disk(self, vm_obj):
7476 """
7477 Method to find details of existing disk in VM
7478 Args:
7479 vm_obj - vCenter object of VM
7480 Returns:
7481 disk_info : dict of disk details
7482 """
7483 disk_info = {}
7484 if vm_obj:
7485 try:
7486 devices = vm_obj.config.hardware.device
7487
7488 for device in devices:
7489 if type(device) is vim.vm.device.VirtualDisk:
7490 if isinstance(
7491 device.backing,
7492 vim.vm.device.VirtualDisk.FlatVer2BackingInfo,
7493 ) and hasattr(device.backing, "fileName"):
7494 disk_info["full_path"] = device.backing.fileName
7495 disk_info["datastore"] = device.backing.datastore
7496 disk_info["capacityKB"] = device.capacityInKB
7497 break
7498 except Exception as exp:
7499 self.logger.error(
7500 "find_disk() : exception occurred while "
7501 "getting existing disk details :{}".format(exp)
7502 )
7503
7504 return disk_info
7505
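    # Illustrative note: find_disk() returns a dict shaped roughly as below
    # (values are hypothetical), which add_disk() then consumes.
    #
    #     {
    #         "full_path": "[datastore1] template_vm/template_vm.vmdk",
    #         "datastore": <vim.Datastore object>,
    #         "capacityKB": 10485760,
    #     }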
7506 def add_disk(
7507 self, vcenter_conect=None, vm=None, size=None, vapp_uuid=None, disk_info={}
7508 ):
7509 """
7510 Method to add existing disk in VM
7511 Args :
7512 vcenter_conect - vCenter content object
7513 vm - vCenter vm object
7514 disk_info : dict of disk details
7515 Returns:
7516 status : status of add disk task
7517 """
7518 datastore = disk_info["datastore"] if "datastore" in disk_info else None
7519 fullpath = disk_info["full_path"] if "full_path" in disk_info else None
7520 capacityKB = disk_info["capacityKB"] if "capacityKB" in disk_info else None
7521 if size is not None:
7522 # Convert size from GB to KB
7523 sizeKB = int(size) * 1024 * 1024
7524             # compare size of existing disk and user given size; assign whichever is greater
7525 self.logger.info(
7526 "Add Existing disk : sizeKB {} , capacityKB {}".format(
7527 sizeKB, capacityKB
7528 )
7529 )
7530
7531             if capacityKB is None or sizeKB > capacityKB:
7532 capacityKB = sizeKB
7533
7534 if datastore and fullpath and capacityKB:
7535 try:
7536 spec = vim.vm.ConfigSpec()
7537 # get all disks on a VM, set unit_number to the next available
7538 unit_number = 0
7539 for dev in vm.config.hardware.device:
7540 if hasattr(dev.backing, "fileName"):
7541 unit_number = int(dev.unitNumber) + 1
7542 # unit_number 7 reserved for scsi controller
7543
7544 if unit_number == 7:
7545 unit_number += 1
7546
7547 if isinstance(dev, vim.vm.device.VirtualDisk):
7548 # vim.vm.device.VirtualSCSIController
7549 controller_key = dev.controllerKey
7550
7551 self.logger.info(
7552 "Add Existing disk : unit number {} , controller key {}".format(
7553 unit_number, controller_key
7554 )
7555 )
7556 # add disk here
7557 dev_changes = []
7558 disk_spec = vim.vm.device.VirtualDeviceSpec()
7559 disk_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
7560 disk_spec.device = vim.vm.device.VirtualDisk()
7561 disk_spec.device.backing = (
7562 vim.vm.device.VirtualDisk.FlatVer2BackingInfo()
7563 )
7564 disk_spec.device.backing.thinProvisioned = True
7565 disk_spec.device.backing.diskMode = "persistent"
7566 disk_spec.device.backing.datastore = datastore
7567 disk_spec.device.backing.fileName = fullpath
7568
7569 disk_spec.device.unitNumber = unit_number
7570 disk_spec.device.capacityInKB = capacityKB
7571 disk_spec.device.controllerKey = controller_key
7572 dev_changes.append(disk_spec)
7573 spec.deviceChange = dev_changes
7574 task = vm.ReconfigVM_Task(spec=spec)
7575 status = self.wait_for_vcenter_task(task, vcenter_conect)
7576
7577 return status
7578 except Exception as exp:
7579 exp_msg = (
7580 "add_disk() : exception {} occurred while adding disk "
7581 "{} to vm {}".format(exp, fullpath, vm.config.name)
7582 )
7583 self.rollback_newvm(vapp_uuid, exp_msg)
7584 else:
7585 msg = "add_disk() : Can not add disk to VM with disk info {} ".format(
7586 disk_info
7587 )
7588 self.rollback_newvm(vapp_uuid, msg)
7589
7590 def get_vcenter_content(self):
7591 """
7592 Get the vsphere content object
7593 """
7594 try:
7595 vm_vcenter_info = self.get_vm_vcenter_info()
7596 except Exception as exp:
7597 self.logger.error(
7598                 "Error occurred while getting vCenter information"
7599 " for VM : {}".format(exp)
7600 )
7601
7602 raise vimconn.VimConnException(message=exp)
7603
7604 context = None
7605 if hasattr(ssl, "_create_unverified_context"):
7606 context = ssl._create_unverified_context()
7607
7608 vcenter_conect = SmartConnect(
7609 host=vm_vcenter_info["vm_vcenter_ip"],
7610 user=vm_vcenter_info["vm_vcenter_user"],
7611 pwd=vm_vcenter_info["vm_vcenter_password"],
7612 port=int(vm_vcenter_info["vm_vcenter_port"]),
7613 sslContext=context,
7614 )
7615 atexit.register(Disconnect, vcenter_conect)
7616 content = vcenter_conect.RetrieveContent()
7617
7618 return vcenter_conect, content
7619
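    # Minimal standalone sketch of what get_vcenter_content() does with pyVmomi,
    # using placeholder credentials; SSL verification is disabled the same way the
    # method above does it, which is only acceptable for lab setups.
    #
    #     import atexit, ssl
    #     from pyVim.connect import SmartConnect, Disconnect
    #
    #     context = ssl._create_unverified_context()
    #     si = SmartConnect(host="vcenter.example.com", user="admin",
    #                       pwd="secret", port=443, sslContext=context)
    #     atexit.register(Disconnect, si)
    #     content = si.RetrieveContent()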
7620 def get_vm_moref_id(self, vapp_uuid):
7621 """
7622 Get the moref_id of given VM
7623 """
7624 try:
7625 if vapp_uuid:
7626 vm_details = self.get_vapp_details_rest(
7627 vapp_uuid, need_admin_access=True
7628 )
7629
7630 if vm_details and "vm_vcenter_info" in vm_details:
7631 vm_moref_id = vm_details["vm_vcenter_info"].get("vm_moref_id", None)
7632
7633 return vm_moref_id
7634 except Exception as exp:
7635 self.logger.error(
7636                 "Error occurred while getting VM moref ID for VM : {}".format(exp)
7637 )
7638
7639 return None
7640
7641 def get_vapp_template_details(
7642 self, catalogs=None, image_id=None, template_name=None
7643 ):
7644 """
7645 Method to get vApp template details
7646 Args :
7647 catalogs - list of VDC catalogs
7648 image_id - Catalog ID to find
7649 template_name : template name in catalog
7650 Returns:
7651             parsed_response : dict of vApp template details
7652 """
7653 parsed_response = {}
7654
7655 vca = self.connect_as_admin()
7656 if not vca:
7657 raise vimconn.VimConnConnectionException("Failed to connect vCD")
7658
7659 try:
7660 org, _ = self.get_vdc_details()
7661 catalog = self.get_catalog_obj(image_id, catalogs)
7662 if catalog:
7663 items = org.get_catalog_item(catalog.get("name"), catalog.get("name"))
7664 catalog_items = [items.attrib]
7665
7666 if len(catalog_items) == 1:
7667 headers = {
7668 "Accept": "application/*+xml;version=" + API_VERSION,
7669 "x-vcloud-authorization": vca._session.headers[
7670 "x-vcloud-authorization"
7671 ],
7672 }
7673 response = self.perform_request(
7674 req_type="GET",
7675 url=catalog_items[0].get("href"),
7676 headers=headers,
7677 )
7678 catalogItem = XmlElementTree.fromstring(response.text)
7679 entity = [
7680 child
7681 for child in catalogItem
7682 if child.get("type")
7683 == "application/vnd.vmware.vcloud.vAppTemplate+xml"
7684 ][0]
7685                     vapp_template_href = entity.get("href")
7686 # get vapp details and parse moref id
7687
7688 namespaces = {
7689 "vssd": "http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData",
7690 "ovf": "http://schemas.dmtf.org/ovf/envelope/1",
7691 "vmw": "http://www.vmware.com/schema/ovf",
7692 "vm": "http://www.vmware.com/vcloud/v1.5",
7693 "rasd": "http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData",
7694 "vmext": "http://www.vmware.com/vcloud/extension/v1.5",
7695 "xmlns": "http://www.vmware.com/vcloud/v1.5",
7696 }
7697
7698 if vca._session:
7699 response = self.perform_request(
7700                             req_type="GET", url=vapp_template_href, headers=headers
7701 )
7702
7703 if response.status_code != requests.codes.ok:
7704 self.logger.debug(
7705 "REST API call {} failed. Return status code {}".format(
7706                                     vapp_template_href, response.status_code
7707 )
7708 )
7709 else:
7710 xmlroot_respond = XmlElementTree.fromstring(response.text)
7711 children_section = xmlroot_respond.find(
7712 "vm:Children/", namespaces
7713 )
7714
7715 if children_section is not None:
7716 vCloud_extension_section = children_section.find(
7717 "xmlns:VCloudExtension", namespaces
7718 )
7719
7720 if vCloud_extension_section is not None:
7721 vm_vcenter_info = {}
7722 vim_info = vCloud_extension_section.find(
7723 "vmext:VmVimInfo", namespaces
7724 )
7725 vmext = vim_info.find(
7726 "vmext:VmVimObjectRef", namespaces
7727 )
7728
7729 if vmext is not None:
7730 vm_vcenter_info["vm_moref_id"] = vmext.find(
7731 "vmext:MoRef", namespaces
7732 ).text
7733
7734 parsed_response["vm_vcenter_info"] = vm_vcenter_info
7735 except Exception as exp:
7736 self.logger.info(
7737 "Error occurred calling rest api for getting vApp details {}".format(
7738 exp
7739 )
7740 )
7741
7742 return parsed_response
7743
7744 def rollback_newvm(self, vapp_uuid, msg, exp_type="Genric"):
7745 """
7746 Method to delete vApp
7747 Args :
7748 vapp_uuid - vApp UUID
7749 msg - Error message to be logged
7750 exp_type : Exception type
7751 Returns:
7752 None
7753 """
7754 if vapp_uuid:
7755 self.delete_vminstance(vapp_uuid)
7756 else:
7757 msg = "No vApp ID"
7758
7759 self.logger.error(msg)
7760
7761 if exp_type == "Genric":
7762 raise vimconn.VimConnException(msg)
7763 elif exp_type == "NotFound":
7764 raise vimconn.VimConnNotFoundException(message=msg)
7765
7766 def add_sriov(self, vapp_uuid, sriov_nets, vmname_andid):
7767 """
7768 Method to attach SRIOV adapters to VM
7769
7770 Args:
7771 vapp_uuid - uuid of vApp/VM
7772             sriov_nets - SRIOV devices information as specified in the VNFD (flavor)
7773 vmname_andid - vmname
7774
7775 Returns:
7776             The status of the add SRIOV adapter task, vm object and
7777 vcenter_conect object
7778 """
7779 vm_obj = None
7780 vcenter_conect, content = self.get_vcenter_content()
7781 vm_moref_id = self.get_vm_moref_id(vapp_uuid)
7782
7783 if vm_moref_id:
7784 try:
7785 no_of_sriov_devices = len(sriov_nets)
7786 if no_of_sriov_devices > 0:
7787 # Get VM and its host
7788 host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
7789 self.logger.info(
7790 "VM {} is currently on host {}".format(vm_obj, host_obj)
7791 )
7792
7793 if host_obj and vm_obj:
7794                         # get SRIOV devices from host on which vapp is currently installed
7795 avilable_sriov_devices = self.get_sriov_devices(
7796 host_obj,
7797 no_of_sriov_devices,
7798 )
7799
7800 if len(avilable_sriov_devices) == 0:
7801 # find other hosts with active pci devices
7802 (
7803 new_host_obj,
7804 avilable_sriov_devices,
7805 ) = self.get_host_and_sriov_devices(
7806 content,
7807 no_of_sriov_devices,
7808 )
7809
7810 if (
7811 new_host_obj is not None
7812 and len(avilable_sriov_devices) > 0
7813 ):
7814 # Migrate vm to the host where SRIOV devices are available
7815 self.logger.info(
7816 "Relocate VM {} on new host {}".format(
7817 vm_obj, new_host_obj
7818 )
7819 )
7820 task = self.relocate_vm(new_host_obj, vm_obj)
7821
7822 if task is not None:
7823 result = self.wait_for_vcenter_task(
7824 task, vcenter_conect
7825 )
7826 self.logger.info(
7827 "Migrate VM status: {}".format(result)
7828 )
7829 host_obj = new_host_obj
7830 else:
7831                                     self.logger.error(
7832                                         "Failed to migrate VM {}: relocate task not created".format(vm_obj)
7833                                     )
7834
7835 raise vimconn.VimConnNotFoundException(
7836 "Fail to migrate VM : {} to host {}".format(
7837 vmname_andid, new_host_obj
7838 )
7839 )
7840
7841 if (
7842 host_obj is not None
7843 and avilable_sriov_devices is not None
7844 and len(avilable_sriov_devices) > 0
7845 ):
7846 # Add SRIOV devices one by one
7847 for sriov_net in sriov_nets:
7848 network_name = sriov_net.get("net_id")
7849 self.create_dvPort_group(network_name)
7850
7851 if (
7852 sriov_net.get("type") == "VF"
7853 or sriov_net.get("type") == "SR-IOV"
7854 ):
7855                                     # add vlan ID, modify portgroup for vlan ID
7856 self.configure_vlanID(
7857 content, vcenter_conect, network_name
7858 )
7859
7860 task = self.add_sriov_to_vm(
7861 content,
7862 vm_obj,
7863 host_obj,
7864 network_name,
7865 avilable_sriov_devices[0],
7866 )
7867
7868 if task:
7869 status = self.wait_for_vcenter_task(
7870 task, vcenter_conect
7871 )
7872
7873 if status:
7874 self.logger.info(
7875 "Added SRIOV {} to VM {}".format(
7876 no_of_sriov_devices, str(vm_obj)
7877 )
7878 )
7879 else:
7880 self.logger.error(
7881 "Fail to add SRIOV {} to VM {}".format(
7882 no_of_sriov_devices, str(vm_obj)
7883 )
7884 )
7885
7886 raise vimconn.VimConnUnexpectedResponse(
7887 "Fail to add SRIOV adapter in VM {}".format(
7888 str(vm_obj)
7889 )
7890 )
7891
7892 return True, vm_obj, vcenter_conect
7893 else:
7894 self.logger.error(
7895 "Currently there is no host with"
7896                             " {} available SRIOV "
7897 "VFs required for VM {}".format(
7898 no_of_sriov_devices, vmname_andid
7899 )
7900 )
7901
7902 raise vimconn.VimConnNotFoundException(
7903 "Currently there is no host with {} "
7904                             "available SRIOV devices required for VM {}".format(
7905 no_of_sriov_devices, vmname_andid
7906 )
7907 )
7908 else:
7909 self.logger.debug(
7910                         "No information about SRIOV devices {}".format(sriov_nets)
7911 )
7912 except vmodl.MethodFault as error:
7913                 self.logger.error("Error occurred while adding SRIOV: {}".format(error))
7914
7915 return None, vm_obj, vcenter_conect
7916
7917 def get_sriov_devices(self, host, no_of_vfs):
7918 """
7919 Method to get the details of SRIOV devices on given host
7920 Args:
7921 host - vSphere host object
7922 no_of_vfs - number of VFs needed on host
7923
7924 Returns:
7925 array of SRIOV devices
7926 """
7927 sriovInfo = []
7928
7929 if host:
7930 for device in host.config.pciPassthruInfo:
7931 if isinstance(device, vim.host.SriovInfo) and device.sriovActive:
7932 if device.numVirtualFunction >= no_of_vfs:
7933 sriovInfo.append(device)
7934 break
7935
7936 return sriovInfo
7937
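    # Illustrative usage (placeholder values): pick a host that still has enough
    # SR-IOV virtual functions, falling back to a cluster-wide search when the
    # current host cannot satisfy the request.
    #
    #     devices = self.get_sriov_devices(host_obj, no_of_vfs=2)
    #     if not devices:
    #         host_obj, devices = self.get_host_and_sriov_devices(content, 2)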
7938 def get_host_and_sriov_devices(self, content, no_of_vfs):
7939 """
7940         Method to get the details of SRIOV devices on all hosts
7941
7942 Args:
7943             content - vCenter content object
7944 no_of_vfs - number of pci VFs needed on host
7945
7946 Returns:
7947 array of SRIOV devices and host object
7948 """
7949 host_obj = None
7950 sriov_device_objs = None
7951
7952 try:
7953 if content:
7954 container = content.viewManager.CreateContainerView(
7955 content.rootFolder, [vim.HostSystem], True
7956 )
7957
7958 for host in container.view:
7959 devices = self.get_sriov_devices(host, no_of_vfs)
7960
7961 if devices:
7962 host_obj = host
7963 sriov_device_objs = devices
7964 break
7965 except Exception as exp:
7966 self.logger.error(
7967 "Error {} occurred while finding SRIOV devices on host: {}".format(
7968 exp, host_obj
7969 )
7970 )
7971
7972 return host_obj, sriov_device_objs
7973
7974 def add_sriov_to_vm(self, content, vm_obj, host_obj, network_name, sriov_device):
7975 """
7976 Method to add SRIOV adapter to vm
7977
7978 Args:
7979 host_obj - vSphere host object
7980 vm_obj - vSphere vm object
7981 content - vCenter content object
7982             network_name - name of distributed virtual portgroup
7983 sriov_device - SRIOV device info
7984
7985 Returns:
7986 task object
7987 """
7988 devices = []
7989 vnic_label = "sriov nic"
7990
7991 try:
7992 dvs_portgr = self.get_dvport_group(network_name)
7993 network_name = dvs_portgr.name
7994 nic = vim.vm.device.VirtualDeviceSpec()
7995 # VM device
7996 nic.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
7997 nic.device = vim.vm.device.VirtualSriovEthernetCard()
7998 nic.device.addressType = "assigned"
7999 # nic.device.key = 13016
8000 nic.device.deviceInfo = vim.Description()
8001 nic.device.deviceInfo.label = vnic_label
8002 nic.device.deviceInfo.summary = network_name
8003 nic.device.backing = vim.vm.device.VirtualEthernetCard.NetworkBackingInfo()
8004
8005 nic.device.backing.network = self.get_obj(
8006 content, [vim.Network], network_name
8007 )
8008 nic.device.backing.deviceName = network_name
8009 nic.device.backing.useAutoDetect = False
8010 nic.device.connectable = vim.vm.device.VirtualDevice.ConnectInfo()
8011 nic.device.connectable.startConnected = True
8012 nic.device.connectable.allowGuestControl = True
8013
8014 nic.device.sriovBacking = (
8015 vim.vm.device.VirtualSriovEthernetCard.SriovBackingInfo()
8016 )
8017 nic.device.sriovBacking.physicalFunctionBacking = (
8018 vim.vm.device.VirtualPCIPassthrough.DeviceBackingInfo()
8019 )
8020 nic.device.sriovBacking.physicalFunctionBacking.id = sriov_device.id
8021
8022 devices.append(nic)
8023 vmconf = vim.vm.ConfigSpec(deviceChange=devices)
8024 task = vm_obj.ReconfigVM_Task(vmconf)
8025
8026 return task
8027 except Exception as exp:
8028 self.logger.error(
8029 "Error {} occurred while adding SRIOV adapter in VM: {}".format(
8030 exp, vm_obj
8031 )
8032 )
8033
8034 return None
8035
8036 def create_dvPort_group(self, network_name):
8037 """
8038         Method to create distributed virtual portgroup
8039
8040 Args:
8041 network_name - name of network/portgroup
8042
8043 Returns:
8044 portgroup key
8045 """
8046 try:
8047 new_network_name = [network_name, "-", str(uuid.uuid4())]
8048 network_name = "".join(new_network_name)
8049 vcenter_conect, content = self.get_vcenter_content()
8050
8051 dv_switch = self.get_obj(
8052 content, [vim.DistributedVirtualSwitch], self.dvs_name
8053 )
8054
8055 if dv_switch:
8056 dv_pg_spec = vim.dvs.DistributedVirtualPortgroup.ConfigSpec()
8057 dv_pg_spec.name = network_name
8058
8059 dv_pg_spec.type = (
8060 vim.dvs.DistributedVirtualPortgroup.PortgroupType.earlyBinding
8061 )
8062 dv_pg_spec.defaultPortConfig = (
8063 vim.dvs.VmwareDistributedVirtualSwitch.VmwarePortConfigPolicy()
8064 )
8065 dv_pg_spec.defaultPortConfig.securityPolicy = (
8066 vim.dvs.VmwareDistributedVirtualSwitch.SecurityPolicy()
8067 )
8068 dv_pg_spec.defaultPortConfig.securityPolicy.allowPromiscuous = (
8069 vim.BoolPolicy(value=False)
8070 )
8071 dv_pg_spec.defaultPortConfig.securityPolicy.forgedTransmits = (
8072 vim.BoolPolicy(value=False)
8073 )
8074 dv_pg_spec.defaultPortConfig.securityPolicy.macChanges = vim.BoolPolicy(
8075 value=False
8076 )
8077
8078 task = dv_switch.AddDVPortgroup_Task([dv_pg_spec])
8079 self.wait_for_vcenter_task(task, vcenter_conect)
8080
8081 dvPort_group = self.get_obj(
8082 content, [vim.dvs.DistributedVirtualPortgroup], network_name
8083 )
8084
8085 if dvPort_group:
8086 self.logger.info(
8087                     "Created distributed virtual port group: {}".format(dvPort_group)
8088 )
8089 return dvPort_group.key
8090 else:
8091 self.logger.debug(
8092                         "No distributed virtual switch found with name {}".format(
8093 network_name
8094 )
8095 )
8096
8097 except Exception as exp:
8098 self.logger.error(
8099                 "Error occurred while creating distributed virtual port group {}"
8100 " : {}".format(network_name, exp)
8101 )
8102
8103 return None
8104
8105 def reconfig_portgroup(self, content, dvPort_group_name, config_info={}):
8106 """
8107         Method to reconfigure distributed virtual portgroup
8108
8109 Args:
8110             dvPort_group_name - name of distributed virtual portgroup
8111             content - vCenter content object
8112             config_info - distributed virtual portgroup configuration
8113
8114 Returns:
8115 task object
8116 """
8117 try:
8118 dvPort_group = self.get_dvport_group(dvPort_group_name)
8119
8120 if dvPort_group:
8121 dv_pg_spec = vim.dvs.DistributedVirtualPortgroup.ConfigSpec()
8122 dv_pg_spec.configVersion = dvPort_group.config.configVersion
8123 dv_pg_spec.defaultPortConfig = (
8124 vim.dvs.VmwareDistributedVirtualSwitch.VmwarePortConfigPolicy()
8125 )
8126
8127 if "vlanID" in config_info:
8128 dv_pg_spec.defaultPortConfig.vlan = (
8129 vim.dvs.VmwareDistributedVirtualSwitch.VlanIdSpec()
8130 )
8131 dv_pg_spec.defaultPortConfig.vlan.vlanId = config_info.get("vlanID")
8132
8133 task = dvPort_group.ReconfigureDVPortgroup_Task(spec=dv_pg_spec)
8134
8135 return task
8136 else:
8137 return None
8138 except Exception as exp:
8139 self.logger.error(
8140                 "Error occurred while reconfiguring distributed virtual port group {}"
8141 " : {}".format(dvPort_group_name, exp)
8142 )
8143
8144 return None
8145
8146 def destroy_dvport_group(self, dvPort_group_name):
8147 """
8148         Method to destroy distributed virtual portgroup
8149
8150 Args:
8151 network_name - name of network/portgroup
8152
8153 Returns:
8154             Status of the destroy task if the portgroup was deleted, else None
8155 """
8156 vcenter_conect, _ = self.get_vcenter_content()
8157
8158 try:
8159 status = None
8160 dvPort_group = self.get_dvport_group(dvPort_group_name)
8161
8162 if dvPort_group:
8163 task = dvPort_group.Destroy_Task()
8164 status = self.wait_for_vcenter_task(task, vcenter_conect)
8165
8166 return status
8167 except vmodl.MethodFault as exp:
8168 self.logger.error(
8169                 "Caught vmodl fault {} while deleting distributed virtual port group {}".format(
8170 exp, dvPort_group_name
8171 )
8172 )
8173
8174 return None
8175
8176 def get_dvport_group(self, dvPort_group_name):
8177 """
8178         Method to get distributed virtual portgroup
8179
8180 Args:
8181 network_name - name of network/portgroup
8182
8183 Returns:
8184 portgroup object
8185 """
8186 _, content = self.get_vcenter_content()
8187 dvPort_group = None
8188
8189 try:
8190 container = content.viewManager.CreateContainerView(
8191 content.rootFolder, [vim.dvs.DistributedVirtualPortgroup], True
8192 )
8193
8194 for item in container.view:
8195 if item.key == dvPort_group_name:
8196 dvPort_group = item
8197 break
8198
8199 return dvPort_group
8200 except vmodl.MethodFault as exp:
8201 self.logger.error(
8202                 "Caught vmodl fault {} for distributed virtual port group {}".format(
8203 exp, dvPort_group_name
8204 )
8205 )
8206
8207 return None
8208
8209 def get_vlanID_from_dvs_portgr(self, dvPort_group_name):
8210 """
8211         Method to get distributed virtual portgroup vlan ID
8212
8213 Args:
8214 network_name - name of network/portgroup
8215
8216 Returns:
8217 vlan ID
8218 """
8219 vlanId = None
8220
8221 try:
8222 dvPort_group = self.get_dvport_group(dvPort_group_name)
8223
8224 if dvPort_group:
8225 vlanId = dvPort_group.config.defaultPortConfig.vlan.vlanId
8226 except vmodl.MethodFault as exp:
8227 self.logger.error(
8228                 "Caught vmodl fault {} for distributed virtual port group {}".format(
8229 exp, dvPort_group_name
8230 )
8231 )
8232
8233 return vlanId
8234
8235 def configure_vlanID(self, content, vcenter_conect, dvPort_group_name):
8236 """
8237         Method to configure vlan ID of a distributed virtual portgroup
8238
8239 Args:
8240 network_name - name of network/portgroup
8241
8242 Returns:
8243 None
8244 """
8245 vlanID = self.get_vlanID_from_dvs_portgr(dvPort_group_name)
8246
8247 if vlanID == 0:
8248 # configure vlanID
8249 vlanID = self.genrate_vlanID(dvPort_group_name)
8250 config = {"vlanID": vlanID}
8251 task = self.reconfig_portgroup(
8252 content, dvPort_group_name, config_info=config
8253 )
8254
8255 if task:
8256 status = self.wait_for_vcenter_task(task, vcenter_conect)
8257
8258 if status:
8259 self.logger.info(
8260 "Reconfigured Port group {} for vlan ID {}".format(
8261 dvPort_group_name, vlanID
8262 )
8263 )
8264 else:
8265 self.logger.error(
8266                     "Failed to reconfigure portgroup {} for vlan ID {}".format(
8267 dvPort_group_name, vlanID
8268 )
8269 )
8270
8271 def genrate_vlanID(self, network_name):
8272 """
8273 Method to get unused vlanID
8274 Args:
8275 network_name - name of network/portgroup
8276 Returns:
8277 vlanID
8278 """
8279 vlan_id = None
8280 used_ids = []
8281
8282 if self.config.get("vlanID_range") is None:
8283 raise vimconn.VimConnConflictException(
8284                 "You must provide a 'vlanID_range' "
8285                 "config value before creating an SRIOV network with a VLAN tag"
8286 )
8287
8288 if "used_vlanIDs" not in self.persistent_info:
8289 self.persistent_info["used_vlanIDs"] = {}
8290 else:
8291 used_ids = list(self.persistent_info["used_vlanIDs"].values())
8292
8293 for vlanID_range in self.config.get("vlanID_range"):
8294 start_vlanid, end_vlanid = vlanID_range.split("-")
8295
8296             if int(start_vlanid) > int(end_vlanid):
8297 raise vimconn.VimConnConflictException(
8298 "Invalid vlan ID range {}".format(vlanID_range)
8299 )
8300
8301 for vid in range(int(start_vlanid), int(end_vlanid) + 1):
8302 if vid not in used_ids:
8303 vlan_id = vid
8304 self.persistent_info["used_vlanIDs"][network_name] = vlan_id
8305 return vlan_id
8306
8307 if vlan_id is None:
8308 raise vimconn.VimConnConflictException("All Vlan IDs are in use")
8309
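    # Illustrative note on the expected configuration (values are examples only):
    # 'vlanID_range' is a list of "start-end" strings in the VIM config, and the
    # IDs already handed out are tracked per network name in persistent_info.
    #
    #     config:
    #         vlanID_range: ["3000-3100", "3200-3250"]
    #
    #     self.persistent_info["used_vlanIDs"]  # e.g. {"net-a": 3000, "net-b": 3001}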
8310 def get_obj(self, content, vimtype, name):
8311 """
8312 Get the vsphere object associated with a given text name
8313 """
8314 obj = None
8315 container = content.viewManager.CreateContainerView(
8316 content.rootFolder, vimtype, True
8317 )
8318
8319 for item in container.view:
8320 if item.name == name:
8321 obj = item
8322 break
8323
8324 return obj
8325
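    # Illustrative usage (names are placeholders): get_obj() walks a container view
    # of the given managed object types and returns the first exact name match,
    # e.g. to resolve a distributed virtual switch or a network.
    #
    #     dvs = self.get_obj(content, [vim.DistributedVirtualSwitch], self.dvs_name)
    #     net = self.get_obj(content, [vim.Network], "my-portgroup")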
8326 def insert_media_to_vm(self, vapp, image_id):
8327 """
8328 Method to insert media CD-ROM (ISO image) from catalog to vm.
8329 vapp - vapp object to get vm id
8330         image_id - image id of the CD-ROM (ISO) to be inserted into the vm
8331 """
8332 # create connection object
8333 vca = self.connect()
8334 try:
8335 # fetching catalog details
8336 rest_url = "{}/api/catalog/{}".format(self.url, image_id)
8337
8338 if vca._session:
8339 headers = {
8340 "Accept": "application/*+xml;version=" + API_VERSION,
8341 "x-vcloud-authorization": vca._session.headers[
8342 "x-vcloud-authorization"
8343 ],
8344 }
8345 response = self.perform_request(
8346 req_type="GET", url=rest_url, headers=headers
8347 )
8348
8349 if response.status_code != 200:
8350 self.logger.error(
8351                         "REST call {} failed reason : {} "
8352 "status code : {}".format(
8353 rest_url, response.text, response.status_code
8354 )
8355 )
8356
8357 raise vimconn.VimConnException(
8358 "insert_media_to_vm(): Failed to get " "catalog details"
8359 )
8360
8361 # searching iso name and id
8362 iso_name, media_id = self.get_media_details(vca, response.text)
8363
8364 if iso_name and media_id:
8365 data = """<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
8366 <ns6:MediaInsertOrEjectParams
8367 xmlns="http://www.vmware.com/vcloud/versions" xmlns:ns2="http://schemas.dmtf.org/ovf/envelope/1"
8368 xmlns:ns3="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData"
8369 xmlns:ns4="http://schemas.dmtf.org/wbem/wscim/1/common"
8370 xmlns:ns5="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData"
8371 xmlns:ns6="http://www.vmware.com/vcloud/v1.5"
8372 xmlns:ns7="http://www.vmware.com/schema/ovf"
8373 xmlns:ns8="http://schemas.dmtf.org/ovf/environment/1"
8374 xmlns:ns9="http://www.vmware.com/vcloud/extension/v1.5">
8375 <ns6:Media
8376 type="application/vnd.vmware.vcloud.media+xml"
8377 name="{}"
8378 id="urn:vcloud:media:{}"
8379 href="https://{}/api/media/{}"/>
8380 </ns6:MediaInsertOrEjectParams>""".format(
8381 iso_name, media_id, self.url, media_id
8382 )
8383
8384 for vms in vapp.get_all_vms():
8385 vm_id = vms.get("id").split(":")[-1]
8386
8387 headers[
8388 "Content-Type"
8389 ] = "application/vnd.vmware.vcloud.mediaInsertOrEjectParams+xml"
8390 rest_url = "{}/api/vApp/vm-{}/media/action/insertMedia".format(
8391 self.url, vm_id
8392 )
8393
8394 response = self.perform_request(
8395 req_type="POST", url=rest_url, data=data, headers=headers
8396 )
8397
8398 if response.status_code != 202:
8399 error_msg = (
8400 "insert_media_to_vm() : Failed to insert CD-ROM to vm. Reason {}. "
8401 "Status code {}".format(response.text, response.status_code)
8402 )
8403 self.logger.error(error_msg)
8404
8405 raise vimconn.VimConnException(error_msg)
8406 else:
8407 task = self.get_task_from_response(response.text)
8408 result = self.client.get_task_monitor().wait_for_success(
8409 task=task
8410 )
8411
8412 if result.get("status") == "success":
8413 self.logger.info(
8414                                 "insert_media_to_vm(): Successfully inserted media ISO"
8415 " image to vm {}".format(vm_id)
8416 )
8417 except Exception as exp:
8418 self.logger.error(
8419 "insert_media_to_vm() : exception occurred "
8420                 "while inserting media CD-ROM : {}".format(exp)
8421 )
8422
8423 raise vimconn.VimConnException(message=exp)
8424
8425 def get_media_details(self, vca, content):
8426 """
8427 Method to get catalog item details
8428 vca - connection object
8429 content - Catalog details
8430 Return - Media name, media id
8431 """
8432 cataloghref_list = []
8433 try:
8434 if content:
8435 vm_list_xmlroot = XmlElementTree.fromstring(content)
8436
8437 for child in vm_list_xmlroot.iter():
8438 if "CatalogItem" in child.tag:
8439 cataloghref_list.append(child.attrib.get("href"))
8440
8441             if cataloghref_list:
8442 for href in cataloghref_list:
8443 if href:
8444 headers = {
8445 "Accept": "application/*+xml;version=" + API_VERSION,
8446 "x-vcloud-authorization": vca._session.headers[
8447 "x-vcloud-authorization"
8448 ],
8449 }
8450 response = self.perform_request(
8451 req_type="GET", url=href, headers=headers
8452 )
8453
8454 if response.status_code != 200:
8455 self.logger.error(
8456                                 "REST call {} failed reason : {} "
8457 "status code : {}".format(
8458 href, response.text, response.status_code
8459 )
8460 )
8461
8462 raise vimconn.VimConnException(
8463 "get_media_details : Failed to get "
8464                                     "catalog item details"
8465 )
8466
8467 list_xmlroot = XmlElementTree.fromstring(response.text)
8468
8469 for child in list_xmlroot.iter():
8470 if "Entity" in child.tag:
8471 if "media" in child.attrib.get("href"):
8472 name = child.attrib.get("name")
8473 media_id = (
8474 child.attrib.get("href").split("/").pop()
8475 )
8476
8477 return name, media_id
8478 else:
8479 self.logger.debug("Media name and id not found")
8480
8481 return False, False
8482 except Exception as exp:
8483 self.logger.error(
8484                 "get_media_details : exception occurred getting media details : {}".format(exp)
8485 )
8486
8487 raise vimconn.VimConnException(message=exp)
8488
8489 def retry_rest(self, method, url, add_headers=None, data=None):
8490 """Method to get Token & retry respective REST request
8491 Args:
8492             method - HTTP method, one of 'GET', 'PUT', 'POST' or 'DELETE'
8493 url - request url to be used
8494 add_headers - Additional headers (optional)
8495 data - Request payload data to be passed in request
8496 Returns:
8497 response - Response of request
8498 """
8499 response = None
8500
8501 # Get token
8502 self.get_token()
8503
8504 if self.client._session:
8505 headers = {
8506 "Accept": "application/*+xml;version=" + API_VERSION,
8507 "x-vcloud-authorization": self.client._session.headers[
8508 "x-vcloud-authorization"
8509 ],
8510 }
8511
8512 if add_headers:
8513 headers.update(add_headers)
8514
8515 if method == "GET":
8516 response = self.perform_request(req_type="GET", url=url, headers=headers)
8517 elif method == "PUT":
8518 response = self.perform_request(
8519 req_type="PUT", url=url, headers=headers, data=data
8520 )
8521 elif method == "POST":
8522 response = self.perform_request(
8523 req_type="POST", url=url, headers=headers, data=data
8524 )
8525 elif method == "DELETE":
8526 response = self.perform_request(req_type="DELETE", url=url, headers=headers)
8527
8528 return response
8529
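    # Illustrative usage (URL variable is a placeholder): retry_rest() is meant for
    # the 403 path of a prior perform_request() call, where the vCloud session has
    # expired; it refreshes the token and replays the same request.
    #
    #     response = self.perform_request(req_type="GET", url=disk_href, headers=headers)
    #     if response.status_code == 403:
    #         response = self.retry_rest("GET", disk_href)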
8530 def get_token(self):
8531 """Generate a new token if expired
8532
8533 Returns:
8534             None. Refreshes self.client, which can later be used to connect to vCloud Director as admin for the VDC
8535 """
8536 self.client = self.connect()
8537
8538 def get_vdc_details(self):
8539 """Get VDC details using pyVcloud Lib
8540
8541 Returns org and vdc object
8542 """
8543 vdc = None
8544
8545 try:
8546 org = Org(self.client, resource=self.client.get_org())
8547 vdc = org.get_vdc(self.tenant_name)
8548 except Exception as e:
8549 # pyvcloud not giving a specific exception, Refresh nevertheless
8550 self.logger.debug("Received exception {}, refreshing token ".format(str(e)))
8551
8552 # Retry once, if failed by refreshing token
8553 if vdc is None:
8554 self.get_token()
8555 org = Org(self.client, resource=self.client.get_org())
8556 vdc = org.get_vdc(self.tenant_name)
8557
8558 return org, vdc
8559
8560 def perform_request(self, req_type, url, headers=None, data=None):
8561 """Perform the POST/PUT/GET/DELETE request."""
8562 # Log REST request details
8563 self.log_request(req_type, url=url, headers=headers, data=data)
8564 # perform request and return its result
8565
8566 if req_type == "GET":
8567 response = requests.get(url=url, headers=headers, verify=False)
8568 elif req_type == "PUT":
8569 response = requests.put(url=url, headers=headers, data=data, verify=False)
8570 elif req_type == "POST":
8571 response = requests.post(url=url, headers=headers, data=data, verify=False)
8572 elif req_type == "DELETE":
8573 response = requests.delete(url=url, headers=headers, verify=False)
8574
8575 # Log the REST response
8576 self.log_response(response)
8577
8578 return response
8579
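    # Illustrative usage (placeholder URL): vCloud REST calls in this class go
    # through perform_request(), with the authorization header taken from the
    # active pyvcloud session; TLS verification is intentionally disabled.
    #
    #     headers = {
    #         "Accept": "application/*+xml;version=" + API_VERSION,
    #         "x-vcloud-authorization": self.client._session.headers["x-vcloud-authorization"],
    #     }
    #     response = self.perform_request(req_type="GET",
    #                                     url="{}/api/org".format(self.url),
    #                                     headers=headers)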
8580 def log_request(self, req_type, url=None, headers=None, data=None):
8581 """Logs REST request details"""
8582
8583 if req_type is not None:
8584 self.logger.debug("Request type: {}".format(req_type))
8585
8586 if url is not None:
8587 self.logger.debug("Request url: {}".format(url))
8588
8589 if headers is not None:
8590 for header in headers:
8591 self.logger.debug(
8592 "Request header: {}: {}".format(header, headers[header])
8593 )
8594
8595 if data is not None:
8596 self.logger.debug("Request data: {}".format(data))
8597
8598 def log_response(self, response):
8599 """Logs REST response details"""
8600
8601 self.logger.debug("Response status code: {} ".format(response.status_code))
8602
8603 def get_task_from_response(self, content):
8604 """
8605         content - API response text (response.text)
8606 return task object
8607 """
8608 xmlroot = XmlElementTree.fromstring(content)
8609
8610 if xmlroot.tag.split("}")[1] == "Task":
8611 return xmlroot
8612 else:
8613 for ele in xmlroot:
8614 if ele.tag.split("}")[1] == "Tasks":
8615 task = ele[0]
8616 break
8617
8618 return task
8619
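    # Illustrative note: the two response shapes handled above, trimmed to the
    # relevant elements (namespaces and most attributes omitted). Either the root
    # is itself a <Task>, or the task is nested under a <Tasks> wrapper:
    #
    #     <Task status="running" .../>
    #
    #     <VApp ...>
    #         <Tasks>
    #             <Task status="running" .../>
    #         </Tasks>
    #     </VApp>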
8620 def power_on_vapp(self, vapp_id, vapp_name):
8621 """
8622 vapp_id - vApp uuid
8623         vapp_name - vApp name
8624 return - Task object
8625 """
8626 headers = {
8627 "Accept": "application/*+xml;version=" + API_VERSION,
8628 "x-vcloud-authorization": self.client._session.headers[
8629 "x-vcloud-authorization"
8630 ],
8631 }
8632
8633 poweron_href = "{}/api/vApp/vapp-{}/power/action/powerOn".format(
8634 self.url, vapp_id
8635 )
8636 response = self.perform_request(
8637 req_type="POST", url=poweron_href, headers=headers
8638 )
8639
8640 if response.status_code != 202:
8641 self.logger.error(
8642                 "REST call {} failed reason : {} "
8643 "status code : {} ".format(
8644 poweron_href, response.text, response.status_code
8645 )
8646 )
8647
8648 raise vimconn.VimConnException(
8649 "power_on_vapp() : Failed to power on " "vApp {}".format(vapp_name)
8650 )
8651 else:
8652 poweron_task = self.get_task_from_response(response.text)
8653
8654 return poweron_task
8655
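    # Illustrative usage (identifiers are placeholders): callers typically wait on
    # the returned task with the pyvcloud task monitor, mirroring what other
    # helpers in this class do.
    #
    #     poweron_task = self.power_on_vapp(vapp_id, vapp_name)
    #     result = self.client.get_task_monitor().wait_for_success(task=poweron_task)
    #     if result.get("status") == "success":
    #         self.logger.info("vApp {} powered on".format(vapp_name))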
8656 def migrate_instance(self, vm_id, compute_host=None):
8657 """
8658 Migrate a vdu
8659 param:
8660 vm_id: ID of an instance
8661 compute_host: Host to migrate the vdu to
8662 """
8663 # TODO: Add support for migration
8664 raise vimconn.VimConnNotImplemented("Should have implemented this")
8665
8666 def resize_instance(self, vm_id, flavor_id=None):
8667 """
8668 resize a vdu
8669 param:
8670 vm_id: ID of an instance
8671 flavor_id: flavor_id to resize the vdu to
8672 """
8673 # TODO: Add support for resize
8674 raise vimconn.VimConnNotImplemented("Should have implemented this")