Fix bug 2092 to update params of delete_vminstance in all VIM connectors
[osm/RO.git] RO-VIM-vmware/osm_rovim_vmware/vimconn_vmware.py
1 # -*- coding: utf-8 -*-
2
3 # #
4 # Copyright 2016-2019 VMware Inc.
5 # This file is part of ETSI OSM
6 # All Rights Reserved.
7 #
8 # Licensed under the Apache License, Version 2.0 (the "License"); you may
9 # not use this file except in compliance with the License. You may obtain
10 # a copy of the License at
11 #
12 # http://www.apache.org/licenses/LICENSE-2.0
13 #
14 # Unless required by applicable law or agreed to in writing, software
15 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
16 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
17 # License for the specific language governing permissions and limitations
18 # under the License.
19 #
20 # For those usages not covered by the Apache License, Version 2.0 please
21 # contact: osslegalrouting@vmware.com
22 # #
23
24 """
25 vimconn_vmware implements an abstract class (vimconn.VimConnector) in order to interact with VMware vCloud Director.
26 """
27
28 import atexit
29 import hashlib
30 import json
31 import logging
32 import os
33 import random
34 import re
35 import shutil
36 import socket
37 import ssl
38 import struct
39 import subprocess
40 import tempfile
41 import time
42 import traceback
43 import uuid
44 from xml.etree import ElementTree as XmlElementTree
45 from xml.sax.saxutils import escape
46
47 from lxml import etree as lxmlElementTree
48 import netaddr
49 from osm_ro_plugin import vimconn
50 from progressbar import Bar, ETA, FileTransferSpeed, Percentage, ProgressBar
51 from pyvcloud.vcd.client import BasicLoginCredentials, Client
52 from pyvcloud.vcd.org import Org
53 from pyvcloud.vcd.vapp import VApp
54 from pyvcloud.vcd.vdc import VDC
55 from pyVim.connect import Disconnect, SmartConnect
56 from pyVmomi import vim, vmodl # @UnresolvedImport
57 import requests
58 import yaml
59
60 # global variable for vcd connector type
61 STANDALONE = "standalone"
62
63 # key for flavor dicts
64 FLAVOR_RAM_KEY = "ram"
65 FLAVOR_VCPUS_KEY = "vcpus"
66 FLAVOR_DISK_KEY = "disk"
67 DEFAULT_IP_PROFILE = {"dhcp_count": 50, "dhcp_enabled": True, "ip_version": "IPv4"}
68 # global variable for wait time
69 INTERVAL_TIME = 5
70 MAX_WAIT_TIME = 1800
71
72 API_VERSION = "27.0"
73
74 # -1: "Could not be created",
75 # 0: "Unresolved",
76 # 1: "Resolved",
77 # 2: "Deployed",
78 # 3: "Suspended",
79 # 4: "Powered on",
80 # 5: "Waiting for user input",
81 # 6: "Unknown state",
82 # 7: "Unrecognized state",
83 # 8: "Powered off",
84 # 9: "Inconsistent state",
85 # 10: "Children do not all have the same status",
86 # 11: "Upload initiated, OVF descriptor pending",
87 # 12: "Upload initiated, copying contents",
88 # 13: "Upload initiated , disk contents pending",
89 # 14: "Upload has been quarantined",
90 # 15: "Upload quarantine period has expired"
91
92 # mapping vCD status to MANO
93 vcdStatusCode2manoFormat = {
94 4: "ACTIVE",
95 7: "PAUSED",
96 3: "SUSPENDED",
97 8: "INACTIVE",
98 12: "BUILD",
99 -1: "ERROR",
100 14: "DELETED",
101 }
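# Illustrative sketch (hypothetical values): converting a vApp status code
# returned by vCD into a MANO status string, with a fallback for unmapped codes.
#
#   vapp_status = 4  # "Powered on" in vCD
#   mano_status = vcdStatusCode2manoFormat.get(vapp_status, "OTHER")
#   # mano_status -> "ACTIVE"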
102
103 #
104 netStatus2manoFormat = {
105 "ACTIVE": "ACTIVE",
106 "PAUSED": "PAUSED",
107 "INACTIVE": "INACTIVE",
108 "BUILD": "BUILD",
109 "ERROR": "ERROR",
110 "DELETED": "DELETED",
111 }
112
113
114 class vimconnector(vimconn.VimConnector):
115 # dict used to store flavor in memory
116 flavorlist = {}
117
118 def __init__(
119 self,
120 uuid=None,
121 name=None,
122 tenant_id=None,
123 tenant_name=None,
124 url=None,
125 url_admin=None,
126 user=None,
127 passwd=None,
128 log_level=None,
129 config={},
130 persistent_info={},
131 ):
132 """
133 Constructor creates a VMware connector to vCloud Director.
134
135 By default the constructor doesn't validate the connection state, so the client can create the object with None arguments.
136 If the client specifies a username, password, host and VDC name, the connector initializes the remaining attributes:
137
138 a) It initializes the organization UUID.
139 b) It initializes tenant_id/VDC ID (this information is derived from the tenant name).
140
141 Args:
142 uuid - organization uuid.
143 name - organization name; it must be present in vCloud Director.
144 tenant_id - VDC uuid; it must be present in vCloud Director.
145 tenant_name - VDC name.
146 url - hostname or IP address of vCloud Director.
147 url_admin - same as above.
148 user - administrator user for the organization. The caller must make sure that
149 the username has the right privileges.
150
151 password - password for the user.
152
153 The VMware connector also requires PVDC administrative privileges and a separate account.
154 These credentials must be passed via the config argument, a dict containing the keys
155
156 dict['admin_username']
157 dict['admin_password']
158 config - provides NSX and vCenter information.
159
160 Returns:
161 Nothing.
162 """
163
164 vimconn.VimConnector.__init__(
165 self,
166 uuid,
167 name,
168 tenant_id,
169 tenant_name,
170 url,
171 url_admin,
172 user,
173 passwd,
174 log_level,
175 config,
176 )
177
178 self.logger = logging.getLogger("ro.vim.vmware")
179 self.logger.setLevel(10)
180 self.persistent_info = persistent_info
181
182 self.name = name
183 self.id = uuid
184 self.url = url
185 self.url_admin = url_admin
186 self.tenant_id = tenant_id
187 self.tenant_name = tenant_name
188 self.user = user
189 self.passwd = passwd
190 self.config = config
191 self.admin_password = None
192 self.admin_user = None
193 self.org_name = ""
194 self.nsx_manager = None
195 self.nsx_user = None
196 self.nsx_password = None
197 self.availability_zone = None
198
199 # Disable warnings from self-signed certificates.
200 requests.packages.urllib3.disable_warnings()
201
202 if tenant_name is not None:
203 orgnameandtenant = tenant_name.split(":")
204
205 if len(orgnameandtenant) == 2:
206 self.tenant_name = orgnameandtenant[1]
207 self.org_name = orgnameandtenant[0]
208 else:
209 self.tenant_name = tenant_name
210
211 if "orgname" in config:
212 self.org_name = config["orgname"]
213
214 if log_level:
215 self.logger.setLevel(getattr(logging, log_level))
216
217 try:
218 self.admin_user = config["admin_username"]
219 self.admin_password = config["admin_password"]
220 except KeyError:
221 raise vimconn.VimConnException(
222 message="Error admin username or admin password is empty."
223 )
224
225 try:
226 self.nsx_manager = config["nsx_manager"]
227 self.nsx_user = config["nsx_user"]
228 self.nsx_password = config["nsx_password"]
229 except KeyError:
230 raise vimconn.VimConnException(
231 message="Error: nsx manager or nsx user or nsx password is empty in Config"
232 )
233
234 self.vcenter_ip = config.get("vcenter_ip", None)
235 self.vcenter_port = config.get("vcenter_port", None)
236 self.vcenter_user = config.get("vcenter_user", None)
237 self.vcenter_password = config.get("vcenter_password", None)
238
239 # Set availability zone for Affinity rules
240 self.availability_zone = self.set_availability_zones()
241
242 # ############# Stub code for SRIOV #################
243 # try:
244 # self.dvs_name = config['dv_switch_name']
245 # except KeyError:
246 # raise vimconn.VimConnException(message="Error:
247 # distributed virtaul switch name is empty in Config")
248 #
249 # self.vlanID_range = config.get("vlanID_range", None)
250
251 self.org_uuid = None
252 self.client = None
253
254 if not url:
255 raise vimconn.VimConnException("url param can not be NoneType")
256
257 if not self.url_admin: # try to use normal url
258 self.url_admin = self.url
259
260 logging.debug(
261 "UUID: {} name: {} tenant_id: {} tenant name {}".format(
262 self.id, self.org_name, self.tenant_id, self.tenant_name
263 )
264 )
265 logging.debug(
266 "vcd url {} vcd username: {} vcd password: {}".format(
267 self.url, self.user, self.passwd
268 )
269 )
270 logging.debug(
271 "vcd admin username {} vcd admin passowrd {}".format(
272 self.admin_user, self.admin_password
273 )
274 )
275
276 # initialize organization
277 if self.user is not None and self.passwd is not None and self.url:
278 self.init_organization()
279
280 def __getitem__(self, index):
281 if index == "name":
282 return self.name
283
284 if index == "tenant_id":
285 return self.tenant_id
286
287 if index == "tenant_name":
288 return self.tenant_name
289 elif index == "id":
290 return self.id
291 elif index == "org_name":
292 return self.org_name
293 elif index == "org_uuid":
294 return self.org_uuid
295 elif index == "user":
296 return self.user
297 elif index == "passwd":
298 return self.passwd
299 elif index == "url":
300 return self.url
301 elif index == "url_admin":
302 return self.url_admin
303 elif index == "config":
304 return self.config
305 else:
306 raise KeyError("Invalid key '{}'".format(index))
307
308 def __setitem__(self, index, value):
309 if index == "name":
310 self.name = value
311
312 if index == "tenant_id":
313 self.tenant_id = value
314
315 if index == "tenant_name":
316 self.tenant_name = value
317 elif index == "id":
318 self.id = value
319 elif index == "org_name":
320 self.org_name = value
321 elif index == "org_uuid":
322 self.org_uuid = value
323 elif index == "user":
324 self.user = value
325 elif index == "passwd":
326 self.passwd = value
327 elif index == "url":
328 self.url = value
329 elif index == "url_admin":
330 self.url_admin = value
331 else:
332 raise KeyError("Invalid key '{}'".format(index))
333
334 def connect_as_admin(self):
335 """Method connect as pvdc admin user to vCloud director.
336 There are certain action that can be done only by provider vdc admin user.
337 Organization creation / provider network creation etc.
338
339 Returns:
340 The return client object that latter can be used to connect to vcloud director as admin for provider vdc
341 """
342 self.logger.debug("Logging into vCD {} as admin.".format(self.org_name))
343
344 try:
345 host = self.url
346 org = "System"
347 client_as_admin = Client(
348 host, verify_ssl_certs=False, api_version=API_VERSION
349 )
350 client_as_admin.set_credentials(
351 BasicLoginCredentials(self.admin_user, org, self.admin_password)
352 )
353 except Exception as e:
354 raise vimconn.VimConnException(
355 "Can't connect to vCloud director as: {} with exception {}".format(
356 self.admin_user, e
357 )
358 )
359
360 return client_as_admin
361
362 def connect(self):
363 """Method connect as normal user to vCloud director.
364
365 Returns:
366 The return client object that latter can be used to connect to vCloud director as admin for VDC
367 """
368 try:
369 self.logger.debug(
370 "Logging into vCD {} as {} to datacenter {}.".format(
371 self.org_name, self.user, self.org_name
372 )
373 )
374 host = self.url
375 client = Client(host, verify_ssl_certs=False, api_version=API_VERSION)
376 client.set_credentials(
377 BasicLoginCredentials(self.user, self.org_name, self.passwd)
378 )
379 except Exception as e:
380 raise vimconn.VimConnConnectionException(
381 "Can't connect to vCloud director org: "
382 "{} as user {} with exception: {}".format(self.org_name, self.user, e)
383 )
384
385 return client
386
387 def init_organization(self):
388 """Method initialize organization UUID and VDC parameters.
389
390 At bare minimum client must provide organization name that present in vCloud director and VDC.
391
392 The VDC - UUID ( tenant_id) will be initialized at the run time if client didn't call constructor.
393 The Org - UUID will be initialized at the run time if data center present in vCloud director.
394
395 Returns:
396 The return vca object that letter can be used to connect to vcloud direct as admin
397 """
398 client = self.connect()
399
400 if not client:
401 raise vimconn.VimConnConnectionException("Failed to connect vCD.")
402
403 self.client = client
404 try:
405 if self.org_uuid is None:
406 org_list = client.get_org_list()
407 for org in org_list.Org:
408 # we set org UUID at the init phase but we can do it only when we have valid credential.
409 if org.get("name") == self.org_name:
410 self.org_uuid = org.get("href").split("/")[-1]
411 self.logger.debug(
412 "Setting organization UUID {}".format(self.org_uuid)
413 )
414 break
415 else:
416 raise vimconn.VimConnException(
417 "Vcloud director organization {} not found".format(
418 self.org_name
419 )
420 )
421
422 # if all went well, request the org details
423 org_details_dict = self.get_org(org_uuid=self.org_uuid)
424
425 # we have two cases when initializing the VDC ID or VDC name at run time
426 # tenant_name provided but no tenant id
427 if (
428 self.tenant_id is None
429 and self.tenant_name is not None
430 and "vdcs" in org_details_dict
431 ):
432 vdcs_dict = org_details_dict["vdcs"]
433 for vdc in vdcs_dict:
434 if vdcs_dict[vdc] == self.tenant_name:
435 self.tenant_id = vdc
436 self.logger.debug(
437 "Setting vdc uuid {} for organization UUID {}".format(
438 self.tenant_id, self.org_name
439 )
440 )
441 break
442 else:
443 raise vimconn.VimConnException(
444 "Tenant name indicated but not present in vcloud director."
445 )
446
447 # case two we have tenant_id but we don't have tenant name so we find and set it.
448 if (
449 self.tenant_id is not None
450 and self.tenant_name is None
451 and "vdcs" in org_details_dict
452 ):
453 vdcs_dict = org_details_dict["vdcs"]
454 for vdc in vdcs_dict:
455 if vdc == self.tenant_id:
456 self.tenant_name = vdcs_dict[vdc]
457 self.logger.debug(
458 "Setting vdc uuid {} for organization UUID {}".format(
459 self.tenant_id, self.org_name
460 )
461 )
462 break
463 else:
464 raise vimconn.VimConnException(
465 "Tenant id indicated but not present in vcloud director"
466 )
467
468 self.logger.debug("Setting organization uuid {}".format(self.org_uuid))
469 except Exception as e:
470 self.logger.debug(
471 "Failed initialize organization UUID for org {}: {}".format(
472 self.org_name, e
473 ),
474 )
475 self.logger.debug(traceback.format_exc())
476 self.org_uuid = None
477
478 def new_tenant(self, tenant_name=None, tenant_description=None):
479 """Method adds a new tenant to VIM with this name.
480 This action requires access to create VDC action in vCloud director.
481
482 Args:
483 tenant_name is tenant_name to be created.
484 tenant_description not used for this call
485
486 Return:
487 returns the tenant identifier in UUID format.
488 If action is failed method will throw vimconn.VimConnException method
489 """
490 vdc_task = self.create_vdc(vdc_name=tenant_name)
491 if vdc_task is not None:
492 vdc_uuid, _ = vdc_task.popitem()
493 self.logger.info(
494 "Created new vdc {} and uuid: {}".format(tenant_name, vdc_uuid)
495 )
496
497 return vdc_uuid
498 else:
499 raise vimconn.VimConnException(
500 "Failed create tenant {}".format(tenant_name)
501 )
502
503 def delete_tenant(self, tenant_id=None):
504 """Delete a tenant from VIM
505 Args:
506 tenant_id is tenant_id to be deleted.
507
508 Return:
509 returns the tenant identifier in UUID format.
510 If action is failed method will throw exception
511 """
512 vca = self.connect_as_admin()
513 if not vca:
514 raise vimconn.VimConnConnectionException("Failed to connect vCD")
515
516 if tenant_id is not None:
517 if vca._session:
518 # Get OrgVDC
519 url_list = [self.url, "/api/vdc/", tenant_id]
520 orgvdc_herf = "".join(url_list)
521
522 headers = {
523 "Accept": "application/*+xml;version=" + API_VERSION,
524 "x-vcloud-authorization": vca._session.headers[
525 "x-vcloud-authorization"
526 ],
527 }
528 response = self.perform_request(
529 req_type="GET", url=orgvdc_herf, headers=headers
530 )
531
532 if response.status_code != requests.codes.ok:
533 self.logger.debug(
534 "delete_tenant():GET REST API call {} failed. "
535 "Return status code {}".format(
536 orgvdc_herf, response.status_code
537 )
538 )
539
540 raise vimconn.VimConnNotFoundException(
541 "Fail to get tenant {}".format(tenant_id)
542 )
543
544 lxmlroot_respond = lxmlElementTree.fromstring(response.content)
545 namespaces = {
546 prefix: uri
547 for prefix, uri in lxmlroot_respond.nsmap.items()
548 if prefix
549 }
550 namespaces["xmlns"] = "http://www.vmware.com/vcloud/v1.5"
551 vdc_remove_href = lxmlroot_respond.find(
552 "xmlns:Link[@rel='remove']", namespaces
553 ).attrib["href"]
554 vdc_remove_href = vdc_remove_href + "?recursive=true&force=true"
555
556 response = self.perform_request(
557 req_type="DELETE", url=vdc_remove_href, headers=headers
558 )
559
560 if response.status_code == 202:
561 time.sleep(5)
562
563 return tenant_id
564 else:
565 self.logger.debug(
566 "delete_tenant(): DELETE REST API call {} failed. "
567 "Return status code {}".format(
568 vdc_remove_href, response.status_code
569 )
570 )
571
572 raise vimconn.VimConnException(
573 "Fail to delete tenant with ID {}".format(tenant_id)
574 )
575 else:
576 self.logger.debug(
577 "delete_tenant():Incorrect tenant ID {}".format(tenant_id)
578 )
579
580 raise vimconn.VimConnNotFoundException(
581 "Fail to get tenant {}".format(tenant_id)
582 )
583
584 def get_tenant_list(self, filter_dict={}):
585 """Obtain tenants of VIM
586 filter_dict can contain the following keys:
587 name: filter by tenant name
588 id: filter by tenant uuid/id
589 <other VIM specific>
590 Returns the tenant list of dictionaries:
591 [{'name': '<name>', 'id': '<id>', ...}, ...]
592
593 """
594 org_dict = self.get_org(self.org_uuid)
595 vdcs_dict = org_dict["vdcs"]
596
597 vdclist = []
598 try:
599 for k in vdcs_dict:
600 entry = {"name": vdcs_dict[k], "id": k}
601 # if caller didn't specify dictionary we return all tenants.
602
603 if filter_dict is not None and filter_dict:
604 filtered_entry = entry.copy()
605 filtered_dict = set(entry.keys()) - set(filter_dict)
606
607 for unwanted_key in filtered_dict:
608 del entry[unwanted_key]
609
610 if filter_dict == entry:
611 vdclist.append(filtered_entry)
612 else:
613 vdclist.append(entry)
614 except Exception:
615 self.logger.debug("Error in get_tenant_list()")
616 self.logger.debug(traceback.format_exc())
617
618 raise vimconn.VimConnException("Incorrect state. {}")
619
620 return vdclist
621
622 def new_network(
623 self,
624 net_name,
625 net_type,
626 ip_profile=None,
627 shared=False,
628 provider_network_profile=None,
629 ):
630 """Adds a tenant network to VIM
631 Params:
632 'net_name': name of the network
633 'net_type': one of:
634 'bridge': overlay isolated network
635 'data': underlay E-LAN network for Passthrough and SRIOV interfaces
636 'ptp': underlay E-LINE network for Passthrough and SRIOV interfaces.
637 'ip_profile': is a dict containing the IP parameters of the network
638 'ip_version': can be "IPv4" or "IPv6" (Currently only IPv4 is implemented)
639 'subnet_address': ip_prefix_schema, that is X.X.X.X/Y
640 'gateway_address': (Optional) ip_schema, that is X.X.X.X
641 'dns_address': (Optional) comma separated list of ip_schema, e.g. X.X.X.X[,X,X,X,X]
642 'dhcp_enabled': True or False
643 'dhcp_start_address': ip_schema, first IP to grant
644 'dhcp_count': number of IPs to grant.
645 'shared': if this network can be seen/use by other tenants/organization
646 'provider_network_profile': (optional) contains {segmentation-id: vlan, provider-network: vim_network}
647 Returns a tuple with the network identifier and created_items, or raises an exception on error
648 created_items can be None or a dictionary where this method can include key-values that will be passed to
649 the method delete_network. Can be used to store created segments, created l2gw connections, etc.
650 Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
651 as not present.
652 """
653
654 self.logger.debug(
655 "new_network tenant {} net_type {} ip_profile {} shared {} provider_network_profile {}".format(
656 net_name, net_type, ip_profile, shared, provider_network_profile
657 )
658 )
659 # vlan = None
660 # if provider_network_profile:
661 # vlan = provider_network_profile.get("segmentation-id")
662
663 created_items = {}
664 isshared = "false"
665
666 if shared:
667 isshared = "true"
668
669 # ############# Stub code for SRIOV #################
670 # if net_type == "data" or net_type == "ptp":
671 # if self.config.get('dv_switch_name') == None:
672 # raise vimconn.VimConnConflictException("You must provide 'dv_switch_name' at config value")
673 # network_uuid = self.create_dvPort_group(net_name)
674 parent_network_uuid = None
675
676 if provider_network_profile is not None:
677 for k, v in provider_network_profile.items():
678 if k == "physical_network":
679 parent_network_uuid = self.get_physical_network_by_name(v)
680
681 network_uuid = self.create_network(
682 network_name=net_name,
683 net_type=net_type,
684 ip_profile=ip_profile,
685 isshared=isshared,
686 parent_network_uuid=parent_network_uuid,
687 )
688
689 if network_uuid is not None:
690 return network_uuid, created_items
691 else:
692 raise vimconn.VimConnUnexpectedResponse(
693 "Failed create a new network {}".format(net_name)
694 )
695
696 def get_vcd_network_list(self):
697 """Method available organization for a logged in tenant
698
699 Returns:
700 The return vca object that letter can be used to connect to vcloud direct as admin
701 """
702
703 self.logger.debug(
704 "get_vcd_network_list(): retrieving network list for vcd {}".format(
705 self.tenant_name
706 )
707 )
708
709 if not self.tenant_name:
710 raise vimconn.VimConnConnectionException("Tenant name is empty.")
711
712 _, vdc = self.get_vdc_details()
713 if vdc is None:
714 raise vimconn.VimConnConnectionException(
715 "Can't retrieve information for a VDC {}".format(self.tenant_name)
716 )
717
718 vdc_uuid = vdc.get("id").split(":")[3]
719 if self.client._session:
720 headers = {
721 "Accept": "application/*+xml;version=" + API_VERSION,
722 "x-vcloud-authorization": self.client._session.headers[
723 "x-vcloud-authorization"
724 ],
725 }
726 response = self.perform_request(
727 req_type="GET", url=vdc.get("href"), headers=headers
728 )
729
730 if response.status_code != 200:
731 self.logger.error("Failed to get vdc content")
732 raise vimconn.VimConnNotFoundException("Failed to get vdc content")
733 else:
734 content = XmlElementTree.fromstring(response.text)
735
736 network_list = []
737 try:
738 for item in content:
739 if item.tag.split("}")[-1] == "AvailableNetworks":
740 for net in item:
741 response = self.perform_request(
742 req_type="GET", url=net.get("href"), headers=headers
743 )
744
745 if response.status_code != 200:
746 self.logger.error("Failed to get network content")
747 raise vimconn.VimConnNotFoundException(
748 "Failed to get network content"
749 )
750 else:
751 net_details = XmlElementTree.fromstring(response.text)
752
753 filter_dict = {}
754 net_uuid = net_details.get("id").split(":")
755
756 if len(net_uuid) != 4:
757 continue
758 else:
759 net_uuid = net_uuid[3]
760 # create dict entry
761 self.logger.debug(
762 "get_vcd_network_list(): Adding network {} "
763 "to a list vcd id {} network {}".format(
764 net_uuid, vdc_uuid, net_details.get("name")
765 )
766 )
767 filter_dict["name"] = net_details.get("name")
768 filter_dict["id"] = net_uuid
769
770 if [
771 i.text
772 for i in net_details
773 if i.tag.split("}")[-1] == "IsShared"
774 ][0] == "true":
775 shared = True
776 else:
777 shared = False
778
779 filter_dict["shared"] = shared
780 filter_dict["tenant_id"] = vdc_uuid
781
782 if int(net_details.get("status")) == 1:
783 filter_dict["admin_state_up"] = True
784 else:
785 filter_dict["admin_state_up"] = False
786
787 filter_dict["status"] = "ACTIVE"
788 filter_dict["type"] = "bridge"
789 network_list.append(filter_dict)
790 self.logger.debug(
791 "get_vcd_network_list adding entry {}".format(
792 filter_dict
793 )
794 )
795 except Exception:
796 self.logger.debug("Error in get_vcd_network_list", exc_info=True)
797 pass
798
799 self.logger.debug("get_vcd_network_list returning {}".format(network_list))
800
801 return network_list
802
803 def get_network_list(self, filter_dict={}):
804 """Obtain tenant networks of VIM
805 Filter_dict can be:
806 name: network name OR/AND
807 id: network uuid OR/AND
808 shared: boolean OR/AND
809 tenant_id: tenant OR/AND
810 admin_state_up: boolean
811 status: 'ACTIVE'
812
813 [{key : value , key : value}]
814
815 Returns the network list of dictionaries:
816 [{<the fields at Filter_dict plus some VIM specific>}, ...]
817 List can be empty
818 """
819
820 self.logger.debug(
821 "get_network_list(): retrieving network list for vcd {}".format(
822 self.tenant_name
823 )
824 )
825
826 if not self.tenant_name:
827 raise vimconn.VimConnConnectionException("Tenant name is empty.")
828
829 _, vdc = self.get_vdc_details()
830 if vdc is None:
831 raise vimconn.VimConnConnectionException(
832 "Can't retrieve information for a VDC {}.".format(self.tenant_name)
833 )
834
835 try:
836 vdcid = vdc.get("id").split(":")[3]
837
838 if self.client._session:
839 headers = {
840 "Accept": "application/*+xml;version=" + API_VERSION,
841 "x-vcloud-authorization": self.client._session.headers[
842 "x-vcloud-authorization"
843 ],
844 }
845 response = self.perform_request(
846 req_type="GET", url=vdc.get("href"), headers=headers
847 )
848
849 if response.status_code != 200:
850 self.logger.error("Failed to get vdc content")
851 raise vimconn.VimConnNotFoundException("Failed to get vdc content")
852 else:
853 content = XmlElementTree.fromstring(response.text)
854
855 network_list = []
856 for item in content:
857 if item.tag.split("}")[-1] == "AvailableNetworks":
858 for net in item:
859 response = self.perform_request(
860 req_type="GET", url=net.get("href"), headers=headers
861 )
862
863 if response.status_code != 200:
864 self.logger.error("Failed to get network content")
865 raise vimconn.VimConnNotFoundException(
866 "Failed to get network content"
867 )
868 else:
869 net_details = XmlElementTree.fromstring(response.text)
870
871 filter_entry = {}
872 net_uuid = net_details.get("id").split(":")
873
874 if len(net_uuid) != 4:
875 continue
876 else:
877 net_uuid = net_uuid[3]
878 # create dict entry
879 self.logger.debug(
880 "get_network_list(): Adding net {}"
881 " to a list vcd id {} network {}".format(
882 net_uuid, vdcid, net_details.get("name")
883 )
884 )
885 filter_entry["name"] = net_details.get("name")
886 filter_entry["id"] = net_uuid
887
888 if [
889 i.text
890 for i in net_details
891 if i.tag.split("}")[-1] == "IsShared"
892 ][0] == "true":
893 shared = True
894 else:
895 shared = False
896
897 filter_entry["shared"] = shared
898 filter_entry["tenant_id"] = vdcid
899
900 if int(net_details.get("status")) == 1:
901 filter_entry["admin_state_up"] = True
902 else:
903 filter_entry["admin_state_up"] = False
904
905 filter_entry["status"] = "ACTIVE"
906 filter_entry["type"] = "bridge"
907 filtered_entry = filter_entry.copy()
908
909 if filter_dict is not None and filter_dict:
910 # we remove all the key : value we don't care and match only
911 # respected field
912 filtered_dict = set(filter_entry.keys()) - set(
913 filter_dict
914 )
915
916 for unwanted_key in filtered_dict:
917 del filter_entry[unwanted_key]
918
919 if filter_dict == filter_entry:
920 network_list.append(filtered_entry)
921 else:
922 network_list.append(filtered_entry)
923 except Exception as e:
924 self.logger.debug("Error in get_network_list", exc_info=True)
925
926 if isinstance(e, vimconn.VimConnException):
927 raise
928 else:
929 raise vimconn.VimConnNotFoundException(
930 "Failed : Networks list not found {} ".format(e)
931 )
932
933 self.logger.debug("Returning {}".format(network_list))
934
935 return network_list
936
937 def get_network(self, net_id):
938 """Method obtains network details of net_id VIM network
939 Return a dict with the fields at filter_dict (see get_network_list) plus some VIM specific fields"""
940 try:
941 _, vdc = self.get_vdc_details()
942 vdc_id = vdc.get("id").split(":")[3]
943
944 if self.client._session:
945 headers = {
946 "Accept": "application/*+xml;version=" + API_VERSION,
947 "x-vcloud-authorization": self.client._session.headers[
948 "x-vcloud-authorization"
949 ],
950 }
951 response = self.perform_request(
952 req_type="GET", url=vdc.get("href"), headers=headers
953 )
954
955 if response.status_code != 200:
956 self.logger.error("Failed to get vdc content")
957 raise vimconn.VimConnNotFoundException("Failed to get vdc content")
958 else:
959 content = XmlElementTree.fromstring(response.text)
960
961 filter_dict = {}
962
963 for item in content:
964 if item.tag.split("}")[-1] == "AvailableNetworks":
965 for net in item:
966 response = self.perform_request(
967 req_type="GET", url=net.get("href"), headers=headers
968 )
969
970 if response.status_code != 200:
971 self.logger.error("Failed to get network content")
972 raise vimconn.VimConnNotFoundException(
973 "Failed to get network content"
974 )
975 else:
976 net_details = XmlElementTree.fromstring(response.text)
977
978 vdc_network_id = net_details.get("id").split(":")
979 if len(vdc_network_id) == 4 and vdc_network_id[3] == net_id:
980 filter_dict["name"] = net_details.get("name")
981 filter_dict["id"] = vdc_network_id[3]
982
983 if [
984 i.text
985 for i in net_details
986 if i.tag.split("}")[-1] == "IsShared"
987 ][0] == "true":
988 shared = True
989 else:
990 shared = False
991
992 filter_dict["shared"] = shared
993 filter_dict["tenant_id"] = vdc_id
994
995 if int(net_details.get("status")) == 1:
996 filter_dict["admin_state_up"] = True
997 else:
998 filter_dict["admin_state_up"] = False
999
1000 filter_dict["status"] = "ACTIVE"
1001 filter_dict["type"] = "bridge"
1002 self.logger.debug("Returning {}".format(filter_dict))
1003
1004 return filter_dict
1005 else:
1006 raise vimconn.VimConnNotFoundException(
1007 "Network {} not found".format(net_id)
1008 )
1009 except Exception as e:
1010 self.logger.debug("Error in get_network")
1011 self.logger.debug(traceback.format_exc())
1012
1013 if isinstance(e, vimconn.VimConnException):
1014 raise
1015 else:
1016 raise vimconn.VimConnNotFoundException(
1017 "Failed : Network not found {} ".format(e)
1018 )
1019
1020 return filter_dict
1021
1022 def delete_network(self, net_id, created_items=None):
1023 """
1024 Removes a tenant network from VIM and its associated elements
1025 :param net_id: VIM identifier of the network, provided by method new_network
1026 :param created_items: dictionary with extra items to be deleted. provided by method new_network
1027 Returns the network identifier or raises an exception upon error or when network is not found
1028 """
1029
1030 # ############# Stub code for SRIOV #################
1031 # dvport_group = self.get_dvport_group(net_id)
1032 # if dvport_group:
1033 # #delete portgroup
1034 # status = self.destroy_dvport_group(net_id)
1035 # if status:
1036 # # Remove vlanID from persistent info
1037 # if net_id in self.persistent_info["used_vlanIDs"]:
1038 # del self.persistent_info["used_vlanIDs"][net_id]
1039 #
1040 # return net_id
1041
1042 vcd_network = self.get_vcd_network(network_uuid=net_id)
1043 if vcd_network is not None and vcd_network:
1044 if self.delete_network_action(network_uuid=net_id):
1045 return net_id
1046 else:
1047 raise vimconn.VimConnNotFoundException(
1048 "Network {} not found".format(net_id)
1049 )
1050
1051 def refresh_nets_status(self, net_list):
1052 """Get the status of the networks
1053 Params: the list of network identifiers
1054 Returns a dictionary with:
1055 net_id: #VIM id of this network
1056 status: #Mandatory. Text with one of:
1057 # DELETED (not found at vim)
1058 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
1059 # OTHER (Vim reported other status not understood)
1060 # ERROR (VIM indicates an ERROR status)
1061 # ACTIVE, INACTIVE, DOWN (admin down),
1062 # BUILD (on building process)
1063 #
1064 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
1065 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
1066
1067 """
1068 dict_entry = {}
1069 try:
1070 for net in net_list:
1071 errormsg = ""
1072 vcd_network = self.get_vcd_network(network_uuid=net)
1073 if vcd_network is not None and vcd_network:
1074 if vcd_network["status"] == "1":
1075 status = "ACTIVE"
1076 else:
1077 status = "DOWN"
1078 else:
1079 status = "DELETED"
1080 errormsg = "Network not found."
1081
1082 dict_entry[net] = {
1083 "status": status,
1084 "error_msg": errormsg,
1085 "vim_info": yaml.safe_dump(vcd_network),
1086 }
1087 except Exception:
1088 self.logger.debug("Error in refresh_nets_status")
1089 self.logger.debug(traceback.format_exc())
1090
1091 return dict_entry
1092
1093 def get_flavor(self, flavor_id):
1094 """Obtain flavor details from the VIM
1095 Returns the flavor dict details {'id':<>, 'name':<>, other vim specific } #TODO to concrete
1096 """
1097 if flavor_id not in vimconnector.flavorlist:
1098 raise vimconn.VimConnNotFoundException("Flavor not found.")
1099
1100 return vimconnector.flavorlist[flavor_id]
1101
1102 def new_flavor(self, flavor_data):
1103 """Adds a tenant flavor to VIM
1104 flavor_data contains a dictionary with information, keys:
1105 name: flavor name
1106 ram: memory (cloud type) in MBytes
1107 vcpus: cpus (cloud type)
1108 extended: EPA parameters
1109 - numas: #items requested in same NUMA
1110 memory: number of 1G huge pages memory
1111 paired-threads|cores|threads: number of paired hyperthreads, complete cores OR individual
1112 threads
1113 interfaces: # passthrough(PT) or SRIOV interfaces attached to this numa
1114 - name: interface name
1115 dedicated: yes|no|yes:sriov; for PT, SRIOV or only one SRIOV for the physical NIC
1116 bandwidth: X Gbps; requested guarantee bandwidth
1117 vpci: requested virtual PCI address
1118 disk: disk size
1119 is_public:
1120 #TODO to concrete
1121 Returns the flavor identifier"""
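# Illustrative sketch (hypothetical values): a flavor with EPA/NUMA extensions.
# With the numa section below, ram and vcpus are overwritten to 4096 MB
# (memory * 1024) and 4 vCPUs (paired-threads * 2) by the code that follows.
#
#   flavor_data = {
#       "name": "small-epa",
#       "ram": 2048,
#       "vcpus": 2,
#       "disk": 10,
#       "extended": {"numas": [{"memory": 4, "paired-threads": 2}]},
#   }
#   flavor_id = vim.new_flavor(flavor_data)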
1122
1123 # generate a new uuid put to internal dict and return it.
1124 self.logger.debug("Creating new flavor - flavor_data: {}".format(flavor_data))
1125 new_flavor = flavor_data
1126 ram = flavor_data.get(FLAVOR_RAM_KEY, 1024)
1127 cpu = flavor_data.get(FLAVOR_VCPUS_KEY, 1)
1128 disk = flavor_data.get(FLAVOR_DISK_KEY, 0)
1129
1130 if not isinstance(ram, int):
1131 raise vimconn.VimConnException("Non-integer value for ram")
1132 elif not isinstance(cpu, int):
1133 raise vimconn.VimConnException("Non-integer value for cpu")
1134 elif not isinstance(disk, int):
1135 raise vimconn.VimConnException("Non-integer value for disk")
1136
1137 extended_flv = flavor_data.get("extended")
1138 if extended_flv:
1139 numas = extended_flv.get("numas")
1140 if numas:
1141 for numa in numas:
1142 # overwrite ram and vcpus
1143 if "memory" in numa:
1144 ram = numa["memory"] * 1024
1145
1146 if "paired-threads" in numa:
1147 cpu = numa["paired-threads"] * 2
1148 elif "cores" in numa:
1149 cpu = numa["cores"]
1150 elif "threads" in numa:
1151 cpu = numa["threads"]
1152
1153 new_flavor[FLAVOR_RAM_KEY] = ram
1154 new_flavor[FLAVOR_VCPUS_KEY] = cpu
1155 new_flavor[FLAVOR_DISK_KEY] = disk
1156 # generate a new uuid put to internal dict and return it.
1157 flavor_id = uuid.uuid4()
1158 vimconnector.flavorlist[str(flavor_id)] = new_flavor
1159 self.logger.debug("Created flavor - {} : {}".format(flavor_id, new_flavor))
1160
1161 return str(flavor_id)
1162
1163 def delete_flavor(self, flavor_id):
1164 """Deletes a tenant flavor from VIM identify by its id
1165
1166 Returns the used id or raise an exception
1167 """
1168 if flavor_id not in vimconnector.flavorlist:
1169 raise vimconn.VimConnNotFoundException("Flavor not found.")
1170
1171 vimconnector.flavorlist.pop(flavor_id, None)
1172
1173 return flavor_id
1174
1175 def new_image(self, image_dict):
1176 """
1177 Adds a tenant image to VIM
1178 Returns:
1179 the image-id (catalog UUID) if the image is created;
1180 raises an exception if there is an error
1181 """
1182 return self.get_image_id_from_path(image_dict["location"])
1183
1184 def delete_image(self, image_id):
1185 """
1186 Deletes a tenant image from VIM
1187 Args:
1188 image_id is ID of Image to be deleted
1189 Return:
1190 returns the image identifier in UUID format or raises an exception on error
1191 """
1192 conn = self.connect_as_admin()
1193
1194 if not conn:
1195 raise vimconn.VimConnConnectionException("Failed to connect vCD")
1196
1197 # Get Catalog details
1198 url_list = [self.url, "/api/catalog/", image_id]
1199 catalog_herf = "".join(url_list)
1200
1201 headers = {
1202 "Accept": "application/*+xml;version=" + API_VERSION,
1203 "x-vcloud-authorization": conn._session.headers["x-vcloud-authorization"],
1204 }
1205
1206 response = self.perform_request(
1207 req_type="GET", url=catalog_herf, headers=headers
1208 )
1209
1210 if response.status_code != requests.codes.ok:
1211 self.logger.debug(
1212 "delete_image():GET REST API call {} failed. "
1213 "Return status code {}".format(catalog_herf, response.status_code)
1214 )
1215
1216 raise vimconn.VimConnNotFoundException(
1217 "Fail to get image {}".format(image_id)
1218 )
1219
1220 lxmlroot_respond = lxmlElementTree.fromstring(response.content)
1221 namespaces = {
1222 prefix: uri for prefix, uri in lxmlroot_respond.nsmap.items() if prefix
1223 }
1224 namespaces["xmlns"] = "http://www.vmware.com/vcloud/v1.5"
1225
1226 catalogItems_section = lxmlroot_respond.find("xmlns:CatalogItems", namespaces)
1227 catalogItems = catalogItems_section.iterfind("xmlns:CatalogItem", namespaces)
1228
1229 for catalogItem in catalogItems:
1230 catalogItem_href = catalogItem.attrib["href"]
1231
1232 response = self.perform_request(
1233 req_type="GET", url=catalogItem_href, headers=headers
1234 )
1235
1236 if response.status_code != requests.codes.ok:
1237 self.logger.debug(
1238 "delete_image():GET REST API call {} failed. "
1239 "Return status code {}".format(catalog_herf, response.status_code)
1240 )
1241 raise vimconn.VimConnNotFoundException(
1242 "Fail to get catalogItem {} for catalog {}".format(
1243 catalogItem, image_id
1244 )
1245 )
1246
1247 lxmlroot_respond = lxmlElementTree.fromstring(response.content)
1248 namespaces = {
1249 prefix: uri for prefix, uri in lxmlroot_respond.nsmap.items() if prefix
1250 }
1251 namespaces["xmlns"] = "http://www.vmware.com/vcloud/v1.5"
1252 catalogitem_remove_href = lxmlroot_respond.find(
1253 "xmlns:Link[@rel='remove']", namespaces
1254 ).attrib["href"]
1255
1256 # Remove catalogItem
1257 response = self.perform_request(
1258 req_type="DELETE", url=catalogitem_remove_href, headers=headers
1259 )
1260
1261 if response.status_code == requests.codes.no_content:
1262 self.logger.debug("Deleted Catalog item {}".format(catalogItem))
1263 else:
1264 raise vimconn.VimConnException(
1265 "Fail to delete Catalog Item {}".format(catalogItem)
1266 )
1267
1268 # Remove catalog
1269 url_list = [self.url, "/api/admin/catalog/", image_id]
1270 catalog_remove_herf = "".join(url_list)
1271 response = self.perform_request(
1272 req_type="DELETE", url=catalog_remove_herf, headers=headers
1273 )
1274
1275 if response.status_code == requests.codes.no_content:
1276 self.logger.debug("Deleted Catalog {}".format(image_id))
1277
1278 return image_id
1279 else:
1280 raise vimconn.VimConnException("Fail to delete Catalog {}".format(image_id))
1281
1282 def catalog_exists(self, catalog_name, catalogs):
1283 """
1284
1285 :param catalog_name:
1286 :param catalogs:
1287 :return:
1288 """
1289 for catalog in catalogs:
1290 if catalog["name"] == catalog_name:
1291 return catalog["id"]
1292
1293 def create_vimcatalog(self, vca=None, catalog_name=None):
1294 """Create new catalog entry in vCloud director.
1295
1296 Args
1297 vca: vCloud director.
1298 catalog_name catalog that client wish to create. Note no validation done for a name.
1299 Client must make sure that provide valid string representation.
1300
1301 Returns catalog id if catalog created else None.
1302
1303 """
1304 try:
1305 lxml_catalog_element = vca.create_catalog(catalog_name, catalog_name)
1306
1307 if lxml_catalog_element:
1308 id_attr_value = lxml_catalog_element.get("id")
1309 return id_attr_value.split(":")[-1]
1310
1311 catalogs = vca.list_catalogs()
1312 except Exception as ex:
1313 self.logger.error(
1314 'create_vimcatalog(): Creation of catalog "{}" failed with error: {}'.format(
1315 catalog_name, ex
1316 )
1317 )
1318 raise
1319 return self.catalog_exists(catalog_name, catalogs)
1320
1321 # noinspection PyIncorrectDocstring
1322 def upload_ovf(
1323 self,
1324 vca=None,
1325 catalog_name=None,
1326 image_name=None,
1327 media_file_name=None,
1328 description="",
1329 progress=False,
1330 chunk_bytes=128 * 1024,
1331 ):
1332 """
1333 Uploads an OVF file to a vCloud catalog
1334
1335 :param chunk_bytes:
1336 :param progress:
1337 :param description:
1338 :param image_name:
1339 :param vca:
1340 :param catalog_name: (str): The name of the catalog to upload the media.
1341 :param media_file_name: (str): The name of the local media file to upload.
1342 :return: (bool) True if the media file was successfully uploaded, false otherwise.
1343 """
1344 os.path.isfile(media_file_name)
1345 statinfo = os.stat(media_file_name)
1346
1347 # find a catalog entry where we upload OVF.
1348 # create vApp Template and check the status; if vCD is able to read the OVF it will respond with the appropriate
1349 # status change.
1350 # if vCD can parse the OVF we upload the VMDK file
1351 try:
1352 for catalog in vca.list_catalogs():
1353 if catalog_name != catalog["name"]:
1354 continue
1355 catalog_href = "{}/api/catalog/{}/action/upload".format(
1356 self.url, catalog["id"]
1357 )
1358 data = """
1359 <UploadVAppTemplateParams name="{}"
1360 xmlns="http://www.vmware.com/vcloud/v1.5"
1361 xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1">
1362 <Description>{} vApp Template</Description>
1363 </UploadVAppTemplateParams>
1364 """.format(
1365 catalog_name, description
1366 )
1367
1368 if self.client:
1369 headers = {
1370 "Accept": "application/*+xml;version=" + API_VERSION,
1371 "x-vcloud-authorization": self.client._session.headers[
1372 "x-vcloud-authorization"
1373 ],
1374 }
1375 headers[
1376 "Content-Type"
1377 ] = "application/vnd.vmware.vcloud.uploadVAppTemplateParams+xml"
1378
1379 response = self.perform_request(
1380 req_type="POST", url=catalog_href, headers=headers, data=data
1381 )
1382
1383 if response.status_code == requests.codes.created:
1384 catalogItem = XmlElementTree.fromstring(response.text)
1385 entity = [
1386 child
1387 for child in catalogItem
1388 if child.get("type")
1389 == "application/vnd.vmware.vcloud.vAppTemplate+xml"
1390 ][0]
1391 href = entity.get("href")
1392 template = href
1393
1394 response = self.perform_request(
1395 req_type="GET", url=href, headers=headers
1396 )
1397
1398 if response.status_code == requests.codes.ok:
1399 headers["Content-Type"] = "Content-Type text/xml"
1400 result = re.search(
1401 'rel="upload:default"\shref="(.*?\/descriptor.ovf)"',
1402 response.text,
1403 )
1404
1405 if result:
1406 transfer_href = result.group(1)
1407
1408 response = self.perform_request(
1409 req_type="PUT",
1410 url=transfer_href,
1411 headers=headers,
1412 data=open(media_file_name, "rb"),
1413 )
1414
1415 if response.status_code != requests.codes.ok:
1416 self.logger.debug(
1417 "Failed create vApp template for catalog name {} and image {}".format(
1418 catalog_name, media_file_name
1419 )
1420 )
1421 return False
1422
1423 # TODO fix this with an async block
1424 time.sleep(5)
1425
1426 self.logger.debug(
1427 "vApp template for catalog name {} and image {}".format(
1428 catalog_name, media_file_name
1429 )
1430 )
1431
1432 # uploading VMDK file
1433 # check status of OVF upload and upload remaining files.
1434 response = self.perform_request(
1435 req_type="GET", url=template, headers=headers
1436 )
1437
1438 if response.status_code == requests.codes.ok:
1439 result = re.search(
1440 'rel="upload:default"\s*href="(.*?vmdk)"', response.text
1441 )
1442
1443 if result:
1444 link_href = result.group(1)
1445
1446 # we skip ovf since it already uploaded.
1447 if "ovf" in link_href:
1448 continue
1449
1450 # The OVF file and VMDK must be in the same directory
1451 head, _ = os.path.split(media_file_name)
1452 file_vmdk = head + "/" + link_href.split("/")[-1]
1453
1454 if not os.path.isfile(file_vmdk):
1455 return False
1456
1457 statinfo = os.stat(file_vmdk)
1458 if statinfo.st_size == 0:
1459 return False
1460
1461 hrefvmdk = link_href
1462
1463 if progress:
1464 widgets = [
1465 "Uploading file: ",
1466 Percentage(),
1467 " ",
1468 Bar(),
1469 " ",
1470 ETA(),
1471 " ",
1472 FileTransferSpeed(),
1473 ]
1474 progress_bar = ProgressBar(
1475 widgets=widgets, maxval=statinfo.st_size
1476 ).start()
1477
1478 bytes_transferred = 0
1479 f = open(file_vmdk, "rb")
1480
1481 while bytes_transferred < statinfo.st_size:
1482 my_bytes = f.read(chunk_bytes)
1483 if len(my_bytes) <= chunk_bytes:
1484 headers["Content-Range"] = "bytes {}-{}/{}".format(
1485 bytes_transferred,
1486 len(my_bytes) - 1,
1487 statinfo.st_size,
1488 )
1489 headers["Content-Length"] = str(len(my_bytes))
1490 response = requests.put(
1491 url=hrefvmdk,
1492 headers=headers,
1493 data=my_bytes,
1494 verify=False,
1495 )
1496
1497 if response.status_code == requests.codes.ok:
1498 bytes_transferred += len(my_bytes)
1499 if progress:
1500 progress_bar.update(bytes_transferred)
1501 else:
1502 self.logger.debug(
1503 "file upload failed with error: [{}] {}".format(
1504 response.status_code, response.text
1505 )
1506 )
1507
1508 f.close()
1509
1510 return False
1511
1512 f.close()
1513 if progress:
1514 progress_bar.finish()
1515 time.sleep(10)
1516
1517 return True
1518 else:
1519 self.logger.debug(
1520 "Failed retrieve vApp template for catalog name {} for OVF {}".format(
1521 catalog_name, media_file_name
1522 )
1523 )
1524 return False
1525 except Exception as exp:
1526 self.logger.debug(
1527 "Failed while uploading OVF to catalog {} for OVF file {} with Exception {}".format(
1528 catalog_name, media_file_name, exp
1529 )
1530 )
1531
1532 raise vimconn.VimConnException(
1533 "Failed while uploading OVF to catalog {} for OVF file {} with Exception {}".format(
1534 catalog_name, media_file_name, exp
1535 )
1536 )
1537
1538 self.logger.debug(
1539 "Failed retrieve catalog name {} for OVF file {}".format(
1540 catalog_name, media_file_name
1541 )
1542 )
1543
1544 return False
1545
1546 def upload_vimimage(
1547 self,
1548 vca=None,
1549 catalog_name=None,
1550 media_name=None,
1551 medial_file_name=None,
1552 progress=False,
1553 ):
1554 """Upload media file"""
1555 # TODO add named parameters for readability
1556 return self.upload_ovf(
1557 vca=vca,
1558 catalog_name=catalog_name,
1559 image_name=media_name.split(".")[0],
1560 media_file_name=medial_file_name,
1561 description="medial_file_name",
1562 progress=progress,
1563 )
1564
1565 def validate_uuid4(self, uuid_string=None):
1566 """Method validate correct format of UUID.
1567
1568 Return: true if string represent valid uuid
1569 """
1570 try:
1571 uuid.UUID(uuid_string, version=4)
1572 except ValueError:
1573 return False
1574
1575 return True
1576
1577 def get_catalogid(self, catalog_name=None, catalogs=None):
1578 """Method check catalog and return catalog ID in UUID format.
1579
1580 Args
1581 catalog_name: catalog name as string
1582 catalogs: list of catalogs.
1583
1584 Return: catalogs uuid
1585 """
1586 for catalog in catalogs:
1587 if catalog["name"] == catalog_name:
1588 catalog_id = catalog["id"]
1589 return catalog_id
1590
1591 return None
1592
1593 def get_catalogbyid(self, catalog_uuid=None, catalogs=None):
1594 """Method check catalog and return catalog name lookup done by catalog UUID.
1595
1596 Args
1597 catalog_name: catalog name as string
1598 catalogs: list of catalogs.
1599
1600 Return: catalogs name or None
1601 """
1602 if not self.validate_uuid4(uuid_string=catalog_uuid):
1603 return None
1604
1605 for catalog in catalogs:
1606 catalog_id = catalog.get("id")
1607
1608 if catalog_id == catalog_uuid:
1609 return catalog.get("name")
1610
1611 return None
1612
1613 def get_catalog_obj(self, catalog_uuid=None, catalogs=None):
1614 """Method check catalog and return catalog name lookup done by catalog UUID.
1615
1616 Args
1617 catalog_name: catalog name as string
1618 catalogs: list of catalogs.
1619
1620 Return: catalogs name or None
1621 """
1622 if not self.validate_uuid4(uuid_string=catalog_uuid):
1623 return None
1624
1625 for catalog in catalogs:
1626 catalog_id = catalog.get("id")
1627
1628 if catalog_id == catalog_uuid:
1629 return catalog
1630
1631 return None
1632
1633 def get_image_id_from_path(self, path=None, progress=False):
1634 """Method upload OVF image to vCloud director.
1635
1636 Each OVF image represented as single catalog entry in vcloud director.
1637 The method check for existing catalog entry. The check done by file name without file extension.
1638
1639 if given catalog name already present method will respond with existing catalog uuid otherwise
1640 it will create new catalog entry and upload OVF file to newly created catalog.
1641
1642 If method can't create catalog entry or upload a file it will throw exception.
1643
1644 Method accept boolean flag progress that will output progress bar. It useful method
1645 for standalone upload use case. In case to test large file upload.
1646
1647 Args
1648 path: - valid path to OVF file.
1649 progress - boolean progress bar show progress bar.
1650
1651 Return: if image uploaded correct method will provide image catalog UUID.
1652 """
1653 if not path:
1654 raise vimconn.VimConnException("Image path can't be None.")
1655
1656 if not os.path.isfile(path):
1657 raise vimconn.VimConnException("Can't read file. File not found.")
1658
1659 if not os.access(path, os.R_OK):
1660 raise vimconn.VimConnException(
1661 "Can't read file. Check file permission to read."
1662 )
1663
1664 self.logger.debug("get_image_id_from_path() client requesting {} ".format(path))
1665
1666 _, filename = os.path.split(path)
1667 _, file_extension = os.path.splitext(path)
1668 if file_extension != ".ovf":
1669 self.logger.debug(
1670 "Wrong file extension {} connector support only OVF container.".format(
1671 file_extension
1672 )
1673 )
1674
1675 raise vimconn.VimConnException(
1676 "Wrong container. vCloud director supports only OVF."
1677 )
1678
1679 catalog_name = os.path.splitext(filename)[0]
1680 catalog_md5_name = hashlib.md5(path.encode("utf-8")).hexdigest()
1681 self.logger.debug(
1682 "File name {} Catalog Name {} file path {} "
1683 "vdc catalog name {}".format(filename, catalog_name, path, catalog_md5_name)
1684 )
1685
1686 try:
1687 org, _ = self.get_vdc_details()
1688 catalogs = org.list_catalogs()
1689 except Exception as exp:
1690 self.logger.debug("Failed get catalogs() with Exception {} ".format(exp))
1691
1692 raise vimconn.VimConnException(
1693 "Failed get catalogs() with Exception {} ".format(exp)
1694 )
1695
1696 if len(catalogs) == 0:
1697 self.logger.info(
1698 "Creating a new catalog entry {} in vcloud director".format(
1699 catalog_name
1700 )
1701 )
1702
1703 if self.create_vimcatalog(org, catalog_md5_name) is None:
1704 raise vimconn.VimConnException(
1705 "Failed create new catalog {} ".format(catalog_md5_name)
1706 )
1707
1708 result = self.upload_vimimage(
1709 vca=org,
1710 catalog_name=catalog_md5_name,
1711 media_name=filename,
1712 medial_file_name=path,
1713 progress=progress,
1714 )
1715
1716 if not result:
1717 raise vimconn.VimConnException(
1718 "Failed create vApp template for catalog {} ".format(catalog_name)
1719 )
1720
1721 return self.get_catalogid(catalog_name, catalogs)
1722 else:
1723 for catalog in catalogs:
1724 # search for existing catalog if we find same name we return ID
1725 # TODO optimize this
1726 if catalog["name"] == catalog_md5_name:
1727 self.logger.debug(
1728 "Found existing catalog entry for {} "
1729 "catalog id {}".format(
1730 catalog_name, self.get_catalogid(catalog_md5_name, catalogs)
1731 )
1732 )
1733
1734 return self.get_catalogid(catalog_md5_name, catalogs)
1735
1736 # if we didn't find existing catalog we create a new one and upload image.
1737 self.logger.debug(
1738 "Creating new catalog entry {} - {}".format(catalog_name, catalog_md5_name)
1739 )
1740 if self.create_vimcatalog(org, catalog_md5_name) is None:
1741 raise vimconn.VimConnException(
1742 "Failed create new catalog {} ".format(catalog_md5_name)
1743 )
1744
1745 result = self.upload_vimimage(
1746 vca=org,
1747 catalog_name=catalog_md5_name,
1748 media_name=filename,
1749 medial_file_name=path,
1750 progress=progress,
1751 )
1752 if not result:
1753 raise vimconn.VimConnException(
1754 "Failed create vApp template for catalog {} ".format(catalog_md5_name)
1755 )
1756
1757 return self.get_catalogid(catalog_md5_name, org.list_catalogs())
1758
1759 def get_image_list(self, filter_dict={}):
1760 """Obtain tenant images from VIM
1761 Filter_dict can be:
1762 name: image name
1763 id: image uuid
1764 checksum: image checksum
1765 location: image path
1766 Returns the image list of dictionaries:
1767 [{<the fields at Filter_dict plus some VIM specific>}, ...]
1768 List can be empty
1769 """
1770 try:
1771 org, _ = self.get_vdc_details()
1772 image_list = []
1773 catalogs = org.list_catalogs()
1774
1775 if len(catalogs) == 0:
1776 return image_list
1777 else:
1778 for catalog in catalogs:
1779 catalog_uuid = catalog.get("id")
1780 name = catalog.get("name")
1781 filtered_dict = {}
1782
1783 if filter_dict.get("name") and filter_dict["name"] != name:
1784 continue
1785
1786 if filter_dict.get("id") and filter_dict["id"] != catalog_uuid:
1787 continue
1788
1789 filtered_dict["name"] = name
1790 filtered_dict["id"] = catalog_uuid
1791 image_list.append(filtered_dict)
1792
1793 self.logger.debug(
1794 "List of already created catalog items: {}".format(image_list)
1795 )
1796
1797 return image_list
1798 except Exception as exp:
1799 raise vimconn.VimConnException(
1800 "Exception occured while retriving catalog items {}".format(exp)
1801 )
1802
1803 def get_vappid(self, vdc=None, vapp_name=None):
1804 """Method takes vdc object and vApp name and returns vapp uuid or None
1805
1806 Args:
1807 vdc: The VDC object.
1808 vapp_name: the vApp name identifier
1809
1810 Returns:
1811 The vApp UUID if found, otherwise None
1812 """
1813 if vdc is None or vapp_name is None:
1814 return None
1815
1816 # UUID has following format https://host/api/vApp/vapp-30da58a3-e7c7-4d09-8f68-d4c8201169cf
1817 try:
1818 refs = [
1819 ref
1820 for ref in vdc.ResourceEntities.ResourceEntity
1821 if ref.name == vapp_name
1822 and ref.type_ == "application/vnd.vmware.vcloud.vApp+xml"
1823 ]
1824
1825 if len(refs) == 1:
1826 return refs[0].href.split("vapp")[1][1:]
1827 except Exception as e:
1828 self.logger.exception(e)
1829 return False
1830
1831 return None
1832
1833 def check_vapp(self, vdc=None, vapp_uuid=None):
1834 """Method Method returns True or False if vapp deployed in vCloud director
1835
1836 Args:
1837 vca: Connector to VCA
1838 vdc: The VDC object.
1839 vappid: vappid is application identifier
1840
1841 Returns:
1842 Returns True if the vApp is deployed
1843 :param vdc:
1844 :param vapp_uuid:
1845 """
1846 try:
1847 refs = [
1848 ref
1849 for ref in vdc.ResourceEntities.ResourceEntity
1850 if ref.type_ == "application/vnd.vmware.vcloud.vApp+xml"
1851 ]
1852
1853 for ref in refs:
1854 vappid = ref.href.split("vapp")[1][1:]
1855 # find vapp with respected vapp uuid
1856
1857 if vappid == vapp_uuid:
1858 return True
1859 except Exception as e:
1860 self.logger.exception(e)
1861
1862 return False
1863
1864 return False
1865
1866 def get_namebyvappid(self, vapp_uuid=None):
1867 """Method returns vApp name from vCD and lookup done by vapp_id.
1868
1869 Args:
1870 vapp_uuid: vappid is application identifier
1871
1872 Returns:
1873 The return vApp name otherwise None
1874 """
1875 try:
1876 if self.client and vapp_uuid:
1877 vapp_call = "{}/api/vApp/vapp-{}".format(self.url, vapp_uuid)
1878 headers = {
1879 "Accept": "application/*+xml;version=" + API_VERSION,
1880 "x-vcloud-authorization": self.client._session.headers[
1881 "x-vcloud-authorization"
1882 ],
1883 }
1884
1885 response = self.perform_request(
1886 req_type="GET", url=vapp_call, headers=headers
1887 )
1888
1889 # Retry login if session expired & retry sending request
1890 if response.status_code == 403:
1891 response = self.retry_rest("GET", vapp_call)
1892
1893 tree = XmlElementTree.fromstring(response.text)
1894
1895 return tree.attrib["name"] if "name" in tree.attrib else None
1896 except Exception as e:
1897 self.logger.exception(e)
1898
1899 return None
1900
1901 return None
1902
1903 def new_vminstance(
1904 self,
1905 name=None,
1906 description="",
1907 start=False,
1908 image_id=None,
1909 flavor_id=None,
1910 affinity_group_list=[],
1911 net_list=[],
1912 cloud_config=None,
1913 disk_list=None,
1914 availability_zone_index=None,
1915 availability_zone_list=None,
1916 ):
1917 """Adds a VM instance to VIM
1918 Params:
1919 'start': (boolean) indicates if the VM must start or be created in pause mode.
1920 'image_id','flavor_id': image and flavor VIM id to use for the VM
1921 'net_list': list of interfaces, each one is a dictionary with:
1922 'name': (optional) name for the interface.
1923                 'net_id': VIM network id where this interface must be connected to. Mandatory for type==virtual
1924 'vpci': (optional) virtual vPCI address to assign at the VM. Can be ignored depending on VIM
1925 capabilities
1926 'model': (optional and only have sense for type==virtual) interface model: virtio, e1000, ...
1927 'mac_address': (optional) mac address to assign to this interface
1928                 #TODO: CHECK if an optional 'vlan' parameter is needed for VIMs when type is VF and net_id is not
1929 provided, the VLAN tag to be used. In case net_id is provided, the internal network vlan is used
1930 for tagging VF
1931 'type': (mandatory) can be one of:
1932 'virtual', in this case always connected to a network of type 'net_type=bridge'
1933 'PCI-PASSTHROUGH' or 'PF' (passthrough): depending on VIM capabilities it can be connected to a
1934                         data/ptp network or it can be created unconnected
1935 'SR-IOV' or 'VF' (SRIOV with VLAN tag): same as PF for network connectivity.
1936                     'VFnotShared' (SRIOV without VLAN tag): same as PF for network connectivity. VF where no other VFs
1937 are allocated on the same physical NIC
1938 'bw': (optional) only for PF/VF/VFnotShared. Minimal Bandwidth required for the interface in GBPS
1939 'port_security': (optional) If False it must avoid any traffic filtering at this interface. If missing
1940 or True, it must apply the default VIM behaviour
1941 After execution the method will add the key:
1942 'vim_id': must be filled/added by this method with the VIM identifier generated by the VIM for this
1943 interface. 'net_list' is modified
1944 'cloud_config': (optional) dictionary with:
1945 'key-pairs': (optional) list of strings with the public key to be inserted to the default user
1946 'users': (optional) list of users to be inserted, each item is a dict with:
1947 'name': (mandatory) user name,
1948 'key-pairs': (optional) list of strings with the public key to be inserted to the user
1949 'user-data': (optional) can be a string with the text script to be passed directly to cloud-init,
1950 or a list of strings, each one contains a script to be passed, usually with a MIMEmultipart file
1951 'config-files': (optional). List of files to be transferred. Each item is a dict with:
1952 'dest': (mandatory) string with the destination absolute path
1953 'encoding': (optional, by default text). Can be one of:
1954 'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
1955 'content' (mandatory): string with the content of the file
1956 'permissions': (optional) string with file permissions, typically octal notation '0644'
1957 'owner': (optional) file owner, string with the format 'owner:group'
1958 'boot-data-drive': boolean to indicate if user-data must be passed using a boot drive (hard disk)
1959 'disk_list': (optional) list with additional disks to the VM. Each item is a dict with:
1960 'image_id': (optional). VIM id of an existing image. If not provided an empty disk must be mounted
1961 'size': (mandatory) string with the size of the disk in GB
1962             availability_zone_index: Index of availability_zone_list to use for this VM. None if no availability zone is required
1963 availability_zone_list: list of availability zones given by user in the VNFD descriptor. Ignore if
1964 availability_zone_index is None
1965 Returns a tuple with the instance identifier and created_items or raises an exception on error
1966 created_items can be None or a dictionary where this method can include key-values that will be passed to
1967 the method delete_vminstance and action_vminstance. Can be used to store created ports, volumes, etc.
1968 Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
1969 as not present.
1970 """
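        # Illustrative call, a sketch only (the connector instance, UUIDs and key material below
        # are hypothetical, not taken from a real deployment):
        #
        #   vm_id, created_items = vim_conn.new_vminstance(
        #       name="vnf-vm1",
        #       image_id="<catalog uuid>",
        #       flavor_id="<flavor uuid>",
        #       net_list=[{"name": "eth0", "net_id": "<vCD network uuid>",
        #                  "type": "virtual", "use": "mgmt"}],
        #       cloud_config={"key-pairs": ["ssh-rsa AAAA... user@host"]},
        #   )
        #
        # The mgmt network becomes the primary NIC and the cloud-init data is delivered through a
        # config-drive ISO attached to the VM (see create_config_drive_iso below).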
1971 self.logger.info("Creating new instance for entry {}".format(name))
1972 self.logger.debug(
1973 "desc {} boot {} image_id: {} flavor_id: {} net_list: {} cloud_config {} disk_list {} "
1974 "availability_zone_index {} availability_zone_list {}".format(
1975 description,
1976 start,
1977 image_id,
1978 flavor_id,
1979 net_list,
1980 cloud_config,
1981 disk_list,
1982 availability_zone_index,
1983 availability_zone_list,
1984 )
1985 )
1986
1987         # new vm name = vmname + "-" + uuid
1988 new_vm_name = [name, "-", str(uuid.uuid4())]
1989 vmname_andid = "".join(new_vm_name)
1990
1991 for net in net_list:
1992 if net["type"] == "PCI-PASSTHROUGH":
1993 raise vimconn.VimConnNotSupportedException(
1994 "Current vCD version does not support type : {}".format(net["type"])
1995 )
1996
1997 if len(net_list) > 10:
1998 raise vimconn.VimConnNotSupportedException(
1999                 "The VM hardware versions 7 and above support up to 10 NICs only"
2000 )
2001
2002         # check for presence of VDC, Catalog entry and Flavor before creating the vApp.
2004 org, vdc = self.get_vdc_details()
2005 if vdc is None:
2006 raise vimconn.VimConnNotFoundException(
2007 "new_vminstance(): Failed create vApp {}: (Failed retrieve VDC information)".format(
2008 name
2009 )
2010 )
2011
2012 catalogs = org.list_catalogs()
2013 if catalogs is None:
2014             # If it failed, refresh the token and retry once
2015 self.get_token()
2016 org = Org(self.client, resource=self.client.get_org())
2017 catalogs = org.list_catalogs()
2018
2019 if catalogs is None:
2020 raise vimconn.VimConnNotFoundException(
2021 "new_vminstance(): Failed create vApp {}: (Failed retrieve catalogs list)".format(
2022 name
2023 )
2024 )
2025
2026 catalog_hash_name = self.get_catalogbyid(
2027 catalog_uuid=image_id, catalogs=catalogs
2028 )
2029 if catalog_hash_name:
2030 self.logger.info(
2031 "Found catalog entry {} for image id {}".format(
2032 catalog_hash_name, image_id
2033 )
2034 )
2035 else:
2036 raise vimconn.VimConnNotFoundException(
2037 "new_vminstance(): Failed create vApp {}: "
2038 "(Failed retrieve catalog information {})".format(name, image_id)
2039 )
2040
2041 # Set vCPU and Memory based on flavor.
2042 vm_cpus = None
2043 vm_memory = None
2044 vm_disk = None
2045 numas = None
2046
2047 if flavor_id is not None:
2048 if flavor_id not in vimconnector.flavorlist:
2049 raise vimconn.VimConnNotFoundException(
2050 "new_vminstance(): Failed create vApp {}: "
2051 "Failed retrieve flavor information "
2052 "flavor id {}".format(name, flavor_id)
2053 )
2054 else:
2055 try:
2056 flavor = vimconnector.flavorlist[flavor_id]
2057 vm_cpus = flavor[FLAVOR_VCPUS_KEY]
2058 vm_memory = flavor[FLAVOR_RAM_KEY]
2059 vm_disk = flavor[FLAVOR_DISK_KEY]
2060 extended = flavor.get("extended", None)
2061
2062 if extended:
2063 numas = extended.get("numas", None)
2064 except Exception as exp:
2065 raise vimconn.VimConnException(
2066                         "Corrupted flavor {}. Exception: {}".format(flavor_id, exp)
2067 )
2068
2069         # image upload creates the template name as "<catalog name> Template".
2070 templateName = self.get_catalogbyid(catalog_uuid=image_id, catalogs=catalogs)
2071 # power_on = 'false'
2072 # if start:
2073 # power_on = 'true'
2074
2075         # client must provide at least one entry in net_list; if not, we report an error
2076         # If net type is mgmt, then configure it as primary net & use its NIC index as primary NIC
2077         # If there is no mgmt net, then the 1st net in net_list is considered the primary net.
2078 primary_net = None
2079 primary_netname = None
2080 primary_net_href = None
2081 # network_mode = 'bridged'
2082 if net_list is not None and len(net_list) > 0:
2083 for net in net_list:
2084 if "use" in net and net["use"] == "mgmt" and not primary_net:
2085 primary_net = net
2086
2087 if primary_net is None:
2088 primary_net = net_list[0]
2089
2090 try:
2091 primary_net_id = primary_net["net_id"]
2092 url_list = [self.url, "/api/network/", primary_net_id]
2093 primary_net_href = "".join(url_list)
2094 network_dict = self.get_vcd_network(network_uuid=primary_net_id)
2095
2096 if "name" in network_dict:
2097 primary_netname = network_dict["name"]
2098 except KeyError:
2099 raise vimconn.VimConnException(
2100                     "Corrupted primary network data: {}".format(primary_net)
2101 )
2102 else:
2103 raise vimconn.VimConnUnexpectedResponse(
2104 "new_vminstance(): Failed network list is empty."
2105 )
2106
2107 # use: 'data', 'bridge', 'mgmt'
2108 # create vApp. Set vcpu and ram based on flavor id.
2109 try:
2110 vdc_obj = VDC(self.client, resource=org.get_vdc(self.tenant_name))
2111 if not vdc_obj:
2112 raise vimconn.VimConnNotFoundException(
2113 "new_vminstance(): Failed to get VDC object"
2114 )
2115
2116 for retry in (1, 2):
2117 items = org.get_catalog_item(catalog_hash_name, catalog_hash_name)
2118 catalog_items = [items.attrib]
2119
2120 if len(catalog_items) == 1:
2121 if self.client:
2122 headers = {
2123 "Accept": "application/*+xml;version=" + API_VERSION,
2124 "x-vcloud-authorization": self.client._session.headers[
2125 "x-vcloud-authorization"
2126 ],
2127 }
2128
2129 response = self.perform_request(
2130 req_type="GET",
2131 url=catalog_items[0].get("href"),
2132 headers=headers,
2133 )
2134 catalogItem = XmlElementTree.fromstring(response.text)
2135 entity = [
2136 child
2137 for child in catalogItem
2138 if child.get("type")
2139 == "application/vnd.vmware.vcloud.vAppTemplate+xml"
2140 ][0]
2141                     vapp_template_href = entity.get("href")
2142
2143 response = self.perform_request(
2144                         req_type="GET", url=vapp_template_href, headers=headers
2145 )
2146
2147 if response.status_code != requests.codes.ok:
2148 self.logger.debug(
2149 "REST API call {} failed. Return status code {}".format(
2150                                 vapp_template_href, response.status_code
2151 )
2152 )
2153 else:
2154 result = (response.text).replace("\n", " ")
2155
2156 vapp_template_tree = XmlElementTree.fromstring(response.text)
2157 children_element = [
2158 child for child in vapp_template_tree if "Children" in child.tag
2159 ][0]
2160 vm_element = [child for child in children_element if "Vm" in child.tag][
2161 0
2162 ]
2163 vm_name = vm_element.get("name")
2164 vm_id = vm_element.get("id")
2165 vm_href = vm_element.get("href")
2166
2167 # cpus = re.search('<rasd:Description>Number of Virtual CPUs</.*?>(\d+)</rasd:VirtualQuantity>',
2168 # result).group(1)
2169 memory_mb = re.search(
2170                         r"<rasd:Description>Memory Size</.*?>(\d+)</rasd:VirtualQuantity>",
2171 result,
2172 ).group(1)
2173 # cores = re.search('<vmw:CoresPerSocket ovf:required.*?>(\d+)</vmw:CoresPerSocket>', result).group(1)
2174
2175 headers[
2176 "Content-Type"
2177 ] = "application/vnd.vmware.vcloud.instantiateVAppTemplateParams+xml"
2178 vdc_id = vdc.get("id").split(":")[-1]
2179 instantiate_vapp_href = (
2180 "{}/api/vdc/{}/action/instantiateVAppTemplate".format(
2181 self.url, vdc_id
2182 )
2183 )
2184
2185 with open(
2186 os.path.join(
2187 os.path.dirname(__file__), "InstantiateVAppTemplateParams.xml"
2188 ),
2189 "r",
2190 ) as f:
2191 template = f.read()
2192
2193 data = template.format(
2194 vmname_andid,
2195 primary_netname,
2196 primary_net_href,
2197                         vapp_template_href,
2198 vm_href,
2199 vm_id,
2200 vm_name,
2201 primary_netname,
2202 cpu=vm_cpus,
2203 core=1,
2204 memory=vm_memory,
2205 )
2206
2207 response = self.perform_request(
2208 req_type="POST",
2209 url=instantiate_vapp_href,
2210 headers=headers,
2211 data=data,
2212 )
2213
2214 if response.status_code != 201:
2215 self.logger.error(
2216                             "REST call {} failed, reason: {} "
2217                             "status code: {}".format(
2218 instantiate_vapp_href, response.text, response.status_code
2219 )
2220 )
2221 raise vimconn.VimConnException(
2222                             "new_vminstance(): Failed to create "
2223                             "vApp {}".format(vmname_andid)
2224 )
2225 else:
2226 vapptask = self.get_task_from_response(response.text)
2227
2228 if vapptask is None and retry == 1:
2229 self.get_token() # Retry getting token
2230 continue
2231 else:
2232 break
2233
2234 if vapptask is None or vapptask is False:
2235 raise vimconn.VimConnUnexpectedResponse(
2236 "new_vminstance(): failed to create vApp {}".format(vmname_andid)
2237 )
2238
2239 # wait for task to complete
2240 result = self.client.get_task_monitor().wait_for_success(task=vapptask)
2241
2242 if result.get("status") == "success":
2243 self.logger.debug(
2244                 "new_vminstance(): Successfully created vApp {}".format(vmname_andid)
2245 )
2246 else:
2247 raise vimconn.VimConnUnexpectedResponse(
2248 "new_vminstance(): failed to create vApp {}".format(vmname_andid)
2249 )
2250 except Exception as exp:
2251 raise vimconn.VimConnUnexpectedResponse(
2252 "new_vminstance(): failed to create vApp {} with Exception:{}".format(
2253 vmname_andid, exp
2254 )
2255 )
2256
2257         # we should now have the vApp in an undeployed state.
2258 try:
2259 vdc_obj = VDC(self.client, href=vdc.get("href"))
2260 vapp_resource = vdc_obj.get_vapp(vmname_andid)
2261 vapp_uuid = vapp_resource.get("id").split(":")[-1]
2262 vapp = VApp(self.client, resource=vapp_resource)
2263 except Exception as exp:
2264 raise vimconn.VimConnUnexpectedResponse(
2265 "new_vminstance(): Failed to retrieve vApp {} after creation: Exception:{}".format(
2266 vmname_andid, exp
2267 )
2268 )
2269
2270 if vapp_uuid is None:
2271 raise vimconn.VimConnUnexpectedResponse(
2272 "new_vminstance(): Failed to retrieve vApp {} after creation".format(
2273 vmname_andid
2274 )
2275 )
2276
2277         # Add PCI passthrough/SRIOV configurations
2278 pci_devices_info = []
2279 reserve_memory = False
2280
2281 for net in net_list:
2282 if net["type"] == "PF" or net["type"] == "PCI-PASSTHROUGH":
2283 pci_devices_info.append(net)
2284 elif (
2285 net["type"] == "VF"
2286 or net["type"] == "SR-IOV"
2287 or net["type"] == "VFnotShared"
2288 ) and "net_id" in net:
2289 reserve_memory = True
2290
2291 # Add PCI
2292 if len(pci_devices_info) > 0:
2293 self.logger.info(
2294 "Need to add PCI devices {} into VM {}".format(
2295 pci_devices_info, vmname_andid
2296 )
2297 )
2298 PCI_devices_status, _, _ = self.add_pci_devices(
2299 vapp_uuid, pci_devices_info, vmname_andid
2300 )
2301
2302 if PCI_devices_status:
2303 self.logger.info(
2304                     "Added PCI devices {} to VM {}".format(
2305 pci_devices_info, vmname_andid
2306 )
2307 )
2308 reserve_memory = True
2309 else:
2310 self.logger.info(
2311                     "Failed to add PCI devices {} to VM {}".format(
2312 pci_devices_info, vmname_andid
2313 )
2314 )
2315
2316 # Add serial console - this allows cloud images to boot as if we are running under OpenStack
2317 self.add_serial_device(vapp_uuid)
2318
2319 if vm_disk:
2320 # Assuming there is only one disk in ovf and fast provisioning in organization vDC is disabled
2321 result = self.modify_vm_disk(vapp_uuid, vm_disk)
2322 if result:
2323 self.logger.debug("Modified Disk size of VM {} ".format(vmname_andid))
2324
2325 # Add new or existing disks to vApp
2326 if disk_list:
2327 added_existing_disk = False
2328 for disk in disk_list:
2329 if "device_type" in disk and disk["device_type"] == "cdrom":
2330 image_id = disk["image_id"]
2331 # Adding CD-ROM to VM
2332 # will revisit code once specification ready to support this feature
2333 self.insert_media_to_vm(vapp, image_id)
2334 elif "image_id" in disk and disk["image_id"] is not None:
2335 self.logger.debug(
2336 "Adding existing disk from image {} to vm {} ".format(
2337 disk["image_id"], vapp_uuid
2338 )
2339 )
2340 self.add_existing_disk(
2341 catalogs=catalogs,
2342 image_id=disk["image_id"],
2343 size=disk["size"],
2344 template_name=templateName,
2345 vapp_uuid=vapp_uuid,
2346 )
2347 added_existing_disk = True
2348 else:
2349 # Wait till added existing disk gets reflected into vCD database/API
2350 if added_existing_disk:
2351 time.sleep(5)
2352 added_existing_disk = False
2353 self.add_new_disk(vapp_uuid, disk["size"])
2354
2355 if numas:
2356 # Assigning numa affinity setting
2357 for numa in numas:
2358 if "paired-threads-id" in numa:
2359 paired_threads_id = numa["paired-threads-id"]
2360 self.set_numa_affinity(vapp_uuid, paired_threads_id)
2361
2362 # add NICs & connect to networks in netlist
2363 try:
2364 vdc_obj = VDC(self.client, href=vdc.get("href"))
2365 vapp_resource = vdc_obj.get_vapp(vmname_andid)
2366 vapp = VApp(self.client, resource=vapp_resource)
2367 vapp_id = vapp_resource.get("id").split(":")[-1]
2368
2369 self.logger.info("Removing primary NIC: ")
2370 # First remove all NICs so that NIC properties can be adjusted as needed
2371 self.remove_primary_network_adapter_from_all_vms(vapp)
2372
2373 self.logger.info("Request to connect VM to a network: {}".format(net_list))
2374 primary_nic_index = 0
2375 nicIndex = 0
2376 for net in net_list:
2377 # openmano uses network id in UUID format.
2378                 # vCloud Director needs a name, so we do the reverse operation: from the provided UUID we look up the name
2379 # [{'use': 'bridge', 'net_id': '527d4bf7-566a-41e7-a9e7-ca3cdd9cef4f', 'type': 'virtual',
2380 # 'vpci': '0000:00:11.0', 'name': 'eth0'}]
2381
2382 if "net_id" not in net:
2383 continue
2384
2385                 # Using net_id as vim_id, i.e. VIM interface id, as we do not have a separate VIM interface id
2386 # Same will be returned in refresh_vms_status() as vim_interface_id
2387 net["vim_id"] = net[
2388 "net_id"
2389 ] # Provide the same VIM identifier as the VIM network
2390
2391 interface_net_id = net["net_id"]
2392 interface_net_name = self.get_network_name_by_id(
2393 network_uuid=interface_net_id
2394 )
2395 interface_network_mode = net["use"]
2396
2397 if interface_network_mode == "mgmt":
2398 primary_nic_index = nicIndex
2399
2400 """- POOL (A static IP address is allocated automatically from a pool of addresses.)
2401 - DHCP (The IP address is obtained from a DHCP service.)
2402 - MANUAL (The IP address is assigned manually in the IpAddress element.)
2403 - NONE (No IP addressing mode specified.)"""
2404
2405 if primary_netname is not None:
2406 self.logger.debug(
2407 "new_vminstance(): Filtering by net name {}".format(
2408 interface_net_name
2409 )
2410 )
2411 nets = [
2412 n
2413 for n in self.get_network_list()
2414 if n.get("name") == interface_net_name
2415 ]
2416
2417 if len(nets) == 1:
2418 self.logger.info(
2419 "new_vminstance(): Found requested network: {}".format(
2420 nets[0].get("name")
2421 )
2422 )
2423
2424 if interface_net_name != primary_netname:
2425 # connect network to VM - with all DHCP by default
2426 self.logger.info(
2427 "new_vminstance(): Attaching net {} to vapp".format(
2428 interface_net_name
2429 )
2430 )
2431 self.connect_vapp_to_org_vdc_network(
2432 vapp_id, nets[0].get("name")
2433 )
2434
2435 type_list = ("PF", "PCI-PASSTHROUGH", "VFnotShared")
2436 nic_type = "VMXNET3"
2437 if "type" in net and net["type"] not in type_list:
2438 # fetching nic type from vnf
2439 if "model" in net:
2440 if net["model"] is not None:
2441 if (
2442 net["model"].lower() == "paravirt"
2443 or net["model"].lower() == "virtio"
2444 ):
2445 nic_type = "VMXNET3"
2446 else:
2447 nic_type = net["model"]
2448
2449 self.logger.info(
2450 "new_vminstance(): adding network adapter "
2451 "to a network {}".format(nets[0].get("name"))
2452 )
2453 self.add_network_adapter_to_vms(
2454 vapp,
2455 nets[0].get("name"),
2456 primary_nic_index,
2457 nicIndex,
2458 net,
2459 nic_type=nic_type,
2460 )
2461 else:
2462 self.logger.info(
2463 "new_vminstance(): adding network adapter "
2464 "to a network {}".format(nets[0].get("name"))
2465 )
2466
2467 if net["type"] in ["SR-IOV", "VF"]:
2468 nic_type = net["type"]
2469 self.add_network_adapter_to_vms(
2470 vapp,
2471 nets[0].get("name"),
2472 primary_nic_index,
2473 nicIndex,
2474 net,
2475 nic_type=nic_type,
2476 )
2477 nicIndex += 1
2478
2479 # cloud-init for ssh-key injection
2480 if cloud_config:
2481 # Create a catalog which will be carrying the config drive ISO
2482 # This catalog is deleted during vApp deletion. The catalog name carries
2483             # vApp UUID and that's how it gets identified during its deletion.
2484 config_drive_catalog_name = "cfg_drv-" + vapp_uuid
2485 self.logger.info(
2486 'new_vminstance(): Creating catalog "{}" to carry config drive ISO'.format(
2487 config_drive_catalog_name
2488 )
2489 )
2490 config_drive_catalog_id = self.create_vimcatalog(
2491 org, config_drive_catalog_name
2492 )
2493
2494 if config_drive_catalog_id is None:
2495 error_msg = (
2496 "new_vminstance(): Failed to create new catalog '{}' to carry the config drive "
2497 "ISO".format(config_drive_catalog_name)
2498 )
2499 raise Exception(error_msg)
2500
2501 # Create config-drive ISO
2502 _, userdata = self._create_user_data(cloud_config)
2503 # self.logger.debug('new_vminstance(): The userdata for cloud-init: {}'.format(userdata))
2504 iso_path = self.create_config_drive_iso(userdata)
2505 self.logger.debug(
2506 "new_vminstance(): The ISO is successfully created. Path: {}".format(
2507 iso_path
2508 )
2509 )
2510
2511 self.logger.info(
2512 "new_vminstance(): uploading iso to catalog {}".format(
2513 config_drive_catalog_name
2514 )
2515 )
2516 self.upload_iso_to_catalog(config_drive_catalog_id, iso_path)
2517 # Attach the config-drive ISO to the VM
2518 self.logger.info(
2519 "new_vminstance(): Attaching the config-drive ISO to the VM"
2520 )
2521 self.insert_media_to_vm(vapp, config_drive_catalog_id)
2522 shutil.rmtree(os.path.dirname(iso_path), ignore_errors=True)
2523
2524 # If VM has PCI devices or SRIOV reserve memory for VM
2525 if reserve_memory:
2526 self.reserve_memory_for_all_vms(vapp, memory_mb)
2527
2528 self.logger.debug(
2529 "new_vminstance(): starting power on vApp {} ".format(vmname_andid)
2530 )
2531
2532 poweron_task = self.power_on_vapp(vapp_id, vmname_andid)
2533 result = self.client.get_task_monitor().wait_for_success(task=poweron_task)
2534 if result.get("status") == "success":
2535 self.logger.info(
2536 "new_vminstance(): Successfully power on "
2537 "vApp {}".format(vmname_andid)
2538 )
2539 else:
2540 self.logger.error(
2541 "new_vminstance(): failed to power on vApp "
2542 "{}".format(vmname_andid)
2543 )
2544
2545 except Exception as exp:
2546 try:
2547 self.delete_vminstance(vapp_uuid)
2548 except Exception as exp2:
2549 self.logger.error("new_vminstance rollback fail {}".format(exp2))
2550 # it might be a case if specific mandatory entry in dict is empty or some other pyVcloud exception
2551 self.logger.error(
2552 "new_vminstance(): Failed create new vm instance {} with exception {}".format(
2553 name, exp
2554 )
2555 )
2556 raise vimconn.VimConnException(
2557 "new_vminstance(): Failed create new vm instance {} with exception {}".format(
2558 name, exp
2559 )
2560 )
2561         # check if the vApp is deployed and, if that is the case, return the vApp UUID
2562 wait_time = 0
2563 vapp_uuid = None
2564 while wait_time <= MAX_WAIT_TIME:
2565 try:
2566 vapp_resource = vdc_obj.get_vapp(vmname_andid)
2567 vapp = VApp(self.client, resource=vapp_resource)
2568 except Exception as exp:
2569 raise vimconn.VimConnUnexpectedResponse(
2570 "new_vminstance(): Failed to retrieve vApp {} after creation: Exception:{}".format(
2571 vmname_andid, exp
2572 )
2573 )
2574
2575 # if vapp and vapp.me.deployed:
2576 if vapp and vapp_resource.get("deployed") == "true":
2577 vapp_uuid = vapp_resource.get("id").split(":")[-1]
2578 break
2579 else:
2580 self.logger.debug(
2581 "new_vminstance(): Wait for vApp {} to deploy".format(name)
2582 )
2583 time.sleep(INTERVAL_TIME)
2584
2585 wait_time += INTERVAL_TIME
2586
2587 # SET Affinity Rule for VM
2588         # Pre-requisites: User has created Host Groups in vCenter with respective Hosts to be used
2589 # While creating VIM account user has to pass the Host Group names in availability_zone list
2590 # "availability_zone" is a part of VIM "config" parameters
2591 # For example, in VIM config: "availability_zone":["HG_170","HG_174","HG_175"]
2592 # Host groups are referred as availability zones
2593         # With the following procedure, the deployed VM will be added into a VM group.
2594         # Then a VM-to-Host affinity rule will be created using the VM group & Host group.
2595 if availability_zone_list:
2596 self.logger.debug(
2597 "Existing Host Groups in VIM {}".format(
2598 self.config.get("availability_zone")
2599 )
2600 )
2601 # Admin access required for creating Affinity rules
2602 client = self.connect_as_admin()
2603
2604 if not client:
2605 raise vimconn.VimConnConnectionException(
2606 "Failed to connect vCD as admin"
2607 )
2608 else:
2609 self.client = client
2610
2611 if self.client:
2612 headers = {
2613 "Accept": "application/*+xml;version=27.0",
2614 "x-vcloud-authorization": self.client._session.headers[
2615 "x-vcloud-authorization"
2616 ],
2617 }
2618
2619 # Step1: Get provider vdc details from organization
2620 pvdc_href = self.get_pvdc_for_org(self.tenant_name, headers)
2621 if pvdc_href is not None:
2622 # Step2: Found required pvdc, now get resource pool information
2623 respool_href = self.get_resource_pool_details(pvdc_href, headers)
2624 if respool_href is None:
2625 # Raise error if respool_href not found
2626                     msg = "new_vminstance(): Error in finding resource pool details in pvdc {}".format(
2627 pvdc_href
2628 )
2629 self.log_message(msg)
2630
2631 # Step3: Verify requested availability zone(hostGroup) is present in vCD
2632 # get availability Zone
2633 vm_az = self.get_vm_availability_zone(
2634 availability_zone_index, availability_zone_list
2635 )
2636
2637 # check if provided av zone(hostGroup) is present in vCD VIM
2638 status = self.check_availibility_zone(vm_az, respool_href, headers)
2639 if status is False:
2640 msg = (
2641 "new_vminstance(): Error in finding availability zone(Host Group): {} in "
2642 "resource pool {} status: {}"
2643 ).format(vm_az, respool_href, status)
2644 self.log_message(msg)
2645 else:
2646 self.logger.debug(
2647 "new_vminstance(): Availability zone {} found in VIM".format(vm_az)
2648 )
2649
2650 # Step4: Find VM group references to create vm group
2651 vmgrp_href = self.find_vmgroup_reference(respool_href, headers)
2652 if vmgrp_href is None:
2653 msg = "new_vminstance(): No reference to VmGroup found in resource pool"
2654 self.log_message(msg)
2655
2656 # Step5: Create a VmGroup with name az_VmGroup
2657 vmgrp_name = (
2658 vm_az + "_" + name
2659 ) # Formed VM Group name = Host Group name + VM name
2660 status = self.create_vmgroup(vmgrp_name, vmgrp_href, headers)
2661 if status is not True:
2662 msg = "new_vminstance(): Error in creating VM group {}".format(
2663 vmgrp_name
2664 )
2665 self.log_message(msg)
2666
2667 # VM Group url to add vms to vm group
2668 vmgrpname_url = self.url + "/api/admin/extension/vmGroup/name/" + vmgrp_name
2669
2670 # Step6: Add VM to VM Group
2671 # Find VM uuid from vapp_uuid
2672 vm_details = self.get_vapp_details_rest(vapp_uuid)
2673 vm_uuid = vm_details["vmuuid"]
2674
2675 status = self.add_vm_to_vmgroup(vm_uuid, vmgrpname_url, vmgrp_name, headers)
2676 if status is not True:
2677 msg = "new_vminstance(): Error in adding VM to VM group {}".format(
2678 vmgrp_name
2679 )
2680 self.log_message(msg)
2681
2682 # Step7: Create VM to Host affinity rule
2683 addrule_href = self.get_add_rule_reference(respool_href, headers)
2684 if addrule_href is None:
2685 msg = "new_vminstance(): Error in finding href to add rule in resource pool: {}".format(
2686 respool_href
2687 )
2688 self.log_message(msg)
2689
2690 status = self.create_vm_to_host_affinity_rule(
2691 addrule_href, vmgrp_name, vm_az, "Affinity", headers
2692 )
2693 if status is False:
2694 msg = "new_vminstance(): Error in creating affinity rule for VM {} in Host group {}".format(
2695 name, vm_az
2696 )
2697 self.log_message(msg)
2698 else:
2699 self.logger.debug(
2700 "new_vminstance(): Affinity rule created successfully. Added {} in Host group {}".format(
2701 name, vm_az
2702 )
2703 )
2704 # Reset token to a normal user to perform other operations
2705 self.get_token()
2706
2707 if vapp_uuid is not None:
2708 return vapp_uuid, None
2709 else:
2710 raise vimconn.VimConnUnexpectedResponse(
2711 "new_vminstance(): Failed create new vm instance {}".format(name)
2712 )
2713
2714 def create_config_drive_iso(self, user_data):
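        # A minimal sketch of what this helper produces, assuming genisoimage is available on the
        # RO host: a temporary directory laid out as an OpenStack-style config drive
        #
        #   <tmpdir>/openstack/latest/meta_data.json   (availability_zone, launch_index, name, uuid)
        #   <tmpdir>/openstack/latest/user_data        (the cloud-init payload passed in)
        #
        # which is then packed into ConfigDrive.iso with volume label "config-2" so that cloud-init
        # inside the guest detects it as a config drive.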
2715 tmpdir = tempfile.mkdtemp()
2716 iso_path = os.path.join(tmpdir, "ConfigDrive.iso")
2717 latest_dir = os.path.join(tmpdir, "openstack", "latest")
2718 os.makedirs(latest_dir)
2719 with open(
2720 os.path.join(latest_dir, "meta_data.json"), "w"
2721 ) as meta_file_obj, open(
2722 os.path.join(latest_dir, "user_data"), "w"
2723 ) as userdata_file_obj:
2724 userdata_file_obj.write(user_data)
2725 meta_file_obj.write(
2726 json.dumps(
2727 {
2728 "availability_zone": "nova",
2729 "launch_index": 0,
2730 "name": "ConfigDrive",
2731 "uuid": str(uuid.uuid4()),
2732 }
2733 )
2734 )
2735 genisoimage_cmd = (
2736 "genisoimage -J -r -V config-2 -o {iso_path} {source_dir_path}".format(
2737 iso_path=iso_path, source_dir_path=tmpdir
2738 )
2739 )
2740 self.logger.info(
2741 'create_config_drive_iso(): Creating ISO by running command "{}"'.format(
2742 genisoimage_cmd
2743 )
2744 )
2745
2746 try:
2747 FNULL = open(os.devnull, "w")
2748 subprocess.check_call(genisoimage_cmd, shell=True, stdout=FNULL)
2749 except subprocess.CalledProcessError as e:
2750 shutil.rmtree(tmpdir, ignore_errors=True)
2751 error_msg = "create_config_drive_iso(): Exception while running genisoimage command: {}".format(
2752 e
2753 )
2754 self.logger.error(error_msg)
2755 raise Exception(error_msg)
2756
2757 return iso_path
2758
2759 def upload_iso_to_catalog(self, catalog_id, iso_file_path):
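        # Upload flow (as implemented below): POST a Media description to the catalog's
        # action/upload link, GET the returned media entity to obtain the file upload URL,
        # PUT the ISO bytes to that URL, then wait for the vCD upload task to succeed.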
2760 if not os.path.isfile(iso_file_path):
2761 error_msg = "upload_iso_to_catalog(): Given iso file is not present. Given path: {}".format(
2762 iso_file_path
2763 )
2764 self.logger.error(error_msg)
2765 raise Exception(error_msg)
2766
2767 iso_file_stat = os.stat(iso_file_path)
2768 xml_media_elem = """<?xml version="1.0" encoding="UTF-8"?>
2769 <Media
2770 xmlns="http://www.vmware.com/vcloud/v1.5"
2771 name="{iso_name}"
2772 size="{iso_size}"
2773 imageType="iso">
2774 <Description>ISO image for config-drive</Description>
2775 </Media>""".format(
2776 iso_name=os.path.basename(iso_file_path), iso_size=iso_file_stat.st_size
2777 )
2778 headers = {
2779 "Accept": "application/*+xml;version=" + API_VERSION,
2780 "x-vcloud-authorization": self.client._session.headers[
2781 "x-vcloud-authorization"
2782 ],
2783 }
2784 headers["Content-Type"] = "application/vnd.vmware.vcloud.media+xml"
2785 catalog_href = self.url + "/api/catalog/" + catalog_id + "/action/upload"
2786 response = self.perform_request(
2787 req_type="POST", url=catalog_href, headers=headers, data=xml_media_elem
2788 )
2789
2790 if response.status_code != 201:
2791 error_msg = "upload_iso_to_catalog(): Failed to POST an action/upload request to {}".format(
2792 catalog_href
2793 )
2794 self.logger.error(error_msg)
2795 raise Exception(error_msg)
2796
2797 catalogItem = XmlElementTree.fromstring(response.text)
2798 entity = [
2799 child
2800 for child in catalogItem
2801 if child.get("type") == "application/vnd.vmware.vcloud.media+xml"
2802 ][0]
2803 entity_href = entity.get("href")
2804
2805 response = self.perform_request(
2806 req_type="GET", url=entity_href, headers=headers
2807 )
2808 if response.status_code != 200:
2809 raise Exception(
2810 "upload_iso_to_catalog(): Failed to GET entity href {}".format(
2811 entity_href
2812 )
2813 )
2814
2815 match = re.search(
2816 r'<Files>\s+?<File.+?href="(.+?)"/>\s+?</File>\s+?</Files>',
2817 response.text,
2818 re.DOTALL,
2819 )
2820 if match:
2821 media_upload_href = match.group(1)
2822 else:
2823 raise Exception(
2824 "Could not parse the upload URL for the media file from the last response"
2825 )
2826 upload_iso_task = self.get_task_from_response(response.text)
2827 headers["Content-Type"] = "application/octet-stream"
2828 response = self.perform_request(
2829 req_type="PUT",
2830 url=media_upload_href,
2831 headers=headers,
2832 data=open(iso_file_path, "rb"),
2833 )
2834
2835 if response.status_code != 200:
2836 raise Exception('PUT request to "{}" failed'.format(media_upload_href))
2837
2838 result = self.client.get_task_monitor().wait_for_success(task=upload_iso_task)
2839 if result.get("status") != "success":
2840 raise Exception(
2841 "The upload iso task failed with status {}".format(result.get("status"))
2842 )
2843
2844 def get_vcd_availibility_zones(self, respool_href, headers):
2845         """Method to find presence of an availability zone in the VIM resource pool
2846
2847 Args:
2848 respool_href - resource pool href
2849 headers - header information
2850
2851 Returns:
2852             vcd_az - list of availability zones present in vCD
2853 """
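        # Illustrative result, assuming the provider VDC exposes two host groups (names below are
        # hypothetical): ["HG_170", "HG_174"]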
2854 vcd_az = []
2855 url = respool_href
2856 resp = self.perform_request(req_type="GET", url=respool_href, headers=headers)
2857
2858 if resp.status_code != requests.codes.ok:
2859 self.logger.debug(
2860 "REST API call {} failed. Return status code {}".format(
2861 url, resp.status_code
2862 )
2863 )
2864 else:
2865 # Get the href to hostGroups and find provided hostGroup is present in it
2866 resp_xml = XmlElementTree.fromstring(resp.content)
2867 for child in resp_xml:
2868 if "VMWProviderVdcResourcePool" in child.tag:
2869 for schild in child:
2870 if "Link" in schild.tag:
2871 if (
2872 schild.attrib.get("type")
2873 == "application/vnd.vmware.admin.vmwHostGroupsType+xml"
2874 ):
2875 hostGroup = schild.attrib.get("href")
2876 hg_resp = self.perform_request(
2877 req_type="GET", url=hostGroup, headers=headers
2878 )
2879
2880 if hg_resp.status_code != requests.codes.ok:
2881 self.logger.debug(
2882 "REST API call {} failed. Return status code {}".format(
2883 hostGroup, hg_resp.status_code
2884 )
2885 )
2886 else:
2887 hg_resp_xml = XmlElementTree.fromstring(
2888 hg_resp.content
2889 )
2890 for hostGroup in hg_resp_xml:
2891 if "HostGroup" in hostGroup.tag:
2892 # append host group name to the list
2893 vcd_az.append(hostGroup.attrib.get("name"))
2894
2895 return vcd_az
2896
2897 def set_availability_zones(self):
2898 """
2899 Set vim availability zone
2900 """
2901 vim_availability_zones = None
2902 availability_zone = None
2903
2904 if "availability_zone" in self.config:
2905 vim_availability_zones = self.config.get("availability_zone")
2906
2907 if isinstance(vim_availability_zones, str):
2908 availability_zone = [vim_availability_zones]
2909 elif isinstance(vim_availability_zones, list):
2910 availability_zone = vim_availability_zones
2911 else:
2912 return availability_zone
2913
2914 return availability_zone
2915
2916 def get_vm_availability_zone(self, availability_zone_index, availability_zone_list):
2917 """
2918 Return the availability zone to be used by the created VM.
2919 returns: The VIM availability zone to be used or None
2920 """
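        # Worked example with hypothetical values: given VIM config availability_zone
        # ["HG_170", "HG_174", "HG_175"], a VNFD zone list ["zone-a", "zone-b"] whose names do not
        # match the VIM names, and availability_zone_index 1, zones are matched by index and
        # "HG_174" is returned.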
2921 if availability_zone_index is None:
2922 if not self.config.get("availability_zone"):
2923 return None
2924 elif isinstance(self.config.get("availability_zone"), str):
2925 return self.config["availability_zone"]
2926 else:
2927 return self.config["availability_zone"][0]
2928
2929 vim_availability_zones = self.availability_zone
2930
2931         # check if the VIM offers enough availability zones as described in the VNFD
2932 if vim_availability_zones and len(availability_zone_list) <= len(
2933 vim_availability_zones
2934 ):
2935 # check if all the names of NFV AV match VIM AV names
2936 match_by_index = False
2937 for av in availability_zone_list:
2938 if av not in vim_availability_zones:
2939 match_by_index = True
2940 break
2941
2942 if match_by_index:
2943 self.logger.debug(
2944 "Required Availability zone or Host Group not found in VIM config"
2945 )
2946 self.logger.debug(
2947 "Input Availability zone list: {}".format(availability_zone_list)
2948 )
2949 self.logger.debug(
2950 "VIM configured Availability zones: {}".format(
2951 vim_availability_zones
2952 )
2953 )
2954 self.logger.debug("VIM Availability zones will be used by index")
2955 return vim_availability_zones[availability_zone_index]
2956 else:
2957 return availability_zone_list[availability_zone_index]
2958 else:
2959 raise vimconn.VimConnConflictException(
2960                 "Not enough availability zones at VIM for this deployment"
2961 )
2962
2963 def create_vm_to_host_affinity_rule(
2964 self, addrule_href, vmgrpname, hostgrpname, polarity, headers
2965 ):
2966 """Method to create VM to Host Affinity rule in vCD
2967
2968 Args:
2969 addrule_href - href to make a POST request
2970 vmgrpname - name of the VM group created
2971             hostgrpname - name of the host group created earlier
2972 polarity - Affinity or Anti-affinity (default: Affinity)
2973 headers - headers to make REST call
2974
2975 Returns:
2976 True- if rule is created
2977 False- Failed to create rule due to some error
2978
2979 """
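        # e.g. with polarity "Affinity" and a (hypothetical) vmgrpname "HG_170_vnf-vm1", the rule
        # created below is named "Affinity_HG_170_vnf-vm1".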
2980 task_status = False
2981 rule_name = polarity + "_" + vmgrpname
2982 payload = """<?xml version="1.0" encoding="UTF-8"?>
2983 <vmext:VMWVmHostAffinityRule
2984 xmlns:vmext="http://www.vmware.com/vcloud/extension/v1.5"
2985 xmlns:vcloud="http://www.vmware.com/vcloud/v1.5"
2986 type="application/vnd.vmware.admin.vmwVmHostAffinityRule+xml">
2987 <vcloud:Name>{}</vcloud:Name>
2988 <vcloud:IsEnabled>true</vcloud:IsEnabled>
2989 <vcloud:IsMandatory>true</vcloud:IsMandatory>
2990 <vcloud:Polarity>{}</vcloud:Polarity>
2991 <vmext:HostGroupName>{}</vmext:HostGroupName>
2992 <vmext:VmGroupName>{}</vmext:VmGroupName>
2993 </vmext:VMWVmHostAffinityRule>""".format(
2994 rule_name, polarity, hostgrpname, vmgrpname
2995 )
2996
2997 resp = self.perform_request(
2998 req_type="POST", url=addrule_href, headers=headers, data=payload
2999 )
3000
3001 if resp.status_code != requests.codes.accepted:
3002 self.logger.debug(
3003 "REST API call {} failed. Return status code {}".format(
3004 addrule_href, resp.status_code
3005 )
3006 )
3007 task_status = False
3008
3009 return task_status
3010 else:
3011 affinity_task = self.get_task_from_response(resp.content)
3012 self.logger.debug("affinity_task: {}".format(affinity_task))
3013
3014 if affinity_task is None or affinity_task is False:
3015 raise vimconn.VimConnUnexpectedResponse("failed to find affinity task")
3016 # wait for task to complete
3017 result = self.client.get_task_monitor().wait_for_success(task=affinity_task)
3018
3019 if result.get("status") == "success":
3020 self.logger.debug(
3021 "Successfully created affinity rule {}".format(rule_name)
3022 )
3023 return True
3024 else:
3025 raise vimconn.VimConnUnexpectedResponse(
3026 "failed to create affinity rule {}".format(rule_name)
3027 )
3028
3029 def get_add_rule_reference(self, respool_href, headers):
3030         """This method finds the href used to add a VM-to-Host affinity rule in vCD
3031
3032 Args:
3033 respool_href- href to resource pool
3034 headers- header information to make REST call
3035
3036 Returns:
3037 None - if no valid href to add rule found or
3038 addrule_href - href to add vm to host affinity rule of resource pool
3039 """
3040 addrule_href = None
3041 resp = self.perform_request(req_type="GET", url=respool_href, headers=headers)
3042
3043 if resp.status_code != requests.codes.ok:
3044 self.logger.debug(
3045 "REST API call {} failed. Return status code {}".format(
3046 respool_href, resp.status_code
3047 )
3048 )
3049 else:
3050 resp_xml = XmlElementTree.fromstring(resp.content)
3051 for child in resp_xml:
3052 if "VMWProviderVdcResourcePool" in child.tag:
3053 for schild in child:
3054 if "Link" in schild.tag:
3055 if (
3056 schild.attrib.get("type")
3057 == "application/vnd.vmware.admin.vmwVmHostAffinityRule+xml"
3058 and schild.attrib.get("rel") == "add"
3059 ):
3060 addrule_href = schild.attrib.get("href")
3061 break
3062
3063 return addrule_href
3064
3065 def add_vm_to_vmgroup(self, vm_uuid, vmGroupNameURL, vmGroup_name, headers):
3066 """Method to add deployed VM to newly created VM Group.
3067 This is required to create VM to Host affinity in vCD
3068
3069 Args:
3070 vm_uuid- newly created vm uuid
3071 vmGroupNameURL- URL to VM Group name
3072 vmGroup_name- Name of VM group created
3073 headers- Headers for REST request
3074
3075 Returns:
3076 True- if VM added to VM group successfully
3077             False- if any error is encountered
3078 """
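        # Flow (as implemented below): GET the VM group by name and locate its "addVms" link,
        # GET the VM record at /api/vApp/vm-<uuid> to obtain its href/id/name, then POST a
        # VmReference payload to that link and report success once vCD accepts the request.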
3079 addvm_resp = self.perform_request(
3080 req_type="GET", url=vmGroupNameURL, headers=headers
3081 ) # , data=payload)
3082
3083 if addvm_resp.status_code != requests.codes.ok:
3084 self.logger.debug(
3085 "REST API call to get VM Group Name url {} failed. Return status code {}".format(
3086 vmGroupNameURL, addvm_resp.status_code
3087 )
3088 )
3089 return False
3090 else:
3091 resp_xml = XmlElementTree.fromstring(addvm_resp.content)
3092 for child in resp_xml:
3093 if child.tag.split("}")[1] == "Link":
3094 if child.attrib.get("rel") == "addVms":
3095 addvmtogrpURL = child.attrib.get("href")
3096
3097 # Get vm details
3098 url_list = [self.url, "/api/vApp/vm-", vm_uuid]
3099 vmdetailsURL = "".join(url_list)
3100
3101 resp = self.perform_request(req_type="GET", url=vmdetailsURL, headers=headers)
3102
3103 if resp.status_code != requests.codes.ok:
3104 self.logger.debug(
3105 "REST API call {} failed. Return status code {}".format(
3106 vmdetailsURL, resp.status_code
3107 )
3108 )
3109 return False
3110
3111 # Parse VM details
3112 resp_xml = XmlElementTree.fromstring(resp.content)
3113 if resp_xml.tag.split("}")[1] == "Vm":
3114 vm_id = resp_xml.attrib.get("id")
3115 vm_name = resp_xml.attrib.get("name")
3116 vm_href = resp_xml.attrib.get("href")
3117 # print vm_id, vm_name, vm_href
3118
3119 # Add VM into VMgroup
3120 payload = """<?xml version="1.0" encoding="UTF-8"?>\
3121 <ns2:Vms xmlns:ns2="http://www.vmware.com/vcloud/v1.5" \
3122 xmlns="http://www.vmware.com/vcloud/versions" \
3123 xmlns:ns3="http://schemas.dmtf.org/ovf/envelope/1" \
3124 xmlns:ns4="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData" \
3125 xmlns:ns5="http://schemas.dmtf.org/wbem/wscim/1/common" \
3126 xmlns:ns6="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData" \
3127 xmlns:ns7="http://www.vmware.com/schema/ovf" \
3128 xmlns:ns8="http://schemas.dmtf.org/ovf/environment/1" \
3129 xmlns:ns9="http://www.vmware.com/vcloud/extension/v1.5">\
3130 <ns2:VmReference href="{}" id="{}" name="{}" \
3131 type="application/vnd.vmware.vcloud.vm+xml" />\
3132 </ns2:Vms>""".format(
3133 vm_href, vm_id, vm_name
3134 )
3135
3136 addvmtogrp_resp = self.perform_request(
3137 req_type="POST", url=addvmtogrpURL, headers=headers, data=payload
3138 )
3139
3140 if addvmtogrp_resp.status_code != requests.codes.accepted:
3141 self.logger.debug(
3142 "REST API call {} failed. Return status code {}".format(
3143 addvmtogrpURL, addvmtogrp_resp.status_code
3144 )
3145 )
3146
3147 return False
3148 else:
3149 self.logger.debug(
3150 "Done adding VM {} to VMgroup {}".format(vm_name, vmGroup_name)
3151 )
3152
3153 return True
3154
3155 def create_vmgroup(self, vmgroup_name, vmgroup_href, headers):
3156 """Method to create a VM group in vCD
3157
3158 Args:
3159 vmgroup_name : Name of VM group to be created
3160 vmgroup_href : href for vmgroup
3161 headers- Headers for REST request
3162 """
3163 # POST to add URL with required data
3164 vmgroup_status = False
3165 payload = """<VMWVmGroup xmlns="http://www.vmware.com/vcloud/extension/v1.5" \
3166 xmlns:vcloud_v1.5="http://www.vmware.com/vcloud/v1.5" name="{}">\
3167 <vmCount>1</vmCount>\
3168 </VMWVmGroup>""".format(
3169 vmgroup_name
3170 )
3171 resp = self.perform_request(
3172 req_type="POST", url=vmgroup_href, headers=headers, data=payload
3173 )
3174
3175 if resp.status_code != requests.codes.accepted:
3176 self.logger.debug(
3177 "REST API call {} failed. Return status code {}".format(
3178 vmgroup_href, resp.status_code
3179 )
3180 )
3181
3182 return vmgroup_status
3183 else:
3184 vmgroup_task = self.get_task_from_response(resp.content)
3185 if vmgroup_task is None or vmgroup_task is False:
3186 raise vimconn.VimConnUnexpectedResponse(
3187 "create_vmgroup(): failed to create VM group {}".format(
3188 vmgroup_name
3189 )
3190 )
3191
3192 # wait for task to complete
3193 result = self.client.get_task_monitor().wait_for_success(task=vmgroup_task)
3194
3195 if result.get("status") == "success":
3196 self.logger.debug(
3197 "create_vmgroup(): Successfully created VM group {}".format(
3198 vmgroup_name
3199 )
3200 )
3201 # time.sleep(10)
3202 vmgroup_status = True
3203
3204 return vmgroup_status
3205 else:
3206 raise vimconn.VimConnUnexpectedResponse(
3207 "create_vmgroup(): failed to create VM group {}".format(
3208 vmgroup_name
3209 )
3210 )
3211
3212 def find_vmgroup_reference(self, url, headers):
3213         """Method to find the href required to create a new VMGroup, which is needed to add the created VM
3214 Args:
3215 url- resource pool href
3216 headers- header information
3217
3218 Returns:
3219             returns the href used to create a VM group, or None if not found
3220 """
3221 # Perform GET on resource pool to find 'add' link to create VMGroup
3222 # https://vcd-ip/api/admin/extension/providervdc/<providervdc id>/resourcePools
3223 vmgrp_href = None
3224 resp = self.perform_request(req_type="GET", url=url, headers=headers)
3225
3226 if resp.status_code != requests.codes.ok:
3227 self.logger.debug(
3228 "REST API call {} failed. Return status code {}".format(
3229 url, resp.status_code
3230 )
3231 )
3232 else:
3233 # Get the href to add vmGroup to vCD
3234 resp_xml = XmlElementTree.fromstring(resp.content)
3235 for child in resp_xml:
3236 if "VMWProviderVdcResourcePool" in child.tag:
3237 for schild in child:
3238 if "Link" in schild.tag:
3239 # Find href with type VMGroup and rel with add
3240 if (
3241 schild.attrib.get("type")
3242 == "application/vnd.vmware.admin.vmwVmGroupType+xml"
3243 and schild.attrib.get("rel") == "add"
3244 ):
3245 vmgrp_href = schild.attrib.get("href")
3246
3247 return vmgrp_href
3248
3249 def check_availibility_zone(self, az, respool_href, headers):
3250 """Method to verify requested av zone is present or not in provided
3251 resource pool
3252
3253 Args:
3254             az - name of hostgroup (availability_zone)
3255 respool_href - Resource Pool href
3256 headers - Headers to make REST call
3257 Returns:
3258             az_found - True if the availability zone is found else False
3259 """
3260 az_found = False
3261 headers["Accept"] = "application/*+xml;version=27.0"
3262 resp = self.perform_request(req_type="GET", url=respool_href, headers=headers)
3263
3264 if resp.status_code != requests.codes.ok:
3265 self.logger.debug(
3266 "REST API call {} failed. Return status code {}".format(
3267 respool_href, resp.status_code
3268 )
3269 )
3270 else:
3271 # Get the href to hostGroups and find provided hostGroup is present in it
3272 resp_xml = XmlElementTree.fromstring(resp.content)
3273
3274 for child in resp_xml:
3275 if "VMWProviderVdcResourcePool" in child.tag:
3276 for schild in child:
3277 if "Link" in schild.tag:
3278 if (
3279 schild.attrib.get("type")
3280 == "application/vnd.vmware.admin.vmwHostGroupsType+xml"
3281 ):
3282 hostGroup_href = schild.attrib.get("href")
3283 hg_resp = self.perform_request(
3284 req_type="GET", url=hostGroup_href, headers=headers
3285 )
3286
3287 if hg_resp.status_code != requests.codes.ok:
3288 self.logger.debug(
3289 "REST API call {} failed. Return status code {}".format(
3290 hostGroup_href, hg_resp.status_code
3291 )
3292 )
3293 else:
3294 hg_resp_xml = XmlElementTree.fromstring(
3295 hg_resp.content
3296 )
3297 for hostGroup in hg_resp_xml:
3298 if "HostGroup" in hostGroup.tag:
3299 if hostGroup.attrib.get("name") == az:
3300 az_found = True
3301 break
3302
3303 return az_found
3304
3305 def get_pvdc_for_org(self, org_vdc, headers):
3306 """This method gets provider vdc references from organisation
3307
3308 Args:
3309 org_vdc - name of the organisation VDC to find pvdc
3310 headers - headers to make REST call
3311
3312 Returns:
3313 None - if no pvdc href found else
3314 pvdc_href - href to pvdc
3315 """
3316 # Get provider VDC references from vCD
3317 pvdc_href = None
3318 # url = '<vcd url>/api/admin/extension/providerVdcReferences'
3319 url_list = [self.url, "/api/admin/extension/providerVdcReferences"]
3320 url = "".join(url_list)
3321
3322 response = self.perform_request(req_type="GET", url=url, headers=headers)
3323 if response.status_code != requests.codes.ok:
3324 self.logger.debug(
3325 "REST API call {} failed. Return status code {}".format(
3326 url, response.status_code
3327 )
3328 )
3329 else:
3330 xmlroot_response = XmlElementTree.fromstring(response.text)
3331 for child in xmlroot_response:
3332 if "ProviderVdcReference" in child.tag:
3333 pvdc_href = child.attrib.get("href")
3334 # Get vdcReferences to find org
3335 pvdc_resp = self.perform_request(
3336 req_type="GET", url=pvdc_href, headers=headers
3337 )
3338
3339 if pvdc_resp.status_code != requests.codes.ok:
3340 raise vimconn.VimConnException(
3341 "REST API call {} failed. "
3342 "Return status code {}".format(url, pvdc_resp.status_code)
3343 )
3344
3345 pvdc_resp_xml = XmlElementTree.fromstring(pvdc_resp.content)
3346 for child in pvdc_resp_xml:
3347 if "Link" in child.tag:
3348 if (
3349 child.attrib.get("type")
3350 == "application/vnd.vmware.admin.vdcReferences+xml"
3351 ):
3352 vdc_href = child.attrib.get("href")
3353
3354 # Check if provided org is present in vdc
3355 vdc_resp = self.perform_request(
3356 req_type="GET", url=vdc_href, headers=headers
3357 )
3358
3359 if vdc_resp.status_code != requests.codes.ok:
3360 raise vimconn.VimConnException(
3361 "REST API call {} failed. "
3362 "Return status code {}".format(
3363 url, vdc_resp.status_code
3364 )
3365 )
3366 vdc_resp_xml = XmlElementTree.fromstring(
3367 vdc_resp.content
3368 )
3369
3370 for child in vdc_resp_xml:
3371 if "VdcReference" in child.tag:
3372 if child.attrib.get("name") == org_vdc:
3373 return pvdc_href
3374
3375 def get_resource_pool_details(self, pvdc_href, headers):
3376 """Method to get resource pool information.
3377 Host groups are property of resource group.
3378 To get host groups, we need to GET details of resource pool.
3379
3380 Args:
3381 pvdc_href: href to pvdc details
3382 headers: headers
3383
3384 Returns:
3385 respool_href - Returns href link reference to resource pool
3386 """
3387 respool_href = None
3388 resp = self.perform_request(req_type="GET", url=pvdc_href, headers=headers)
3389
3390 if resp.status_code != requests.codes.ok:
3391 self.logger.debug(
3392 "REST API call {} failed. Return status code {}".format(
3393 pvdc_href, resp.status_code
3394 )
3395 )
3396 else:
3397 respool_resp_xml = XmlElementTree.fromstring(resp.content)
3398 for child in respool_resp_xml:
3399 if "Link" in child.tag:
3400 if (
3401 child.attrib.get("type")
3402 == "application/vnd.vmware.admin.vmwProviderVdcResourcePoolSet+xml"
3403 ):
3404 respool_href = child.attrib.get("href")
3405 break
3406
3407 return respool_href
3408
3409 def log_message(self, msg):
3410 """
3411 Method to log error messages related to Affinity rule creation
3412 in new_vminstance & raise Exception
3413 Args :
3414 msg - Error message to be logged
3415
3416 """
3417 # get token to connect vCD as a normal user
3418 self.get_token()
3419 self.logger.debug(msg)
3420
3421 raise vimconn.VimConnException(msg)
3422
3423 # #
3424 # #
3425 # # based on current discussion
3426 # #
3427 # #
3428 # # server:
3429 # created: '2016-09-08T11:51:58'
3430 # description: simple-instance.linux1.1
3431 # flavor: ddc6776e-75a9-11e6-ad5f-0800273e724c
3432 # hostId: e836c036-74e7-11e6-b249-0800273e724c
3433 # image: dde30fe6-75a9-11e6-ad5f-0800273e724c
3434 # status: ACTIVE
3435 # error_msg:
3436 # interfaces: …
3437 #
3438 def get_vminstance(self, vim_vm_uuid=None):
3439 """Returns the VM instance information from VIM"""
3440 self.logger.debug("Client requesting vm instance {} ".format(vim_vm_uuid))
3441
3442 _, vdc = self.get_vdc_details()
3443 if vdc is None:
3444 raise vimconn.VimConnConnectionException(
3445 "Failed to get a reference of VDC for a tenant {}".format(
3446 self.tenant_name
3447 )
3448 )
3449
3450 vm_info_dict = self.get_vapp_details_rest(vapp_uuid=vim_vm_uuid)
3451 if not vm_info_dict:
3452 self.logger.debug(
3453 "get_vminstance(): Failed to get vApp name by UUID {}".format(
3454 vim_vm_uuid
3455 )
3456 )
3457 raise vimconn.VimConnNotFoundException(
3458 "Failed to get vApp name by UUID {}".format(vim_vm_uuid)
3459 )
3460
3461 status_key = vm_info_dict["status"]
3462 error = ""
3463 try:
3464 vm_dict = {
3465 "created": vm_info_dict["created"],
3466 "description": vm_info_dict["name"],
3467 "status": vcdStatusCode2manoFormat[int(status_key)],
3468 "hostId": vm_info_dict["vmuuid"],
3469 "error_msg": error,
3470 "vim_info": yaml.safe_dump(vm_info_dict),
3471 "interfaces": [],
3472 }
3473
3474 if "interfaces" in vm_info_dict:
3475 vm_dict["interfaces"] = vm_info_dict["interfaces"]
3476 else:
3477 vm_dict["interfaces"] = []
3478 except KeyError:
3479 vm_dict = {
3480 "created": "",
3481 "description": "",
3482 "status": vcdStatusCode2manoFormat[int(-1)],
3483 "hostId": vm_info_dict["vmuuid"],
3484                 "error_msg": "Inconsistent state",
3485 "vim_info": yaml.safe_dump(vm_info_dict),
3486 "interfaces": [],
3487 }
3488
3489 return vm_dict
3490
3491 def delete_vminstance(self, vm_id, created_items=None, volumes_to_hold=None):
3492         """Method powers off and removes a VM instance from the vCloud Director network.
3493 
3494         Args:
3495             vm_id: VM UUID
3496             created_items: dictionary of extra items created by new_vminstance, if any (currently unused by this connector)
3497             volumes_to_hold: list of volumes that must not be deleted, if any (currently unused by this connector)
3498 
3499         Returns:
3500             Returns the instance identifier
3501         """
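        # Deletion sequence (as implemented below): power off the vApp if it is deployed,
        # undeploy it, delete it from the VDC, and finally remove the per-VM config-drive
        # catalog ("cfg_drv-<vm_id>") if new_vminstance created one.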
3500 self.logger.debug("Client requesting delete vm instance {} ".format(vm_id))
3501
3502 _, vdc = self.get_vdc_details()
3503 vdc_obj = VDC(self.client, href=vdc.get("href"))
3504 if vdc_obj is None:
3505 self.logger.debug(
3506 "delete_vminstance(): Failed to get a reference of VDC for a tenant {}".format(
3507 self.tenant_name
3508 )
3509 )
3510 raise vimconn.VimConnException(
3511 "delete_vminstance(): Failed to get a reference of VDC for a tenant {}".format(
3512 self.tenant_name
3513 )
3514 )
3515
3516 try:
3517 vapp_name = self.get_namebyvappid(vm_id)
3518 if vapp_name is None:
3519 self.logger.debug(
3520 "delete_vminstance(): Failed to get vm by given {} vm uuid".format(
3521 vm_id
3522 )
3523 )
3524
3525 return (
3526 -1,
3527 "delete_vminstance(): Failed to get vm by given {} vm uuid".format(
3528 vm_id
3529 ),
3530 )
3531
3532 self.logger.info("Deleting vApp {} and UUID {}".format(vapp_name, vm_id))
3533 vapp_resource = vdc_obj.get_vapp(vapp_name)
3534 vapp = VApp(self.client, resource=vapp_resource)
3535
3536             # Power off, undeploy and delete the vApp, waiting for each task to complete.
3537 if vapp:
3538 if vapp_resource.get("deployed") == "true":
3539 self.logger.info("Powering off vApp {}".format(vapp_name))
3540 # Power off vApp
3541 powered_off = False
3542 wait_time = 0
3543
3544 while wait_time <= MAX_WAIT_TIME:
3545 power_off_task = vapp.power_off()
3546 result = self.client.get_task_monitor().wait_for_success(
3547 task=power_off_task
3548 )
3549
3550 if result.get("status") == "success":
3551 powered_off = True
3552 break
3553 else:
3554 self.logger.info(
3555 "Wait for vApp {} to power off".format(vapp_name)
3556 )
3557 time.sleep(INTERVAL_TIME)
3558
3559 wait_time += INTERVAL_TIME
3560
3561 if not powered_off:
3562 self.logger.debug(
3563 "delete_vminstance(): Failed to power off VM instance {} ".format(
3564 vm_id
3565 )
3566 )
3567 else:
3568 self.logger.info(
3569 "delete_vminstance(): Powered off VM instance {} ".format(
3570 vm_id
3571 )
3572 )
3573
3574 # Undeploy vApp
3575 self.logger.info("Undeploy vApp {}".format(vapp_name))
3576 wait_time = 0
3577 undeployed = False
3578 while wait_time <= MAX_WAIT_TIME:
3579 vapp = VApp(self.client, resource=vapp_resource)
3580 if not vapp:
3581 self.logger.debug(
3582 "delete_vminstance(): Failed to get vm by given {} vm uuid".format(
3583 vm_id
3584 )
3585 )
3586
3587 return (
3588 -1,
3589 "delete_vminstance(): Failed to get vm by given {} vm uuid".format(
3590 vm_id
3591 ),
3592 )
3593
3594 undeploy_task = vapp.undeploy()
3595 result = self.client.get_task_monitor().wait_for_success(
3596 task=undeploy_task
3597 )
3598
3599 if result.get("status") == "success":
3600 undeployed = True
3601 break
3602 else:
3603 self.logger.debug(
3604 "Wait for vApp {} to undeploy".format(vapp_name)
3605 )
3606 time.sleep(INTERVAL_TIME)
3607
3608 wait_time += INTERVAL_TIME
3609
3610 if not undeployed:
3611 self.logger.debug(
3612 "delete_vminstance(): Failed to undeploy vApp {} ".format(
3613 vm_id
3614 )
3615 )
3616
3617 # delete vapp
3618 self.logger.info("Start deletion of vApp {} ".format(vapp_name))
3619 if vapp is not None:
3620 wait_time = 0
3621 result = False
3622
3623 while wait_time <= MAX_WAIT_TIME:
3624 vapp = VApp(self.client, resource=vapp_resource)
3625 if not vapp:
3626 self.logger.debug(
3627 "delete_vminstance(): Failed to get vm by given {} vm uuid".format(
3628 vm_id
3629 )
3630 )
3631
3632 return (
3633 -1,
3634 "delete_vminstance(): Failed to get vm by given {} vm uuid".format(
3635 vm_id
3636 ),
3637 )
3638
3639 delete_task = vdc_obj.delete_vapp(vapp.name, force=True)
3640 result = self.client.get_task_monitor().wait_for_success(
3641 task=delete_task
3642 )
3643 if result.get("status") == "success":
3644 break
3645 else:
3646 self.logger.debug(
3647 "Wait for vApp {} to delete".format(vapp_name)
3648 )
3649 time.sleep(INTERVAL_TIME)
3650
3651 wait_time += INTERVAL_TIME
3652
3653 if result is None:
3654 self.logger.debug(
3655 "delete_vminstance(): Failed delete uuid {} ".format(vm_id)
3656 )
3657 else:
3658 self.logger.info(
3659 "Deleted vm instance {} successfully".format(vm_id)
3660 )
3661 config_drive_catalog_name, config_drive_catalog_id = (
3662 "cfg_drv-" + vm_id,
3663 None,
3664 )
3665 catalog_list = self.get_image_list()
3666
3667 try:
3668 config_drive_catalog_id = [
3669 catalog_["id"]
3670 for catalog_ in catalog_list
3671 if catalog_["name"] == config_drive_catalog_name
3672 ][0]
3673 except IndexError:
3674 pass
3675
3676 if config_drive_catalog_id:
3677 self.logger.debug(
3678 "delete_vminstance(): Found a config drive catalog {} matching "
3679 'vapp_name"{}". Deleting it.'.format(
3680 config_drive_catalog_id, vapp_name
3681 )
3682 )
3683 self.delete_image(config_drive_catalog_id)
3684
3685 return vm_id
3686 except Exception:
3687 self.logger.debug(traceback.format_exc())
3688
3689 raise vimconn.VimConnException(
3690 "delete_vminstance(): Failed delete vm instance {}".format(vm_id)
3691 )
3692
3693 def refresh_vms_status(self, vm_list):
3694 """Get the status of the virtual machines and their interfaces/ports
3695 Params: the list of VM identifiers
3696 Returns a dictionary with:
3697 vm_id: #VIM id of this Virtual Machine
3698 status: #Mandatory. Text with one of:
3699 # DELETED (not found at vim)
3700 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
3701 # OTHER (Vim reported other status not understood)
3702 # ERROR (VIM indicates an ERROR status)
3703 # ACTIVE, PAUSED, SUSPENDED, INACTIVE (not running),
3704 # CREATING (on building process), ERROR
3705 #  ACTIVE:NoMgmtIP (Active but none of its interfaces has an IP address)
3706 #
3707 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
3708 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
3709 interfaces:
3710 - vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
3711 mac_address: #Text format XX:XX:XX:XX:XX:XX
3712 vim_net_id: #network id where this interface is connected
3713 vim_interface_id: #interface/port VIM id
3714 ip_address: #null, or text with IPv4, IPv6 address
3715 """
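# Illustrative shape of the returned dictionary for a single running VM; the
# UUID and addresses below are invented for illustration only, and the keys
# mirror the vm_dict built further down in this method:
#
#   {
#       "<vm uuid>": {
#           "status": "ACTIVE",
#           "error_msg": "ACTIVE",
#           "vim_info": "<yaml.safe_dump of the VM details>",
#           "interfaces": [
#               {
#                   "mac_address": "00:50:56:aa:bb:cc",
#                   "vim_net_id": "<network uuid>",
#                   "vim_interface_id": "<network uuid>",
#                   "ip_address": "10.0.0.5",
#               }
#           ],
#       }
#   }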
3716 self.logger.debug("Client requesting refresh vm status for {} ".format(vm_list))
3717
3718 _, vdc = self.get_vdc_details()
3719 if vdc is None:
3720 raise vimconn.VimConnException(
3721 "Failed to get a reference of VDC for a tenant {}".format(
3722 self.tenant_name
3723 )
3724 )
3725
3726 vms_dict = {}
3727 nsx_edge_list = []
3728 for vmuuid in vm_list:
3729 vapp_name = self.get_namebyvappid(vmuuid)
3730 if vapp_name is not None:
3731 try:
3732 vm_pci_details = self.get_vm_pci_details(vmuuid)
3733 vdc_obj = VDC(self.client, href=vdc.get("href"))
3734 vapp_resource = vdc_obj.get_vapp(vapp_name)
3735 the_vapp = VApp(self.client, resource=vapp_resource)
3736
3737 vm_details = {}
3738 for vm in the_vapp.get_all_vms():
3739 headers = {
3740 "Accept": "application/*+xml;version=" + API_VERSION,
3741 "x-vcloud-authorization": self.client._session.headers[
3742 "x-vcloud-authorization"
3743 ],
3744 }
3745 response = self.perform_request(
3746 req_type="GET", url=vm.get("href"), headers=headers
3747 )
3748
3749 if response.status_code != 200:
3750 self.logger.error(
3751 "refresh_vms_status : REST call {} failed reason : {}"
3752 "status code : {}".format(
3753 vm.get("href"), response.text, response.status_code
3754 )
3755 )
3756 raise vimconn.VimConnException(
3757 "refresh_vms_status : Failed to get VM details"
3758 )
3759
3760 xmlroot = XmlElementTree.fromstring(response.text)
3761 result = response.text.replace("\n", " ")
3762 hdd_match = re.search(
3763 r'vcloud:capacity="(\d+)"\svcloud:storageProfileOverrideVmDefault=',
3764 result,
3765 )
3766
3767 if hdd_match:
3768 hdd_mb = hdd_match.group(1)
3769 vm_details["hdd_mb"] = int(hdd_mb) if hdd_mb else None
3770
3771 cpus_match = re.search(
3772 "<rasd:Description>Number of Virtual CPUs</.*?>(\d+)</rasd:VirtualQuantity>",
3773 result,
3774 )
3775
3776 if cpus_match:
3777 cpus = cpus_match.group(1)
3778 vm_details["cpus"] = int(cpus) if cpus else None
3779
3780 memory_mb = re.search(
3781 "<rasd:Description>Memory Size</.*?>(\d+)</rasd:VirtualQuantity>",
3782 result,
3783 ).group(1)
3784 vm_details["memory_mb"] = int(memory_mb) if memory_mb else None
3785 vm_details["status"] = vcdStatusCode2manoFormat[
3786 int(xmlroot.get("status"))
3787 ]
3788 vm_details["id"] = xmlroot.get("id")
3789 vm_details["name"] = xmlroot.get("name")
3790 vm_info = [vm_details]
3791
3792 if vm_pci_details:
3793 vm_info[0].update(vm_pci_details)
3794
3795 vm_dict = {
3796 "status": vcdStatusCode2manoFormat[
3797 int(vapp_resource.get("status"))
3798 ],
3799 "error_msg": vcdStatusCode2manoFormat[
3800 int(vapp_resource.get("status"))
3801 ],
3802 "vim_info": yaml.safe_dump(vm_info),
3803 "interfaces": [],
3804 }
3805
3806 # get networks
3807 vm_ip = None
3808 vm_mac = None
3809 networks = re.findall(
3810 "<NetworkConnection needsCustomization=.*?</NetworkConnection>",
3811 result,
3812 )
3813
3814 for network in networks:
3815 mac_s = re.search("<MACAddress>(.*?)</MACAddress>", network)
3816 vm_mac = mac_s.group(1) if mac_s else None
3817 ip_s = re.search("<IpAddress>(.*?)</IpAddress>", network)
3818 vm_ip = ip_s.group(1) if ip_s else None
3819
3820 if vm_ip is None:
3821 if not nsx_edge_list:
3822 nsx_edge_list = self.get_edge_details()
3823 if nsx_edge_list is None:
3824 raise vimconn.VimConnException(
3825 "refresh_vms_status:"
3826 "Failed to get edge details from NSX Manager"
3827 )
3828
3829 if vm_mac is not None:
3830 vm_ip = self.get_ipaddr_from_NSXedge(
3831 nsx_edge_list, vm_mac
3832 )
3833
3834 net_s = re.search('network="(.*?)"', network)
3835 network_name = net_s.group(1) if net_s else None
3836 vm_net_id = self.get_network_id_by_name(network_name)
3837 interface = {
3838 "mac_address": vm_mac,
3839 "vim_net_id": vm_net_id,
3840 "vim_interface_id": vm_net_id,
3841 "ip_address": vm_ip,
3842 }
3843 vm_dict["interfaces"].append(interface)
3844
3845 # add a vm to vm dict
3846 vms_dict.setdefault(vmuuid, vm_dict)
3847 self.logger.debug("refresh_vms_status : vm info {}".format(vm_dict))
3848 except Exception as exp:
3849 self.logger.debug("Error in response {}".format(exp))
3850 self.logger.debug(traceback.format_exc())
3851
3852 return vms_dict
3853
3854 def get_edge_details(self):
3855 """Get the NSX edge list from NSX Manager
3856 Returns list of NSX edges
3857 """
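# Illustrative return value: a list of NSX edge identifiers as reported by the
# NSX Manager, e.g. ["edge-1", "edge-4"]; the actual values are
# deployment-specific.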
3858 edge_list = []
3859 rheaders = {"Content-Type": "application/xml"}
3860 nsx_api_url = "/api/4.0/edges"
3861
3862 self.logger.debug(
3863 "Get edge details from NSX Manager {} {}".format(
3864 self.nsx_manager, nsx_api_url
3865 )
3866 )
3867
3868 try:
3869 resp = requests.get(
3870 self.nsx_manager + nsx_api_url,
3871 auth=(self.nsx_user, self.nsx_password),
3872 verify=False,
3873 headers=rheaders,
3874 )
3875 if resp.status_code == requests.codes.ok:
3876 paged_Edge_List = XmlElementTree.fromstring(resp.text)
3877 for edge_pages in paged_Edge_List:
3878 if edge_pages.tag == "edgePage":
3879 for edge_summary in edge_pages:
3880 if edge_summary.tag == "pagingInfo":
3881 for element in edge_summary:
3882 if (
3883 element.tag == "totalCount"
3884 and element.text == "0"
3885 ):
3886 raise vimconn.VimConnException(
3887 "get_edge_details: No NSX edges details found: {}".format(
3888 self.nsx_manager
3889 )
3890 )
3891
3892 if edge_summary.tag == "edgeSummary":
3893 for element in edge_summary:
3894 if element.tag == "id":
3895 edge_list.append(element.text)
3896 else:
3897 raise vimconn.VimConnException(
3898 "get_edge_details: No NSX edge details found: {}".format(
3899 self.nsx_manager
3900 )
3901 )
3902
3903 if not edge_list:
3904 raise vimconn.VimConnException(
3905 "get_edge_details: "
3906 "No NSX edge details found: {}".format(self.nsx_manager)
3907 )
3908 else:
3909 self.logger.debug(
3910 "get_edge_details: Found NSX edges {}".format(edge_list)
3911 )
3912
3913 return edge_list
3914 else:
3915 self.logger.debug(
3916 "get_edge_details: "
3917 "Failed to get NSX edge details from NSX Manager: {}".format(
3918 resp.content
3919 )
3920 )
3921
3922 return None
3923
3924 except Exception as exp:
3925 self.logger.debug(
3926 "get_edge_details: "
3927 "Failed to get NSX edge details from NSX Manager: {}".format(exp)
3928 )
3929 raise vimconn.VimConnException(
3930 "get_edge_details: "
3931 "Failed to get NSX edge details from NSX Manager: {}".format(exp)
3932 )
3933
3934 def get_ipaddr_from_NSXedge(self, nsx_edges, mac_address):
3935 """Get IP address details from NSX edges, using the MAC address
3936 PARAMS: nsx_edges : List of NSX edges
3937 mac_address : Find IP address corresponding to this MAC address
3938 Returns: IP address corresponding to the provided MAC address
3939 """
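# Rough sketch of the DHCP lease XML this method walks through; only the
# dhcpLeaseInfo/leaseInfo/macAddress/ipAddress element names are taken from the
# parsing below, while the enclosing root element and the values are
# assumptions for illustration:
#
#   <...>
#     <dhcpLeaseInfo>
#       <leaseInfo>
#         <macAddress>00:50:56:aa:bb:cc</macAddress>
#         <ipAddress>10.0.0.5</ipAddress>
#       </leaseInfo>
#     </dhcpLeaseInfo>
#   </...>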
3940 ip_addr = edge_mac_addr = None  # initialize both so the check below cannot hit an unassigned name
3941 rheaders = {"Content-Type": "application/xml"}
3942
3943 self.logger.debug("get_ipaddr_from_NSXedge: Finding IP addr from NSX edge")
3944
3945 try:
3946 for edge in nsx_edges:
3947 nsx_api_url = "/api/4.0/edges/" + edge + "/dhcp/leaseInfo"
3948
3949 resp = requests.get(
3950 self.nsx_manager + nsx_api_url,
3951 auth=(self.nsx_user, self.nsx_password),
3952 verify=False,
3953 headers=rheaders,
3954 )
3955
3956 if resp.status_code == requests.codes.ok:
3957 dhcp_leases = XmlElementTree.fromstring(resp.text)
3958 for child in dhcp_leases:
3959 if child.tag == "dhcpLeaseInfo":
3960 dhcpLeaseInfo = child
3961 for leaseInfo in dhcpLeaseInfo:
3962 for elem in leaseInfo:
3963 if (elem.tag) == "macAddress":
3964 edge_mac_addr = elem.text
3965
3966 if (elem.tag) == "ipAddress":
3967 ip_addr = elem.text
3968
3969 if edge_mac_addr is not None:
3970 if edge_mac_addr == mac_address:
3971 self.logger.debug(
3972 "Found ip addr {} for mac {} at NSX edge {}".format(
3973 ip_addr, mac_address, edge
3974 )
3975 )
3976
3977 return ip_addr
3978 else:
3979 self.logger.debug(
3980 "get_ipaddr_from_NSXedge: "
3981 "Error occurred while getting DHCP lease info from NSX Manager: {}".format(
3982 resp.content
3983 )
3984 )
3985
3986 self.logger.debug(
3987 "get_ipaddr_from_NSXedge: No IP addr found in any NSX edge"
3988 )
3989
3990 return None
3991
3992 except XmlElementTree.ParseError as Err:
3993 self.logger.debug(
3994 "ParseError in response from NSX Manager {}".format(Err.message),
3995 exc_info=True,
3996 )
3997
3998 def action_vminstance(self, vm_id=None, action_dict=None, created_items={}):
3999 """Send and action over a VM instance from VIM
4000 Returns the vm_id if the action was successfully sent to the VIM"""
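# Illustrative usage sketch (assumes an already-built connector instance named
# `vim_conn` and a valid vApp UUID in `vm_id`; both names are hypothetical):
#
#   vim_conn.action_vminstance(vm_id, {"shutdown": None})
#   vim_conn.action_vminstance(vm_id, {"start": None})
#
# The action keys recognized below are: "start", "rebuild", "pause", "resume",
# "shutoff"/"shutdown", "forceOff" and "reboot".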
4001
4002 self.logger.debug(
4003 "Received action for vm {} and action dict {}".format(vm_id, action_dict)
4004 )
4005
4006 if vm_id is None or action_dict is None:
4007 raise vimconn.VimConnException("Invalid request. VM id or action is None.")
4008
4009 _, vdc = self.get_vdc_details()
4010 if vdc is None:
4011 raise vimconn.VimConnException(
4012 "Failed to get a reference of VDC for a tenant {}".format(
4013 self.tenant_name
4014 )
4015 )
4016
4017 vapp_name = self.get_namebyvappid(vm_id)
4018 if vapp_name is None:
4019 self.logger.debug(
4020 "action_vminstance(): Failed to get vm by given {} vm uuid".format(
4021 vm_id
4022 )
4023 )
4024
4025 raise vimconn.VimConnException(
4026 "Failed to get vm by given {} vm uuid".format(vm_id)
4027 )
4028 else:
4029 self.logger.info(
4030 "Action_vminstance vApp {} and UUID {}".format(vapp_name, vm_id)
4031 )
4032
4033 try:
4034 vdc_obj = VDC(self.client, href=vdc.get("href"))
4035 vapp_resource = vdc_obj.get_vapp(vapp_name)
4036 vapp = VApp(self.client, resource=vapp_resource)
4037
4038 if "start" in action_dict:
4039 self.logger.info(
4040 "action_vminstance: Power on vApp: {}".format(vapp_name)
4041 )
4042 poweron_task = self.power_on_vapp(vm_id, vapp_name)
4043 result = self.client.get_task_monitor().wait_for_success(
4044 task=poweron_task
4045 )
4046 self.instance_actions_result("start", result, vapp_name)
4047 elif "rebuild" in action_dict:
4048 self.logger.info(
4049 "action_vminstance: Rebuild vApp: {}".format(vapp_name)
4050 )
4051 rebuild_task = vapp.deploy(power_on=True)
4052 result = self.client.get_task_monitor().wait_for_success(
4053 task=rebuild_task
4054 )
4055 self.instance_actions_result("rebuild", result, vapp_name)
4056 elif "pause" in action_dict:
4057 self.logger.info("action_vminstance: pause vApp: {}".format(vapp_name))
4058 pause_task = vapp.undeploy(action="suspend")
4059 result = self.client.get_task_monitor().wait_for_success(
4060 task=pause_task
4061 )
4062 self.instance_actions_result("pause", result, vapp_name)
4063 elif "resume" in action_dict:
4064 self.logger.info("action_vminstance: resume vApp: {}".format(vapp_name))
4065 poweron_task = self.power_on_vapp(vm_id, vapp_name)
4066 result = self.client.get_task_monitor().wait_for_success(
4067 task=poweron_task
4068 )
4069 self.instance_actions_result("resume", result, vapp_name)
4070 elif "shutoff" in action_dict or "shutdown" in action_dict:
4071 action_name, _ = list(action_dict.items())[0]
4072 self.logger.info(
4073 "action_vminstance: {} vApp: {}".format(action_name, vapp_name)
4074 )
4075 shutdown_task = vapp.shutdown()
4076 result = self.client.get_task_monitor().wait_for_success(
4077 task=shutdown_task
4078 )
4079 if action_name == "shutdown":
4080 self.instance_actions_result("shutdown", result, vapp_name)
4081 else:
4082 self.instance_actions_result("shutoff", result, vapp_name)
4083 elif "forceOff" in action_dict:
4084 result = vapp.undeploy(action="powerOff")
4085 self.instance_actions_result("forceOff", result, vapp_name)
4086 elif "reboot" in action_dict:
4087 self.logger.info("action_vminstance: reboot vApp: {}".format(vapp_name))
4088 reboot_task = vapp.reboot()
4089 self.client.get_task_monitor().wait_for_success(task=reboot_task)
4090 else:
4091 raise vimconn.VimConnException(
4092 "action_vminstance: Invalid action {} or action is None.".format(
4093 action_dict
4094 )
4095 )
4096
4097 return vm_id
4098 except Exception as exp:
4099 self.logger.debug("action_vminstance: Failed with Exception {}".format(exp))
4100
4101 raise vimconn.VimConnException(
4102 "action_vminstance: Failed with Exception {}".format(exp)
4103 )
4104
4105 def instance_actions_result(self, action, result, vapp_name):
4106 if result.get("status") == "success":
4107 self.logger.info(
4108 "action_vminstance: Sucessfully {} the vApp: {}".format(
4109 action, vapp_name
4110 )
4111 )
4112 else:
4113 self.logger.error(
4114 "action_vminstance: Failed to {} vApp: {}".format(action, vapp_name)
4115 )
4116
4117 def get_vminstance_console(self, vm_id, console_type="novnc"):
4118 """
4119 Get a console for the virtual machine
4120 Params:
4121 vm_id: uuid of the VM
4122 console_type, can be:
4123 "novnc" (by default), "xvpvnc" for VNC types,
4124 "rdp-html5" for RDP types, "spice-html5" for SPICE types
4125 Returns dict with the console parameters:
4126 protocol: ssh, ftp, http, https, ...
4127 server: usually ip address
4128 port: the http, ssh, ... port
4129 suffix: extra text, e.g. the http path and query string
4130 """
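# Illustrative return value for a "novnc" request; the server, port and suffix
# shown here are invented, the real values are parsed from the vCD REST
# responses below:
#
#   {
#       "protocol": "https",
#       "server": "vcenter-host.example.com",
#       "port": "443",
#       "suffix": "vm-1234/screen/ticket/...",
#   }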
4131 console_dict = {}
4132
4133 if console_type is None or console_type == "novnc":
4134 url_rest_call = "{}/api/vApp/vm-{}/screen/action/acquireMksTicket".format(
4135 self.url, vm_id
4136 )
4137 headers = {
4138 "Accept": "application/*+xml;version=" + API_VERSION,
4139 "x-vcloud-authorization": self.client._session.headers[
4140 "x-vcloud-authorization"
4141 ],
4142 }
4143 response = self.perform_request(
4144 req_type="POST", url=url_rest_call, headers=headers
4145 )
4146
4147 if response.status_code == 403:
4148 response = self.retry_rest("GET", url_rest_call)
4149
4150 if response.status_code != 200:
4151 self.logger.error(
4152 "REST call {} failed reason : {}"
4153 "status code : {}".format(
4154 url_rest_call, response.text, response.status_code
4155 )
4156 )
4157 raise vimconn.VimConnException(
4158 "get_vminstance_console : Failed to get " "VM Mks ticket details"
4159 )
4160
4161 s = re.search("<Host>(.*?)</Host>", response.text)
4162 console_dict["server"] = s.group(1) if s else None
4163 s1 = re.search(r"<Port>(\d+)</Port>", response.text)
4164 console_dict["port"] = s1.group(1) if s1 else None
4165 url_rest_call = "{}/api/vApp/vm-{}/screen/action/acquireTicket".format(
4166 self.url, vm_id
4167 )
4168 headers = {
4169 "Accept": "application/*+xml;version=" + API_VERSION,
4170 "x-vcloud-authorization": self.client._session.headers[
4171 "x-vcloud-authorization"
4172 ],
4173 }
4174 response = self.perform_request(
4175 req_type="POST", url=url_rest_call, headers=headers
4176 )
4177
4178 if response.status_code == 403:
4179 response = self.retry_rest("GET", url_rest_call)
4180
4181 if response.status_code != 200:
4182 self.logger.error(
4183 "REST call {} failed reason : {}"
4184 "status code : {}".format(
4185 url_rest_call, response.text, response.status_code
4186 )
4187 )
4188 raise vimconn.VimConnException(
4189 "get_vminstance_console : Failed to get " "VM console details"
4190 )
4191
4192 s = re.search(r">.*?/(vm-\d+.*)</", response.text)
4193 console_dict["suffix"] = s.group(1) if s else None
4194 console_dict["protocol"] = "https"
4195
4196 return console_dict
4197
4198 # NOT USED METHODS in current version
4199
4200 def host_vim2gui(self, host, server_dict):
4201 """Transform host dictionary from VIM format to GUI format,
4202 and append to the server_dict
4203 """
4204 raise vimconn.VimConnNotImplemented("Should have implemented this")
4205
4206 def get_hosts_info(self):
4207 """Get the information of deployed hosts
4208 Returns the hosts content"""
4209 raise vimconn.VimConnNotImplemented("Should have implemented this")
4210
4211 def get_hosts(self, vim_tenant):
4212 """Get the hosts and deployed instances
4213 Returns the hosts content"""
4214 raise vimconn.VimConnNotImplemented("Should have implemented this")
4215
4216 def get_processor_rankings(self):
4217 """Get the processor rankings in the VIM database"""
4218 raise vimconn.VimConnNotImplemented("Should have implemented this")
4219
4220 def new_host(self, host_data):
4221 """Adds a new host to VIM"""
4222 """Returns status code of the VIM response"""
4223 raise vimconn.VimConnNotImplemented("Should have implemented this")
4224
4225 def new_external_port(self, port_data):
4226 """Adds a external port to VIM"""
4227 """Returns the port identifier"""
4228 raise vimconn.VimConnNotImplemented("Should have implemented this")
4229
4230 def new_external_network(self, net_name, net_type):
4231 """Adds a external network to VIM (shared)"""
4232 """Returns the network identifier"""
4233 raise vimconn.VimConnNotImplemented("Should have implemented this")
4234
4235 def connect_port_network(self, port_id, network_id, admin=False):
4236 """Connects a external port to a network"""
4237 """Returns status code of the VIM response"""
4238 raise vimconn.VimConnNotImplemented("Should have implemented this")
4239
4240 def new_vminstancefromJSON(self, vm_data):
4241 """Adds a VM instance to VIM"""
4242 """Returns the instance identifier"""
4243 raise vimconn.VimConnNotImplemented("Should have implemented this")
4244
4245 def get_network_name_by_id(self, network_uuid=None):
4246 """Method gets vcloud director network named based on supplied uuid.
4247
4248 Args:
4249 network_uuid: network_id
4250
4251 Returns:
4252 The network name, or None if not found.
4253 """
4254
4255 if not network_uuid:
4256 return None
4257
4258 try:
4259 org_dict = self.get_org(self.org_uuid)
4260 if "networks" in org_dict:
4261 org_network_dict = org_dict["networks"]
4262
4263 for net_uuid in org_network_dict:
4264 if net_uuid == network_uuid:
4265 return org_network_dict[net_uuid]
4266 except Exception:
4267 self.logger.debug("Exception in get_network_name_by_id")
4268 self.logger.debug(traceback.format_exc())
4269
4270 return None
4271
4272 def get_network_id_by_name(self, network_name=None):
4273 """Method gets vcloud director network uuid based on supplied name.
4274
4275 Args:
4276 network_name: network_name
4277 Returns:
4278 The network uuid (network_uuid) matching the supplied name,
4279 or None if no match is found.
4280 """
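# Example sketch (the network name and returned uuid are invented):
#
#   net_id = self.get_network_id_by_name("mgmt-net")
#   # -> "<uuid of the org network named mgmt-net>", or None if no match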
4281 if not network_name:
4282 self.logger.debug("get_network_id_by_name() : Network name is empty")
4283 return None
4284
4285 try:
4286 org_dict = self.get_org(self.org_uuid)
4287 if org_dict and "networks" in org_dict:
4288 org_network_dict = org_dict["networks"]
4289
4290 for net_uuid, net_name in org_network_dict.items():
4291 if net_name == network_name:
4292 return net_uuid
4293
4294 except KeyError as exp:
4295 self.logger.debug("get_network_id_by_name() : KeyError- {} ".format(exp))
4296
4297 return None
4298
4299 def get_physical_network_by_name(self, physical_network_name):
4300 """
4301 Method returns the uuid of the physical network whose name is passed
4302 Args:
4303 physical_network_name: physical network name
4304 Returns:
4305 UUID of physical_network_name
4306 """
4307 try:
4308 client_as_admin = self.connect_as_admin()
4309
4310 if not client_as_admin:
4311 raise vimconn.VimConnConnectionException("Failed to connect vCD.")
4312
4313 url_list = [self.url, "/api/admin/vdc/", self.tenant_id]
4314 vm_list_rest_call = "".join(url_list)
4315
4316 if client_as_admin._session:
4317 headers = {
4318 "Accept": "application/*+xml;version=" + API_VERSION,
4319 "x-vcloud-authorization": client_as_admin._session.headers[
4320 "x-vcloud-authorization"
4321 ],
4322 }
4323 response = self.perform_request(
4324 req_type="GET", url=vm_list_rest_call, headers=headers
4325 )
4326 provider_network = None
4327 available_network = None
4328 # add_vdc_rest_url = None
4329
4330 if response.status_code != requests.codes.ok:
4331 self.logger.debug(
4332 "REST API call {} failed. Return status code {}".format(
4333 vm_list_rest_call, response.status_code
4334 )
4335 )
4336 return None
4337 else:
4338 try:
4339 vm_list_xmlroot = XmlElementTree.fromstring(response.text)
4340 for child in vm_list_xmlroot:
4341 if child.tag.split("}")[1] == "ProviderVdcReference":
4342 provider_network = child.attrib.get("href")
4343 # application/vnd.vmware.admin.providervdc+xml
4344
4345 if child.tag.split("}")[1] == "Link":
4346 if (
4347 child.attrib.get("type")
4348 == "application/vnd.vmware.vcloud.orgVdcNetwork+xml"
4349 and child.attrib.get("rel") == "add"
4350 ):
4351 child.attrib.get("href")
4352 except Exception:
4353 self.logger.debug(
4354 "Failed parse respond for rest api call {}".format(
4355 vm_list_rest_call
4356 )
4357 )
4358 self.logger.debug("Respond body {}".format(response.text))
4359
4360 return None
4361
4362 # find pvdc provided available network
4363 response = self.perform_request(
4364 req_type="GET", url=provider_network, headers=headers
4365 )
4366
4367 if response.status_code != requests.codes.ok:
4368 self.logger.debug(
4369 "REST API call {} failed. Return status code {}".format(
4370 vm_list_rest_call, response.status_code
4371 )
4372 )
4373
4374 return None
4375
4376 try:
4377 vm_list_xmlroot = XmlElementTree.fromstring(response.text)
4378 for child in vm_list_xmlroot.iter():
4379 if child.tag.split("}")[1] == "AvailableNetworks":
4380 for networks in child.iter():
4381 if (
4382 networks.attrib.get("href") is not None
4383 and networks.attrib.get("name") is not None
4384 ):
4385 if (
4386 networks.attrib.get("name")
4387 == physical_network_name
4388 ):
4389 network_url = networks.attrib.get("href")
4390 available_network = network_url[
4391 network_url.rindex("/") + 1 :
4392 ]
4393 break
4394 except Exception:
4395 return None
4396
4397 return available_network
4398 except Exception as e:
4399 self.logger.error("Error while getting physical network: {}".format(e))
4400
4401 def list_org_action(self):
4402 """
4403 Method leverages vCloud Director and queries the available organizations for a particular user
4404
4405 Args:
4406 self.client - the active vCD client connection is used;
4407 no explicit arguments are required.
4408
4409 Returns:
4410 The XML response text, or None.
4411 """
4412 url_list = [self.url, "/api/org"]
4413 vm_list_rest_call = "".join(url_list)
4414
4415 if self.client._session:
4416 headers = {
4417 "Accept": "application/*+xml;version=" + API_VERSION,
4418 "x-vcloud-authorization": self.client._session.headers[
4419 "x-vcloud-authorization"
4420 ],
4421 }
4422
4423 response = self.perform_request(
4424 req_type="GET", url=vm_list_rest_call, headers=headers
4425 )
4426
4427 if response.status_code == 403:
4428 response = self.retry_rest("GET", vm_list_rest_call)
4429
4430 if response.status_code == requests.codes.ok:
4431 return response.text
4432
4433 return None
4434
4435 def get_org_action(self, org_uuid=None):
4436 """
4437 Method leverages vCloud Director and retrieves the available objects for an organization.
4438
4439 Args:
4440 org_uuid - vCD organization uuid
4441 self.client - is active connection.
4442
4443 Returns:
4444 The XML response text, or None.
4445 """
4446
4447 if org_uuid is None:
4448 return None
4449
4450 url_list = [self.url, "/api/org/", org_uuid]
4451 vm_list_rest_call = "".join(url_list)
4452
4453 if self.client._session:
4454 headers = {
4455 "Accept": "application/*+xml;version=" + API_VERSION,
4456 "x-vcloud-authorization": self.client._session.headers[
4457 "x-vcloud-authorization"
4458 ],
4459 }
4460
4461 # response = requests.get(vm_list_rest_call, headers=headers, verify=False)
4462 response = self.perform_request(
4463 req_type="GET", url=vm_list_rest_call, headers=headers
4464 )
4465
4466 if response.status_code == 403:
4467 response = self.retry_rest("GET", vm_list_rest_call)
4468
4469 if response.status_code == requests.codes.ok:
4470 return response.text
4471
4472 return None
4473
4474 def get_org(self, org_uuid=None):
4475 """
4476 Method retrieves the content of an organization in vCloud Director
4477
4478 Args:
4479 org_uuid - is an organization uuid.
4480
4481 Returns:
4482 The returned dictionary has the following keys:
4483 "networks" - for the network list under the org
4484 "catalogs" - for the catalog list under the org
4485 "vdcs" - for the vdc list under the org
4486 """
4487
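# Illustrative shape of the returned dictionary (uuids and names invented):
#
#   {
#       "vdcs": {"<vdc uuid>": "<vdc name>"},
#       "networks": {"<network uuid>": "<network name>"},
#       "catalogs": {"<catalog uuid>": "<catalog name>"},
#   }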
4488 org_dict = {}
4489
4490 if org_uuid is None:
4491 return org_dict
4492
4493 content = self.get_org_action(org_uuid=org_uuid)
4494 try:
4495 vdc_list = {}
4496 network_list = {}
4497 catalog_list = {}
4498 vm_list_xmlroot = XmlElementTree.fromstring(content)
4499 for child in vm_list_xmlroot:
4500 if child.attrib["type"] == "application/vnd.vmware.vcloud.vdc+xml":
4501 vdc_list[child.attrib["href"].split("/")[-1:][0]] = child.attrib[
4502 "name"
4503 ]
4504 org_dict["vdcs"] = vdc_list
4505
4506 if (
4507 child.attrib["type"]
4508 == "application/vnd.vmware.vcloud.orgNetwork+xml"
4509 ):
4510 network_list[
4511 child.attrib["href"].split("/")[-1:][0]
4512 ] = child.attrib["name"]
4513 org_dict["networks"] = network_list
4514
4515 if child.attrib["type"] == "application/vnd.vmware.vcloud.catalog+xml":
4516 catalog_list[
4517 child.attrib["href"].split("/")[-1:][0]
4518 ] = child.attrib["name"]
4519 org_dict["catalogs"] = catalog_list
4520 except Exception:
4521 pass
4522
4523 return org_dict
4524
4525 def get_org_list(self):
4526 """
4527 Method retrieves the available organizations in vCloud Director
4528
4529 Args:
4530 vca - is active VCA connection.
4531
4532 Returns:
4533 A dictionary keyed by organization UUID.
4534 """
4535 org_dict = {}
4536
4537 content = self.list_org_action()
4538 try:
4539 vm_list_xmlroot = XmlElementTree.fromstring(content)
4540
4541 for vm_xml in vm_list_xmlroot:
4542 if vm_xml.tag.split("}")[1] == "Org":
4543 org_uuid = vm_xml.attrib["href"].split("/")[-1:]
4544 org_dict[org_uuid[0]] = vm_xml.attrib["name"]
4545 except Exception:
4546 pass
4547
4548 return org_dict
4549
4550 def vms_view_action(self, vdc_name=None):
4551 """Method leverages vCloud director vms query call
4552
4553 Args:
4554 vca - is active VCA connection.
4555 vdc_name - is a vdc name that will be used to query vms action
4556
4557 Returns:
4558 The XML response text, or None.
4559 """
4560 vca = self.connect()
4561 if vdc_name is None:
4562 return None
4563
4564 url_list = [vca.host, "/api/vms/query"]
4565 vm_list_rest_call = "".join(url_list)
4566
4567 if not (not vca.vcloud_session or not vca.vcloud_session.organization):
4568 refs = [
4569 ref
4570 for ref in vca.vcloud_session.organization.Link
4571 if ref.name == vdc_name
4572 and ref.type_ == "application/vnd.vmware.vcloud.vdc+xml"
4573 ]
4574
4575 if len(refs) == 1:
4576 response = self.perform_request(
4577 req_type="GET",
4578 url=vm_list_rest_call,
4579 headers=vca.vcloud_session.get_vcloud_headers(),
4580 verify=vca.verify,
4581 logger=vca.logger,
4582 )
4583
4584 if response.status_code == requests.codes.ok:
4585 return response.text
4586
4587 return None
4588
4589 def get_vapp_list(self, vdc_name=None):
4590 """
4591 Method retrieves the list of vApps deployed in vCloud Director and returns a dictionary
4592 containing all vApps deployed in the queried VDC.
4593 The key for the dictionary is the vApp UUID
4594
4595
4596 Args:
4597 vca - is active VCA connection.
4598 vdc_name - is a vdc name that will be used to query vms action
4599
4600 Returns:
4601 A dictionary keyed by vApp UUID.
4602 """
4603 vapp_dict = {}
4604
4605 if vdc_name is None:
4606 return vapp_dict
4607
4608 content = self.vms_view_action(vdc_name=vdc_name)
4609 try:
4610 vm_list_xmlroot = XmlElementTree.fromstring(content)
4611 for vm_xml in vm_list_xmlroot:
4612 if vm_xml.tag.split("}")[1] == "VMRecord":
4613 if vm_xml.attrib["isVAppTemplate"] == "true":
4614 rawuuid = vm_xml.attrib["container"].split("/")[-1:]
4615 if "vappTemplate-" in rawuuid[0]:
4616 # container is in format vappTemplate-e63d40e7-4ff5-4c6d-851f-96c1e4da86a5;
4617 # we strip the "vappTemplate-" prefix and use the raw UUID as the key
4618 vapp_dict[rawuuid[0][13:]] = vm_xml.attrib
4619 except Exception:
4620 pass
4621
4622 return vapp_dict
4623
4624 def get_vm_list(self, vdc_name=None):
4625 """
4626 Method retrieves the list of VMs deployed in vCloud Director. It returns a dictionary
4627 containing all VMs deployed in the queried VDC.
4628 The key for the dictionary is the VM UUID
4629
4630
4631 Args:
4632 vca - is active VCA connection.
4633 vdc_name - is a vdc name that will be used to query vms action
4634
4635 Returns:
4636 A dictionary keyed by VM UUID.
4637 """
4638 vm_dict = {}
4639
4640 if vdc_name is None:
4641 return vm_dict
4642
4643 content = self.vms_view_action(vdc_name=vdc_name)
4644 try:
4645 vm_list_xmlroot = XmlElementTree.fromstring(content)
4646 for vm_xml in vm_list_xmlroot:
4647 if vm_xml.tag.split("}")[1] == "VMRecord":
4648 if vm_xml.attrib["isVAppTemplate"] == "false":
4649 rawuuid = vm_xml.attrib["href"].split("/")[-1:]
4650 if "vm-" in rawuuid[0]:
4651 # href is in format vm-e63d40e7-4ff5-4c6d-851f-96c1e4da86a5; we strip the
4652 # "vm-" prefix and use the raw UUID as the key
4653 vm_dict[rawuuid[0][3:]] = vm_xml.attrib
4654 except Exception:
4655 pass
4656
4657 return vm_dict
4658
4659 def get_vapp(self, vdc_name=None, vapp_name=None, isuuid=False):
4660 """
4661 Method retrieves a VM deployed in vCloud Director. It returns the VM attributes as a dictionary
4662 for the matching VM in the queried VDC.
4663 The key for the dictionary is the VM UUID
4664
4665
4666 Args:
4667 vca - is active VCA connection.
4668 vdc_name - is a vdc name that will be used to query vms action
4669
4670 Returns:
4671 A dictionary keyed by VM UUID.
4672 """
4673 vm_dict = {}
4674 vca = self.connect()
4675
4676 if not vca:
4677 raise vimconn.VimConnConnectionException("self.connect() is failed")
4678
4679 if vdc_name is None:
4680 return vm_dict
4681
4682 content = self.vms_view_action(vdc_name=vdc_name)
4683 try:
4684 vm_list_xmlroot = XmlElementTree.fromstring(content)
4685 for vm_xml in vm_list_xmlroot:
4686 if (
4687 vm_xml.tag.split("}")[1] == "VMRecord"
4688 and vm_xml.attrib["isVAppTemplate"] == "false"
4689 ):
4690 # lookup done by UUID
4691 if isuuid:
4692 if vapp_name in vm_xml.attrib["container"]:
4693 rawuuid = vm_xml.attrib["href"].split("/")[-1:]
4694 if "vm-" in rawuuid[0]:
4695 vm_dict[rawuuid[0][3:]] = vm_xml.attrib
4696 break
4697 # lookup done by Name
4698 else:
4699 if vapp_name in vm_xml.attrib["name"]:
4700 rawuuid = vm_xml.attrib["href"].split("/")[-1:]
4701 if "vm-" in rawuuid[0]:
4702 vm_dict[rawuuid[0][3:]] = vm_xml.attrib
4703 break
4704 except Exception:
4705 pass
4706
4707 return vm_dict
4708
4709 def get_network_action(self, network_uuid=None):
4710 """
4711 Method leverages vCloud Director and queries a network based on the network uuid
4712
4713 Args:
4714 vca - is active VCA connection.
4715 network_uuid - is a network uuid
4716
4717 Returns:
4718 The XML response text, or None.
4719 """
4720 if network_uuid is None:
4721 return None
4722
4723 url_list = [self.url, "/api/network/", network_uuid]
4724 vm_list_rest_call = "".join(url_list)
4725
4726 if self.client._session:
4727 headers = {
4728 "Accept": "application/*+xml;version=" + API_VERSION,
4729 "x-vcloud-authorization": self.client._session.headers[
4730 "x-vcloud-authorization"
4731 ],
4732 }
4733 response = self.perform_request(
4734 req_type="GET", url=vm_list_rest_call, headers=headers
4735 )
4736
4737 # Retry login if session expired & retry sending request
4738 if response.status_code == 403:
4739 response = self.retry_rest("GET", vm_list_rest_call)
4740
4741 if response.status_code == requests.codes.ok:
4742 return response.text
4743
4744 return None
4745
4746 def get_vcd_network(self, network_uuid=None):
4747 """
4748 Method retrieves available network from vCloud Director
4749
4750 Args:
4751 network_uuid - is VCD network UUID
4752
4753 Each element serialized as key : value pair
4754
4755 The following keys are available for access, e.g. network_configuration['Gateway']
4756 <Configuration>
4757 <IpScopes>
4758 <IpScope>
4759 <IsInherited>true</IsInherited>
4760 <Gateway>172.16.252.100</Gateway>
4761 <Netmask>255.255.255.0</Netmask>
4762 <Dns1>172.16.254.201</Dns1>
4763 <Dns2>172.16.254.202</Dns2>
4764 <DnsSuffix>vmwarelab.edu</DnsSuffix>
4765 <IsEnabled>true</IsEnabled>
4766 <IpRanges>
4767 <IpRange>
4768 <StartAddress>172.16.252.1</StartAddress>
4769 <EndAddress>172.16.252.99</EndAddress>
4770 </IpRange>
4771 </IpRanges>
4772 </IpScope>
4773 </IpScopes>
4774 <FenceMode>bridged</FenceMode>
4775
4776 Returns:
4777 A dictionary with the network configuration.
4778 """
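# Illustrative return value, combining the attributes and the Configuration
# children parsed below; values are partly taken from the XML sample in the
# docstring and partly invented:
#
#   {
#       "status": "1",
#       "name": "<network name>",
#       "uuid": "<network uuid>",
#       "isShared": "false",
#       "Gateway": "172.16.252.100",
#       "Netmask": "255.255.255.0",
#       "Dns1": "172.16.254.201",
#       "FenceMode": "bridged",
#       ...
#   }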
4779 network_configuration = {}
4780
4781 if network_uuid is None:
4782 return network_uuid
4783
4784 try:
4785 content = self.get_network_action(network_uuid=network_uuid)
4786 if content is not None:
4787 vm_list_xmlroot = XmlElementTree.fromstring(content)
4788 network_configuration["status"] = vm_list_xmlroot.get("status")
4789 network_configuration["name"] = vm_list_xmlroot.get("name")
4790 network_configuration["uuid"] = vm_list_xmlroot.get("id").split(":")[3]
4791
4792 for child in vm_list_xmlroot:
4793 if child.tag.split("}")[1] == "IsShared":
4794 network_configuration["isShared"] = child.text.strip()
4795
4796 if child.tag.split("}")[1] == "Configuration":
4797 for configuration in child.iter():
4798 tagKey = configuration.tag.split("}")[1].strip()
4799 if tagKey != "":
4800 network_configuration[
4801 tagKey
4802 ] = configuration.text.strip()
4803 except Exception as exp:
4804 self.logger.debug("get_vcd_network: Failed with Exception {}".format(exp))
4805
4806 raise vimconn.VimConnException(
4807 "get_vcd_network: Failed with Exception {}".format(exp)
4808 )
4809
4810 return network_configuration
4811
4812 def delete_network_action(self, network_uuid=None):
4813 """
4814 Method deletes the given network from vCloud Director
4815
4816 Args:
4817 network_uuid - is the uuid of the network that the client wishes to delete
4818
4819 Returns:
4820 True if the delete request was accepted, False otherwise
4821 """
4822 client = self.connect_as_admin()
4823
4824 if not client:
4825 raise vimconn.VimConnConnectionException("Failed to connect vCD as admin")
4826
4827 if network_uuid is None:
4828 return False
4829
4830 url_list = [self.url, "/api/admin/network/", network_uuid]
4831 vm_list_rest_call = "".join(url_list)
4832
4833 if client._session:
4834 headers = {
4835 "Accept": "application/*+xml;version=" + API_VERSION,
4836 "x-vcloud-authorization": client._session.headers[
4837 "x-vcloud-authorization"
4838 ],
4839 }
4840 response = self.perform_request(
4841 req_type="DELETE", url=vm_list_rest_call, headers=headers
4842 )
4843
4844 if response.status_code == 202:
4845 return True
4846
4847 return False
4848
4849 def create_network(
4850 self,
4851 network_name=None,
4852 net_type="bridge",
4853 parent_network_uuid=None,
4854 ip_profile=None,
4855 isshared="true",
4856 ):
4857 """
4858 Method creates a network in vCloud Director
4859
4860 Args:
4861 network_name - is network name to be created.
4862 net_type - can be 'bridge','data','ptp','mgmt'.
4863 ip_profile is a dict containing the IP parameters of the network
4864 isshared - is a boolean
4865 parent_network_uuid - is the parent provider VDC network that will be used for mapping.
4866 It is an optional attribute; by default, if no parent network is indicated, the first available one is used.
4867
4868 Returns:
4869 The new network uuid, or None on failure.
4870 """
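# Illustrative ip_profile argument; the keys mirror those consumed by
# create_network_rest(), the addresses are examples only:
#
#   ip_profile = {
#       "subnet_address": "192.168.10.0/24",
#       "gateway_address": "192.168.10.1",
#       "dns_address": "192.168.10.2",
#       "dhcp_enabled": True,
#       "dhcp_start_address": "192.168.10.3",
#       "dhcp_count": 50,
#       "ip_version": "IPv4",
#   }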
4871 new_network_name = [network_name, "-", str(uuid.uuid4())]
4872 content = self.create_network_rest(
4873 network_name="".join(new_network_name),
4874 ip_profile=ip_profile,
4875 net_type=net_type,
4876 parent_network_uuid=parent_network_uuid,
4877 isshared=isshared,
4878 )
4879
4880 if content is None:
4881 self.logger.debug("Failed create network {}.".format(network_name))
4882
4883 return None
4884
4885 try:
4886 vm_list_xmlroot = XmlElementTree.fromstring(content)
4887 vcd_uuid = vm_list_xmlroot.get("id").split(":")
4888 if len(vcd_uuid) == 4:
4889 self.logger.info(
4890 "Created new network name: {} uuid: {}".format(
4891 network_name, vcd_uuid[3]
4892 )
4893 )
4894
4895 return vcd_uuid[3]
4896 except Exception:
4897 self.logger.debug("Failed create network {}".format(network_name))
4898
4899 return None
4900
4901 def create_network_rest(
4902 self,
4903 network_name=None,
4904 net_type="bridge",
4905 parent_network_uuid=None,
4906 ip_profile=None,
4907 isshared="true",
4908 ):
4909 """
4910 Method creates a network in vCloud Director
4911
4912 Args:
4913 network_name - is network name to be created.
4914 net_type - can be 'bridge','data','ptp','mgmt'.
4915 ip_profile is a dict containing the IP parameters of the network
4916 isshared - is a boolean
4917 parent_network_uuid - is the parent provider VDC network that will be used for mapping.
4918 It is an optional attribute; by default, if no parent network is indicated, the first available one is used.
4919
4920 Returns:
4921 The XML content of the response on success, or None.
4922 """
4923 client_as_admin = self.connect_as_admin()
4924
4925 if not client_as_admin:
4926 raise vimconn.VimConnConnectionException("Failed to connect vCD.")
4927
4928 if network_name is None:
4929 return None
4930
4931 url_list = [self.url, "/api/admin/vdc/", self.tenant_id]
4932 vm_list_rest_call = "".join(url_list)
4933
4934 if client_as_admin._session:
4935 headers = {
4936 "Accept": "application/*+xml;version=" + API_VERSION,
4937 "x-vcloud-authorization": client_as_admin._session.headers[
4938 "x-vcloud-authorization"
4939 ],
4940 }
4941 response = self.perform_request(
4942 req_type="GET", url=vm_list_rest_call, headers=headers
4943 )
4944 provider_network = None
4945 available_networks = None
4946 add_vdc_rest_url = None
4947
4948 if response.status_code != requests.codes.ok:
4949 self.logger.debug(
4950 "REST API call {} failed. Return status code {}".format(
4951 vm_list_rest_call, response.status_code
4952 )
4953 )
4954
4955 return None
4956 else:
4957 try:
4958 vm_list_xmlroot = XmlElementTree.fromstring(response.text)
4959 for child in vm_list_xmlroot:
4960 if child.tag.split("}")[1] == "ProviderVdcReference":
4961 provider_network = child.attrib.get("href")
4962 # application/vnd.vmware.admin.providervdc+xml
4963
4964 if child.tag.split("}")[1] == "Link":
4965 if (
4966 child.attrib.get("type")
4967 == "application/vnd.vmware.vcloud.orgVdcNetwork+xml"
4968 and child.attrib.get("rel") == "add"
4969 ):
4970 add_vdc_rest_url = child.attrib.get("href")
4971 except Exception:
4972 self.logger.debug(
4973 "Failed parse respond for rest api call {}".format(
4974 vm_list_rest_call
4975 )
4976 )
4977 self.logger.debug("Respond body {}".format(response.text))
4978
4979 return None
4980
4981 # find pvdc provided available network
4982 response = self.perform_request(
4983 req_type="GET", url=provider_network, headers=headers
4984 )
4985
4986 if response.status_code != requests.codes.ok:
4987 self.logger.debug(
4988 "REST API call {} failed. Return status code {}".format(
4989 vm_list_rest_call, response.status_code
4990 )
4991 )
4992
4993 return None
4994
4995 if parent_network_uuid is None:
4996 try:
4997 vm_list_xmlroot = XmlElementTree.fromstring(response.text)
4998 for child in vm_list_xmlroot.iter():
4999 if child.tag.split("}")[1] == "AvailableNetworks":
5000 for networks in child.iter():
5001 # application/vnd.vmware.admin.network+xml
5002 if networks.attrib.get("href") is not None:
5003 available_networks = networks.attrib.get("href")
5004 break
5005 except Exception:
5006 return None
5007
5008 try:
5009 # Configure IP profile of the network
5010 ip_profile = (
5011 ip_profile if ip_profile is not None else DEFAULT_IP_PROFILE
5012 )
5013
5014 if (
5015 "subnet_address" not in ip_profile
5016 or ip_profile["subnet_address"] is None
5017 ):
5018 subnet_rand = random.randint(0, 255)
5019 ip_base = "192.168.{}.".format(subnet_rand)
5020 ip_profile["subnet_address"] = ip_base + "0/24"
5021 else:
5022 ip_base = ip_profile["subnet_address"].rsplit(".", 1)[0] + "."
5023
5024 if (
5025 "gateway_address" not in ip_profile
5026 or ip_profile["gateway_address"] is None
5027 ):
5028 ip_profile["gateway_address"] = ip_base + "1"
5029
5030 if "dhcp_count" not in ip_profile or ip_profile["dhcp_count"] is None:
5031 ip_profile["dhcp_count"] = DEFAULT_IP_PROFILE["dhcp_count"]
5032
5033 if (
5034 "dhcp_enabled" not in ip_profile
5035 or ip_profile["dhcp_enabled"] is None
5036 ):
5037 ip_profile["dhcp_enabled"] = DEFAULT_IP_PROFILE["dhcp_enabled"]
5038
5039 if (
5040 "dhcp_start_address" not in ip_profile
5041 or ip_profile["dhcp_start_address"] is None
5042 ):
5043 ip_profile["dhcp_start_address"] = ip_base + "3"
5044
5045 if "ip_version" not in ip_profile or ip_profile["ip_version"] is None:
5046 ip_profile["ip_version"] = DEFAULT_IP_PROFILE["ip_version"]
5047
5048 if "dns_address" not in ip_profile or ip_profile["dns_address"] is None:
5049 ip_profile["dns_address"] = ip_base + "2"
5050
5051 gateway_address = ip_profile["gateway_address"]
5052 dhcp_count = int(ip_profile["dhcp_count"])
5053 subnet_address = self.convert_cidr_to_netmask(
5054 ip_profile["subnet_address"]
5055 )
5056
5057 if ip_profile["dhcp_enabled"] is True:
5058 dhcp_enabled = "true"
5059 else:
5060 dhcp_enabled = "false"
5061
5062 dhcp_start_address = ip_profile["dhcp_start_address"]
5063
5064 # derive dhcp_end_address from dhcp_start_address & dhcp_count
5065 end_ip_int = int(netaddr.IPAddress(dhcp_start_address))
5066 end_ip_int += dhcp_count - 1
5067 dhcp_end_address = str(netaddr.IPAddress(end_ip_int))
5068
5069 # ip_version = ip_profile['ip_version']
5070 dns_address = ip_profile["dns_address"]
5071 except KeyError as exp:
5072 self.logger.debug("Create Network REST: Key error {}".format(exp))
5073
5074 raise vimconn.VimConnException(
5075 "Create Network REST: Key error{}".format(exp)
5076 )
5077
5078 # either use client provided UUID or search for a first available
5079 # if both are not defined we return none
5080 if parent_network_uuid is not None:
5081 provider_network = None
5082 available_networks = None
5083 add_vdc_rest_url = None
5084 url_list = [self.url, "/api/admin/vdc/", self.tenant_id, "/networks"]
5085 add_vdc_rest_url = "".join(url_list)
5086 url_list = [self.url, "/api/admin/network/", parent_network_uuid]
5087 available_networks = "".join(url_list)
5088
5089 # Creating all networks as Direct Org VDC type networks.
5090 # Unused in case of Underlay (data/ptp) network interface.
5091 fence_mode = "isolated"
5092 is_inherited = "false"
5093 dns_list = dns_address.split(";")
5094 dns1 = dns_list[0]
5095 dns2_text = ""
5096
5097 if len(dns_list) >= 2:
5098 dns2_text = "\n <Dns2>{}</Dns2>\n".format(
5099 dns_list[1]
5100 )
5101
5102 if net_type == "isolated":
5103 fence_mode = "isolated"
5104 data = """ <OrgVdcNetwork name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5">
5105 <Description>Openmano created</Description>
5106 <Configuration>
5107 <IpScopes>
5108 <IpScope>
5109 <IsInherited>{1:s}</IsInherited>
5110 <Gateway>{2:s}</Gateway>
5111 <Netmask>{3:s}</Netmask>
5112 <Dns1>{4:s}</Dns1>{5:s}
5113 <IsEnabled>{6:s}</IsEnabled>
5114 <IpRanges>
5115 <IpRange>
5116 <StartAddress>{7:s}</StartAddress>
5117 <EndAddress>{8:s}</EndAddress>
5118 </IpRange>
5119 </IpRanges>
5120 </IpScope>
5121 </IpScopes>
5122 <FenceMode>{9:s}</FenceMode>
5123 </Configuration>
5124 <IsShared>{10:s}</IsShared>
5125 </OrgVdcNetwork> """.format(
5126 escape(network_name),
5127 is_inherited,
5128 gateway_address,
5129 subnet_address,
5130 dns1,
5131 dns2_text,
5132 dhcp_enabled,
5133 dhcp_start_address,
5134 dhcp_end_address,
5135 fence_mode,
5136 isshared,
5137 )
5138 else:
5139 fence_mode = "bridged"
5140 data = """ <OrgVdcNetwork name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5">
5141 <Description>Openmano created</Description>
5142 <Configuration>
5143 <IpScopes>
5144 <IpScope>
5145 <IsInherited>{1:s}</IsInherited>
5146 <Gateway>{2:s}</Gateway>
5147 <Netmask>{3:s}</Netmask>
5148 <Dns1>{4:s}</Dns1>{5:s}
5149 <IsEnabled>{6:s}</IsEnabled>
5150 <IpRanges>
5151 <IpRange>
5152 <StartAddress>{7:s}</StartAddress>
5153 <EndAddress>{8:s}</EndAddress>
5154 </IpRange>
5155 </IpRanges>
5156 </IpScope>
5157 </IpScopes>
5158 <ParentNetwork href="{9:s}"/>
5159 <FenceMode>{10:s}</FenceMode>
5160 </Configuration>
5161 <IsShared>{11:s}</IsShared>
5162 </OrgVdcNetwork> """.format(
5163 escape(network_name),
5164 is_inherited,
5165 gateway_address,
5166 subnet_address,
5167 dns1,
5168 dns2_text,
5169 dhcp_enabled,
5170 dhcp_start_address,
5171 dhcp_end_address,
5172 available_networks,
5173 fence_mode,
5174 isshared,
5175 )
5176
5177 headers["Content-Type"] = "application/vnd.vmware.vcloud.orgVdcNetwork+xml"
5178 try:
5179 response = self.perform_request(
5180 req_type="POST", url=add_vdc_rest_url, headers=headers, data=data
5181 )
5182
5183 if response.status_code != 201:
5184 self.logger.debug(
5185 "Create Network POST REST API call failed. "
5186 "Return status code {}, response.text: {}".format(
5187 response.status_code, response.text
5188 )
5189 )
5190 else:
5191 network_task = self.get_task_from_response(response.text)
5192 self.logger.debug(
5193 "Create Network REST : Waiting for Network creation complete"
5194 )
5195 time.sleep(5)
5196 result = self.client.get_task_monitor().wait_for_success(
5197 task=network_task
5198 )
5199
5200 if result.get("status") == "success":
5201 return response.text
5202 else:
5203 self.logger.debug(
5204 "create_network_rest task failed. Network Create response : {}".format(
5205 response.text
5206 )
5207 )
5208 except Exception as exp:
5209 self.logger.debug("create_network_rest : Exception : {} ".format(exp))
5210
5211 return None
5212
5213 def convert_cidr_to_netmask(self, cidr_ip=None):
5214 """
5215 Method converts a CIDR-style address to a dotted-decimal netmask
5216 Args:
5217 cidr_ip : CIDR IP address
5218 Returns:
5219 netmask : Converted netmask
5220 """
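# Example (illustrative): convert_cidr_to_netmask("10.0.0.0/24") returns
# "255.255.255.0"; an address without a "/" is returned unchanged.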
5221 if cidr_ip is not None:
5222 if "/" in cidr_ip:
5223 _, net_bits = cidr_ip.split("/")
5224 netmask = socket.inet_ntoa(
5225 struct.pack(">I", (0xFFFFFFFF << (32 - int(net_bits))) & 0xFFFFFFFF)
5226 )
5227 else:
5228 netmask = cidr_ip
5229
5230 return netmask
5231
5232 return None
5233
5234 def get_provider_rest(self, vca=None):
5235 """
5236 Method gets provider vdc view from vcloud director
5237
5238 Args:
5239 vca - an active vCD client connection.
5242
5243 Returns:
5244 The XML content of the response, or None.
5245 """
5246 url_list = [self.url, "/api/admin"]
5247
5248 if vca:
5249 headers = {
5250 "Accept": "application/*+xml;version=" + API_VERSION,
5251 "x-vcloud-authorization": self.client._session.headers[
5252 "x-vcloud-authorization"
5253 ],
5254 }
5255 response = self.perform_request(
5256 req_type="GET", url="".join(url_list), headers=headers
5257 )
5258
5259 if response.status_code == requests.codes.ok:
5260 return response.text
5261
5262 return None
5263
5264 def create_vdc(self, vdc_name=None):
5265 vdc_dict = {}
5266 xml_content = self.create_vdc_from_tmpl_rest(vdc_name=vdc_name)
5267
5268 if xml_content is not None:
5269 try:
5270 task_resp_xmlroot = XmlElementTree.fromstring(xml_content)
5271 for child in task_resp_xmlroot:
5272 if child.tag.split("}")[1] == "Owner":
5273 vdc_id = child.attrib.get("href").split("/")[-1]
5274 vdc_dict[vdc_id] = task_resp_xmlroot.get("href")
5275
5276 return vdc_dict
5277 except Exception:
5278 self.logger.debug("Respond body {}".format(xml_content))
5279
5280 return None
5281
5282 def create_vdc_from_tmpl_rest(self, vdc_name=None):
5283 """
5284 Method creates a VDC in vCloud Director based on a VDC template.
5285 It uses a pre-defined template.
5286
5287 Args:
5288 vdc_name - name of a new vdc.
5289
5290 Returns:
5291 The XML content of the response, or None.
5292 """
5293 # prerequisite: at least one VDC template should be available in vCD
5294 self.logger.info("Creating new vdc {}".format(vdc_name))
5295 vca = self.connect_as_admin()
5296
5297 if not vca:
5298 raise vimconn.VimConnConnectionException("Failed to connect vCD")
5299
5300 if vdc_name is None:
5301 return None
5302
5303 url_list = [self.url, "/api/vdcTemplates"]
5304 vm_list_rest_call = "".join(url_list)
5305 headers = {
5306 "Accept": "application/*+xml;version=" + API_VERSION,
5307 "x-vcloud-authorization": vca._session.headers["x-vcloud-authorization"],
5308 }
5309 response = self.perform_request(
5310 req_type="GET", url=vm_list_rest_call, headers=headers
5311 )
5312
5313 # container url to a template
5314 vdc_template_ref = None
5315 try:
5316 vm_list_xmlroot = XmlElementTree.fromstring(response.text)
5317 for child in vm_list_xmlroot:
5318 # application/vnd.vmware.admin.providervdc+xml
5319 # we need to find a template from which we instantiate the VDC
5320 if child.tag.split("}")[1] == "VdcTemplate":
5321 if (
5322 child.attrib.get("type")
5323 == "application/vnd.vmware.admin.vdcTemplate+xml"
5324 ):
5325 vdc_template_ref = child.attrib.get("href")
5326 except Exception:
5327 self.logger.debug(
5328 "Failed parse respond for rest api call {}".format(vm_list_rest_call)
5329 )
5330 self.logger.debug("Respond body {}".format(response.text))
5331
5332 return None
5333
5334 # if we didn't find the required pre-defined template, return None
5335 if vdc_template_ref is None:
5336 return None
5337
5338 try:
5339 # instantiate vdc
5340 url_list = [self.url, "/api/org/", self.org_uuid, "/action/instantiate"]
5341 vm_list_rest_call = "".join(url_list)
5342 data = """<InstantiateVdcTemplateParams name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5">
5343 <Source href="{1:s}"></Source>
5344 <Description>openmano</Description>
5345 </InstantiateVdcTemplateParams>""".format(
5346 vdc_name, vdc_template_ref
5347 )
5348 headers[
5349 "Content-Type"
5350 ] = "application/vnd.vmware.vcloud.instantiateVdcTemplateParams+xml"
5351 response = self.perform_request(
5352 req_type="POST", url=vm_list_rest_call, headers=headers, data=data
5353 )
5354 vdc_task = self.get_task_from_response(response.text)
5355 self.client.get_task_monitor().wait_for_success(task=vdc_task)
5356
5357 # if everything is OK we respond with the content, otherwise None by default
5358 if response.status_code >= 200 and response.status_code < 300:
5359 return response.text
5360
5361 return None
5362 except Exception:
5363 self.logger.debug(
5364 "Failed parse respond for rest api call {}".format(vm_list_rest_call)
5365 )
5366 self.logger.debug("Respond body {}".format(response.text))
5367
5368 return None
5369
5370 def create_vdc_rest(self, vdc_name=None):
5371 """
5372 Method creates a VDC in vCloud Director
5373
5374 Args:
5375 vdc_name - vdc name to be created
5376 Returns:
5377 The XML content of the response, or None.
5378 """
5379 self.logger.info("Creating new vdc {}".format(vdc_name))
5380 vca = self.connect_as_admin()
5381
5382 if not vca:
5383 raise vimconn.VimConnConnectionException("Failed to connect vCD")
5384
5385 if vdc_name is None:
5386 return None
5387
5388 url_list = [self.url, "/api/admin/org/", self.org_uuid]
5389 vm_list_rest_call = "".join(url_list)
5390
5391 if vca._session:
5392 headers = {
5393 "Accept": "application/*+xml;version=" + API_VERSION,
5394 "x-vcloud-authorization": self.client._session.headers[
5395 "x-vcloud-authorization"
5396 ],
5397 }
5398 response = self.perform_request(
5399 req_type="GET", url=vm_list_rest_call, headers=headers
5400 )
5401 provider_vdc_ref = None
5402 add_vdc_rest_url = None
5403 # available_networks = None
5404
5405 if response.status_code != requests.codes.ok:
5406 self.logger.debug(
5407 "REST API call {} failed. Return status code {}".format(
5408 vm_list_rest_call, response.status_code
5409 )
5410 )
5411
5412 return None
5413 else:
5414 try:
5415 vm_list_xmlroot = XmlElementTree.fromstring(response.text)
5416 for child in vm_list_xmlroot:
5417 # application/vnd.vmware.admin.providervdc+xml
5418 if child.tag.split("}")[1] == "Link":
5419 if (
5420 child.attrib.get("type")
5421 == "application/vnd.vmware.admin.createVdcParams+xml"
5422 and child.attrib.get("rel") == "add"
5423 ):
5424 add_vdc_rest_url = child.attrib.get("href")
5425 except Exception:
5426 self.logger.debug(
5427 "Failed parse respond for rest api call {}".format(
5428 vm_list_rest_call
5429 )
5430 )
5431 self.logger.debug("Respond body {}".format(response.text))
5432
5433 return None
5434
5435 response = self.get_provider_rest(vca=vca)
5436 try:
5437 vm_list_xmlroot = XmlElementTree.fromstring(response)
5438 for child in vm_list_xmlroot:
5439 if child.tag.split("}")[1] == "ProviderVdcReferences":
5440 for sub_child in child:
5441 provider_vdc_ref = sub_child.attrib.get("href")
5442 except Exception:
5443 self.logger.debug(
5444 "Failed parse respond for rest api call {}".format(
5445 vm_list_rest_call
5446 )
5447 )
5448 self.logger.debug("Respond body {}".format(response))
5449
5450 return None
5451
5452 if add_vdc_rest_url is not None and provider_vdc_ref is not None:
5453 data = """ <CreateVdcParams name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5"><Description>{1:s}</Description>
5454 <AllocationModel>ReservationPool</AllocationModel>
5455 <ComputeCapacity><Cpu><Units>MHz</Units><Allocated>2048</Allocated><Limit>2048</Limit></Cpu>
5456 <Memory><Units>MB</Units><Allocated>2048</Allocated><Limit>2048</Limit></Memory>
5457 </ComputeCapacity><NicQuota>0</NicQuota><NetworkQuota>100</NetworkQuota>
5458 <VdcStorageProfile><Enabled>true</Enabled><Units>MB</Units><Limit>20480</Limit><Default>true</Default></VdcStorageProfile>
5459 <ProviderVdcReference
5460 name="Main Provider"
5461 href="{2:s}" />
5462 <UsesFastProvisioning>true</UsesFastProvisioning></CreateVdcParams>""".format(
5463 escape(vdc_name), escape(vdc_name), provider_vdc_ref
5464 )
5465 headers[
5466 "Content-Type"
5467 ] = "application/vnd.vmware.admin.createVdcParams+xml"
5468 response = self.perform_request(
5469 req_type="POST",
5470 url=add_vdc_rest_url,
5471 headers=headers,
5472 data=data,
5473 )
5474
5475 # if everything is OK we respond with the content, otherwise None by default
5476 if response.status_code == 201:
5477 return response.text
5478
5479 return None
5480
5481 def get_vapp_details_rest(self, vapp_uuid=None, need_admin_access=False):
5482 """
5483 Method retrieves vApp details from vCloud Director
5484
5485 Args:
5486 vapp_uuid - is vapp identifier.
5487
5488 Returns:
5489 A dictionary with the parsed vApp details (may be empty on failure), or None if vapp_uuid is not given.
5490 """
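# Illustrative subset of the dictionary assembled below (key names taken from
# the parsing code, values invented):
#
#   {
#       "ovfDescriptorUploaded": "true",
#       "created": "<timestamp>",
#       "networkname": "<vApp network name>",
#       "name": "<vm name>",
#       "deployed": "true",
#       "status": "4",
#       "vmuuid": "<vm uuid>",
#       "primarynetwork": "0",
#       "interfaces": [{"network": "<network name>", "IpAddress": "10.0.0.5"}],
#   }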
5491 parsed_respond = {}
5492 vca = None
5493
5494 if need_admin_access:
5495 vca = self.connect_as_admin()
5496 else:
5497 vca = self.client
5498
5499 if not vca:
5500 raise vimconn.VimConnConnectionException("Failed to connect vCD")
5501 if vapp_uuid is None:
5502 return None
5503
5504 url_list = [self.url, "/api/vApp/vapp-", vapp_uuid]
5505 get_vapp_restcall = "".join(url_list)
5506
5507 if vca._session:
5508 headers = {
5509 "Accept": "application/*+xml;version=" + API_VERSION,
5510 "x-vcloud-authorization": vca._session.headers[
5511 "x-vcloud-authorization"
5512 ],
5513 }
5514 response = self.perform_request(
5515 req_type="GET", url=get_vapp_restcall, headers=headers
5516 )
5517
5518 if response.status_code == 403:
5519 if need_admin_access is False:
5520 response = self.retry_rest("GET", get_vapp_restcall)
5521
5522 if response.status_code != requests.codes.ok:
5523 self.logger.debug(
5524 "REST API call {} failed. Return status code {}".format(
5525 get_vapp_restcall, response.status_code
5526 )
5527 )
5528
5529 return parsed_respond
5530
5531 try:
5532 xmlroot_respond = XmlElementTree.fromstring(response.text)
5533 parsed_respond["ovfDescriptorUploaded"] = xmlroot_respond.attrib[
5534 "ovfDescriptorUploaded"
5535 ]
5536 namespaces = {
5537 "vssd": "http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData",
5538 "ovf": "http://schemas.dmtf.org/ovf/envelope/1",
5539 "vmw": "http://www.vmware.com/schema/ovf",
5540 "vm": "http://www.vmware.com/vcloud/v1.5",
5541 "rasd": "http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData",
5542 "vmext": "http://www.vmware.com/vcloud/extension/v1.5",
5543 "xmlns": "http://www.vmware.com/vcloud/v1.5",
5544 }
5545
5546 created_section = xmlroot_respond.find("vm:DateCreated", namespaces)
5547 if created_section is not None:
5548 parsed_respond["created"] = created_section.text
5549
5550 network_section = xmlroot_respond.find(
5551 "vm:NetworkConfigSection/vm:NetworkConfig", namespaces
5552 )
5553 if (
5554 network_section is not None
5555 and "networkName" in network_section.attrib
5556 ):
5557 parsed_respond["networkname"] = network_section.attrib[
5558 "networkName"
5559 ]
5560
5561 ipscopes_section = xmlroot_respond.find(
5562 "vm:NetworkConfigSection/vm:NetworkConfig/vm:Configuration/vm:IpScopes",
5563 namespaces,
5564 )
5565 if ipscopes_section is not None:
5566 for ipscope in ipscopes_section:
5567 for scope in ipscope:
5568 tag_key = scope.tag.split("}")[1]
5569 if tag_key == "IpRanges":
5570                                 ip_ranges = list(scope)  # Element.getchildren() was removed in Python 3.9
5571 for ipblock in ip_ranges:
5572 for block in ipblock:
5573 parsed_respond[
5574 block.tag.split("}")[1]
5575 ] = block.text
5576 else:
5577 parsed_respond[tag_key] = scope.text
5578
5579 # parse children section for other attrib
5580 children_section = xmlroot_respond.find("vm:Children/", namespaces)
5581 if children_section is not None:
5582 parsed_respond["name"] = children_section.attrib["name"]
5583 parsed_respond["nestedHypervisorEnabled"] = (
5584 children_section.attrib["nestedHypervisorEnabled"]
5585 if "nestedHypervisorEnabled" in children_section.attrib
5586 else None
5587 )
5588 parsed_respond["deployed"] = children_section.attrib["deployed"]
5589 parsed_respond["status"] = children_section.attrib["status"]
5590 parsed_respond["vmuuid"] = children_section.attrib["id"].split(":")[
5591 -1
5592 ]
5593 network_adapter = children_section.find(
5594 "vm:NetworkConnectionSection", namespaces
5595 )
5596 nic_list = []
5597 for adapters in network_adapter:
5598 adapter_key = adapters.tag.split("}")[1]
5599 if adapter_key == "PrimaryNetworkConnectionIndex":
5600 parsed_respond["primarynetwork"] = adapters.text
5601
5602 if adapter_key == "NetworkConnection":
5603 vnic = {}
5604 if "network" in adapters.attrib:
5605 vnic["network"] = adapters.attrib["network"]
5606 for adapter in adapters:
5607 setting_key = adapter.tag.split("}")[1]
5608 vnic[setting_key] = adapter.text
5609 nic_list.append(vnic)
5610
5611 for link in children_section:
5612 if link.tag.split("}")[1] == "Link" and "rel" in link.attrib:
5613 if link.attrib["rel"] == "screen:acquireTicket":
5614 parsed_respond["acquireTicket"] = link.attrib
5615
5616 if link.attrib["rel"] == "screen:acquireMksTicket":
5617 parsed_respond["acquireMksTicket"] = link.attrib
5618
5619 parsed_respond["interfaces"] = nic_list
5620 vCloud_extension_section = children_section.find(
5621 "xmlns:VCloudExtension", namespaces
5622 )
5623 if vCloud_extension_section is not None:
5624 vm_vcenter_info = {}
5625 vim_info = vCloud_extension_section.find(
5626 "vmext:VmVimInfo", namespaces
5627 )
5628 vmext = vim_info.find("vmext:VmVimObjectRef", namespaces)
5629
5630 if vmext is not None:
5631 vm_vcenter_info["vm_moref_id"] = vmext.find(
5632 "vmext:MoRef", namespaces
5633 ).text
5634
5635 parsed_respond["vm_vcenter_info"] = vm_vcenter_info
5636
5637 virtual_hardware_section = children_section.find(
5638 "ovf:VirtualHardwareSection", namespaces
5639 )
5640 vm_virtual_hardware_info = {}
5641 if virtual_hardware_section is not None:
5642 for item in virtual_hardware_section.iterfind(
5643 "ovf:Item", namespaces
5644 ):
5645 if (
5646 item.find("rasd:Description", namespaces).text
5647 == "Hard disk"
5648 ):
5649 disk_size = item.find(
5650 "rasd:HostResource", namespaces
5651 ).attrib["{" + namespaces["vm"] + "}capacity"]
5652 vm_virtual_hardware_info["disk_size"] = disk_size
5653 break
5654
5655 for link in virtual_hardware_section:
5656 if (
5657 link.tag.split("}")[1] == "Link"
5658 and "rel" in link.attrib
5659 ):
5660 if link.attrib["rel"] == "edit" and link.attrib[
5661 "href"
5662 ].endswith("/disks"):
5663 vm_virtual_hardware_info[
5664 "disk_edit_href"
5665 ] = link.attrib["href"]
5666 break
5667
5668 parsed_respond["vm_virtual_hardware"] = vm_virtual_hardware_info
5669 except Exception as exp:
5670 self.logger.info(
5671 "Error occurred calling rest api for getting vApp details {}".format(
5672 exp
5673 )
5674 )
5675
5676 return parsed_respond
5677
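    # Illustrative usage sketch (not invoked anywhere in this module; assumes an
    # already-authenticated vimconnector instance `conn` and a known `vapp_uuid`).
    # It shows how the dictionary returned by get_vapp_details_rest() is typically
    # consumed, using only keys populated above:
    #
    #     details = conn.get_vapp_details_rest(vapp_uuid=vapp_uuid)
    #     if details and "vm_virtual_hardware" in details:
    #         current_disk_mb = int(details["vm_virtual_hardware"]["disk_size"])
    #         moref_id = details.get("vm_vcenter_info", {}).get("vm_moref_id")
    #         nic_list = details.get("interfaces", [])
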
5678 def acquire_console(self, vm_uuid=None):
5679 if vm_uuid is None:
5680 return None
5681
5682 if self.client._session:
5683 headers = {
5684 "Accept": "application/*+xml;version=" + API_VERSION,
5685 "x-vcloud-authorization": self.client._session.headers[
5686 "x-vcloud-authorization"
5687 ],
5688 }
5689 vm_dict = self.get_vapp_details_rest(vapp_uuid=vm_uuid)
5690 console_dict = vm_dict["acquireTicket"]
5691 console_rest_call = console_dict["href"]
5692
5693 response = self.perform_request(
5694 req_type="POST", url=console_rest_call, headers=headers
5695 )
5696
5697 if response.status_code == 403:
5698 response = self.retry_rest("POST", console_rest_call)
5699
5700 if response.status_code == requests.codes.ok:
5701 return response.text
5702
5703 return None
5704
5705 def modify_vm_disk(self, vapp_uuid, flavor_disk):
5706 """
5707         Method to resize the VM disk to the size given by the flavor
5708
5709         Args:
5710             vapp_uuid - vApp identifier
5711             flavor_disk - disk size in GB as specified in VNFD (flavor)
5712
5713         Returns:
5714             True if the disk was resized or no resize was needed, otherwise None
5715 """
5716 status = None
5717 try:
5718 # Flavor disk is in GB convert it into MB
5719 flavor_disk = int(flavor_disk) * 1024
5720 vm_details = self.get_vapp_details_rest(vapp_uuid)
5721
5722 if vm_details:
5723 vm_name = vm_details["name"]
5724 self.logger.info("VM: {} flavor_disk :{}".format(vm_name, flavor_disk))
5725
5726 if vm_details and "vm_virtual_hardware" in vm_details:
5727 vm_disk = int(vm_details["vm_virtual_hardware"]["disk_size"])
5728 disk_edit_href = vm_details["vm_virtual_hardware"]["disk_edit_href"]
5729 self.logger.info("VM: {} VM_disk :{}".format(vm_name, vm_disk))
5730
5731 if flavor_disk > vm_disk:
5732 status = self.modify_vm_disk_rest(disk_edit_href, flavor_disk)
5733 self.logger.info(
5734 "Modify disk of VM {} from {} to {} MB".format(
5735 vm_name, vm_disk, flavor_disk
5736 )
5737 )
5738 else:
5739 status = True
5740 self.logger.info("No need to modify disk of VM {}".format(vm_name))
5741
5742 return status
5743 except Exception as exp:
5744             self.logger.info("Error occurred while modifying disk size {}".format(exp))
5745
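    # Illustrative usage sketch (assumption: `conn` is a vimconnector instance and
    # `flavor` is a flavor dict holding the disk size in GB, as in the VNFD). The
    # method above only grows the disk when the flavor value exceeds the current one:
    #
    #     resized = conn.modify_vm_disk(vapp_uuid, flavor_disk=flavor["disk"])
    #     if not resized:
    #         conn.logger.warning("Disk of vApp {} was not resized".format(vapp_uuid))
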
5746 def modify_vm_disk_rest(self, disk_href, disk_size):
5747 """
5748         Method to modify the VM disk size through the vCD REST API
5749
5750 Args:
5751 disk_href - vCD API URL to GET and PUT disk data
5752 disk_size - disk size as specified in VNFD (flavor)
5753
5754 Returns:
5755             True if the resize task succeeded, False if it failed, None on error
5756 """
5757 if disk_href is None or disk_size is None:
5758 return None
5759
5760 if self.client._session:
5761 headers = {
5762 "Accept": "application/*+xml;version=" + API_VERSION,
5763 "x-vcloud-authorization": self.client._session.headers[
5764 "x-vcloud-authorization"
5765 ],
5766 }
5767 response = self.perform_request(
5768 req_type="GET", url=disk_href, headers=headers
5769 )
5770
5771 if response.status_code == 403:
5772 response = self.retry_rest("GET", disk_href)
5773
5774 if response.status_code != requests.codes.ok:
5775 self.logger.debug(
5776 "GET REST API call {} failed. Return status code {}".format(
5777 disk_href, response.status_code
5778 )
5779 )
5780
5781 return None
5782
5783 try:
5784 lxmlroot_respond = lxmlElementTree.fromstring(response.content)
5785 namespaces = {
5786 prefix: uri for prefix, uri in lxmlroot_respond.nsmap.items() if prefix
5787 }
5788 namespaces["xmlns"] = "http://www.vmware.com/vcloud/v1.5"
5789
5790 for item in lxmlroot_respond.iterfind("xmlns:Item", namespaces):
5791 if item.find("rasd:Description", namespaces).text == "Hard disk":
5792 disk_item = item.find("rasd:HostResource", namespaces)
5793 if disk_item is not None:
5794 disk_item.attrib["{" + namespaces["xmlns"] + "}capacity"] = str(
5795 disk_size
5796 )
5797 break
5798
5799 data = lxmlElementTree.tostring(
5800 lxmlroot_respond, encoding="utf8", method="xml", xml_declaration=True
5801 )
5802
5803 # Send PUT request to modify disk size
5804 headers[
5805 "Content-Type"
5806 ] = "application/vnd.vmware.vcloud.rasdItemsList+xml; charset=ISO-8859-1"
5807
5808 response = self.perform_request(
5809 req_type="PUT", url=disk_href, headers=headers, data=data
5810 )
5811 if response.status_code == 403:
5812 add_headers = {"Content-Type": headers["Content-Type"]}
5813 response = self.retry_rest("PUT", disk_href, add_headers, data)
5814
5815 if response.status_code != 202:
5816 self.logger.debug(
5817 "PUT REST API call {} failed. Return status code {}".format(
5818 disk_href, response.status_code
5819 )
5820 )
5821 else:
5822 modify_disk_task = self.get_task_from_response(response.text)
5823 result = self.client.get_task_monitor().wait_for_success(
5824 task=modify_disk_task
5825 )
5826 if result.get("status") == "success":
5827 return True
5828 else:
5829 return False
5830
5831 return None
5832 except Exception as exp:
5833 self.logger.info(
5834                 "Error occurred calling rest api for modifying disk size {}".format(exp)
5835 )
5836
5837 return None
5838
5839 def add_serial_device(self, vapp_uuid):
5840 """
5841 Method to attach a serial device to a VM
5842
5843 Args:
5844 vapp_uuid - uuid of vApp/VM
5845
5846 Returns:
5847 """
5848 self.logger.info("Add serial devices into vApp {}".format(vapp_uuid))
5849 _, content = self.get_vcenter_content()
5850 vm_moref_id = self.get_vm_moref_id(vapp_uuid)
5851
5852 if vm_moref_id:
5853 try:
5854 host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
5855 self.logger.info(
5856 "VM {} is currently on host {}".format(vm_obj, host_obj)
5857 )
5858 if host_obj and vm_obj:
5859 spec = vim.vm.ConfigSpec()
5860 spec.deviceChange = []
5861 serial_spec = vim.vm.device.VirtualDeviceSpec()
5862 serial_spec.operation = "add"
5863 serial_port = vim.vm.device.VirtualSerialPort()
5864 serial_port.yieldOnPoll = True
5865 backing = serial_port.URIBackingInfo()
5866 backing.serviceURI = "tcp://:65500"
5867 backing.direction = "server"
5868 serial_port.backing = backing
5869 serial_spec.device = serial_port
5870 spec.deviceChange.append(serial_spec)
5871 vm_obj.ReconfigVM_Task(spec=spec)
5872 self.logger.info("Adding serial device to VM {}".format(vm_obj))
5873 except vmodl.MethodFault as error:
5874                 self.logger.error("Error occurred while adding serial device: {}".format(error))
5875
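    # Illustrative usage sketch (assumes `conn` is a vimconnector instance configured
    # with vCenter access and `vapp_uuid` an existing vApp). The method above attaches
    # a network-backed serial port (tcp://:65500, server mode) through vCenter:
    #
    #     conn.add_serial_device(vapp_uuid)
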
5876 def add_pci_devices(self, vapp_uuid, pci_devices, vmname_andid):
5877 """
5878 Method to attach pci devices to VM
5879
5880 Args:
5881 vapp_uuid - uuid of vApp/VM
5882             pci_devices - pci devices information as specified in VNFD (flavor)
5883
5884 Returns:
5885             The status of the add-PCI-device task, the VM object and the
5886             vcenter_conect object
5887 """
5888 vm_obj = None
5889 self.logger.info(
5890 "Add pci devices {} into vApp {}".format(pci_devices, vapp_uuid)
5891 )
5892 vcenter_conect, content = self.get_vcenter_content()
5893 vm_moref_id = self.get_vm_moref_id(vapp_uuid)
5894
5895 if vm_moref_id:
5896 try:
5897 no_of_pci_devices = len(pci_devices)
5898 if no_of_pci_devices > 0:
5899 # Get VM and its host
5900 host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
5901 self.logger.info(
5902 "VM {} is currently on host {}".format(vm_obj, host_obj)
5903 )
5904
5905 if host_obj and vm_obj:
5906                         # get PCI devices from the host on which the vApp is currently installed
5907 avilable_pci_devices = self.get_pci_devices(
5908 host_obj, no_of_pci_devices
5909 )
5910
5911 if avilable_pci_devices is None:
5912 # find other hosts with active pci devices
5913 (
5914 new_host_obj,
5915 avilable_pci_devices,
5916 ) = self.get_host_and_PCIdevices(content, no_of_pci_devices)
5917
5918 if (
5919 new_host_obj is not None
5920 and avilable_pci_devices is not None
5921 and len(avilable_pci_devices) > 0
5922 ):
5923                                 # Migrate VM to the host where PCI devices are available
5924 self.logger.info(
5925 "Relocate VM {} on new host {}".format(
5926 vm_obj, new_host_obj
5927 )
5928 )
5929
5930 task = self.relocate_vm(new_host_obj, vm_obj)
5931 if task is not None:
5932 result = self.wait_for_vcenter_task(
5933 task, vcenter_conect
5934 )
5935 self.logger.info(
5936 "Migrate VM status: {}".format(result)
5937 )
5938 host_obj = new_host_obj
5939 else:
5940 self.logger.info(
5941                                     "Failed to migrate VM {}: relocate task was not created".format(vmname_andid)
5942 )
5943 raise vimconn.VimConnNotFoundException(
5944                                     "Failed to migrate VM {} to host {}".format(
5945 vmname_andid, new_host_obj
5946 )
5947 )
5948
5949 if (
5950 host_obj is not None
5951 and avilable_pci_devices is not None
5952 and len(avilable_pci_devices) > 0
5953 ):
5954 # Add PCI devices one by one
5955 for pci_device in avilable_pci_devices:
5956 task = self.add_pci_to_vm(host_obj, vm_obj, pci_device)
5957 if task:
5958 status = self.wait_for_vcenter_task(
5959 task, vcenter_conect
5960 )
5961
5962 if status:
5963 self.logger.info(
5964 "Added PCI device {} to VM {}".format(
5965 pci_device, str(vm_obj)
5966 )
5967 )
5968 else:
5969 self.logger.error(
5970                                         "Failed to add PCI device {} to VM {}".format(
5971 pci_device, str(vm_obj)
5972 )
5973 )
5974
5975 return True, vm_obj, vcenter_conect
5976 else:
5977 self.logger.error(
5978                             "Currently there is no host with the"
5979                             " {} available PCI devices required for VM {}".format(
5980 no_of_pci_devices, vmname_andid
5981 )
5982 )
5983
5984 raise vimconn.VimConnNotFoundException(
5985                         "Currently there is no host with the {} "
5986                         "available PCI devices required for VM {}".format(
5987 no_of_pci_devices, vmname_andid
5988 )
5989 )
5990 else:
5991 self.logger.debug(
5992                         "No information about PCI devices {}".format(pci_devices)
5993 )
5994 except vmodl.MethodFault as error:
5995                 self.logger.error("Error occurred while adding PCI devices: {}".format(error))
5996
5997 return None, vm_obj, vcenter_conect
5998
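    # Illustrative usage sketch (assumes `conn`, `vapp_uuid` and `vmname_andid` as used
    # elsewhere in this connector; the content of the PCI list is hypothetical, since
    # only its length is inspected above). The method returns a
    # (status, vm_obj, vcenter_conect) tuple, so callers can check the flag:
    #
    #     pci_devices = [{"count": 1}]  # one passthrough device requested
    #     status, vm_obj, vcenter_conect = conn.add_pci_devices(
    #         vapp_uuid, pci_devices, vmname_andid
    #     )
    #     if not status:
    #         conn.logger.error("PCI passthrough could not be configured")
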
5999 def get_vm_obj(self, content, mob_id):
6000 """
6001         Method to get the vSphere VM object associated with a given moref ID
6002         Args:
6004             content - vCenter content object
6005             mob_id - moref ID of the VM
6006
6007 Returns:
6008 VM and host object
6009 """
6010 vm_obj = None
6011 host_obj = None
6012
6013 try:
6014 container = content.viewManager.CreateContainerView(
6015 content.rootFolder, [vim.VirtualMachine], True
6016 )
6017 for vm in container.view:
6018 mobID = vm._GetMoId()
6019
6020 if mobID == mob_id:
6021 vm_obj = vm
6022 host_obj = vm_obj.runtime.host
6023 break
6024 except Exception as exp:
6025 self.logger.error("Error occurred while finding VM object : {}".format(exp))
6026
6027 return host_obj, vm_obj
6028
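    # Illustrative usage sketch (assumes a vimconnector instance `conn` with vCenter
    # credentials). get_vm_obj() is normally combined with get_vcenter_content() and
    # get_vm_moref_id(), exactly as the methods above do:
    #
    #     _, content = conn.get_vcenter_content()
    #     moref_id = conn.get_vm_moref_id(vapp_uuid)
    #     host_obj, vm_obj = conn.get_vm_obj(content, moref_id)
    #     if vm_obj is not None:
    #         conn.logger.debug("VM {} runs on host {}".format(vm_obj, host_obj))
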
6029 def get_pci_devices(self, host, need_devices):
6030 """
6031 Method to get the details of pci devices on given host
6032 Args:
6033 host - vSphere host object
6034 need_devices - number of pci devices needed on host
6035
6036 Returns:
6037 array of pci devices
6038 """
6039 all_devices = []
6040 all_device_ids = []
6041 used_devices_ids = []
6042
6043 try:
6044 if host:
6045 pciPassthruInfo = host.config.pciPassthruInfo
6046 pciDevies = host.hardware.pciDevice
6047
6048 for pci_status in pciPassthruInfo:
6049 if pci_status.passthruActive:
6050 for device in pciDevies:
6051 if device.id == pci_status.id:
6052 all_device_ids.append(device.id)
6053 all_devices.append(device)
6054
6055 # check if devices are in use
6056 avalible_devices = all_devices
6057 for vm in host.vm:
6058 if vm.runtime.powerState == vim.VirtualMachinePowerState.poweredOn:
6059 vm_devices = vm.config.hardware.device
6060 for device in vm_devices:
6061 if type(device) is vim.vm.device.VirtualPCIPassthrough:
6062 if device.backing.id in all_device_ids:
6063 for use_device in avalible_devices:
6064 if use_device.id == device.backing.id:
6065 avalible_devices.remove(use_device)
6066
6067 used_devices_ids.append(device.backing.id)
6068 self.logger.debug(
6069                                     "Device {} from devices {} "
6070                                     "is in use".format(device.backing.id, device)
6071 )
6072 if len(avalible_devices) < need_devices:
6073 self.logger.debug(
6074                         "Host {} does not have {} active devices".format(
6075 host, need_devices
6076 )
6077 )
6078 self.logger.debug(
6079 "found only {} devices {}".format(
6080 len(avalible_devices), avalible_devices
6081 )
6082 )
6083
6084 return None
6085 else:
6086 required_devices = avalible_devices[:need_devices]
6087 self.logger.info(
6088 "Found {} PCI devices on host {} but required only {}".format(
6089 len(avalible_devices), host, need_devices
6090 )
6091 )
6092 self.logger.info(
6093                         "Returning {} devices as {}".format(need_devices, required_devices)
6094 )
6095
6096 return required_devices
6097 except Exception as exp:
6098 self.logger.error(
6099 "Error {} occurred while finding pci devices on host: {}".format(
6100 exp, host
6101 )
6102 )
6103
6104 return None
6105
6106 def get_host_and_PCIdevices(self, content, need_devices):
6107 """
6108         Method to get PCI device information from all hosts
6109
6110 Args:
6111             content - vCenter content object
6112 need_devices - number of pci devices needed on host
6113
6114 Returns:
6115 array of pci devices and host object
6116 """
6117 host_obj = None
6118 pci_device_objs = None
6119
6120 try:
6121 if content:
6122 container = content.viewManager.CreateContainerView(
6123 content.rootFolder, [vim.HostSystem], True
6124 )
6125 for host in container.view:
6126 devices = self.get_pci_devices(host, need_devices)
6127
6128 if devices:
6129 host_obj = host
6130 pci_device_objs = devices
6131 break
6132 except Exception as exp:
6133 self.logger.error(
6134 "Error {} occurred while finding pci devices on host: {}".format(
6135 exp, host_obj
6136 )
6137 )
6138
6139 return host_obj, pci_device_objs
6140
6141 def relocate_vm(self, dest_host, vm):
6142 """
6143         Method to relocate a VM to a new host
6144
6145 Args:
6146 dest_host - vSphere host object
6147 vm - vSphere VM object
6148
6149 Returns:
6150 task object
6151 """
6152 task = None
6153
6154 try:
6155 relocate_spec = vim.vm.RelocateSpec(host=dest_host)
6156 task = vm.Relocate(relocate_spec)
6157 self.logger.info(
6158 "Migrating {} to destination host {}".format(vm, dest_host)
6159 )
6160 except Exception as exp:
6161 self.logger.error(
6162                 "Error occurred while relocating VM {} to new host {}: {}".format(
6163                     vm, dest_host, exp
6164 )
6165 )
6166
6167 return task
6168
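    # Illustrative usage sketch (assumes `conn`, a destination host object `new_host`
    # and a pyVmomi VM object `vm_obj` obtained via get_vm_obj()). The returned task
    # can be monitored with wait_for_vcenter_task() defined below:
    #
    #     task = conn.relocate_vm(new_host, vm_obj)
    #     if task is not None:
    #         conn.wait_for_vcenter_task(task, actionName="relocate")
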
6169 def wait_for_vcenter_task(self, task, actionName="job", hideResult=False):
6170 """
6171 Waits and provides updates on a vSphere task
6172 """
6173 while task.info.state == vim.TaskInfo.State.running:
6174 time.sleep(2)
6175
6176 if task.info.state == vim.TaskInfo.State.success:
6177 if task.info.result is not None and not hideResult:
6178 self.logger.info(
6179 "{} completed successfully, result: {}".format(
6180 actionName, task.info.result
6181 )
6182 )
6183 else:
6184 self.logger.info("Task {} completed successfully.".format(actionName))
6185 else:
6186 self.logger.error(
6187 "{} did not complete successfully: {} ".format(
6188 actionName, task.info.error
6189 )
6190 )
6191
6192 return task.info.result
6193
6194 def add_pci_to_vm(self, host_object, vm_object, host_pci_dev):
6195 """
6196 Method to add pci device in given VM
6197
6198 Args:
6199 host_object - vSphere host object
6200 vm_object - vSphere VM object
6201 host_pci_dev - host_pci_dev must be one of the devices from the
6202 host_object.hardware.pciDevice list
6203 which is configured as a PCI passthrough device
6204
6205 Returns:
6206 task object
6207 """
6208 task = None
6209
6210 if vm_object and host_object and host_pci_dev:
6211 try:
6212 # Add PCI device to VM
6213 pci_passthroughs = vm_object.environmentBrowser.QueryConfigTarget(
6214 host=None
6215 ).pciPassthrough
6216 systemid_by_pciid = {
6217 item.pciDevice.id: item.systemId for item in pci_passthroughs
6218 }
6219
6220 if host_pci_dev.id not in systemid_by_pciid:
6221 self.logger.error(
6222 "Device {} is not a passthrough device ".format(host_pci_dev)
6223 )
6224 return None
6225
6226                 deviceId = format(host_pci_dev.deviceId % 2**16, "x")  # hex string without the "0x" prefix
6227 backing = vim.VirtualPCIPassthroughDeviceBackingInfo(
6228 deviceId=deviceId,
6229 id=host_pci_dev.id,
6230 systemId=systemid_by_pciid[host_pci_dev.id],
6231 vendorId=host_pci_dev.vendorId,
6232 deviceName=host_pci_dev.deviceName,
6233 )
6234
6235 hba_object = vim.VirtualPCIPassthrough(key=-100, backing=backing)
6236 new_device_config = vim.VirtualDeviceConfigSpec(device=hba_object)
6237 new_device_config.operation = "add"
6238 vmConfigSpec = vim.vm.ConfigSpec()
6239 vmConfigSpec.deviceChange = [new_device_config]
6240 task = vm_object.ReconfigVM_Task(spec=vmConfigSpec)
6241 self.logger.info(
6242 "Adding PCI device {} into VM {} from host {} ".format(
6243 host_pci_dev, vm_object, host_object
6244 )
6245 )
6246 except Exception as exp:
6247 self.logger.error(
6248                     "Error occurred while adding PCI device {} to VM {}: {}".format(
6249 host_pci_dev, vm_object, exp
6250 )
6251 )
6252
6253 return task
6254
6255 def get_vm_vcenter_info(self):
6256 """
6257         Method to get the vCenter connection details for this VIM
6258
6259         Args:
6260             None (uses the vCenter settings passed in --config)
6261
6262         Returns:
6263             Dictionary with the vCenter IP, port, user and password
6264 """
6265 vm_vcenter_info = {}
6266
6267 if self.vcenter_ip is not None:
6268 vm_vcenter_info["vm_vcenter_ip"] = self.vcenter_ip
6269 else:
6270 raise vimconn.VimConnException(
6271 message="vCenter IP is not provided."
6272 " Please provide vCenter IP while attaching datacenter "
6273 "to tenant in --config"
6274 )
6275
6276 if self.vcenter_port is not None:
6277 vm_vcenter_info["vm_vcenter_port"] = self.vcenter_port
6278 else:
6279 raise vimconn.VimConnException(
6280 message="vCenter port is not provided."
6281 " Please provide vCenter port while attaching datacenter "
6282 "to tenant in --config"
6283 )
6284
6285 if self.vcenter_user is not None:
6286 vm_vcenter_info["vm_vcenter_user"] = self.vcenter_user
6287 else:
6288 raise vimconn.VimConnException(
6289 message="vCenter user is not provided."
6290 " Please provide vCenter user while attaching datacenter "
6291 "to tenant in --config"
6292 )
6293
6294 if self.vcenter_password is not None:
6295 vm_vcenter_info["vm_vcenter_password"] = self.vcenter_password
6296 else:
6297 raise vimconn.VimConnException(
6298 message="vCenter user password is not provided."
6299 " Please provide vCenter user password while attaching datacenter "
6300 "to tenant in --config"
6301 )
6302
6303 return vm_vcenter_info
6304
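    # Illustrative usage sketch (assumes `conn` is a vimconnector instance). The
    # dictionary returned above always carries the four vCenter settings provided in
    # --config, otherwise the method raises VimConnException:
    #
    #     vcenter_info = conn.get_vm_vcenter_info()
    #     vcenter_ip = vcenter_info["vm_vcenter_ip"]
    #     vcenter_port = vcenter_info["vm_vcenter_port"]
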
6305 def get_vm_pci_details(self, vmuuid):
6306 """
6307 Method to get VM PCI device details from vCenter
6308
6309 Args:
6310             vmuuid - UUID of the VM
6311
6312 Returns:
6313             dict of PCI devices attached to the VM
6314
6315 """
6316 vm_pci_devices_info = {}
6317
6318 try:
6319 _, content = self.get_vcenter_content()
6320 vm_moref_id = self.get_vm_moref_id(vmuuid)
6321 if vm_moref_id:
6322 # Get VM and its host
6323 if content:
6324 host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
6325 if host_obj and vm_obj:
6326 vm_pci_devices_info["host_name"] = host_obj.name
6327 vm_pci_devices_info["host_ip"] = host_obj.config.network.vnic[
6328 0
6329 ].spec.ip.ipAddress
6330
6331 for device in vm_obj.config.hardware.device:
6332 if type(device) == vim.vm.device.VirtualPCIPassthrough:
6333 device_details = {
6334 "devide_id": device.backing.id,
6335 "pciSlotNumber": device.slotInfo.pciSlotNumber,
6336 }
6337 vm_pci_devices_info[
6338 device.deviceInfo.label
6339 ] = device_details
6340 else:
6341 self.logger.error(
6342                     "Cannot connect to vCenter while getting "
6343                     "PCI device information"
6344 )
6345
6346 return vm_pci_devices_info
6347 except Exception as exp:
6348 self.logger.error(
6349                 "Error occurred while getting VM PCI information: {}".format(exp)
6350 )
6351
6352 raise vimconn.VimConnException(message=exp)
6353
6354 def reserve_memory_for_all_vms(self, vapp, memory_mb):
6355 """
6356 Method to reserve memory for all VMs
6357 Args :
6358 vapp - VApp
6359 memory_mb - Memory in MB
6360 Returns:
6361 None
6362 """
6363 self.logger.info("Reserve memory for all VMs")
6364
6365 for vms in vapp.get_all_vms():
6366 vm_id = vms.get("id").split(":")[-1]
6367 url_rest_call = "{}/api/vApp/vm-{}/virtualHardwareSection/memory".format(
6368 self.url, vm_id
6369 )
6370 headers = {
6371 "Accept": "application/*+xml;version=" + API_VERSION,
6372 "x-vcloud-authorization": self.client._session.headers[
6373 "x-vcloud-authorization"
6374 ],
6375 }
6376 headers["Content-Type"] = "application/vnd.vmware.vcloud.rasdItem+xml"
6377 response = self.perform_request(
6378 req_type="GET", url=url_rest_call, headers=headers
6379 )
6380
6381 if response.status_code == 403:
6382 response = self.retry_rest("GET", url_rest_call)
6383
6384 if response.status_code != 200:
6385 self.logger.error(
6386                     "REST call {} failed reason : {} "
6387 "status code : {}".format(
6388 url_rest_call, response.text, response.status_code
6389 )
6390 )
6391 raise vimconn.VimConnException(
6392 "reserve_memory_for_all_vms : Failed to get " "memory"
6393 )
6394
6395 bytexml = bytes(bytearray(response.text, encoding="utf-8"))
6396 contentelem = lxmlElementTree.XML(bytexml)
6397 namespaces = {
6398 prefix: uri for prefix, uri in contentelem.nsmap.items() if prefix
6399 }
6400 namespaces["xmlns"] = "http://www.vmware.com/vcloud/v1.5"
6401
6402 # Find the reservation element in the response
6403 memelem_list = contentelem.findall(".//rasd:Reservation", namespaces)
6404 for memelem in memelem_list:
6405 memelem.text = str(memory_mb)
6406
6407 newdata = lxmlElementTree.tostring(contentelem, pretty_print=True)
6408
6409 response = self.perform_request(
6410 req_type="PUT", url=url_rest_call, headers=headers, data=newdata
6411 )
6412
6413 if response.status_code == 403:
6414 add_headers = {"Content-Type": headers["Content-Type"]}
6415 response = self.retry_rest("PUT", url_rest_call, add_headers, newdata)
6416
6417 if response.status_code != 202:
6418 self.logger.error(
6419                     "REST call {} failed reason : {} "
6420 "status code : {} ".format(
6421 url_rest_call, response.text, response.status_code
6422 )
6423 )
6424 raise vimconn.VimConnException(
6425 "reserve_memory_for_all_vms : Failed to update "
6426 "virtual hardware memory section"
6427 )
6428 else:
6429 mem_task = self.get_task_from_response(response.text)
6430 result = self.client.get_task_monitor().wait_for_success(task=mem_task)
6431
6432 if result.get("status") == "success":
6433 self.logger.info(
6434 "reserve_memory_for_all_vms(): VM {} succeeded ".format(vm_id)
6435 )
6436 else:
6437 self.logger.error(
6438 "reserve_memory_for_all_vms(): VM {} failed ".format(vm_id)
6439 )
6440
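    # Illustrative usage sketch (assumes `conn` and a pyvcloud VApp object `vapp`,
    # i.e. something exposing get_all_vms() as used above). Reserving 4 GB for every
    # VM of the vApp would look like:
    #
    #     conn.reserve_memory_for_all_vms(vapp, memory_mb=4096)
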
6441 def connect_vapp_to_org_vdc_network(self, vapp_id, net_name):
6442 """
6443         Configure the vApp network config with an org VDC network
6444         Args :
6445             vapp_id - vApp identifier; net_name - name of the org VDC network
6446         Returns:
6447             None
6448 """
6449
6450 self.logger.info(
6451 "Connecting vapp {} to org vdc network {}".format(vapp_id, net_name)
6452 )
6453
6454 url_rest_call = "{}/api/vApp/vapp-{}/networkConfigSection/".format(
6455 self.url, vapp_id
6456 )
6457
6458 headers = {
6459 "Accept": "application/*+xml;version=" + API_VERSION,
6460 "x-vcloud-authorization": self.client._session.headers[
6461 "x-vcloud-authorization"
6462 ],
6463 }
6464 response = self.perform_request(
6465 req_type="GET", url=url_rest_call, headers=headers
6466 )
6467
6468 if response.status_code == 403:
6469 response = self.retry_rest("GET", url_rest_call)
6470
6471 if response.status_code != 200:
6472 self.logger.error(
6473                 "REST call {} failed reason : {} "
6474 "status code : {}".format(
6475 url_rest_call, response.text, response.status_code
6476 )
6477 )
6478 raise vimconn.VimConnException(
6479 "connect_vapp_to_org_vdc_network : Failed to get "
6480 "network config section"
6481 )
6482
6483 data = response.text
6484 headers[
6485 "Content-Type"
6486 ] = "application/vnd.vmware.vcloud.networkConfigSection+xml"
6487 net_id = self.get_network_id_by_name(net_name)
6488 if not net_id:
6489 raise vimconn.VimConnException(
6490 "connect_vapp_to_org_vdc_network : Failed to find " "existing network"
6491 )
6492
6493 bytexml = bytes(bytearray(data, encoding="utf-8"))
6494 newelem = lxmlElementTree.XML(bytexml)
6495 namespaces = {prefix: uri for prefix, uri in newelem.nsmap.items() if prefix}
6496 namespaces["xmlns"] = "http://www.vmware.com/vcloud/v1.5"
6497 nwcfglist = newelem.findall(".//xmlns:NetworkConfig", namespaces)
6498
6499 # VCD 9.7 returns an incorrect parentnetwork element. Fix it before PUT operation
6500 parentnetworklist = newelem.findall(".//xmlns:ParentNetwork", namespaces)
6501 if parentnetworklist:
6502 for pn in parentnetworklist:
6503 if "href" not in pn.keys():
6504 id_val = pn.get("id")
6505 href_val = "{}/api/network/{}".format(self.url, id_val)
6506 pn.set("href", href_val)
6507
6508 newstr = """<NetworkConfig networkName="{}">
6509 <Configuration>
6510 <ParentNetwork href="{}/api/network/{}"/>
6511 <FenceMode>bridged</FenceMode>
6512 </Configuration>
6513 </NetworkConfig>
6514 """.format(
6515 net_name, self.url, net_id
6516 )
6517 newcfgelem = lxmlElementTree.fromstring(newstr)
6518 if nwcfglist:
6519 nwcfglist[0].addnext(newcfgelem)
6520
6521 newdata = lxmlElementTree.tostring(newelem, pretty_print=True)
6522
6523 response = self.perform_request(
6524 req_type="PUT", url=url_rest_call, headers=headers, data=newdata
6525 )
6526
6527 if response.status_code == 403:
6528 add_headers = {"Content-Type": headers["Content-Type"]}
6529 response = self.retry_rest("PUT", url_rest_call, add_headers, newdata)
6530
6531 if response.status_code != 202:
6532 self.logger.error(
6533                 "REST call {} failed reason : {} "
6534 "status code : {} ".format(
6535 url_rest_call, response.text, response.status_code
6536 )
6537 )
6538 raise vimconn.VimConnException(
6539 "connect_vapp_to_org_vdc_network : Failed to update "
6540 "network config section"
6541 )
6542 else:
6543 vapp_task = self.get_task_from_response(response.text)
6544 result = self.client.get_task_monitor().wait_for_success(task=vapp_task)
6545 if result.get("status") == "success":
6546 self.logger.info(
6547 "connect_vapp_to_org_vdc_network(): Vapp {} connected to "
6548 "network {}".format(vapp_id, net_name)
6549 )
6550 else:
6551 self.logger.error(
6552 "connect_vapp_to_org_vdc_network(): Vapp {} failed to "
6553 "connect to network {}".format(vapp_id, net_name)
6554 )
6555
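    # Illustrative usage sketch (the vApp id and network name are placeholders). The
    # method above raises VimConnException if the network config section cannot be
    # fetched or updated, or if the named org VDC network does not exist:
    #
    #     conn.connect_vapp_to_org_vdc_network(vapp_id, "mgmt-vdc-net")
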
6556 def remove_primary_network_adapter_from_all_vms(self, vapp):
6557 """
6558         Method to remove the primary network adapter from all VMs in the vApp
6559 Args :
6560 vapp - VApp
6561 Returns:
6562 None
6563 """
6564 self.logger.info("Removing network adapter from all VMs")
6565
6566 for vms in vapp.get_all_vms():
6567 vm_id = vms.get("id").split(":")[-1]
6568
6569 url_rest_call = "{}/api/vApp/vm-{}/networkConnectionSection/".format(
6570 self.url, vm_id
6571 )
6572
6573 headers = {
6574 "Accept": "application/*+xml;version=" + API_VERSION,
6575 "x-vcloud-authorization": self.client._session.headers[
6576 "x-vcloud-authorization"
6577 ],
6578 }
6579 response = self.perform_request(
6580 req_type="GET", url=url_rest_call, headers=headers
6581 )
6582
6583 if response.status_code == 403:
6584 response = self.retry_rest("GET", url_rest_call)
6585
6586 if response.status_code != 200:
6587 self.logger.error(
6588                     "REST call {} failed reason : {} "
6589 "status code : {}".format(
6590 url_rest_call, response.text, response.status_code
6591 )
6592 )
6593 raise vimconn.VimConnException(
6594 "remove_primary_network_adapter : Failed to get "
6595 "network connection section"
6596 )
6597
6598 data = response.text
6599 data = data.split('<Link rel="edit"')[0]
6600
6601 headers[
6602 "Content-Type"
6603 ] = "application/vnd.vmware.vcloud.networkConnectionSection+xml"
6604
6605 newdata = """<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
6606 <NetworkConnectionSection xmlns="http://www.vmware.com/vcloud/v1.5"
6607 xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1"
6608 xmlns:vssd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData"
6609 xmlns:common="http://schemas.dmtf.org/wbem/wscim/1/common"
6610 xmlns:rasd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData"
6611 xmlns:vmw="http://www.vmware.com/schema/ovf"
6612 xmlns:ovfenv="http://schemas.dmtf.org/ovf/environment/1"
6613 xmlns:vmext="http://www.vmware.com/vcloud/extension/v1.5"
6614 xmlns:ns9="http://www.vmware.com/vcloud/versions"
6615 href="{url}" type="application/vnd.vmware.vcloud.networkConnectionSection+xml"
6616 ovf:required="false">
6617 <ovf:Info>Specifies the available VM network connections</ovf:Info>
6618 <PrimaryNetworkConnectionIndex>0</PrimaryNetworkConnectionIndex>
6619 <Link rel="edit" href="{url}"
6620 type="application/vnd.vmware.vcloud.networkConnectionSection+xml"/>
6621 </NetworkConnectionSection>""".format(
6622 url=url_rest_call
6623 )
6624 response = self.perform_request(
6625 req_type="PUT", url=url_rest_call, headers=headers, data=newdata
6626 )
6627
6628 if response.status_code == 403:
6629 add_headers = {"Content-Type": headers["Content-Type"]}
6630 response = self.retry_rest("PUT", url_rest_call, add_headers, newdata)
6631
6632 if response.status_code != 202:
6633 self.logger.error(
6634                     "REST call {} failed reason : {} "
6635 "status code : {} ".format(
6636 url_rest_call, response.text, response.status_code
6637 )
6638 )
6639 raise vimconn.VimConnException(
6640 "remove_primary_network_adapter : Failed to update "
6641 "network connection section"
6642 )
6643 else:
6644 nic_task = self.get_task_from_response(response.text)
6645 result = self.client.get_task_monitor().wait_for_success(task=nic_task)
6646 if result.get("status") == "success":
6647 self.logger.info(
6648                         "remove_primary_network_adapter(): removed primary "
6649                         "network adapter from VM {}".format(vm_id)
6650 )
6651 else:
6652 self.logger.error(
6653                         "remove_primary_network_adapter(): failed to remove "
6654                         "primary network adapter from VM {}".format(vm_id)
6655 )
6656
6657 def add_network_adapter_to_vms(
6658 self, vapp, network_name, primary_nic_index, nicIndex, net, nic_type=None
6659 ):
6660 """
6661         Method to add a network adapter of the given type to each VM in the vApp
6662         Args :
6663             network_name - name of the network
6664             primary_nic_index - int value for the primary NIC index
6665             nicIndex - int value for the NIC index
6666             nic_type - NIC adapter model name to attach to the VM
6667 Returns:
6668 None
6669 """
6670
6671 self.logger.info(
6672 "Add network adapter to VM: network_name {} nicIndex {} nic_type {}".format(
6673 network_name, nicIndex, nic_type
6674 )
6675 )
6676 try:
6677 ip_address = None
6678 floating_ip = False
6679 mac_address = None
6680 if "floating_ip" in net:
6681 floating_ip = net["floating_ip"]
6682
6683 # Stub for ip_address feature
6684 if "ip_address" in net:
6685 ip_address = net["ip_address"]
6686
6687 if "mac_address" in net:
6688 mac_address = net["mac_address"]
6689
6690 if floating_ip:
6691 allocation_mode = "POOL"
6692 elif ip_address:
6693 allocation_mode = "MANUAL"
6694 else:
6695 allocation_mode = "DHCP"
6696
6697 if not nic_type:
6698 for vms in vapp.get_all_vms():
6699 vm_id = vms.get("id").split(":")[-1]
6700
6701 url_rest_call = (
6702 "{}/api/vApp/vm-{}/networkConnectionSection/".format(
6703 self.url, vm_id
6704 )
6705 )
6706
6707 headers = {
6708 "Accept": "application/*+xml;version=" + API_VERSION,
6709 "x-vcloud-authorization": self.client._session.headers[
6710 "x-vcloud-authorization"
6711 ],
6712 }
6713 response = self.perform_request(
6714 req_type="GET", url=url_rest_call, headers=headers
6715 )
6716
6717 if response.status_code == 403:
6718 response = self.retry_rest("GET", url_rest_call)
6719
6720 if response.status_code != 200:
6721 self.logger.error(
6722                             "REST call {} failed reason : {} "
6723 "status code : {}".format(
6724 url_rest_call, response.text, response.status_code
6725 )
6726 )
6727 raise vimconn.VimConnException(
6728 "add_network_adapter_to_vms : Failed to get "
6729 "network connection section"
6730 )
6731
6732 data = response.text
6733 data = data.split('<Link rel="edit"')[0]
6734 if "<PrimaryNetworkConnectionIndex>" not in data:
6735 self.logger.debug("add_network_adapter PrimaryNIC not in data")
6736 item = """<PrimaryNetworkConnectionIndex>{}</PrimaryNetworkConnectionIndex>
6737 <NetworkConnection network="{}">
6738 <NetworkConnectionIndex>{}</NetworkConnectionIndex>
6739 <IsConnected>true</IsConnected>
6740 <IpAddressAllocationMode>{}</IpAddressAllocationMode>
6741 </NetworkConnection>""".format(
6742 primary_nic_index, network_name, nicIndex, allocation_mode
6743 )
6744
6745 # Stub for ip_address feature
6746 if ip_address:
6747 ip_tag = "<IpAddress>{}</IpAddress>".format(ip_address)
6748 item = item.replace(
6749 "</NetworkConnectionIndex>\n",
6750 "</NetworkConnectionIndex>\n{}\n".format(ip_tag),
6751 )
6752
6753 if mac_address:
6754 mac_tag = "<MACAddress>{}</MACAddress>".format(mac_address)
6755 item = item.replace(
6756 "</IsConnected>\n",
6757 "</IsConnected>\n{}\n".format(mac_tag),
6758 )
6759
6760 data = data.replace(
6761 "</ovf:Info>\n",
6762 "</ovf:Info>\n{}\n</NetworkConnectionSection>".format(item),
6763 )
6764 else:
6765 self.logger.debug("add_network_adapter PrimaryNIC in data")
6766 new_item = """<NetworkConnection network="{}">
6767 <NetworkConnectionIndex>{}</NetworkConnectionIndex>
6768 <IsConnected>true</IsConnected>
6769 <IpAddressAllocationMode>{}</IpAddressAllocationMode>
6770 </NetworkConnection>""".format(
6771 network_name, nicIndex, allocation_mode
6772 )
6773
6774 # Stub for ip_address feature
6775 if ip_address:
6776 ip_tag = "<IpAddress>{}</IpAddress>".format(ip_address)
6777 new_item = new_item.replace(
6778 "</NetworkConnectionIndex>\n",
6779 "</NetworkConnectionIndex>\n{}\n".format(ip_tag),
6780 )
6781
6782 if mac_address:
6783 mac_tag = "<MACAddress>{}</MACAddress>".format(mac_address)
6784 new_item = new_item.replace(
6785 "</IsConnected>\n",
6786 "</IsConnected>\n{}\n".format(mac_tag),
6787 )
6788
6789 data = data + new_item + "</NetworkConnectionSection>"
6790
6791 headers[
6792 "Content-Type"
6793 ] = "application/vnd.vmware.vcloud.networkConnectionSection+xml"
6794
6795 response = self.perform_request(
6796 req_type="PUT", url=url_rest_call, headers=headers, data=data
6797 )
6798
6799 if response.status_code == 403:
6800 add_headers = {"Content-Type": headers["Content-Type"]}
6801 response = self.retry_rest(
6802 "PUT", url_rest_call, add_headers, data
6803 )
6804
6805 if response.status_code != 202:
6806 self.logger.error(
6807                             "REST call {} failed reason : {} "
6808 "status code : {} ".format(
6809 url_rest_call, response.text, response.status_code
6810 )
6811 )
6812 raise vimconn.VimConnException(
6813 "add_network_adapter_to_vms : Failed to update "
6814 "network connection section"
6815 )
6816 else:
6817 nic_task = self.get_task_from_response(response.text)
6818 result = self.client.get_task_monitor().wait_for_success(
6819 task=nic_task
6820 )
6821
6822 if result.get("status") == "success":
6823 self.logger.info(
6824                                 "add_network_adapter_to_vms(): VM {} connected to "
6825 "default NIC type".format(vm_id)
6826 )
6827 else:
6828 self.logger.error(
6829 "add_network_adapter_to_vms(): VM {} failed to "
6830 "connect NIC type".format(vm_id)
6831 )
6832 else:
6833 for vms in vapp.get_all_vms():
6834 vm_id = vms.get("id").split(":")[-1]
6835
6836 url_rest_call = (
6837 "{}/api/vApp/vm-{}/networkConnectionSection/".format(
6838 self.url, vm_id
6839 )
6840 )
6841
6842 headers = {
6843 "Accept": "application/*+xml;version=" + API_VERSION,
6844 "x-vcloud-authorization": self.client._session.headers[
6845 "x-vcloud-authorization"
6846 ],
6847 }
6848 response = self.perform_request(
6849 req_type="GET", url=url_rest_call, headers=headers
6850 )
6851
6852 if response.status_code == 403:
6853 response = self.retry_rest("GET", url_rest_call)
6854
6855 if response.status_code != 200:
6856 self.logger.error(
6857                             "REST call {} failed reason : {} "
6858 "status code : {}".format(
6859 url_rest_call, response.text, response.status_code
6860 )
6861 )
6862 raise vimconn.VimConnException(
6863 "add_network_adapter_to_vms : Failed to get "
6864 "network connection section"
6865 )
6866 data = response.text
6867 data = data.split('<Link rel="edit"')[0]
6868 vcd_netadapter_type = nic_type
6869
6870 if nic_type in ["SR-IOV", "VF"]:
6871 vcd_netadapter_type = "SRIOVETHERNETCARD"
6872
6873 if "<PrimaryNetworkConnectionIndex>" not in data:
6874 self.logger.debug(
6875 "add_network_adapter PrimaryNIC not in data nic_type {}".format(
6876 nic_type
6877 )
6878 )
6879 item = """<PrimaryNetworkConnectionIndex>{}</PrimaryNetworkConnectionIndex>
6880 <NetworkConnection network="{}">
6881 <NetworkConnectionIndex>{}</NetworkConnectionIndex>
6882 <IsConnected>true</IsConnected>
6883 <IpAddressAllocationMode>{}</IpAddressAllocationMode>
6884 <NetworkAdapterType>{}</NetworkAdapterType>
6885 </NetworkConnection>""".format(
6886 primary_nic_index,
6887 network_name,
6888 nicIndex,
6889 allocation_mode,
6890 vcd_netadapter_type,
6891 )
6892
6893 # Stub for ip_address feature
6894 if ip_address:
6895 ip_tag = "<IpAddress>{}</IpAddress>".format(ip_address)
6896 item = item.replace(
6897 "</NetworkConnectionIndex>\n",
6898 "</NetworkConnectionIndex>\n{}\n".format(ip_tag),
6899 )
6900
6901 if mac_address:
6902 mac_tag = "<MACAddress>{}</MACAddress>".format(mac_address)
6903 item = item.replace(
6904 "</IsConnected>\n",
6905 "</IsConnected>\n{}\n".format(mac_tag),
6906 )
6907
6908 data = data.replace(
6909 "</ovf:Info>\n",
6910 "</ovf:Info>\n{}\n</NetworkConnectionSection>".format(item),
6911 )
6912 else:
6913 self.logger.debug(
6914 "add_network_adapter PrimaryNIC in data nic_type {}".format(
6915 nic_type
6916 )
6917 )
6918 new_item = """<NetworkConnection network="{}">
6919 <NetworkConnectionIndex>{}</NetworkConnectionIndex>
6920 <IsConnected>true</IsConnected>
6921 <IpAddressAllocationMode>{}</IpAddressAllocationMode>
6922 <NetworkAdapterType>{}</NetworkAdapterType>
6923 </NetworkConnection>""".format(
6924 network_name, nicIndex, allocation_mode, vcd_netadapter_type
6925 )
6926
6927 # Stub for ip_address feature
6928 if ip_address:
6929 ip_tag = "<IpAddress>{}</IpAddress>".format(ip_address)
6930 new_item = new_item.replace(
6931 "</NetworkConnectionIndex>\n",
6932 "</NetworkConnectionIndex>\n{}\n".format(ip_tag),
6933 )
6934
6935 if mac_address:
6936 mac_tag = "<MACAddress>{}</MACAddress>".format(mac_address)
6937 new_item = new_item.replace(
6938 "</IsConnected>\n",
6939 "</IsConnected>\n{}\n".format(mac_tag),
6940 )
6941
6942 data = data + new_item + "</NetworkConnectionSection>"
6943
6944 headers[
6945 "Content-Type"
6946 ] = "application/vnd.vmware.vcloud.networkConnectionSection+xml"
6947
6948 response = self.perform_request(
6949 req_type="PUT", url=url_rest_call, headers=headers, data=data
6950 )
6951
6952 if response.status_code == 403:
6953 add_headers = {"Content-Type": headers["Content-Type"]}
6954 response = self.retry_rest(
6955 "PUT", url_rest_call, add_headers, data
6956 )
6957
6958 if response.status_code != 202:
6959 self.logger.error(
6960                             "REST call {} failed reason : {} "
6961 "status code : {}".format(
6962 url_rest_call, response.text, response.status_code
6963 )
6964 )
6965 raise vimconn.VimConnException(
6966 "add_network_adapter_to_vms : Failed to update "
6967 "network connection section"
6968 )
6969 else:
6970 nic_task = self.get_task_from_response(response.text)
6971 result = self.client.get_task_monitor().wait_for_success(
6972 task=nic_task
6973 )
6974
6975 if result.get("status") == "success":
6976 self.logger.info(
6977 "add_network_adapter_to_vms(): VM {} "
6978                                     "connected to NIC type {}".format(vm_id, nic_type)
6979 )
6980 else:
6981 self.logger.error(
6982 "add_network_adapter_to_vms(): VM {} "
6983 "failed to connect NIC type {}".format(vm_id, nic_type)
6984 )
6985 except Exception as exp:
6986 self.logger.error(
6987 "add_network_adapter_to_vms() : exception occurred "
6988                 "while adding network adapter: {}".format(exp)
6989 )
6990
6991 raise vimconn.VimConnException(message=exp)
6992
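    # Illustrative usage sketch (network name, indexes and NIC model are placeholder
    # values). The `net` dict may carry the optional keys handled above (floating_ip,
    # ip_address, mac_address); SR-IOV/VF nic_type values are mapped to SRIOVETHERNETCARD:
    #
    #     net = {"ip_address": "10.10.0.12"}  # MANUAL IP allocation mode
    #     conn.add_network_adapter_to_vms(
    #         vapp, "mgmt-vdc-net", primary_nic_index=0, nicIndex=0, net=net, nic_type="VMXNET3"
    #     )
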
6993 def set_numa_affinity(self, vmuuid, paired_threads_id):
6994 """
6995         Method to assign NUMA affinity in the VM configuration parameters
6996 Args :
6997 vmuuid - vm uuid
6998 paired_threads_id - one or more virtual processor
6999 numbers
7000 Returns:
7001             None; raises VimConnException on failure
7002 """
7003 try:
7004 vcenter_conect, content = self.get_vcenter_content()
7005 vm_moref_id = self.get_vm_moref_id(vmuuid)
7006 _, vm_obj = self.get_vm_obj(content, vm_moref_id)
7007
7008 if vm_obj:
7009 config_spec = vim.vm.ConfigSpec()
7010 config_spec.extraConfig = []
7011 opt = vim.option.OptionValue()
7012 opt.key = "numa.nodeAffinity"
7013 opt.value = str(paired_threads_id)
7014 config_spec.extraConfig.append(opt)
7015 task = vm_obj.ReconfigVM_Task(config_spec)
7016
7017 if task:
7018 self.wait_for_vcenter_task(task, vcenter_conect)
7019 extra_config = vm_obj.config.extraConfig
7020 flag = False
7021
7022 for opts in extra_config:
7023 if "numa.nodeAffinity" in opts.key:
7024 flag = True
7025 self.logger.info(
7026                                 "set_numa_affinity: Successfully assigned numa affinity "
7027 "value {} for vm {}".format(opt.value, vm_obj)
7028 )
7029
7030 if flag:
7031 return
7032 else:
7033 self.logger.error("set_numa_affinity: Failed to assign numa affinity")
7034 except Exception as exp:
7035 self.logger.error(
7036 "set_numa_affinity : exception occurred while setting numa affinity "
7037 "for VM {} : {}".format(vm_obj, vm_moref_id)
7038 )
7039
7040 raise vimconn.VimConnException(
7041 "set_numa_affinity : Error {} failed to assign numa "
7042 "affinity".format(exp)
7043 )
7044
7045 def cloud_init(self, vapp, cloud_config):
7046 """
7047 Method to inject ssh-key
7048 vapp - vapp object
7049 cloud_config a dictionary with:
7050 'key-pairs': (optional) list of strings with the public key to be inserted to the default user
7051 'users': (optional) list of users to be inserted, each item is a dict with:
7052 'name': (mandatory) user name,
7053 'key-pairs': (optional) list of strings with the public key to be inserted to the user
7054 'user-data': (optional) can be a string with the text script to be passed directly to cloud-init,
7055 or a list of strings, each one contains a script to be passed, usually with a MIMEmultipart file
7056 'config-files': (optional). List of files to be transferred. Each item is a dict with:
7057 'dest': (mandatory) string with the destination absolute path
7058 'encoding': (optional, by default text). Can be one of:
7059 'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
7060 'content' (mandatory): string with the content of the file
7061 'permissions': (optional) string with file permissions, typically octal notation '0644'
7062 'owner': (optional) file owner, string with the format 'owner:group'
7063             'boot-data-drive': boolean to indicate if user-data must be passed using a boot drive (hard disk)
7064 """
7065 try:
7066 if not isinstance(cloud_config, dict):
7067 raise Exception(
7068 "cloud_init : parameter cloud_config is not a dictionary"
7069 )
7070 else:
7071 key_pairs = []
7072 userdata = []
7073
7074 if "key-pairs" in cloud_config:
7075 key_pairs = cloud_config["key-pairs"]
7076
7077 if "users" in cloud_config:
7078 userdata = cloud_config["users"]
7079
7080 self.logger.debug("cloud_init : Guest os customization started..")
7081 customize_script = self.format_script(
7082 key_pairs=key_pairs, users_list=userdata
7083 )
7084 customize_script = customize_script.replace("&", "&amp;")
7085 self.guest_customization(vapp, customize_script)
7086 except Exception as exp:
7087 self.logger.error(
7088 "cloud_init : exception occurred while injecting " "ssh-key"
7089 )
7090
7091 raise vimconn.VimConnException(
7092 "cloud_init : Error {} failed to inject " "ssh-key".format(exp)
7093 )
7094
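    # Illustrative usage sketch (the keys follow the cloud_config structure documented
    # in the docstring above; the key material shown is a placeholder):
    #
    #     cloud_config = {
    #         "key-pairs": ["ssh-rsa AAAA... operator@osm"],
    #         "users": [{"name": "osm", "key-pairs": ["ssh-rsa AAAA... osm@host"]}],
    #     }
    #     conn.cloud_init(vapp, cloud_config)
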
7095 def format_script(self, key_pairs=[], users_list=[]):
7096 bash_script = """#!/bin/sh
7097 echo performing customization tasks with param $1 at `date "+DATE: %Y-%m-%d - TIME: %H:%M:%S"`>> /root/customization.log
7098 if [ "$1" = "precustomization" ];then
7099 echo performing precustomization tasks on `date "+DATE: %Y-%m-%d - TIME: %H:%M:%S"` >> /root/customization.log
7100 """
7101
7102 keys = "\n".join(key_pairs)
7103 if keys:
7104 keys_data = """
7105 if [ ! -d /root/.ssh ];then
7106 mkdir /root/.ssh
7107 chown root:root /root/.ssh
7108 chmod 700 /root/.ssh
7109 touch /root/.ssh/authorized_keys
7110 chown root:root /root/.ssh/authorized_keys
7111 chmod 600 /root/.ssh/authorized_keys
7112 # make centos with selinux happy
7113 which restorecon && restorecon -Rv /root/.ssh
7114 else
7115 touch /root/.ssh/authorized_keys
7116 chown root:root /root/.ssh/authorized_keys
7117 chmod 600 /root/.ssh/authorized_keys
7118 fi
7119 echo '{key}' >> /root/.ssh/authorized_keys
7120 """.format(
7121 key=keys
7122 )
7123
7124 bash_script += keys_data
7125
7126 for user in users_list:
7127 if "name" in user:
7128 user_name = user["name"]
7129
7130 if "key-pairs" in user:
7131 user_keys = "\n".join(user["key-pairs"])
7132 else:
7133 user_keys = None
7134
7135 add_user_name = """
7136 useradd -d /home/{user_name} -m -g users -s /bin/bash {user_name}
7137 """.format(
7138 user_name=user_name
7139 )
7140
7141 bash_script += add_user_name
7142
7143 if user_keys:
7144 user_keys_data = """
7145 mkdir /home/{user_name}/.ssh
7146 chown {user_name}:{user_name} /home/{user_name}/.ssh
7147 chmod 700 /home/{user_name}/.ssh
7148 touch /home/{user_name}/.ssh/authorized_keys
7149 chown {user_name}:{user_name} /home/{user_name}/.ssh/authorized_keys
7150 chmod 600 /home/{user_name}/.ssh/authorized_keys
7151 # make centos with selinux happy
7152 which restorecon && restorecon -Rv /home/{user_name}/.ssh
7153 echo '{user_key}' >> /home/{user_name}/.ssh/authorized_keys
7154 """.format(
7155 user_name=user_name, user_key=user_keys
7156 )
7157 bash_script += user_keys_data
7158
7159 return bash_script + "\n\tfi"
7160
7161 def guest_customization(self, vapp, customize_script):
7162 """
7163 Method to customize guest os
7164 vapp - Vapp object
7165 customize_script - Customize script to be run at first boot of VM.
7166 """
7167 for vm in vapp.get_all_vms():
7168 vm_id = vm.get("id").split(":")[-1]
7169 vm_name = vm.get("name")
7170 vm_name = vm_name.replace("_", "-")
7171
7172 vm_customization_url = (
7173 "{}/api/vApp/vm-{}/guestCustomizationSection/".format(self.url, vm_id)
7174 )
7175 headers = {
7176 "Accept": "application/*+xml;version=" + API_VERSION,
7177 "x-vcloud-authorization": self.client._session.headers[
7178 "x-vcloud-authorization"
7179 ],
7180 }
7181
7182 headers[
7183 "Content-Type"
7184 ] = "application/vnd.vmware.vcloud.guestCustomizationSection+xml"
7185
7186 data = """<GuestCustomizationSection
7187 xmlns="http://www.vmware.com/vcloud/v1.5"
7188 xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1"
7189 ovf:required="false" href="{}"
7190 type="application/vnd.vmware.vcloud.guestCustomizationSection+xml">
7191 <ovf:Info>Specifies Guest OS Customization Settings</ovf:Info>
7192 <Enabled>true</Enabled>
7193 <ChangeSid>false</ChangeSid>
7194 <VirtualMachineId>{}</VirtualMachineId>
7195 <JoinDomainEnabled>false</JoinDomainEnabled>
7196 <UseOrgSettings>false</UseOrgSettings>
7197 <AdminPasswordEnabled>false</AdminPasswordEnabled>
7198 <AdminPasswordAuto>true</AdminPasswordAuto>
7199 <AdminAutoLogonEnabled>false</AdminAutoLogonEnabled>
7200 <AdminAutoLogonCount>0</AdminAutoLogonCount>
7201 <ResetPasswordRequired>false</ResetPasswordRequired>
7202 <CustomizationScript>{}</CustomizationScript>
7203 <ComputerName>{}</ComputerName>
7204 <Link href="{}"
7205 type="application/vnd.vmware.vcloud.guestCustomizationSection+xml" rel="edit"/>
7206 </GuestCustomizationSection>
7207 """.format(
7208 vm_customization_url,
7209 vm_id,
7210 customize_script,
7211 vm_name,
7212 vm_customization_url,
7213 )
7214
7215 response = self.perform_request(
7216 req_type="PUT", url=vm_customization_url, headers=headers, data=data
7217 )
7218 if response.status_code == 202:
7219 guest_task = self.get_task_from_response(response.text)
7220 self.client.get_task_monitor().wait_for_success(task=guest_task)
7221 self.logger.info(
7222 "guest_customization : customized guest os task "
7223 "completed for VM {}".format(vm_name)
7224 )
7225 else:
7226 self.logger.error(
7227                     "guest_customization : task for customized guest os "
7228 "failed for VM {}".format(vm_name)
7229 )
7230
7231 raise vimconn.VimConnException(
7232                     "guest_customization : failed to perform "
7233 "guest os customization on VM {}".format(vm_name)
7234 )
7235
7236 def add_new_disk(self, vapp_uuid, disk_size):
7237 """
7238 Method to create an empty vm disk
7239
7240 Args:
7241 vapp_uuid - is vapp identifier.
7242 disk_size - size of disk to be created in GB
7243
7244 Returns:
7245 None
7246 """
7247 status = False
7248 vm_details = None
7249 try:
7250 # Disk size in GB, convert it into MB
7251 if disk_size is not None:
7252 disk_size_mb = int(disk_size) * 1024
7253 vm_details = self.get_vapp_details_rest(vapp_uuid)
7254
7255 if vm_details and "vm_virtual_hardware" in vm_details:
7256 self.logger.info(
7257 "Adding disk to VM: {} disk size:{}GB".format(
7258 vm_details["name"], disk_size
7259 )
7260 )
7261 disk_href = vm_details["vm_virtual_hardware"]["disk_edit_href"]
7262 status = self.add_new_disk_rest(disk_href, disk_size_mb)
7263 except Exception as exp:
7264 msg = "Error occurred while creating new disk {}.".format(exp)
7265 self.rollback_newvm(vapp_uuid, msg)
7266
7267 if status:
7268 self.logger.info(
7269 "Added new disk to VM: {} disk size:{}GB".format(
7270 vm_details["name"], disk_size
7271 )
7272 )
7273 else:
7274 # If failed to add disk, delete VM
7275 msg = "add_new_disk: Failed to add new disk to {}".format(
7276 vm_details["name"]
7277 )
7278 self.rollback_newvm(vapp_uuid, msg)
7279
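    # Illustrative usage sketch (assumes `conn` and an existing `vapp_uuid`). The size
    # is given in GB and converted to MB before the REST call; on failure the VM is
    # rolled back via rollback_newvm():
    #
    #     conn.add_new_disk(vapp_uuid, disk_size=10)
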
7280 def add_new_disk_rest(self, disk_href, disk_size_mb):
7281 """
7282         Retrieves the vApp disks section and adds a new empty disk
7283
7284 Args:
7285             disk_href: Disk section href to add the disk
7286 disk_size_mb: Disk size in MB
7287
7288 Returns: Status of add new disk task
7289 """
7290 status = False
7291 if self.client._session:
7292 headers = {
7293 "Accept": "application/*+xml;version=" + API_VERSION,
7294 "x-vcloud-authorization": self.client._session.headers[
7295 "x-vcloud-authorization"
7296 ],
7297 }
7298 response = self.perform_request(
7299 req_type="GET", url=disk_href, headers=headers
7300 )
7301
7302 if response.status_code == 403:
7303 response = self.retry_rest("GET", disk_href)
7304
7305 if response.status_code != requests.codes.ok:
7306 self.logger.error(
7307 "add_new_disk_rest: GET REST API call {} failed. Return status code {}".format(
7308 disk_href, response.status_code
7309 )
7310 )
7311
7312 return status
7313
7314 try:
7315             # Find bus type & max of instance IDs assigned to disks
7316 lxmlroot_respond = lxmlElementTree.fromstring(response.content)
7317 namespaces = {
7318 prefix: uri for prefix, uri in lxmlroot_respond.nsmap.items() if prefix
7319 }
7320 namespaces["xmlns"] = "http://www.vmware.com/vcloud/v1.5"
7321 instance_id = 0
7322
7323 for item in lxmlroot_respond.iterfind("xmlns:Item", namespaces):
7324 if item.find("rasd:Description", namespaces).text == "Hard disk":
7325 inst_id = int(item.find("rasd:InstanceID", namespaces).text)
7326
7327 if inst_id > instance_id:
7328 instance_id = inst_id
7329 disk_item = item.find("rasd:HostResource", namespaces)
7330 bus_subtype = disk_item.attrib[
7331 "{" + namespaces["xmlns"] + "}busSubType"
7332 ]
7333 bus_type = disk_item.attrib[
7334 "{" + namespaces["xmlns"] + "}busType"
7335 ]
7336
7337 instance_id = instance_id + 1
7338 new_item = """<Item>
7339 <rasd:Description>Hard disk</rasd:Description>
7340 <rasd:ElementName>New disk</rasd:ElementName>
7341 <rasd:HostResource
7342 xmlns:vcloud="http://www.vmware.com/vcloud/v1.5"
7343 vcloud:capacity="{}"
7344 vcloud:busSubType="{}"
7345 vcloud:busType="{}"></rasd:HostResource>
7346 <rasd:InstanceID>{}</rasd:InstanceID>
7347 <rasd:ResourceType>17</rasd:ResourceType>
7348 </Item>""".format(
7349 disk_size_mb, bus_subtype, bus_type, instance_id
7350 )
7351
7352 new_data = response.text
7353 # Add new item at the bottom
7354 new_data = new_data.replace(
7355 "</Item>\n</RasdItemsList>",
7356 "</Item>\n{}\n</RasdItemsList>".format(new_item),
7357 )
7358
7359 # Send PUT request to modify virtual hardware section with new disk
7360 headers[
7361 "Content-Type"
7362 ] = "application/vnd.vmware.vcloud.rasdItemsList+xml; charset=ISO-8859-1"
7363
7364 response = self.perform_request(
7365 req_type="PUT", url=disk_href, data=new_data, headers=headers
7366 )
7367
7368 if response.status_code == 403:
7369 add_headers = {"Content-Type": headers["Content-Type"]}
7370 response = self.retry_rest("PUT", disk_href, add_headers, new_data)
7371
7372 if response.status_code != 202:
7373 self.logger.error(
7374 "PUT REST API call {} failed. Return status code {}. response.text:{}".format(
7375 disk_href, response.status_code, response.text
7376 )
7377 )
7378 else:
7379 add_disk_task = self.get_task_from_response(response.text)
7380 result = self.client.get_task_monitor().wait_for_success(
7381 task=add_disk_task
7382 )
7383
7384 if result.get("status") == "success":
7385 status = True
7386 else:
7387 self.logger.error(
7388 "Add new disk REST task failed to add {} MB disk".format(
7389 disk_size_mb
7390 )
7391 )
7392 except Exception as exp:
7393 self.logger.error(
7394 "Error occurred calling rest api for creating new disk {}".format(exp)
7395 )
7396
7397 return status
7398
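# Example of the RasdItemsList <Item> that add_new_disk_rest() above appends
# before the PUT (a sketch with assumed values: a 10 GB / 10240 MB disk, bus
# type/subtype copied from the existing hard disk with the highest
# InstanceID, and the next free InstanceID):
#
#     <Item>
#         <rasd:Description>Hard disk</rasd:Description>
#         <rasd:ElementName>New disk</rasd:ElementName>
#         <rasd:HostResource
#             xmlns:vcloud="http://www.vmware.com/vcloud/v1.5"
#             vcloud:capacity="10240"
#             vcloud:busSubType="lsilogic"
#             vcloud:busType="6"></rasd:HostResource>
#         <rasd:InstanceID>3</rasd:InstanceID>
#         <rasd:ResourceType>17</rasd:ResourceType>
#     </Item>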
7399 def add_existing_disk(
7400 self,
7401 catalogs=None,
7402 image_id=None,
7403 size=None,
7404 template_name=None,
7405 vapp_uuid=None,
7406 ):
7407 """
7408 Method to add existing disk to vm
7409 Args :
7410 catalogs - List of VDC catalogs
7411 image_id - Catalog ID
7412 template_name - Name of template in catalog
7413 vapp_uuid - UUID of vApp
7414 Returns:
7415 None
7416 """
7417 disk_info = None
7418 vcenter_conect, content = self.get_vcenter_content()
7419 # find moref-id of vm in image
7420 catalog_vm_info = self.get_vapp_template_details(
7421 catalogs=catalogs,
7422 image_id=image_id,
7423 )
7424
7425 if catalog_vm_info and "vm_vcenter_info" in catalog_vm_info:
7426 if "vm_moref_id" in catalog_vm_info["vm_vcenter_info"]:
7427 catalog_vm_moref_id = catalog_vm_info["vm_vcenter_info"].get(
7428 "vm_moref_id", None
7429 )
7430
7431 if catalog_vm_moref_id:
7432 self.logger.info(
7433 "Moref_id of VM in catalog : {}".format(catalog_vm_moref_id)
7434 )
7435 _, catalog_vm_obj = self.get_vm_obj(content, catalog_vm_moref_id)
7436
7437 if catalog_vm_obj:
7438 # find existing disk
7439 disk_info = self.find_disk(catalog_vm_obj)
7440 else:
7441 exp_msg = "No VM with image id {} found".format(image_id)
7442 self.rollback_newvm(vapp_uuid, exp_msg, exp_type="NotFound")
7443 else:
7444 exp_msg = "No Image found with image ID {} ".format(image_id)
7445 self.rollback_newvm(vapp_uuid, exp_msg, exp_type="NotFound")
7446
7447 if disk_info:
7448 self.logger.info("Existing disk_info : {}".format(disk_info))
7449 # get VM
7450 vm_moref_id = self.get_vm_moref_id(vapp_uuid)
7451 _, vm_obj = self.get_vm_obj(content, vm_moref_id)
7452
7453 if vm_obj:
7454 status = self.add_disk(
7455 vcenter_conect=vcenter_conect,
7456 vm=vm_obj,
7457 disk_info=disk_info,
7458 size=size,
7459 vapp_uuid=vapp_uuid,
7460 )
7461
7462 if status:
7463 self.logger.info(
7464 "Disk from image id {} added to {}".format(
7465 image_id, vm_obj.config.name
7466 )
7467 )
7468 else:
7469 msg = "No disk found with image id {} to add in VM {}".format(
7470 image_id, vm_obj.config.name
7471 )
7472 self.rollback_newvm(vapp_uuid, msg, exp_type="NotFound")
7473
7474 def find_disk(self, vm_obj):
7475 """
7476 Method to find details of existing disk in VM
7477 Args:
7478 vm_obj - vCenter object of VM
7479 Returns:
7480 disk_info : dict of disk details
7481 """
7482 disk_info = {}
7483 if vm_obj:
7484 try:
7485 devices = vm_obj.config.hardware.device
7486
7487 for device in devices:
7488 if type(device) is vim.vm.device.VirtualDisk:
7489 if isinstance(
7490 device.backing,
7491 vim.vm.device.VirtualDisk.FlatVer2BackingInfo,
7492 ) and hasattr(device.backing, "fileName"):
7493 disk_info["full_path"] = device.backing.fileName
7494 disk_info["datastore"] = device.backing.datastore
7495 disk_info["capacityKB"] = device.capacityInKB
7496 break
7497 except Exception as exp:
7498 self.logger.error(
7499 "find_disk() : exception occurred while "
7500 "getting existing disk details :{}".format(exp)
7501 )
7502
7503 return disk_info
7504
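# Shape of the dict returned by find_disk() above for a 10 GB flat VMDK
# (values are illustrative; "datastore" is a live vim.Datastore managed
# object, not a string):
#
#     {
#         "full_path": "[datastore1] template-vm/template-vm.vmdk",
#         "datastore": <vim.Datastore object>,
#         "capacityKB": 10485760,  # 10 GB = 10 * 1024 * 1024 KB
#     }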
7505 def add_disk(
7506 self, vcenter_conect=None, vm=None, size=None, vapp_uuid=None, disk_info={}
7507 ):
7508 """
7509 Method to add existing disk in VM
7510 Args :
7511 vcenter_conect - vCenter content object
7512 vm - vCenter vm object
7513 disk_info : dict of disk details
7514 Returns:
7515 status : status of add disk task
7516 """
7517 datastore = disk_info["datastore"] if "datastore" in disk_info else None
7518 fullpath = disk_info["full_path"] if "full_path" in disk_info else None
7519 capacityKB = disk_info["capacityKB"] if "capacityKB" in disk_info else None
7520 if size is not None:
7521 # Convert size from GB to KB
7522 sizeKB = int(size) * 1024 * 1024
7523 # compare size of existing disk and user given size; assign whichever is greater
7524 self.logger.info(
7525 "Add Existing disk : sizeKB {} , capacityKB {}".format(
7526 sizeKB, capacityKB
7527 )
7528 )
7529
7530 if sizeKB > capacityKB:
7531 capacityKB = sizeKB
7532
7533 if datastore and fullpath and capacityKB:
7534 try:
7535 spec = vim.vm.ConfigSpec()
7536 # get all disks on a VM, set unit_number to the next available
7537 unit_number = 0
7538 for dev in vm.config.hardware.device:
7539 if hasattr(dev.backing, "fileName"):
7540 unit_number = int(dev.unitNumber) + 1
7541 # unit_number 7 reserved for scsi controller
7542
7543 if unit_number == 7:
7544 unit_number += 1
7545
7546 if isinstance(dev, vim.vm.device.VirtualDisk):
7547 # vim.vm.device.VirtualSCSIController
7548 controller_key = dev.controllerKey
7549
7550 self.logger.info(
7551 "Add Existing disk : unit number {} , controller key {}".format(
7552 unit_number, controller_key
7553 )
7554 )
7555 # add disk here
7556 dev_changes = []
7557 disk_spec = vim.vm.device.VirtualDeviceSpec()
7558 disk_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
7559 disk_spec.device = vim.vm.device.VirtualDisk()
7560 disk_spec.device.backing = (
7561 vim.vm.device.VirtualDisk.FlatVer2BackingInfo()
7562 )
7563 disk_spec.device.backing.thinProvisioned = True
7564 disk_spec.device.backing.diskMode = "persistent"
7565 disk_spec.device.backing.datastore = datastore
7566 disk_spec.device.backing.fileName = fullpath
7567
7568 disk_spec.device.unitNumber = unit_number
7569 disk_spec.device.capacityInKB = capacityKB
7570 disk_spec.device.controllerKey = controller_key
7571 dev_changes.append(disk_spec)
7572 spec.deviceChange = dev_changes
7573 task = vm.ReconfigVM_Task(spec=spec)
7574 status = self.wait_for_vcenter_task(task, vcenter_conect)
7575
7576 return status
7577 except Exception as exp:
7578 exp_msg = (
7579 "add_disk() : exception {} occurred while adding disk "
7580 "{} to vm {}".format(exp, fullpath, vm.config.name)
7581 )
7582 self.rollback_newvm(vapp_uuid, exp_msg)
7583 else:
7584 msg = "add_disk() : Can not add disk to VM with disk info {} ".format(
7585 disk_info
7586 )
7587 self.rollback_newvm(vapp_uuid, msg)
7588
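# Minimal sketch of the unit-number selection used in add_disk() above: the
# new disk goes into the slot after the highest file-backed device, skipping
# unit 7, which is reserved for the SCSI controller itself.
#
#     def _next_unit_number(vm):
#         unit_number = 0
#         for dev in vm.config.hardware.device:
#             if hasattr(dev.backing, "fileName"):
#                 unit_number = int(dev.unitNumber) + 1
#                 if unit_number == 7:
#                     unit_number += 1
#         return unit_number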
7589 def get_vcenter_content(self):
7590 """
7591 Get the vsphere content object
7592 """
7593 try:
7594 vm_vcenter_info = self.get_vm_vcenter_info()
7595 except Exception as exp:
7596 self.logger.error(
7597 "Error occurred while getting vCenter infromationn"
7598 " for VM : {}".format(exp)
7599 )
7600
7601 raise vimconn.VimConnException(message=exp)
7602
7603 context = None
7604 if hasattr(ssl, "_create_unverified_context"):
7605 context = ssl._create_unverified_context()
7606
7607 vcenter_conect = SmartConnect(
7608 host=vm_vcenter_info["vm_vcenter_ip"],
7609 user=vm_vcenter_info["vm_vcenter_user"],
7610 pwd=vm_vcenter_info["vm_vcenter_password"],
7611 port=int(vm_vcenter_info["vm_vcenter_port"]),
7612 sslContext=context,
7613 )
7614 atexit.register(Disconnect, vcenter_conect)
7615 content = vcenter_conect.RetrieveContent()
7616
7617 return vcenter_conect, content
7618
7619 def get_vm_moref_id(self, vapp_uuid):
7620 """
7621 Get the moref_id of given VM
7622 """
7623 try:
7624 if vapp_uuid:
7625 vm_details = self.get_vapp_details_rest(
7626 vapp_uuid, need_admin_access=True
7627 )
7628
7629 if vm_details and "vm_vcenter_info" in vm_details:
7630 vm_moref_id = vm_details["vm_vcenter_info"].get("vm_moref_id", None)
7631
7632 return vm_moref_id
7633 except Exception as exp:
7634 self.logger.error(
7635 "Error occurred while getting VM moref ID " " for VM : {}".format(exp)
7636 )
7637
7638 return None
7639
7640 def get_vapp_template_details(
7641 self, catalogs=None, image_id=None, template_name=None
7642 ):
7643 """
7644 Method to get vApp template details
7645 Args :
7646 catalogs - list of VDC catalogs
7647 image_id - Catalog ID to find
7648 template_name : template name in catalog
7649 Returns:
7650 parsed_response : dict of vApp template details
7651 """
7652 parsed_response = {}
7653
7654 vca = self.connect_as_admin()
7655 if not vca:
7656 raise vimconn.VimConnConnectionException("Failed to connect vCD")
7657
7658 try:
7659 org, _ = self.get_vdc_details()
7660 catalog = self.get_catalog_obj(image_id, catalogs)
7661 if catalog:
7662 items = org.get_catalog_item(catalog.get("name"), catalog.get("name"))
7663 catalog_items = [items.attrib]
7664
7665 if len(catalog_items) == 1:
7666 headers = {
7667 "Accept": "application/*+xml;version=" + API_VERSION,
7668 "x-vcloud-authorization": vca._session.headers[
7669 "x-vcloud-authorization"
7670 ],
7671 }
7672 response = self.perform_request(
7673 req_type="GET",
7674 url=catalog_items[0].get("href"),
7675 headers=headers,
7676 )
7677 catalogItem = XmlElementTree.fromstring(response.text)
7678 entity = [
7679 child
7680 for child in catalogItem
7681 if child.get("type")
7682 == "application/vnd.vmware.vcloud.vAppTemplate+xml"
7683 ][0]
7684 vapp_template_href = entity.get("href")
7685 # get vapp details and parse moref id
7686
7687 namespaces = {
7688 "vssd": "http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData",
7689 "ovf": "http://schemas.dmtf.org/ovf/envelope/1",
7690 "vmw": "http://www.vmware.com/schema/ovf",
7691 "vm": "http://www.vmware.com/vcloud/v1.5",
7692 "rasd": "http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData",
7693 "vmext": "http://www.vmware.com/vcloud/extension/v1.5",
7694 "xmlns": "http://www.vmware.com/vcloud/v1.5",
7695 }
7696
7697 if vca._session:
7698 response = self.perform_request(
7699 req_type="GET", url=vapp_template_href, headers=headers
7700 )
7701
7702 if response.status_code != requests.codes.ok:
7703 self.logger.debug(
7704 "REST API call {} failed. Return status code {}".format(
7705 vapp_template_href, response.status_code
7706 )
7707 )
7708 else:
7709 xmlroot_respond = XmlElementTree.fromstring(response.text)
7710 children_section = xmlroot_respond.find(
7711 "vm:Children/", namespaces
7712 )
7713
7714 if children_section is not None:
7715 vCloud_extension_section = children_section.find(
7716 "xmlns:VCloudExtension", namespaces
7717 )
7718
7719 if vCloud_extension_section is not None:
7720 vm_vcenter_info = {}
7721 vim_info = vCloud_extension_section.find(
7722 "vmext:VmVimInfo", namespaces
7723 )
7724 vmext = vim_info.find(
7725 "vmext:VmVimObjectRef", namespaces
7726 )
7727
7728 if vmext is not None:
7729 vm_vcenter_info["vm_moref_id"] = vmext.find(
7730 "vmext:MoRef", namespaces
7731 ).text
7732
7733 parsed_response["vm_vcenter_info"] = vm_vcenter_info
7734 except Exception as exp:
7735 self.logger.info(
7736 "Error occurred calling rest api for getting vApp details {}".format(
7737 exp
7738 )
7739 )
7740
7741 return parsed_response
7742
7743 def rollback_newvm(self, vapp_uuid, msg, exp_type="Genric"):
7744 """
7745 Method to delete vApp
7746 Args :
7747 vapp_uuid - vApp UUID
7748 msg - Error message to be logged
7749 exp_type : Exception type
7750 Returns:
7751 None
7752 """
7753 if vapp_uuid:
7754 self.delete_vminstance(vapp_uuid)
7755 else:
7756 msg = "No vApp ID"
7757
7758 self.logger.error(msg)
7759
7760 if exp_type == "Genric":
7761 raise vimconn.VimConnException(msg)
7762 elif exp_type == "NotFound":
7763 raise vimconn.VimConnNotFoundException(message=msg)
7764
7765 def add_sriov(self, vapp_uuid, sriov_nets, vmname_andid):
7766 """
7767 Method to attach SRIOV adapters to VM
7768
7769 Args:
7770 vapp_uuid - uuid of vApp/VM
7771 sriov_nets - SRIOV devices information as specified in VNFD (flavor)
7772 vmname_andid - vmname
7773
7774 Returns:
7775 The status of the add SRIOV adapter task, vm object and
7776 vcenter_conect object
7777 """
7778 vm_obj = None
7779 vcenter_conect, content = self.get_vcenter_content()
7780 vm_moref_id = self.get_vm_moref_id(vapp_uuid)
7781
7782 if vm_moref_id:
7783 try:
7784 no_of_sriov_devices = len(sriov_nets)
7785 if no_of_sriov_devices > 0:
7786 # Get VM and its host
7787 host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
7788 self.logger.info(
7789 "VM {} is currently on host {}".format(vm_obj, host_obj)
7790 )
7791
7792 if host_obj and vm_obj:
7793 # get SRIOV devices from host on which vApp is currently installed
7794 avilable_sriov_devices = self.get_sriov_devices(
7795 host_obj,
7796 no_of_sriov_devices,
7797 )
7798
7799 if len(avilable_sriov_devices) == 0:
7800 # find other hosts with active pci devices
7801 (
7802 new_host_obj,
7803 avilable_sriov_devices,
7804 ) = self.get_host_and_sriov_devices(
7805 content,
7806 no_of_sriov_devices,
7807 )
7808
7809 if (
7810 new_host_obj is not None
7811 and len(avilable_sriov_devices) > 0
7812 ):
7813 # Migrate vm to the host where SRIOV devices are available
7814 self.logger.info(
7815 "Relocate VM {} on new host {}".format(
7816 vm_obj, new_host_obj
7817 )
7818 )
7819 task = self.relocate_vm(new_host_obj, vm_obj)
7820
7821 if task is not None:
7822 result = self.wait_for_vcenter_task(
7823 task, vcenter_conect
7824 )
7825 self.logger.info(
7826 "Migrate VM status: {}".format(result)
7827 )
7828 host_obj = new_host_obj
7829 else:
7830 self.logger.info(
7831 "Fail to migrate VM : {}".format(result)
7832 )
7833
7834 raise vimconn.VimConnNotFoundException(
7835 "Fail to migrate VM : {} to host {}".format(
7836 vmname_andid, new_host_obj
7837 )
7838 )
7839
7840 if (
7841 host_obj is not None
7842 and avilable_sriov_devices is not None
7843 and len(avilable_sriov_devices) > 0
7844 ):
7845 # Add SRIOV devices one by one
7846 for sriov_net in sriov_nets:
7847 network_name = sriov_net.get("net_id")
7848 self.create_dvPort_group(network_name)
7849
7850 if (
7851 sriov_net.get("type") == "VF"
7852 or sriov_net.get("type") == "SR-IOV"
7853 ):
7854 # add vlan ID, modify portgroup for vlan ID
7855 self.configure_vlanID(
7856 content, vcenter_conect, network_name
7857 )
7858
7859 task = self.add_sriov_to_vm(
7860 content,
7861 vm_obj,
7862 host_obj,
7863 network_name,
7864 avilable_sriov_devices[0],
7865 )
7866
7867 if task:
7868 status = self.wait_for_vcenter_task(
7869 task, vcenter_conect
7870 )
7871
7872 if status:
7873 self.logger.info(
7874 "Added SRIOV {} to VM {}".format(
7875 no_of_sriov_devices, str(vm_obj)
7876 )
7877 )
7878 else:
7879 self.logger.error(
7880 "Fail to add SRIOV {} to VM {}".format(
7881 no_of_sriov_devices, str(vm_obj)
7882 )
7883 )
7884
7885 raise vimconn.VimConnUnexpectedResponse(
7886 "Fail to add SRIOV adapter in VM {}".format(
7887 str(vm_obj)
7888 )
7889 )
7890
7891 return True, vm_obj, vcenter_conect
7892 else:
7893 self.logger.error(
7894 "Currently there is no host with"
7895 " {} number of avaialble SRIOV "
7896 "VFs required for VM {}".format(
7897 no_of_sriov_devices, vmname_andid
7898 )
7899 )
7900
7901 raise vimconn.VimConnNotFoundException(
7902 "Currently there is no host with {} "
7903 "number of avaialble SRIOV devices required for VM {}".format(
7904 no_of_sriov_devices, vmname_andid
7905 )
7906 )
7907 else:
7908 self.logger.debug(
7909 "No infromation about SRIOV devices {} ", sriov_nets
7910 )
7911 except vmodl.MethodFault as error:
7912 self.logger.error("Error occurred while adding SRIOV: {}".format(error))
7913
7914 return None, vm_obj, vcenter_conect
7915
7916 def get_sriov_devices(self, host, no_of_vfs):
7917 """
7918 Method to get the details of SRIOV devices on given host
7919 Args:
7920 host - vSphere host object
7921 no_of_vfs - number of VFs needed on host
7922
7923 Returns:
7924 array of SRIOV devices
7925 """
7926 sriovInfo = []
7927
7928 if host:
7929 for device in host.config.pciPassthruInfo:
7930 if isinstance(device, vim.host.SriovInfo) and device.sriovActive:
7931 if device.numVirtualFunction >= no_of_vfs:
7932 sriovInfo.append(device)
7933 break
7934
7935 return sriovInfo
7936
7937 def get_host_and_sriov_devices(self, content, no_of_vfs):
7938 """
7939 Method to get the details of SRIOV devices on all hosts
7940
7941 Args:
7942 content - vSphere host object
7943 no_of_vfs - number of pci VFs needed on host
7944
7945 Returns:
7946 array of SRIOV devices and host object
7947 """
7948 host_obj = None
7949 sriov_device_objs = None
7950
7951 try:
7952 if content:
7953 container = content.viewManager.CreateContainerView(
7954 content.rootFolder, [vim.HostSystem], True
7955 )
7956
7957 for host in container.view:
7958 devices = self.get_sriov_devices(host, no_of_vfs)
7959
7960 if devices:
7961 host_obj = host
7962 sriov_device_objs = devices
7963 break
7964 except Exception as exp:
7965 self.logger.error(
7966 "Error {} occurred while finding SRIOV devices on host: {}".format(
7967 exp, host_obj
7968 )
7969 )
7970
7971 return host_obj, sriov_device_objs
7972
7973 def add_sriov_to_vm(self, content, vm_obj, host_obj, network_name, sriov_device):
7974 """
7975 Method to add SRIOV adapter to vm
7976
7977 Args:
7978 host_obj - vSphere host object
7979 vm_obj - vSphere vm object
7980 content - vCenter content object
7981 network_name - name of distributed virtual portgroup
7982 sriov_device - SRIOV device info
7983
7984 Returns:
7985 task object
7986 """
7987 devices = []
7988 vnic_label = "sriov nic"
7989
7990 try:
7991 dvs_portgr = self.get_dvport_group(network_name)
7992 network_name = dvs_portgr.name
7993 nic = vim.vm.device.VirtualDeviceSpec()
7994 # VM device
7995 nic.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
7996 nic.device = vim.vm.device.VirtualSriovEthernetCard()
7997 nic.device.addressType = "assigned"
7998 # nic.device.key = 13016
7999 nic.device.deviceInfo = vim.Description()
8000 nic.device.deviceInfo.label = vnic_label
8001 nic.device.deviceInfo.summary = network_name
8002 nic.device.backing = vim.vm.device.VirtualEthernetCard.NetworkBackingInfo()
8003
8004 nic.device.backing.network = self.get_obj(
8005 content, [vim.Network], network_name
8006 )
8007 nic.device.backing.deviceName = network_name
8008 nic.device.backing.useAutoDetect = False
8009 nic.device.connectable = vim.vm.device.VirtualDevice.ConnectInfo()
8010 nic.device.connectable.startConnected = True
8011 nic.device.connectable.allowGuestControl = True
8012
8013 nic.device.sriovBacking = (
8014 vim.vm.device.VirtualSriovEthernetCard.SriovBackingInfo()
8015 )
8016 nic.device.sriovBacking.physicalFunctionBacking = (
8017 vim.vm.device.VirtualPCIPassthrough.DeviceBackingInfo()
8018 )
8019 nic.device.sriovBacking.physicalFunctionBacking.id = sriov_device.id
8020
8021 devices.append(nic)
8022 vmconf = vim.vm.ConfigSpec(deviceChange=devices)
8023 task = vm_obj.ReconfigVM_Task(vmconf)
8024
8025 return task
8026 except Exception as exp:
8027 self.logger.error(
8028 "Error {} occurred while adding SRIOV adapter in VM: {}".format(
8029 exp, vm_obj
8030 )
8031 )
8032
8033 return None
8034
8035 def create_dvPort_group(self, network_name):
8036 """
8037 Method to create distributed virtual portgroup
8038
8039 Args:
8040 network_name - name of network/portgroup
8041
8042 Returns:
8043 portgroup key
8044 """
8045 try:
8046 new_network_name = [network_name, "-", str(uuid.uuid4())]
8047 network_name = "".join(new_network_name)
8048 vcenter_conect, content = self.get_vcenter_content()
8049
8050 dv_switch = self.get_obj(
8051 content, [vim.DistributedVirtualSwitch], self.dvs_name
8052 )
8053
8054 if dv_switch:
8055 dv_pg_spec = vim.dvs.DistributedVirtualPortgroup.ConfigSpec()
8056 dv_pg_spec.name = network_name
8057
8058 dv_pg_spec.type = (
8059 vim.dvs.DistributedVirtualPortgroup.PortgroupType.earlyBinding
8060 )
8061 dv_pg_spec.defaultPortConfig = (
8062 vim.dvs.VmwareDistributedVirtualSwitch.VmwarePortConfigPolicy()
8063 )
8064 dv_pg_spec.defaultPortConfig.securityPolicy = (
8065 vim.dvs.VmwareDistributedVirtualSwitch.SecurityPolicy()
8066 )
8067 dv_pg_spec.defaultPortConfig.securityPolicy.allowPromiscuous = (
8068 vim.BoolPolicy(value=False)
8069 )
8070 dv_pg_spec.defaultPortConfig.securityPolicy.forgedTransmits = (
8071 vim.BoolPolicy(value=False)
8072 )
8073 dv_pg_spec.defaultPortConfig.securityPolicy.macChanges = vim.BoolPolicy(
8074 value=False
8075 )
8076
8077 task = dv_switch.AddDVPortgroup_Task([dv_pg_spec])
8078 self.wait_for_vcenter_task(task, vcenter_conect)
8079
8080 dvPort_group = self.get_obj(
8081 content, [vim.dvs.DistributedVirtualPortgroup], network_name
8082 )
8083
8084 if dvPort_group:
8085 self.logger.info(
8086 "Created disributed virtaul port group: {}".format(dvPort_group)
8087 )
8088 return dvPort_group.key
8089 else:
8090 self.logger.debug(
8091 "No disributed virtual switch found with name {}".format(
8092 network_name
8093 )
8094 )
8095
8096 except Exception as exp:
8097 self.logger.error(
8098 "Error occurred while creating disributed virtaul port group {}"
8099 " : {}".format(network_name, exp)
8100 )
8101
8102 return None
8103
8104 def reconfig_portgroup(self, content, dvPort_group_name, config_info={}):
8105 """
8106 Method to reconfigure distributed virtual portgroup
8107
8108 Args:
8109 dvPort_group_name - name of distributed virtual portgroup
8110 content - vCenter content object
8111 config_info - distributed virtual portgroup configuration
8112
8113 Returns:
8114 task object
8115 """
8116 try:
8117 dvPort_group = self.get_dvport_group(dvPort_group_name)
8118
8119 if dvPort_group:
8120 dv_pg_spec = vim.dvs.DistributedVirtualPortgroup.ConfigSpec()
8121 dv_pg_spec.configVersion = dvPort_group.config.configVersion
8122 dv_pg_spec.defaultPortConfig = (
8123 vim.dvs.VmwareDistributedVirtualSwitch.VmwarePortConfigPolicy()
8124 )
8125
8126 if "vlanID" in config_info:
8127 dv_pg_spec.defaultPortConfig.vlan = (
8128 vim.dvs.VmwareDistributedVirtualSwitch.VlanIdSpec()
8129 )
8130 dv_pg_spec.defaultPortConfig.vlan.vlanId = config_info.get("vlanID")
8131
8132 task = dvPort_group.ReconfigureDVPortgroup_Task(spec=dv_pg_spec)
8133
8134 return task
8135 else:
8136 return None
8137 except Exception as exp:
8138 self.logger.error(
8139 "Error occurred while reconfiguraing disributed virtaul port group {}"
8140 " : {}".format(dvPort_group_name, exp)
8141 )
8142
8143 return None
8144
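# Illustrative call to reconfig_portgroup() above; the only key currently
# honoured in config_info is "vlanID" (the portgroup name and VLAN id below
# are example values):
#
#     task = self.reconfig_portgroup(
#         content, "sriov-net-pg", config_info={"vlanID": 3001}
#     )
#     if task:
#         self.wait_for_vcenter_task(task, vcenter_conect)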
8145 def destroy_dvport_group(self, dvPort_group_name):
8146 """
8147 Method to destroy distributed virtual portgroup
8148
8149 Args:
8150 dvPort_group_name - name of distributed virtual portgroup
8151
8152 Returns:
8153 True if the portgroup was successfully deleted, else False
8154 """
8155 vcenter_conect, _ = self.get_vcenter_content()
8156
8157 try:
8158 status = None
8159 dvPort_group = self.get_dvport_group(dvPort_group_name)
8160
8161 if dvPort_group:
8162 task = dvPort_group.Destroy_Task()
8163 status = self.wait_for_vcenter_task(task, vcenter_conect)
8164
8165 return status
8166 except vmodl.MethodFault as exp:
8167 self.logger.error(
8168 "Caught vmodl fault {} while deleting disributed virtaul port group {}".format(
8169 exp, dvPort_group_name
8170 )
8171 )
8172
8173 return None
8174
8175 def get_dvport_group(self, dvPort_group_name):
8176 """
8177 Method to get distributed virtual portgroup
8178
8179 Args:
8180 dvPort_group_name - name of distributed virtual portgroup
8181
8182 Returns:
8183 portgroup object
8184 """
8185 _, content = self.get_vcenter_content()
8186 dvPort_group = None
8187
8188 try:
8189 container = content.viewManager.CreateContainerView(
8190 content.rootFolder, [vim.dvs.DistributedVirtualPortgroup], True
8191 )
8192
8193 for item in container.view:
8194 if item.key == dvPort_group_name:
8195 dvPort_group = item
8196 break
8197
8198 return dvPort_group
8199 except vmodl.MethodFault as exp:
8200 self.logger.error(
8201 "Caught vmodl fault {} for disributed virtual port group {}".format(
8202 exp, dvPort_group_name
8203 )
8204 )
8205
8206 return None
8207
8208 def get_vlanID_from_dvs_portgr(self, dvPort_group_name):
8209 """
8210 Method to get distributed virtual portgroup vlanID
8211
8212 Args:
8213 dvPort_group_name - name of distributed virtual portgroup
8214
8215 Returns:
8216 vlan ID
8217 """
8218 vlanId = None
8219
8220 try:
8221 dvPort_group = self.get_dvport_group(dvPort_group_name)
8222
8223 if dvPort_group:
8224 vlanId = dvPort_group.config.defaultPortConfig.vlan.vlanId
8225 except vmodl.MethodFault as exp:
8226 self.logger.error(
8227 "Caught vmodl fault {} for disributed virtaul port group {}".format(
8228 exp, dvPort_group_name
8229 )
8230 )
8231
8232 return vlanId
8233
8234 def configure_vlanID(self, content, vcenter_conect, dvPort_group_name):
8235 """
8236 Method to configure vlanID on a distributed virtual portgroup
8237
8238 Args:
8239 dvPort_group_name - name of distributed virtual portgroup
8240
8241 Returns:
8242 None
8243 """
8244 vlanID = self.get_vlanID_from_dvs_portgr(dvPort_group_name)
8245
8246 if vlanID == 0:
8247 # configure vlanID
8248 vlanID = self.genrate_vlanID(dvPort_group_name)
8249 config = {"vlanID": vlanID}
8250 task = self.reconfig_portgroup(
8251 content, dvPort_group_name, config_info=config
8252 )
8253
8254 if task:
8255 status = self.wait_for_vcenter_task(task, vcenter_conect)
8256
8257 if status:
8258 self.logger.info(
8259 "Reconfigured Port group {} for vlan ID {}".format(
8260 dvPort_group_name, vlanID
8261 )
8262 )
8263 else:
8264 self.logger.error(
8265 "Fail reconfigure portgroup {} for vlanID{}".format(
8266 dvPort_group_name, vlanID
8267 )
8268 )
8269
8270 def genrate_vlanID(self, network_name):
8271 """
8272 Method to get unused vlanID
8273 Args:
8274 network_name - name of network/portgroup
8275 Returns:
8276 vlanID
8277 """
8278 vlan_id = None
8279 used_ids = []
8280
8281 if self.config.get("vlanID_range") is None:
8282 raise vimconn.VimConnConflictException(
8283 "You must provide a 'vlanID_range' "
8284 "at config value before creating sriov network with vlan tag"
8285 )
8286
8287 if "used_vlanIDs" not in self.persistent_info:
8288 self.persistent_info["used_vlanIDs"] = {}
8289 else:
8290 used_ids = list(self.persistent_info["used_vlanIDs"].values())
8291
8292 for vlanID_range in self.config.get("vlanID_range"):
8293 start_vlanid, end_vlanid = map(int, vlanID_range.split("-"))
8294
8295 if start_vlanid > end_vlanid:
8296 raise vimconn.VimConnConflictException(
8297 "Invalid vlan ID range {}".format(vlanID_range)
8298 )
8299
8300 for vid in range(int(start_vlanid), int(end_vlanid) + 1):
8301 if vid not in used_ids:
8302 vlan_id = vid
8303 self.persistent_info["used_vlanIDs"][network_name] = vlan_id
8304 return vlan_id
8305
8306 if vlan_id is None:
8307 raise vimconn.VimConnConflictException("All Vlan IDs are in use")
8308
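# Example of the 'vlanID_range' VIM config consumed by genrate_vlanID()
# above: a list of "<start>-<end>" strings (the ranges below are examples).
# IDs are handed out sequentially and remembered per network in
# self.persistent_info["used_vlanIDs"].
#
#     self.config["vlanID_range"] = ["3000-3100", "3200-3250"]
#     vlan_id = self.genrate_vlanID("sriov-net-pg")  # e.g. returns 3000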
8309 def get_obj(self, content, vimtype, name):
8310 """
8311 Get the vsphere object associated with a given text name
8312 """
8313 obj = None
8314 container = content.viewManager.CreateContainerView(
8315 content.rootFolder, vimtype, True
8316 )
8317
8318 for item in container.view:
8319 if item.name == name:
8320 obj = item
8321 break
8322
8323 return obj
8324
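# Illustrative lookups with get_obj() above (the portgroup name is an
# example; the DVS lookup mirrors the one in create_dvPort_group()):
#
#     net = self.get_obj(content, [vim.Network], "sriov-dvs-pg")
#     dvs = self.get_obj(content, [vim.DistributedVirtualSwitch], self.dvs_name)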
8325 def insert_media_to_vm(self, vapp, image_id):
8326 """
8327 Method to insert media CD-ROM (ISO image) from catalog to vm.
8328 vapp - vapp object to get vm id
8329 image_id - image id of the CD-ROM (ISO) to be inserted into the vm
8330 """
8331 # create connection object
8332 vca = self.connect()
8333 try:
8334 # fetching catalog details
8335 rest_url = "{}/api/catalog/{}".format(self.url, image_id)
8336
8337 if vca._session:
8338 headers = {
8339 "Accept": "application/*+xml;version=" + API_VERSION,
8340 "x-vcloud-authorization": vca._session.headers[
8341 "x-vcloud-authorization"
8342 ],
8343 }
8344 response = self.perform_request(
8345 req_type="GET", url=rest_url, headers=headers
8346 )
8347
8348 if response.status_code != 200:
8349 self.logger.error(
8350 "REST call {} failed reason : {}"
8351 "status code : {}".format(
8352 rest_url, response.text, response.status_code
8353 )
8354 )
8355
8356 raise vimconn.VimConnException(
8357 "insert_media_to_vm(): Failed to get " "catalog details"
8358 )
8359
8360 # searching iso name and id
8361 iso_name, media_id = self.get_media_details(vca, response.text)
8362
8363 if iso_name and media_id:
8364 data = """<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
8365 <ns6:MediaInsertOrEjectParams
8366 xmlns="http://www.vmware.com/vcloud/versions" xmlns:ns2="http://schemas.dmtf.org/ovf/envelope/1"
8367 xmlns:ns3="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData"
8368 xmlns:ns4="http://schemas.dmtf.org/wbem/wscim/1/common"
8369 xmlns:ns5="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData"
8370 xmlns:ns6="http://www.vmware.com/vcloud/v1.5"
8371 xmlns:ns7="http://www.vmware.com/schema/ovf"
8372 xmlns:ns8="http://schemas.dmtf.org/ovf/environment/1"
8373 xmlns:ns9="http://www.vmware.com/vcloud/extension/v1.5">
8374 <ns6:Media
8375 type="application/vnd.vmware.vcloud.media+xml"
8376 name="{}"
8377 id="urn:vcloud:media:{}"
8378 href="https://{}/api/media/{}"/>
8379 </ns6:MediaInsertOrEjectParams>""".format(
8380 iso_name, media_id, self.url, media_id
8381 )
8382
8383 for vms in vapp.get_all_vms():
8384 vm_id = vms.get("id").split(":")[-1]
8385
8386 headers[
8387 "Content-Type"
8388 ] = "application/vnd.vmware.vcloud.mediaInsertOrEjectParams+xml"
8389 rest_url = "{}/api/vApp/vm-{}/media/action/insertMedia".format(
8390 self.url, vm_id
8391 )
8392
8393 response = self.perform_request(
8394 req_type="POST", url=rest_url, data=data, headers=headers
8395 )
8396
8397 if response.status_code != 202:
8398 error_msg = (
8399 "insert_media_to_vm() : Failed to insert CD-ROM to vm. Reason {}. "
8400 "Status code {}".format(response.text, response.status_code)
8401 )
8402 self.logger.error(error_msg)
8403
8404 raise vimconn.VimConnException(error_msg)
8405 else:
8406 task = self.get_task_from_response(response.text)
8407 result = self.client.get_task_monitor().wait_for_success(
8408 task=task
8409 )
8410
8411 if result.get("status") == "success":
8412 self.logger.info(
8413 "insert_media_to_vm(): Sucessfully inserted media ISO"
8414 " image to vm {}".format(vm_id)
8415 )
8416 except Exception as exp:
8417 self.logger.error(
8418 "insert_media_to_vm() : exception occurred "
8419 "while inserting media CD-ROM"
8420 )
8421
8422 raise vimconn.VimConnException(message=exp)
8423
8424 def get_media_details(self, vca, content):
8425 """
8426 Method to get catalog item details
8427 vca - connection object
8428 content - Catalog details
8429 Return - Media name, media id
8430 """
8431 cataloghref_list = []
8432 try:
8433 if content:
8434 vm_list_xmlroot = XmlElementTree.fromstring(content)
8435
8436 for child in vm_list_xmlroot.iter():
8437 if "CatalogItem" in child.tag:
8438 cataloghref_list.append(child.attrib.get("href"))
8439
8440 if cataloghref_list is not None:
8441 for href in cataloghref_list:
8442 if href:
8443 headers = {
8444 "Accept": "application/*+xml;version=" + API_VERSION,
8445 "x-vcloud-authorization": vca._session.headers[
8446 "x-vcloud-authorization"
8447 ],
8448 }
8449 response = self.perform_request(
8450 req_type="GET", url=href, headers=headers
8451 )
8452
8453 if response.status_code != 200:
8454 self.logger.error(
8455 "REST call {} failed reason : {}"
8456 "status code : {}".format(
8457 href, response.text, response.status_code
8458 )
8459 )
8460
8461 raise vimconn.VimConnException(
8462 "get_media_details : Failed to get "
8463 "catalogitem details"
8464 )
8465
8466 list_xmlroot = XmlElementTree.fromstring(response.text)
8467
8468 for child in list_xmlroot.iter():
8469 if "Entity" in child.tag:
8470 if "media" in child.attrib.get("href"):
8471 name = child.attrib.get("name")
8472 media_id = (
8473 child.attrib.get("href").split("/").pop()
8474 )
8475
8476 return name, media_id
8477 else:
8478 self.logger.debug("Media name and id not found")
8479
8480 return False, False
8481 except Exception as exp:
8482 self.logger.error(
8483 "get_media_details : exception occurred " "getting media details"
8484 )
8485
8486 raise vimconn.VimConnException(message=exp)
8487
8488 def retry_rest(self, method, url, add_headers=None, data=None):
8489 """Method to get Token & retry respective REST request
8490 Args:
8491 method - HTTP method, one of 'GET', 'PUT', 'POST' or 'DELETE'
8492 url - request url to be used
8493 add_headers - Additional headers (optional)
8494 data - Request payload data to be passed in request
8495 Returns:
8496 response - Response of request
8497 """
8498 response = None
8499
8500 # Get token
8501 self.get_token()
8502
8503 if self.client._session:
8504 headers = {
8505 "Accept": "application/*+xml;version=" + API_VERSION,
8506 "x-vcloud-authorization": self.client._session.headers[
8507 "x-vcloud-authorization"
8508 ],
8509 }
8510
8511 if add_headers:
8512 headers.update(add_headers)
8513
8514 if method == "GET":
8515 response = self.perform_request(req_type="GET", url=url, headers=headers)
8516 elif method == "PUT":
8517 response = self.perform_request(
8518 req_type="PUT", url=url, headers=headers, data=data
8519 )
8520 elif method == "POST":
8521 response = self.perform_request(
8522 req_type="POST", url=url, headers=headers, data=data
8523 )
8524 elif method == "DELETE":
8525 response = self.perform_request(req_type="DELETE", url=url, headers=headers)
8526
8527 return response
8528
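# Typical retry pattern used by the REST helpers in this connector (see e.g.
# add_new_disk_rest() above): on a 403 the token is refreshed and the call
# is replayed through retry_rest().
#
#     response = self.perform_request(req_type="GET", url=disk_href, headers=headers)
#     if response.status_code == 403:
#         response = self.retry_rest("GET", disk_href)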
8529 def get_token(self):
8530 """Generate a new token if expired
8531
8532 Returns:
8533 The refreshed client object is stored in self.client and can later be used to connect to vCloud Director as admin for the VDC
8534 """
8535 self.client = self.connect()
8536
8537 def get_vdc_details(self):
8538 """Get VDC details using pyVcloud Lib
8539
8540 Returns org and vdc object
8541 """
8542 vdc = None
8543
8544 try:
8545 org = Org(self.client, resource=self.client.get_org())
8546 vdc = org.get_vdc(self.tenant_name)
8547 except Exception as e:
8548 # pyvcloud does not raise a specific exception; refresh the token nevertheless
8549 self.logger.debug("Received exception {}, refreshing token ".format(str(e)))
8550
8551 # Retry once, if failed by refreshing token
8552 if vdc is None:
8553 self.get_token()
8554 org = Org(self.client, resource=self.client.get_org())
8555 vdc = org.get_vdc(self.tenant_name)
8556
8557 return org, vdc
8558
8559 def perform_request(self, req_type, url, headers=None, data=None):
8560 """Perform the POST/PUT/GET/DELETE request."""
8561 # Log REST request details
8562 self.log_request(req_type, url=url, headers=headers, data=data)
8563 # perform request and return its result
8564
8565 if req_type == "GET":
8566 response = requests.get(url=url, headers=headers, verify=False)
8567 elif req_type == "PUT":
8568 response = requests.put(url=url, headers=headers, data=data, verify=False)
8569 elif req_type == "POST":
8570 response = requests.post(url=url, headers=headers, data=data, verify=False)
8571 elif req_type == "DELETE":
8572 response = requests.delete(url=url, headers=headers, verify=False)
8573
8574 # Log the REST response
8575 self.log_response(response)
8576
8577 return response
8578
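# Illustrative perform_request() call with the header pattern used
# throughout this module (rest_url stands for whatever vCD API URL is being
# called); TLS verification is intentionally disabled (verify=False) for the
# vCloud Director endpoint.
#
#     headers = {
#         "Accept": "application/*+xml;version=" + API_VERSION,
#         "x-vcloud-authorization": self.client._session.headers[
#             "x-vcloud-authorization"
#         ],
#     }
#     response = self.perform_request(req_type="GET", url=rest_url, headers=headers)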
8579 def log_request(self, req_type, url=None, headers=None, data=None):
8580 """Logs REST request details"""
8581
8582 if req_type is not None:
8583 self.logger.debug("Request type: {}".format(req_type))
8584
8585 if url is not None:
8586 self.logger.debug("Request url: {}".format(url))
8587
8588 if headers is not None:
8589 for header in headers:
8590 self.logger.debug(
8591 "Request header: {}: {}".format(header, headers[header])
8592 )
8593
8594 if data is not None:
8595 self.logger.debug("Request data: {}".format(data))
8596
8597 def log_response(self, response):
8598 """Logs REST response details"""
8599
8600 self.logger.debug("Response status code: {} ".format(response.status_code))
8601
8602 def get_task_from_response(self, content):
8603 """
8604 content - API response.text(response.text)
8605 return task object
8606 """
8607 xmlroot = XmlElementTree.fromstring(content)
8608
8609 if xmlroot.tag.split("}")[1] == "Task":
8610 return xmlroot
8611 else:
8612 for ele in xmlroot:
8613 if ele.tag.split("}")[1] == "Tasks":
8614 task = ele[0]
8615 break
8616
8617 return task
8618
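# get_task_from_response() above accepts either a bare <Task> document or a
# response whose root wraps a <Tasks> element; a trimmed, hypothetical
# example of the second form:
#
#     <VApp xmlns="http://www.vmware.com/vcloud/v1.5">
#         <Tasks>
#             <Task operation="vdcInstantiateVapp" status="running"/>
#         </Tasks>
#     </VApp>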
8619 def power_on_vapp(self, vapp_id, vapp_name):
8620 """
8621 vapp_id - vApp uuid
8622 vapp_name - vApp name
8623 return - Task object
8624 """
8625 headers = {
8626 "Accept": "application/*+xml;version=" + API_VERSION,
8627 "x-vcloud-authorization": self.client._session.headers[
8628 "x-vcloud-authorization"
8629 ],
8630 }
8631
8632 poweron_href = "{}/api/vApp/vapp-{}/power/action/powerOn".format(
8633 self.url, vapp_id
8634 )
8635 response = self.perform_request(
8636 req_type="POST", url=poweron_href, headers=headers
8637 )
8638
8639 if response.status_code != 202:
8640 self.logger.error(
8641 "REST call {} failed reason : {}"
8642 "status code : {} ".format(
8643 poweron_href, response.text, response.status_code
8644 )
8645 )
8646
8647 raise vimconn.VimConnException(
8648 "power_on_vapp() : Failed to power on " "vApp {}".format(vapp_name)
8649 )
8650 else:
8651 poweron_task = self.get_task_from_response(response.text)
8652
8653 return poweron_task
8654
8655 def migrate_instance(self, vm_id, compute_host=None):
8656 """
8657 Migrate a vdu
8658 param:
8659 vm_id: ID of an instance
8660 compute_host: Host to migrate the vdu to
8661 """
8662 # TODO: Add support for migration
8663 raise vimconn.VimConnNotImplemented("Should have implemented this")
8664
8665 def resize_instance(self, vm_id, flavor_id=None):
8666 """
8667 resize a vdu
8668 param:
8669 vm_id: ID of an instance
8670 flavor_id: flavor_id to resize the vdu to
8671 """
8672 # TODO: Add support for resize
8673 raise vimconn.VimConnNotImplemented("Should have implemented this")