RO-VIM-vmware/osm_rovim_vmware/vimconn_vmware.py
1 # -*- coding: utf-8 -*-
2
3 # #
4 # Copyright 2016-2019 VMware Inc.
5 # This file is part of ETSI OSM
6 # All Rights Reserved.
7 #
8 # Licensed under the Apache License, Version 2.0 (the "License"); you may
9 # not use this file except in compliance with the License. You may obtain
10 # a copy of the License at
11 #
12 # http://www.apache.org/licenses/LICENSE-2.0
13 #
14 # Unless required by applicable law or agreed to in writing, software
15 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
16 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
17 # License for the specific language governing permissions and limitations
18 # under the License.
19 #
20 # For those usages not covered by the Apache License, Version 2.0 please
21 # contact: osslegalrouting@vmware.com
22 # #
23
24 """
25 vimconn_vmware implements an abstract class in order to interact with VMware vCloud Director.
26 """
27
28 import atexit
29 import hashlib
30 import json
31 import logging
32 import os
33 import random
34 import re
35 import shutil
36 import socket
37 import ssl
38 import struct
39 import subprocess
40 import tempfile
41 import time
42 import traceback
43 import uuid
44 from xml.etree import ElementTree as XmlElementTree
45 from xml.sax.saxutils import escape
46
47 from lxml import etree as lxmlElementTree
48 import netaddr
49 from osm_ro_plugin import vimconn
50 from progressbar import Bar, ETA, FileTransferSpeed, Percentage, ProgressBar
51 from pyvcloud.vcd.client import BasicLoginCredentials, Client
52 from pyvcloud.vcd.org import Org
53 from pyvcloud.vcd.vapp import VApp
54 from pyvcloud.vcd.vdc import VDC
55 from pyVim.connect import Disconnect, SmartConnect
56 from pyVmomi import vim, vmodl # @UnresolvedImport
57 import requests
58 import yaml
59
60 # global variable for vcd connector type
61 STANDALONE = "standalone"
62
63 # key for flavor dicts
64 FLAVOR_RAM_KEY = "ram"
65 FLAVOR_VCPUS_KEY = "vcpus"
66 FLAVOR_DISK_KEY = "disk"
67 DEFAULT_IP_PROFILE = {"dhcp_count": 50, "dhcp_enabled": True, "ip_version": "IPv4"}
68 # global variable for wait time
69 INTERVAL_TIME = 5
70 MAX_WAIT_TIME = 1800
71
72 API_VERSION = "27.0"
73
74 # -1: "Could not be created",
75 # 0: "Unresolved",
76 # 1: "Resolved",
77 # 2: "Deployed",
78 # 3: "Suspended",
79 # 4: "Powered on",
80 # 5: "Waiting for user input",
81 # 6: "Unknown state",
82 # 7: "Unrecognized state",
83 # 8: "Powered off",
84 # 9: "Inconsistent state",
85 # 10: "Children do not all have the same status",
86 # 11: "Upload initiated, OVF descriptor pending",
87 # 12: "Upload initiated, copying contents",
88 # 13: "Upload initiated , disk contents pending",
89 # 14: "Upload has been quarantined",
90 # 15: "Upload quarantine period has expired"
91
92 # mapping vCD status to MANO
93 vcdStatusCode2manoFormat = {
94 4: "ACTIVE",
95 7: "PAUSED",
96 3: "SUSPENDED",
97 8: "INACTIVE",
98 12: "BUILD",
99 -1: "ERROR",
100 14: "DELETED",
101 }
102
103 # mapping vCD network status to MANO format
104 netStatus2manoFormat = {
105 "ACTIVE": "ACTIVE",
106 "PAUSED": "PAUSED",
107 "INACTIVE": "INACTIVE",
108 "BUILD": "BUILD",
109 "ERROR": "ERROR",
110 "DELETED": "DELETED",
111 }
112
113
114 class vimconnector(vimconn.VimConnector):
115 # dict used to store flavor in memory
116 flavorlist = {}
117
118 def __init__(
119 self,
120 uuid=None,
121 name=None,
122 tenant_id=None,
123 tenant_name=None,
124 url=None,
125 url_admin=None,
126 user=None,
127 passwd=None,
128 log_level=None,
129 config={},
130 persistent_info={},
131 ):
132 """
133 Constructor creates a VMware connector to vCloud Director.
134
135 By default the constructor does not validate the connection state, so a client can create the object with None arguments.
136 If the client specifies username, password, host and VDC name, the connector initializes the remaining attributes:
137
138 a) it initializes the organization UUID
139 b) it initializes the tenant_id / VDC ID (this information is derived from the tenant name)
140
141 Args:
142 uuid - organization uuid.
143 name - organization name; it must be present in vCloud Director.
144 tenant_id - VDC uuid; it must be present in vCloud Director.
145 tenant_name - VDC name.
146 url - hostname or IP address of vCloud Director.
147 url_admin - same as above.
148 user - user with administrator rights for the organization. The caller must make sure that
149 the username has the right privileges.
150
151 passwd - password for the user.
152
153 The VMware connector also requires PVDC administrative privileges and a separate account.
154 These credentials must be passed via the config argument dict, which contains the keys
155
156 dict['admin_username']
157 dict['admin_password']
158 config - provides NSX and vCenter information.
159
160 Returns:
161 Nothing.
162 """
163
164 vimconn.VimConnector.__init__(
165 self,
166 uuid,
167 name,
168 tenant_id,
169 tenant_name,
170 url,
171 url_admin,
172 user,
173 passwd,
174 log_level,
175 config,
176 )
177
178 self.logger = logging.getLogger("ro.vim.vmware")
179 self.logger.setLevel(10)
180 self.persistent_info = persistent_info
181
182 self.name = name
183 self.id = uuid
184 self.url = url
185 self.url_admin = url_admin
186 self.tenant_id = tenant_id
187 self.tenant_name = tenant_name
188 self.user = user
189 self.passwd = passwd
190 self.config = config
191 self.admin_password = None
192 self.admin_user = None
193 self.org_name = ""
194 self.nsx_manager = None
195 self.nsx_user = None
196 self.nsx_password = None
197 self.availability_zone = None
198
199 # Disable warnings from self-signed certificates.
200 requests.packages.urllib3.disable_warnings()
201
202 if tenant_name is not None:
203 orgnameandtenant = tenant_name.split(":")
204
205 if len(orgnameandtenant) == 2:
206 self.tenant_name = orgnameandtenant[1]
207 self.org_name = orgnameandtenant[0]
208 else:
209 self.tenant_name = tenant_name
210
211 if "orgname" in config:
212 self.org_name = config["orgname"]
213
214 if log_level:
215 self.logger.setLevel(getattr(logging, log_level))
216
217 try:
218 self.admin_user = config["admin_username"]
219 self.admin_password = config["admin_password"]
220 except KeyError:
221 raise vimconn.VimConnException(
222 message="Error admin username or admin password is empty."
223 )
224
225 try:
226 self.nsx_manager = config["nsx_manager"]
227 self.nsx_user = config["nsx_user"]
228 self.nsx_password = config["nsx_password"]
229 except KeyError:
230 raise vimconn.VimConnException(
231 message="Error: nsx manager or nsx user or nsx password is empty in Config"
232 )
233
234 self.vcenter_ip = config.get("vcenter_ip", None)
235 self.vcenter_port = config.get("vcenter_port", None)
236 self.vcenter_user = config.get("vcenter_user", None)
237 self.vcenter_password = config.get("vcenter_password", None)
238
239 # Set availability zone for Affinity rules
240 self.availability_zone = self.set_availability_zones()
241
242 # ############# Stub code for SRIOV #################
243 # try:
244 # self.dvs_name = config['dv_switch_name']
245 # except KeyError:
246 # raise vimconn.VimConnException(message="Error:
247 # distributed virtual switch name is empty in Config")
248 #
249 # self.vlanID_range = config.get("vlanID_range", None)
250
251 self.org_uuid = None
252 self.client = None
253
254 if not url:
255 raise vimconn.VimConnException("url param can not be NoneType")
256
257 if not self.url_admin: # try to use normal url
258 self.url_admin = self.url
259
260 logging.debug(
261 "UUID: {} name: {} tenant_id: {} tenant name {}".format(
262 self.id, self.org_name, self.tenant_id, self.tenant_name
263 )
264 )
265 logging.debug(
266 "vcd url {} vcd username: {} vcd password: {}".format(
267 self.url, self.user, self.passwd
268 )
269 )
270 logging.debug(
271 "vcd admin username {} vcd admin passowrd {}".format(
272 self.admin_user, self.admin_password
273 )
274 )
275
276 # initialize organization
277 if self.user is not None and self.passwd is not None and self.url:
278 self.init_organization()
279
280 def __getitem__(self, index):
281 if index == "name":
282 return self.name
283
284 if index == "tenant_id":
285 return self.tenant_id
286
287 if index == "tenant_name":
288 return self.tenant_name
289 elif index == "id":
290 return self.id
291 elif index == "org_name":
292 return self.org_name
293 elif index == "org_uuid":
294 return self.org_uuid
295 elif index == "user":
296 return self.user
297 elif index == "passwd":
298 return self.passwd
299 elif index == "url":
300 return self.url
301 elif index == "url_admin":
302 return self.url_admin
303 elif index == "config":
304 return self.config
305 else:
306 raise KeyError("Invalid key '{}'".format(index))
307
308 def __setitem__(self, index, value):
309 if index == "name":
310 self.name = value
311
312 if index == "tenant_id":
313 self.tenant_id = value
314
315 if index == "tenant_name":
316 self.tenant_name = value
317 elif index == "id":
318 self.id = value
319 elif index == "org_name":
320 self.org_name = value
321 elif index == "org_uuid":
322 self.org_uuid = value
323 elif index == "user":
324 self.user = value
325 elif index == "passwd":
326 self.passwd = value
327 elif index == "url":
328 self.url = value
329 elif index == "url_admin":
330 self.url_admin = value
331 else:
332 raise KeyError("Invalid key '{}'".format(index))
333
334 def connect_as_admin(self):
335 """Method connect as pvdc admin user to vCloud director.
336 There are certain action that can be done only by provider vdc admin user.
337 Organization creation / provider network creation etc.
338
339 Returns:
340 The return client object that latter can be used to connect to vcloud director as admin for provider vdc
341 """
342 self.logger.debug("Logging into vCD {} as admin.".format(self.org_name))
343
344 try:
345 host = self.url
346 org = "System"
347 client_as_admin = Client(
348 host, verify_ssl_certs=False, api_version=API_VERSION
349 )
350 client_as_admin.set_credentials(
351 BasicLoginCredentials(self.admin_user, org, self.admin_password)
352 )
353 except Exception as e:
354 raise vimconn.VimConnException(
355 "Can't connect to vCloud director as: {} with exception {}".format(
356 self.admin_user, e
357 )
358 )
359
360 return client_as_admin
361
362 def connect(self):
363 """Method connect as normal user to vCloud director.
364
365 Returns:
366 The return client object that latter can be used to connect to vCloud director as admin for VDC
367 """
368 try:
369 self.logger.debug(
370 "Logging into vCD {} as {} to datacenter {}.".format(
371 self.org_name, self.user, self.org_name
372 )
373 )
374 host = self.url
375 client = Client(host, verify_ssl_certs=False, api_version=API_VERSION)
376 client.set_credentials(
377 BasicLoginCredentials(self.user, self.org_name, self.passwd)
378 )
379 except Exception as e:
380 raise vimconn.VimConnConnectionException(
381 "Can't connect to vCloud director org: "
382 "{} as user {} with exception: {}".format(self.org_name, self.user, e)
383 )
384
385 return client
386
387 def init_organization(self):
388 """Method initialize organization UUID and VDC parameters.
389
390 At bare minimum client must provide organization name that present in vCloud director and VDC.
391
392 The VDC - UUID ( tenant_id) will be initialized at the run time if client didn't call constructor.
393 The Org - UUID will be initialized at the run time if data center present in vCloud director.
394
395 Returns:
396 The return vca object that letter can be used to connect to vcloud direct as admin
397 """
398 client = self.connect()
399
400 if not client:
401 raise vimconn.VimConnConnectionException("Failed to connect vCD.")
402
403 self.client = client
404 try:
405 if self.org_uuid is None:
406 org_list = client.get_org_list()
407 for org in org_list.Org:
408 # we set org UUID at the init phase but we can do it only when we have valid credential.
409 if org.get("name") == self.org_name:
410 self.org_uuid = org.get("href").split("/")[-1]
411 self.logger.debug(
412 "Setting organization UUID {}".format(self.org_uuid)
413 )
414 break
415 else:
416 raise vimconn.VimConnException(
417 "Vcloud director organization {} not found".format(
418 self.org_name
419 )
420 )
421
422 # if all went well, request the org details
423 org_details_dict = self.get_org(org_uuid=self.org_uuid)
424
425 # there are two cases for initializing the VDC ID or VDC name at run time
426 # case one: tenant_name provided but no tenant id
427 if (
428 self.tenant_id is None
429 and self.tenant_name is not None
430 and "vdcs" in org_details_dict
431 ):
432 vdcs_dict = org_details_dict["vdcs"]
433 for vdc in vdcs_dict:
434 if vdcs_dict[vdc] == self.tenant_name:
435 self.tenant_id = vdc
436 self.logger.debug(
437 "Setting vdc uuid {} for organization UUID {}".format(
438 self.tenant_id, self.org_name
439 )
440 )
441 break
442 else:
443 raise vimconn.VimConnException(
444 "Tenant name indicated but not present in vcloud director."
445 )
446
447 # case two: we have tenant_id but no tenant name, so we look it up and set it.
448 if (
449 self.tenant_id is not None
450 and self.tenant_name is None
451 and "vdcs" in org_details_dict
452 ):
453 vdcs_dict = org_details_dict["vdcs"]
454 for vdc in vdcs_dict:
455 if vdc == self.tenant_id:
456 self.tenant_name = vdcs_dict[vdc]
457 self.logger.debug(
458 "Setting vdc uuid {} for organization UUID {}".format(
459 self.tenant_id, self.org_name
460 )
461 )
462 break
463 else:
464 raise vimconn.VimConnException(
465 "Tenant id indicated but not present in vcloud director"
466 )
467
468 self.logger.debug("Setting organization uuid {}".format(self.org_uuid))
469 except Exception as e:
470 self.logger.debug(
471 "Failed initialize organization UUID for org {}: {}".format(
472 self.org_name, e
473 ),
474 )
475 self.logger.debug(traceback.format_exc())
476 self.org_uuid = None
477
478 def new_tenant(self, tenant_name=None, tenant_description=None):
479 """Method adds a new tenant to VIM with this name.
480 This action requires access to create VDC action in vCloud director.
481
482 Args:
483 tenant_name is tenant_name to be created.
484 tenant_description not used for this call
485
486 Return:
487 returns the tenant identifier in UUID format.
488 If action is failed method will throw vimconn.VimConnException method
489 """
490 vdc_task = self.create_vdc(vdc_name=tenant_name)
491 if vdc_task is not None:
492 vdc_uuid, _ = vdc_task.popitem()
493 self.logger.info(
494 "Created new vdc {} and uuid: {}".format(tenant_name, vdc_uuid)
495 )
496
497 return vdc_uuid
498 else:
499 raise vimconn.VimConnException(
500 "Failed create tenant {}".format(tenant_name)
501 )
502
503 def delete_tenant(self, tenant_id=None):
504 """Delete a tenant from VIM
505 Args:
506 tenant_id is the id of the tenant to be deleted.
507
508 Return:
509 returns the tenant identifier in UUID format.
510 If the action fails, the method raises an exception
511 """
512 vca = self.connect_as_admin()
513 if not vca:
514 raise vimconn.VimConnConnectionException("Failed to connect vCD")
515
516 if tenant_id is not None:
517 if vca._session:
518 # Get OrgVDC
519 url_list = [self.url, "/api/vdc/", tenant_id]
520 orgvdc_herf = "".join(url_list)
521
522 headers = {
523 "Accept": "application/*+xml;version=" + API_VERSION,
524 "x-vcloud-authorization": vca._session.headers[
525 "x-vcloud-authorization"
526 ],
527 }
528 response = self.perform_request(
529 req_type="GET", url=orgvdc_herf, headers=headers
530 )
531
532 if response.status_code != requests.codes.ok:
533 self.logger.debug(
534 "delete_tenant():GET REST API call {} failed. "
535 "Return status code {}".format(
536 orgvdc_herf, response.status_code
537 )
538 )
539
540 raise vimconn.VimConnNotFoundException(
541 "Fail to get tenant {}".format(tenant_id)
542 )
543
544 lxmlroot_respond = lxmlElementTree.fromstring(response.content)
545 namespaces = {
546 prefix: uri
547 for prefix, uri in lxmlroot_respond.nsmap.items()
548 if prefix
549 }
550 namespaces["xmlns"] = "http://www.vmware.com/vcloud/v1.5"
551 vdc_remove_href = lxmlroot_respond.find(
552 "xmlns:Link[@rel='remove']", namespaces
553 ).attrib["href"]
554 vdc_remove_href = vdc_remove_href + "?recursive=true&force=true"
555
556 response = self.perform_request(
557 req_type="DELETE", url=vdc_remove_href, headers=headers
558 )
559
560 if response.status_code == 202:
561 time.sleep(5)
562
563 return tenant_id
564 else:
565 self.logger.debug(
566 "delete_tenant(): DELETE REST API call {} failed. "
567 "Return status code {}".format(
568 vdc_remove_href, response.status_code
569 )
570 )
571
572 raise vimconn.VimConnException(
573 "Fail to delete tenant with ID {}".format(tenant_id)
574 )
575 else:
576 self.logger.debug(
577 "delete_tenant():Incorrect tenant ID {}".format(tenant_id)
578 )
579
580 raise vimconn.VimConnNotFoundException(
581 "Fail to get tenant {}".format(tenant_id)
582 )
583
584 def get_tenant_list(self, filter_dict={}):
585 """Obtain tenants of VIM
586 filter_dict can contain the following keys:
587 name: filter by tenant name
588 id: filter by tenant uuid/id
589 <other VIM specific>
590 Returns the tenant list of dictionaries:
591 [{'name': '<name>', 'id': '<id>', ...}, ...]
592
593 """
594 org_dict = self.get_org(self.org_uuid)
595 vdcs_dict = org_dict["vdcs"]
596
597 vdclist = []
598 try:
599 for k in vdcs_dict:
600 entry = {"name": vdcs_dict[k], "id": k}
601 # if caller didn't specify dictionary we return all tenants.
602
603 if filter_dict is not None and filter_dict:
604 filtered_entry = entry.copy()
605 filtered_dict = set(entry.keys()) - set(filter_dict)
606
607 for unwanted_key in filtered_dict:
608 del entry[unwanted_key]
609
610 if filter_dict == entry:
611 vdclist.append(filtered_entry)
612 else:
613 vdclist.append(entry)
614 except Exception:
615 self.logger.debug("Error in get_tenant_list()")
616 self.logger.debug(traceback.format_exc())
617
618 raise vimconn.VimConnException("Incorrect state in get_tenant_list()")
619
620 return vdclist
621
622 def new_network(
623 self,
624 net_name,
625 net_type,
626 ip_profile=None,
627 shared=False,
628 provider_network_profile=None,
629 ):
630 """Adds a tenant network to VIM
631 Params:
632 'net_name': name of the network
633 'net_type': one of:
634 'bridge': overlay isolated network
635 'data': underlay E-LAN network for Passthrough and SRIOV interfaces
636 'ptp': underlay E-LINE network for Passthrough and SRIOV interfaces.
637 'ip_profile': is a dict containing the IP parameters of the network
638 'ip_version': can be "IPv4" or "IPv6" (Currently only IPv4 is implemented)
639 'subnet_address': ip_prefix_schema, that is X.X.X.X/Y
640 'gateway_address': (Optional) ip_schema, that is X.X.X.X
641 'dns_address': (Optional) comma separated list of ip_schema, e.g. X.X.X.X[,X,X,X,X]
642 'dhcp_enabled': True or False
643 'dhcp_start_address': ip_schema, first IP to grant
644 'dhcp_count': number of IPs to grant.
645 'shared': if this network can be seen/use by other tenants/organization
646 'provider_network_profile': (optional) contains {segmentation-id: vlan, provider-network: vim_network}
647 Returns a tuple with the network identifier and created_items, or raises an exception on error
648 created_items can be None or a dictionary where this method can include key-values that will be passed to
649 the method delete_network. Can be used to store created segments, created l2gw connections, etc.
650 Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
651 as not present.
652 """
653
654 self.logger.debug(
655 "new_network tenant {} net_type {} ip_profile {} shared {} provider_network_profile {}".format(
656 net_name, net_type, ip_profile, shared, provider_network_profile
657 )
658 )
659 # vlan = None
660 # if provider_network_profile:
661 # vlan = provider_network_profile.get("segmentation-id")
662
663 created_items = {}
664 isshared = "false"
665
666 if shared:
667 isshared = "true"
668
669 # ############# Stub code for SRIOV #################
670 # if net_type == "data" or net_type == "ptp":
671 # if self.config.get('dv_switch_name') == None:
672 # raise vimconn.VimConnConflictException("You must provide 'dv_switch_name' at config value")
673 # network_uuid = self.create_dvPort_group(net_name)
674 parent_network_uuid = None
675
676 if provider_network_profile is not None:
677 for k, v in provider_network_profile.items():
678 if k == "physical_network":
679 parent_network_uuid = self.get_physical_network_by_name(v)
680
681 network_uuid = self.create_network(
682 network_name=net_name,
683 net_type=net_type,
684 ip_profile=ip_profile,
685 isshared=isshared,
686 parent_network_uuid=parent_network_uuid,
687 )
688
689 if network_uuid is not None:
690 return network_uuid, created_items
691 else:
692 raise vimconn.VimConnUnexpectedResponse(
693 "Failed create a new network {}".format(net_name)
694 )
695
696 def get_network_list(self, filter_dict={}):
697 """Obtain tenant networks of VIM
698 Filter_dict can be:
699 name: network name OR/AND
700 id: network uuid OR/AND
701 shared: boolean OR/AND
702 tenant_id: tenant OR/AND
703 admin_state_up: boolean
704 status: 'ACTIVE'
705
706 [{key : value , key : value}]
707
708 Returns the network list of dictionaries:
709 [{<the fields at Filter_dict plus some VIM specific>}, ...]
710 List can be empty
711 """
712
713 self.logger.debug(
714 "get_network_list(): retrieving network list for vcd {}".format(
715 self.tenant_name
716 )
717 )
718
719 if not self.tenant_name:
720 raise vimconn.VimConnConnectionException("Tenant name is empty.")
721
722 _, vdc = self.get_vdc_details()
723 if vdc is None:
724 raise vimconn.VimConnConnectionException(
725 "Can't retrieve information for a VDC {}.".format(self.tenant_name)
726 )
727
728 try:
729 vdcid = vdc.get("id").split(":")[3]
730
731 if self.client._session:
732 headers = {
733 "Accept": "application/*+xml;version=" + API_VERSION,
734 "x-vcloud-authorization": self.client._session.headers[
735 "x-vcloud-authorization"
736 ],
737 }
738 response = self.perform_request(
739 req_type="GET", url=vdc.get("href"), headers=headers
740 )
741
742 if response.status_code != 200:
743 self.logger.error("Failed to get vdc content")
744 raise vimconn.VimConnNotFoundException("Failed to get vdc content")
745 else:
746 content = XmlElementTree.fromstring(response.text)
747
748 network_list = []
749 for item in content:
750 if item.tag.split("}")[-1] == "AvailableNetworks":
751 for net in item:
752 response = self.perform_request(
753 req_type="GET", url=net.get("href"), headers=headers
754 )
755
756 if response.status_code != 200:
757 self.logger.error("Failed to get network content")
758 raise vimconn.VimConnNotFoundException(
759 "Failed to get network content"
760 )
761 else:
762 net_details = XmlElementTree.fromstring(response.text)
763
764 filter_entry = {}
765 net_uuid = net_details.get("id").split(":")
766
767 if len(net_uuid) != 4:
768 continue
769 else:
770 net_uuid = net_uuid[3]
771 # create dict entry
772 self.logger.debug(
773 "get_network_list(): Adding net {}"
774 " to a list vcd id {} network {}".format(
775 net_uuid, vdcid, net_details.get("name")
776 )
777 )
778 filter_entry["name"] = net_details.get("name")
779 filter_entry["id"] = net_uuid
780
781 if [
782 i.text
783 for i in net_details
784 if i.tag.split("}")[-1] == "IsShared"
785 ][0] == "true":
786 shared = True
787 else:
788 shared = False
789
790 filter_entry["shared"] = shared
791 filter_entry["tenant_id"] = vdcid
792
793 if int(net_details.get("status")) == 1:
794 filter_entry["admin_state_up"] = True
795 else:
796 filter_entry["admin_state_up"] = False
797
798 filter_entry["status"] = "ACTIVE"
799 filter_entry["type"] = "bridge"
800 filtered_entry = filter_entry.copy()
801
802 if filter_dict is not None and filter_dict:
803 # we remove all the key:value pairs we don't care about and match only
804 # the requested fields
805 filtered_dict = set(filter_entry.keys()) - set(
806 filter_dict
807 )
808
809 for unwanted_key in filtered_dict:
810 del filter_entry[unwanted_key]
811
812 if filter_dict == filter_entry:
813 network_list.append(filtered_entry)
814 else:
815 network_list.append(filtered_entry)
816 except Exception as e:
817 self.logger.debug("Error in get_network_list", exc_info=True)
818
819 if isinstance(e, vimconn.VimConnException):
820 raise
821 else:
822 raise vimconn.VimConnNotFoundException(
823 "Failed : Networks list not found {} ".format(e)
824 )
825
826 self.logger.debug("Returning {}".format(network_list))
827
828 return network_list
829
830 def get_network(self, net_id):
831 """Method obtains network details of net_id VIM network
832 Returns a dict with the fields described at get_network_list's filter_dict, plus some VIM specific fields."""
833 try:
834 _, vdc = self.get_vdc_details()
835 vdc_id = vdc.get("id").split(":")[3]
836
837 if self.client._session:
838 headers = {
839 "Accept": "application/*+xml;version=" + API_VERSION,
840 "x-vcloud-authorization": self.client._session.headers[
841 "x-vcloud-authorization"
842 ],
843 }
844 response = self.perform_request(
845 req_type="GET", url=vdc.get("href"), headers=headers
846 )
847
848 if response.status_code != 200:
849 self.logger.error("Failed to get vdc content")
850 raise vimconn.VimConnNotFoundException("Failed to get vdc content")
851 else:
852 content = XmlElementTree.fromstring(response.text)
853
854 filter_dict = {}
855
856 for item in content:
857 if item.tag.split("}")[-1] == "AvailableNetworks":
858 for net in item:
859 response = self.perform_request(
860 req_type="GET", url=net.get("href"), headers=headers
861 )
862
863 if response.status_code != 200:
864 self.logger.error("Failed to get network content")
865 raise vimconn.VimConnNotFoundException(
866 "Failed to get network content"
867 )
868 else:
869 net_details = XmlElementTree.fromstring(response.text)
870
871 vdc_network_id = net_details.get("id").split(":")
872 if len(vdc_network_id) == 4 and vdc_network_id[3] == net_id:
873 filter_dict["name"] = net_details.get("name")
874 filter_dict["id"] = vdc_network_id[3]
875
876 if [
877 i.text
878 for i in net_details
879 if i.tag.split("}")[-1] == "IsShared"
880 ][0] == "true":
881 shared = True
882 else:
883 shared = False
884
885 filter_dict["shared"] = shared
886 filter_dict["tenant_id"] = vdc_id
887
888 if int(net_details.get("status")) == 1:
889 filter_dict["admin_state_up"] = True
890 else:
891 filter_dict["admin_state_up"] = False
892
893 filter_dict["status"] = "ACTIVE"
894 filter_dict["type"] = "bridge"
895 self.logger.debug("Returning {}".format(filter_dict))
896
897 return filter_dict
898 else:
899 raise vimconn.VimConnNotFoundException(
900 "Network {} not found".format(net_id)
901 )
902 except Exception as e:
903 self.logger.debug("Error in get_network")
904 self.logger.debug(traceback.format_exc())
905
906 if isinstance(e, vimconn.VimConnException):
907 raise
908 else:
909 raise vimconn.VimConnNotFoundException(
910 "Failed : Network not found {} ".format(e)
911 )
912
913 return filter_dict
914
915 def delete_network(self, net_id, created_items=None):
916 """
917 Removes a tenant network from VIM and its associated elements
918 :param net_id: VIM identifier of the network, provided by method new_network
919 :param created_items: dictionary with extra items to be deleted. provided by method new_network
920 Returns the network identifier or raises an exception upon error or when network is not found
921 """
922 vcd_network = self.get_vcd_network(network_uuid=net_id)
923 if vcd_network is not None and vcd_network:
924 if self.delete_network_action(network_uuid=net_id):
925 return net_id
926 else:
927 raise vimconn.VimConnNotFoundException(
928 "Network {} not found".format(net_id)
929 )
930
931 def refresh_nets_status(self, net_list):
932 """Get the status of the networks
933 Params: the list of network identifiers
934 Returns a dictionary with:
935 net_id: #VIM id of this network
936 status: #Mandatory. Text with one of:
937 # DELETED (not found at vim)
938 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
939 # OTHER (Vim reported other status not understood)
940 # ERROR (VIM indicates an ERROR status)
941 # ACTIVE, INACTIVE, DOWN (admin down),
942 # BUILD (on building process)
943 #
944 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
945 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
946
947 """
948 dict_entry = {}
949 try:
950 for net in net_list:
951 errormsg = ""
952 vcd_network = self.get_vcd_network(network_uuid=net)
953 if vcd_network is not None and vcd_network:
954 if vcd_network["status"] == "1":
955 status = "ACTIVE"
956 else:
957 status = "DOWN"
958 else:
959 status = "DELETED"
960 errormsg = "Network not found."
961
962 dict_entry[net] = {
963 "status": status,
964 "error_msg": errormsg,
965 "vim_info": yaml.safe_dump(vcd_network),
966 }
967 except Exception:
968 self.logger.debug("Error in refresh_nets_status")
969 self.logger.debug(traceback.format_exc())
970
971 return dict_entry
972
973 def get_flavor(self, flavor_id):
974 """Obtain flavor details from the VIM
975 Returns the flavor dict details {'id':<>, 'name':<>, other vim specific } #TODO to concrete
976 """
977 if flavor_id not in vimconnector.flavorlist:
978 raise vimconn.VimConnNotFoundException("Flavor not found.")
979
980 return vimconnector.flavorlist[flavor_id]
981
982 def new_flavor(self, flavor_data):
983 """Adds a tenant flavor to VIM
984 flavor_data contains a dictionary with information, keys:
985 name: flavor name
986 ram: memory (cloud type) in MBytes
987 vcpus: cpus (cloud type)
988 extended: EPA parameters
989 - numas: #items requested in same NUMA
990 memory: number of 1G huge pages memory
991 paired-threads|cores|threads: number of paired hyperthreads, complete cores OR individual
992 threads
993 interfaces: # passthrough(PT) or SRIOV interfaces attached to this numa
994 - name: interface name
995 dedicated: yes|no|yes:sriov; for PT, SRIOV or only one SRIOV for the physical NIC
996 bandwidth: X Gbps; requested guarantee bandwidth
997 vpci: requested virtual PCI address
998 disk: disk size
999 is_public:
1000 #TODO to concrete
1001 Returns the flavor identifier"""
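# Illustrative flavor_data (hypothetical values). When an "extended"/"numas"
# section is present, its memory / paired-threads figures override the plain
# ram / vcpus values, as implemented below:
#
#     flavor_id = conn.new_flavor({
#         "name": "small",
#         "ram": 2048,   # MB
#         "vcpus": 2,
#         "disk": 10,    # GB
#         "extended": {
#             "numas": [{"memory": 4, "paired-threads": 2}],  # -> ram=4096, vcpus=4
#         },
#     })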
1002
1003 # generate a new uuid, put it into the internal dict and return it.
1004 self.logger.debug("Creating new flavor - flavor_data: {}".format(flavor_data))
1005 new_flavor = flavor_data
1006 ram = flavor_data.get(FLAVOR_RAM_KEY, 1024)
1007 cpu = flavor_data.get(FLAVOR_VCPUS_KEY, 1)
1008 disk = flavor_data.get(FLAVOR_DISK_KEY, 0)
1009
1010 if not isinstance(ram, int):
1011 raise vimconn.VimConnException("Non-integer value for ram")
1012 elif not isinstance(cpu, int):
1013 raise vimconn.VimConnException("Non-integer value for cpu")
1014 elif not isinstance(disk, int):
1015 raise vimconn.VimConnException("Non-integer value for disk")
1016
1017 extended_flv = flavor_data.get("extended")
1018 if extended_flv:
1019 numas = extended_flv.get("numas")
1020 if numas:
1021 for numa in numas:
1022 # overwrite ram and vcpus
1023 if "memory" in numa:
1024 ram = numa["memory"] * 1024
1025
1026 if "paired-threads" in numa:
1027 cpu = numa["paired-threads"] * 2
1028 elif "cores" in numa:
1029 cpu = numa["cores"]
1030 elif "threads" in numa:
1031 cpu = numa["threads"]
1032
1033 new_flavor[FLAVOR_RAM_KEY] = ram
1034 new_flavor[FLAVOR_VCPUS_KEY] = cpu
1035 new_flavor[FLAVOR_DISK_KEY] = disk
1036 # generate a new uuid, put it into the internal dict and return it.
1037 flavor_id = uuid.uuid4()
1038 vimconnector.flavorlist[str(flavor_id)] = new_flavor
1039 self.logger.debug("Created flavor - {} : {}".format(flavor_id, new_flavor))
1040
1041 return str(flavor_id)
1042
1043 def delete_flavor(self, flavor_id):
1044 """Deletes a tenant flavor from VIM identify by its id
1045
1046 Returns the used id or raise an exception
1047 """
1048 if flavor_id not in vimconnector.flavorlist:
1049 raise vimconn.VimConnNotFoundException("Flavor not found.")
1050
1051 vimconnector.flavorlist.pop(flavor_id, None)
1052
1053 return flavor_id
1054
1055 def new_image(self, image_dict):
1056 """
1057 Adds a tenant image to VIM
1058 Returns:
1059 the image identifier (catalog UUID) if the image is created;
1060 otherwise an exception is raised
1061 """
1062 return self.get_image_id_from_path(image_dict["location"])
1063
1064 def delete_image(self, image_id):
1065 """
1066 Deletes a tenant image from VIM
1067 Args:
1068 image_id is ID of Image to be deleted
1069 Return:
1070 returns the image identifier in UUID format or raises an exception on error
1071 """
1072 conn = self.connect_as_admin()
1073
1074 if not conn:
1075 raise vimconn.VimConnConnectionException("Failed to connect vCD")
1076
1077 # Get Catalog details
1078 url_list = [self.url, "/api/catalog/", image_id]
1079 catalog_herf = "".join(url_list)
1080
1081 headers = {
1082 "Accept": "application/*+xml;version=" + API_VERSION,
1083 "x-vcloud-authorization": conn._session.headers["x-vcloud-authorization"],
1084 }
1085
1086 response = self.perform_request(
1087 req_type="GET", url=catalog_herf, headers=headers
1088 )
1089
1090 if response.status_code != requests.codes.ok:
1091 self.logger.debug(
1092 "delete_image():GET REST API call {} failed. "
1093 "Return status code {}".format(catalog_herf, response.status_code)
1094 )
1095
1096 raise vimconn.VimConnNotFoundException(
1097 "Fail to get image {}".format(image_id)
1098 )
1099
1100 lxmlroot_respond = lxmlElementTree.fromstring(response.content)
1101 namespaces = {
1102 prefix: uri for prefix, uri in lxmlroot_respond.nsmap.items() if prefix
1103 }
1104 namespaces["xmlns"] = "http://www.vmware.com/vcloud/v1.5"
1105
1106 catalogItems_section = lxmlroot_respond.find("xmlns:CatalogItems", namespaces)
1107 catalogItems = catalogItems_section.iterfind("xmlns:CatalogItem", namespaces)
1108
1109 for catalogItem in catalogItems:
1110 catalogItem_href = catalogItem.attrib["href"]
1111
1112 response = self.perform_request(
1113 req_type="GET", url=catalogItem_href, headers=headers
1114 )
1115
1116 if response.status_code != requests.codes.ok:
1117 self.logger.debug(
1118 "delete_image():GET REST API call {} failed. "
1119 "Return status code {}".format(catalog_herf, response.status_code)
1120 )
1121 raise vimconn.VimConnNotFoundException(
1122 "Fail to get catalogItem {} for catalog {}".format(
1123 catalogItem, image_id
1124 )
1125 )
1126
1127 lxmlroot_respond = lxmlElementTree.fromstring(response.content)
1128 namespaces = {
1129 prefix: uri for prefix, uri in lxmlroot_respond.nsmap.items() if prefix
1130 }
1131 namespaces["xmlns"] = "http://www.vmware.com/vcloud/v1.5"
1132 catalogitem_remove_href = lxmlroot_respond.find(
1133 "xmlns:Link[@rel='remove']", namespaces
1134 ).attrib["href"]
1135
1136 # Remove catalogItem
1137 response = self.perform_request(
1138 req_type="DELETE", url=catalogitem_remove_href, headers=headers
1139 )
1140
1141 if response.status_code == requests.codes.no_content:
1142 self.logger.debug("Deleted Catalog item {}".format(catalogItem))
1143 else:
1144 raise vimconn.VimConnException(
1145 "Fail to delete Catalog Item {}".format(catalogItem)
1146 )
1147
1148 # Remove catalog
1149 url_list = [self.url, "/api/admin/catalog/", image_id]
1150 catalog_remove_herf = "".join(url_list)
1151 response = self.perform_request(
1152 req_type="DELETE", url=catalog_remove_herf, headers=headers
1153 )
1154
1155 if response.status_code == requests.codes.no_content:
1156 self.logger.debug("Deleted Catalog {}".format(image_id))
1157
1158 return image_id
1159 else:
1160 raise vimconn.VimConnException("Fail to delete Catalog {}".format(image_id))
1161
1162 def catalog_exists(self, catalog_name, catalogs):
1163 """
1164
1165 :param catalog_name: name of the catalog to look up
1166 :param catalogs: list of catalog dicts, as returned by vCD
1167 :return: the catalog id if a catalog with that name is found, otherwise None
1168 """
1169 for catalog in catalogs:
1170 if catalog["name"] == catalog_name:
1171 return catalog["id"]
1172
1173 def create_vimcatalog(self, vca=None, catalog_name=None):
1174 """Create new catalog entry in vCloud director.
1175
1176 Args
1177 vca: vCloud director.
1178 catalog_name: catalog that the client wishes to create. Note that no validation is done on the name;
1179 the client must make sure to provide a valid string representation.
1180
1181 Returns catalog id if catalog created else None.
1182
1183 """
1184 try:
1185 lxml_catalog_element = vca.create_catalog(catalog_name, catalog_name)
1186
1187 if lxml_catalog_element:
1188 id_attr_value = lxml_catalog_element.get("id")
1189 return id_attr_value.split(":")[-1]
1190
1191 catalogs = vca.list_catalogs()
1192 except Exception as ex:
1193 self.logger.error(
1194 'create_vimcatalog(): Creation of catalog "{}" failed with error: {}'.format(
1195 catalog_name, ex
1196 )
1197 )
1198 raise
1199 return self.catalog_exists(catalog_name, catalogs)
1200
1201 # noinspection PyIncorrectDocstring
1202 def upload_ovf(
1203 self,
1204 vca=None,
1205 catalog_name=None,
1206 image_name=None,
1207 media_file_name=None,
1208 description="",
1209 progress=False,
1210 chunk_bytes=128 * 1024,
1211 ):
1212 """
1213 Uploads an OVF file to a vCloud catalog
1214
1215 :param chunk_bytes:
1216 :param progress:
1217 :param description:
1218 :param image_name:
1219 :param vca:
1220 :param catalog_name: (str): The name of the catalog to upload the media.
1221 :param media_file_name: (str): The name of the local media file to upload.
1222 :return: (bool) True if the media file was successfully uploaded, false otherwise.
1223 """
1224 os.path.isfile(media_file_name)
1225 statinfo = os.stat(media_file_name)
1226
1227 # find a catalog entry where we upload OVF.
1228 # create vApp Template and check the status; if vCD is able to read the OVF it will respond with the appropriate
1229 # status change.
1230 # if vCD can parse the OVF we upload the VMDK file
1231 try:
1232 for catalog in vca.list_catalogs():
1233 if catalog_name != catalog["name"]:
1234 continue
1235 catalog_href = "{}/api/catalog/{}/action/upload".format(
1236 self.url, catalog["id"]
1237 )
1238 data = """
1239 <UploadVAppTemplateParams name="{}"
1240 xmlns="http://www.vmware.com/vcloud/v1.5"
1241 xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1">
1242 <Description>{} vApp Template</Description>
1243 </UploadVAppTemplateParams>
1244 """.format(
1245 catalog_name, description
1246 )
1247
1248 if self.client:
1249 headers = {
1250 "Accept": "application/*+xml;version=" + API_VERSION,
1251 "x-vcloud-authorization": self.client._session.headers[
1252 "x-vcloud-authorization"
1253 ],
1254 }
1255 headers[
1256 "Content-Type"
1257 ] = "application/vnd.vmware.vcloud.uploadVAppTemplateParams+xml"
1258
1259 response = self.perform_request(
1260 req_type="POST", url=catalog_href, headers=headers, data=data
1261 )
1262
1263 if response.status_code == requests.codes.created:
1264 catalogItem = XmlElementTree.fromstring(response.text)
1265 entity = [
1266 child
1267 for child in catalogItem
1268 if child.get("type")
1269 == "application/vnd.vmware.vcloud.vAppTemplate+xml"
1270 ][0]
1271 href = entity.get("href")
1272 template = href
1273
1274 response = self.perform_request(
1275 req_type="GET", url=href, headers=headers
1276 )
1277
1278 if response.status_code == requests.codes.ok:
1279 headers["Content-Type"] = "Content-Type text/xml"
1280 result = re.search(
1281 'rel="upload:default"\shref="(.*?\/descriptor.ovf)"',
1282 response.text,
1283 )
1284
1285 if result:
1286 transfer_href = result.group(1)
1287
1288 response = self.perform_request(
1289 req_type="PUT",
1290 url=transfer_href,
1291 headers=headers,
1292 data=open(media_file_name, "rb"),
1293 )
1294
1295 if response.status_code != requests.codes.ok:
1296 self.logger.debug(
1297 "Failed create vApp template for catalog name {} and image {}".format(
1298 catalog_name, media_file_name
1299 )
1300 )
1301 return False
1302
1303 # TODO fix this with an async block
1304 time.sleep(5)
1305
1306 self.logger.debug(
1307 "vApp template for catalog name {} and image {}".format(
1308 catalog_name, media_file_name
1309 )
1310 )
1311
1312 # uploading VMDK file
1313 # check status of OVF upload and upload remaining files.
1314 response = self.perform_request(
1315 req_type="GET", url=template, headers=headers
1316 )
1317
1318 if response.status_code == requests.codes.ok:
1319 result = re.search(
1320 'rel="upload:default"\s*href="(.*?vmdk)"', response.text
1321 )
1322
1323 if result:
1324 link_href = result.group(1)
1325
1326 # we skip ovf since it already uploaded.
1327 if "ovf" in link_href:
1328 continue
1329
1330 # The OVF file and VMDK must be in the same directory
1331 head, _ = os.path.split(media_file_name)
1332 file_vmdk = head + "/" + link_href.split("/")[-1]
1333
1334 if not os.path.isfile(file_vmdk):
1335 return False
1336
1337 statinfo = os.stat(file_vmdk)
1338 if statinfo.st_size == 0:
1339 return False
1340
1341 hrefvmdk = link_href
1342
1343 if progress:
1344 widgets = [
1345 "Uploading file: ",
1346 Percentage(),
1347 " ",
1348 Bar(),
1349 " ",
1350 ETA(),
1351 " ",
1352 FileTransferSpeed(),
1353 ]
1354 progress_bar = ProgressBar(
1355 widgets=widgets, maxval=statinfo.st_size
1356 ).start()
1357
1358 bytes_transferred = 0
1359 f = open(file_vmdk, "rb")
1360
1361 while bytes_transferred < statinfo.st_size:
1362 my_bytes = f.read(chunk_bytes)
1363 if len(my_bytes) <= chunk_bytes:
1364 headers["Content-Range"] = "bytes {}-{}/{}".format(
1365 bytes_transferred,
1366 bytes_transferred + len(my_bytes) - 1,
1367 statinfo.st_size,
1368 )
1369 headers["Content-Length"] = str(len(my_bytes))
1370 response = requests.put(
1371 url=hrefvmdk,
1372 headers=headers,
1373 data=my_bytes,
1374 verify=False,
1375 )
1376
1377 if response.status_code == requests.codes.ok:
1378 bytes_transferred += len(my_bytes)
1379 if progress:
1380 progress_bar.update(bytes_transferred)
1381 else:
1382 self.logger.debug(
1383 "file upload failed with error: [{}] {}".format(
1384 response.status_code, response.text
1385 )
1386 )
1387
1388 f.close()
1389
1390 return False
1391
1392 f.close()
1393 if progress:
1394 progress_bar.finish()
1395 time.sleep(10)
1396
1397 return True
1398 else:
1399 self.logger.debug(
1400 "Failed retrieve vApp template for catalog name {} for OVF {}".format(
1401 catalog_name, media_file_name
1402 )
1403 )
1404 return False
1405 except Exception as exp:
1406 self.logger.debug(
1407 "Failed while uploading OVF to catalog {} for OVF file {} with Exception {}".format(
1408 catalog_name, media_file_name, exp
1409 )
1410 )
1411
1412 raise vimconn.VimConnException(
1413 "Failed while uploading OVF to catalog {} for OVF file {} with Exception {}".format(
1414 catalog_name, media_file_name, exp
1415 )
1416 )
1417
1418 self.logger.debug(
1419 "Failed retrieve catalog name {} for OVF file {}".format(
1420 catalog_name, media_file_name
1421 )
1422 )
1423
1424 return False
1425
1426 def upload_vimimage(
1427 self,
1428 vca=None,
1429 catalog_name=None,
1430 media_name=None,
1431 medial_file_name=None,
1432 progress=False,
1433 ):
1434 """Upload media file"""
1435 # TODO add named parameters for readability
1436 return self.upload_ovf(
1437 vca=vca,
1438 catalog_name=catalog_name,
1439 image_name=media_name.split(".")[0],
1440 media_file_name=medial_file_name,
1441 description="medial_file_name",
1442 progress=progress,
1443 )
1444
1445 def validate_uuid4(self, uuid_string=None):
1446 """Method validate correct format of UUID.
1447
1448 Return: true if string represent valid uuid
1449 """
1450 try:
1451 uuid.UUID(uuid_string, version=4)
1452 except ValueError:
1453 return False
1454
1455 return True
1456
1457 def get_catalogid(self, catalog_name=None, catalogs=None):
1458 """Method check catalog and return catalog ID in UUID format.
1459
1460 Args
1461 catalog_name: catalog name as string
1462 catalogs: list of catalogs.
1463
1464 Return: catalogs uuid
1465 """
1466 for catalog in catalogs:
1467 if catalog["name"] == catalog_name:
1468 catalog_id = catalog["id"]
1469 return catalog_id
1470
1471 return None
1472
1473 def get_catalogbyid(self, catalog_uuid=None, catalogs=None):
1474 """Method check catalog and return catalog name lookup done by catalog UUID.
1475
1476 Args
1477 catalog_name: catalog name as string
1478 catalogs: list of catalogs.
1479
1480 Return: catalogs name or None
1481 """
1482 if not self.validate_uuid4(uuid_string=catalog_uuid):
1483 return None
1484
1485 for catalog in catalogs:
1486 catalog_id = catalog.get("id")
1487
1488 if catalog_id == catalog_uuid:
1489 return catalog.get("name")
1490
1491 return None
1492
1493 def get_catalog_obj(self, catalog_uuid=None, catalogs=None):
1494 """Method check catalog and return catalog name lookup done by catalog UUID.
1495
1496 Args
1497 catalog_name: catalog name as string
1498 catalogs: list of catalogs.
1499
1500 Return: catalogs name or None
1501 """
1502 if not self.validate_uuid4(uuid_string=catalog_uuid):
1503 return None
1504
1505 for catalog in catalogs:
1506 catalog_id = catalog.get("id")
1507
1508 if catalog_id == catalog_uuid:
1509 return catalog
1510
1511 return None
1512
1513 def get_image_id_from_path(self, path=None, progress=False):
1514 """Method upload OVF image to vCloud director.
1515
1516 Each OVF image represented as single catalog entry in vcloud director.
1517 The method check for existing catalog entry. The check done by file name without file extension.
1518
1519 if given catalog name already present method will respond with existing catalog uuid otherwise
1520 it will create new catalog entry and upload OVF file to newly created catalog.
1521
1522 If method can't create catalog entry or upload a file it will throw exception.
1523
1524 Method accept boolean flag progress that will output progress bar. It useful method
1525 for standalone upload use case. In case to test large file upload.
1526
1527 Args
1528 path: - valid path to OVF file.
1529 progress - boolean progress bar show progress bar.
1530
1531 Return: if image uploaded correct method will provide image catalog UUID.
1532 """
1533 if not path:
1534 raise vimconn.VimConnException("Image path can't be None.")
1535
1536 if not os.path.isfile(path):
1537 raise vimconn.VimConnException("Can't read file. File not found.")
1538
1539 if not os.access(path, os.R_OK):
1540 raise vimconn.VimConnException(
1541 "Can't read file. Check file permission to read."
1542 )
1543
1544 self.logger.debug("get_image_id_from_path() client requesting {} ".format(path))
1545
1546 _, filename = os.path.split(path)
1547 _, file_extension = os.path.splitext(path)
1548 if file_extension != ".ovf":
1549 self.logger.debug(
1550 "Wrong file extension {} connector support only OVF container.".format(
1551 file_extension
1552 )
1553 )
1554
1555 raise vimconn.VimConnException(
1556 "Wrong container. vCloud director supports only OVF."
1557 )
1558
1559 catalog_name = os.path.splitext(filename)[0]
1560 catalog_md5_name = hashlib.md5(path.encode("utf-8")).hexdigest()
1561 self.logger.debug(
1562 "File name {} Catalog Name {} file path {} "
1563 "vdc catalog name {}".format(filename, catalog_name, path, catalog_md5_name)
1564 )
1565
1566 try:
1567 org, _ = self.get_vdc_details()
1568 catalogs = org.list_catalogs()
1569 except Exception as exp:
1570 self.logger.debug("Failed get catalogs() with Exception {} ".format(exp))
1571
1572 raise vimconn.VimConnException(
1573 "Failed get catalogs() with Exception {} ".format(exp)
1574 )
1575
1576 if len(catalogs) == 0:
1577 self.logger.info(
1578 "Creating a new catalog entry {} in vcloud director".format(
1579 catalog_name
1580 )
1581 )
1582
1583 if self.create_vimcatalog(org, catalog_md5_name) is None:
1584 raise vimconn.VimConnException(
1585 "Failed create new catalog {} ".format(catalog_md5_name)
1586 )
1587
1588 result = self.upload_vimimage(
1589 vca=org,
1590 catalog_name=catalog_md5_name,
1591 media_name=filename,
1592 medial_file_name=path,
1593 progress=progress,
1594 )
1595
1596 if not result:
1597 raise vimconn.VimConnException(
1598 "Failed create vApp template for catalog {} ".format(catalog_name)
1599 )
1600
1601 return self.get_catalogid(catalog_name, catalogs)
1602 else:
1603 for catalog in catalogs:
1604 # search for existing catalog if we find same name we return ID
1605 # TODO optimize this
1606 if catalog["name"] == catalog_md5_name:
1607 self.logger.debug(
1608 "Found existing catalog entry for {} "
1609 "catalog id {}".format(
1610 catalog_name, self.get_catalogid(catalog_md5_name, catalogs)
1611 )
1612 )
1613
1614 return self.get_catalogid(catalog_md5_name, catalogs)
1615
1616 # if we didn't find existing catalog we create a new one and upload image.
1617 self.logger.debug(
1618 "Creating new catalog entry {} - {}".format(catalog_name, catalog_md5_name)
1619 )
1620 if self.create_vimcatalog(org, catalog_md5_name) is None:
1621 raise vimconn.VimConnException(
1622 "Failed create new catalog {} ".format(catalog_md5_name)
1623 )
1624
1625 result = self.upload_vimimage(
1626 vca=org,
1627 catalog_name=catalog_md5_name,
1628 media_name=filename,
1629 medial_file_name=path,
1630 progress=progress,
1631 )
1632 if not result:
1633 raise vimconn.VimConnException(
1634 "Failed create vApp template for catalog {} ".format(catalog_md5_name)
1635 )
1636
1637 return self.get_catalogid(catalog_md5_name, org.list_catalogs())
1638
1639 def get_image_list(self, filter_dict={}):
1640 """Obtain tenant images from VIM
1641 Filter_dict can be:
1642 name: image name
1643 id: image uuid
1644 checksum: image checksum
1645 location: image path
1646 Returns the image list of dictionaries:
1647 [{<the fields at Filter_dict plus some VIM specific>}, ...]
1648 List can be empty
1649 """
1650 try:
1651 org, _ = self.get_vdc_details()
1652 image_list = []
1653 catalogs = org.list_catalogs()
1654
1655 if len(catalogs) == 0:
1656 return image_list
1657 else:
1658 for catalog in catalogs:
1659 catalog_uuid = catalog.get("id")
1660 name = catalog.get("name")
1661 filtered_dict = {}
1662
1663 if filter_dict.get("name") and filter_dict["name"] != name:
1664 continue
1665
1666 if filter_dict.get("id") and filter_dict["id"] != catalog_uuid:
1667 continue
1668
1669 filtered_dict["name"] = name
1670 filtered_dict["id"] = catalog_uuid
1671 image_list.append(filtered_dict)
1672
1673 self.logger.debug(
1674 "List of already created catalog items: {}".format(image_list)
1675 )
1676
1677 return image_list
1678 except Exception as exp:
1679 raise vimconn.VimConnException(
1680 "Exception occured while retriving catalog items {}".format(exp)
1681 )
1682
1683 def get_namebyvappid(self, vapp_uuid=None):
1684 """Method returns vApp name from vCD and lookup done by vapp_id.
1685
1686 Args:
1687 vapp_uuid: vappid is application identifier
1688
1689 Returns:
1690 The return vApp name otherwise None
1691 """
1692 try:
1693 if self.client and vapp_uuid:
1694 vapp_call = "{}/api/vApp/vapp-{}".format(self.url, vapp_uuid)
1695 headers = {
1696 "Accept": "application/*+xml;version=" + API_VERSION,
1697 "x-vcloud-authorization": self.client._session.headers[
1698 "x-vcloud-authorization"
1699 ],
1700 }
1701
1702 response = self.perform_request(
1703 req_type="GET", url=vapp_call, headers=headers
1704 )
1705
1706 # Retry login if session expired & retry sending request
1707 if response.status_code == 403:
1708 response = self.retry_rest("GET", vapp_call)
1709
1710 tree = XmlElementTree.fromstring(response.text)
1711
1712 return tree.attrib["name"] if "name" in tree.attrib else None
1713 except Exception as e:
1714 self.logger.exception(e)
1715
1716 return None
1717
1718 return None
1719
1720 def new_vminstance(
1721 self,
1722 name=None,
1723 description="",
1724 start=False,
1725 image_id=None,
1726 flavor_id=None,
1727 affinity_group_list=[],
1728 net_list=[],
1729 cloud_config=None,
1730 disk_list=None,
1731 availability_zone_index=None,
1732 availability_zone_list=None,
1733 ):
1734 """Adds a VM instance to VIM
1735 Params:
1736 'start': (boolean) indicates if VM must start or created in pause mode.
1737 'image_id','flavor_id': image and flavor VIM id to use for the VM
1738 'net_list': list of interfaces, each one is a dictionary with:
1739 'name': (optional) name for the interface.
1740 'net_id': VIM network id where this interface must be connect to. Mandatory for type==virtual
1741 'vpci': (optional) virtual vPCI address to assign at the VM. Can be ignored depending on VIM
1742 capabilities
1743 'model': (optional and only have sense for type==virtual) interface model: virtio, e1000, ...
1744 'mac_address': (optional) mac address to assign to this interface
1745 #TODO: CHECK if an optional 'vlan' parameter is needed for VIMs when type if VF and net_id is not
1746 provided, the VLAN tag to be used. In case net_id is provided, the internal network vlan is used
1747 for tagging VF
1748 'type': (mandatory) can be one of:
1749 'virtual', in this case always connected to a network of type 'net_type=bridge'
1750 'PCI-PASSTHROUGH' or 'PF' (passthrough): depending on VIM capabilities it can be connected to a
1751 data/ptp network or it can be created unconnected
1752 'SR-IOV' or 'VF' (SRIOV with VLAN tag): same as PF for network connectivity.
1753 'VFnotShared'(SRIOV without VLAN tag) same as PF for network connectivity. VF where no other VFs
1754 are allocated on the same physical NIC
1755 'bw': (optional) only for PF/VF/VFnotShared. Minimal Bandwidth required for the interface in GBPS
1756 'port_security': (optional) If False it must avoid any traffic filtering at this interface. If missing
1757 or True, it must apply the default VIM behaviour
1758 After execution the method will add the key:
1759 'vim_id': must be filled/added by this method with the VIM identifier generated by the VIM for this
1760 interface. 'net_list' is modified
1761 'cloud_config': (optional) dictionary with:
1762 'key-pairs': (optional) list of strings with the public key to be inserted to the default user
1763 'users': (optional) list of users to be inserted, each item is a dict with:
1764 'name': (mandatory) user name,
1765 'key-pairs': (optional) list of strings with the public key to be inserted to the user
1766 'user-data': (optional) can be a string with the text script to be passed directly to cloud-init,
1767 or a list of strings, each one contains a script to be passed, usually with a MIMEmultipart file
1768 'config-files': (optional). List of files to be transferred. Each item is a dict with:
1769 'dest': (mandatory) string with the destination absolute path
1770 'encoding': (optional, by default text). Can be one of:
1771 'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
1772 'content' (mandatory): string with the content of the file
1773 'permissions': (optional) string with file permissions, typically octal notation '0644'
1774 'owner': (optional) file owner, string with the format 'owner:group'
1775 'boot-data-drive': boolean to indicate if user-data must be passed using a boot drive (hard disk)
1776 'disk_list': (optional) list with additional disks to the VM. Each item is a dict with:
1777 'image_id': (optional). VIM id of an existing image. If not provided an empty disk must be mounted
1778 'size': (mandatory) string with the size of the disk in GB
1779 availability_zone_index: Index of availability_zone_list to use for this VM. None if no AZ is required
1780 availability_zone_list: list of availability zones given by user in the VNFD descriptor. Ignore if
1781 availability_zone_index is None
1782 Returns a tuple with the instance identifier and created_items or raises an exception on error
1783 created_items can be None or a dictionary where this method can include key-values that will be passed to
1784 the method delete_vminstance and action_vminstance. Can be used to store created ports, volumes, etc.
1785 Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
1786 as not present.
1787 """
1788 self.logger.info("Creating new instance for entry {}".format(name))
1789 self.logger.debug(
1790 "desc {} boot {} image_id: {} flavor_id: {} net_list: {} cloud_config {} disk_list {} "
1791 "availability_zone_index {} availability_zone_list {}".format(
1792 description,
1793 start,
1794 image_id,
1795 flavor_id,
1796 net_list,
1797 cloud_config,
1798 disk_list,
1799 availability_zone_index,
1800 availability_zone_list,
1801 )
1802 )
1803
1804 # new vm name = vmname + "-" + uuid
1805 new_vm_name = [name, "-", str(uuid.uuid4())]
1806 vmname_andid = "".join(new_vm_name)
1807
1808 for net in net_list:
1809 if net["type"] == "PCI-PASSTHROUGH":
1810 raise vimconn.VimConnNotSupportedException(
1811 "Current vCD version does not support type : {}".format(net["type"])
1812 )
1813
1814 if len(net_list) > 10:
1815 raise vimconn.VimConnNotSupportedException(
1816 "The VM hardware versions 7 and above support upto 10 NICs only"
1817 )
1818
1819 # if vm already deployed we return existing uuid
1820 # we check for presence of VDC, Catalog entry and Flavor.
1821 org, vdc = self.get_vdc_details()
1822 if vdc is None:
1823 raise vimconn.VimConnNotFoundException(
1824 "new_vminstance(): Failed create vApp {}: (Failed retrieve VDC information)".format(
1825 name
1826 )
1827 )
1828
1829 catalogs = org.list_catalogs()
1830 if catalogs is None:
1831 # Retry once, if failed by refreshing token
1832 self.get_token()
1833 org = Org(self.client, resource=self.client.get_org())
1834 catalogs = org.list_catalogs()
1835
1836 if catalogs is None:
1837 raise vimconn.VimConnNotFoundException(
1838 "new_vminstance(): Failed create vApp {}: (Failed retrieve catalogs list)".format(
1839 name
1840 )
1841 )
1842
1843 catalog_hash_name = self.get_catalogbyid(
1844 catalog_uuid=image_id, catalogs=catalogs
1845 )
1846 if catalog_hash_name:
1847 self.logger.info(
1848 "Found catalog entry {} for image id {}".format(
1849 catalog_hash_name, image_id
1850 )
1851 )
1852 else:
1853 raise vimconn.VimConnNotFoundException(
1854 "new_vminstance(): Failed create vApp {}: "
1855 "(Failed retrieve catalog information {})".format(name, image_id)
1856 )
1857
1858 # Set vCPU and Memory based on flavor.
1859 vm_cpus = None
1860 vm_memory = None
1861 vm_disk = None
1862 numas = None
1863
1864 if flavor_id is not None:
1865 if flavor_id not in vimconnector.flavorlist:
1866 raise vimconn.VimConnNotFoundException(
1867 "new_vminstance(): Failed create vApp {}: "
1868 "Failed retrieve flavor information "
1869 "flavor id {}".format(name, flavor_id)
1870 )
1871 else:
1872 try:
1873 flavor = vimconnector.flavorlist[flavor_id]
1874 vm_cpus = flavor[FLAVOR_VCPUS_KEY]
1875 vm_memory = flavor[FLAVOR_RAM_KEY]
1876 vm_disk = flavor[FLAVOR_DISK_KEY]
1877 extended = flavor.get("extended", None)
1878
1879 if extended:
1880 numas = extended.get("numas", None)
1881 except Exception as exp:
1882 raise vimconn.VimConnException(
1883 "Corrupted flavor. {}.Exception: {}".format(flavor_id, exp)
1884 )
1885
1886 # image upload creates the template name as the catalog name followed by " Template".
1887 templateName = self.get_catalogbyid(catalog_uuid=image_id, catalogs=catalogs)
1888 # power_on = 'false'
1889 # if start:
1890 # power_on = 'true'
1891
1892 # client must provide at least one entry in net_list; if not, we report an error
1893 # If a net's use is mgmt, configure it as the primary net & use its NIC index as the primary NIC
1894 # If no mgmt net is present, the first net in net_list is considered the primary net.
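# e.g. (illustrative) net_list = [{"use": "data", ...}, {"use": "mgmt", ...}] -> the "mgmt"
# entry becomes primary_net, and its NIC index is later used as primary_nic_index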
1895 primary_net = None
1896 primary_netname = None
1897 primary_net_href = None
1898 # network_mode = 'bridged'
1899 if net_list is not None and len(net_list) > 0:
1900 for net in net_list:
1901 if "use" in net and net["use"] == "mgmt" and not primary_net:
1902 primary_net = net
1903
1904 if primary_net is None:
1905 primary_net = net_list[0]
1906
1907 try:
1908 primary_net_id = primary_net["net_id"]
1909 url_list = [self.url, "/api/network/", primary_net_id]
1910 primary_net_href = "".join(url_list)
1911 network_dict = self.get_vcd_network(network_uuid=primary_net_id)
1912
1913 if "name" in network_dict:
1914 primary_netname = network_dict["name"]
1915 except KeyError:
1916 raise vimconn.VimConnException(
1917 "Corrupted flavor. {}".format(primary_net)
1918 )
1919 else:
1920 raise vimconn.VimConnUnexpectedResponse(
1921 "new_vminstance(): Failed network list is empty."
1922 )
1923
1924 # use: 'data', 'bridge', 'mgmt'
1925 # create vApp. Set vcpu and ram based on flavor id.
1926 try:
1927 vdc_obj = VDC(self.client, resource=org.get_vdc(self.tenant_name))
1928 if not vdc_obj:
1929 raise vimconn.VimConnNotFoundException(
1930 "new_vminstance(): Failed to get VDC object"
1931 )
1932
1933 for retry in (1, 2):
1934 items = org.get_catalog_item(catalog_hash_name, catalog_hash_name)
1935 catalog_items = [items.attrib]
1936
1937 if len(catalog_items) == 1:
1938 if self.client:
1939 headers = {
1940 "Accept": "application/*+xml;version=" + API_VERSION,
1941 "x-vcloud-authorization": self.client._session.headers[
1942 "x-vcloud-authorization"
1943 ],
1944 }
1945
1946 response = self.perform_request(
1947 req_type="GET",
1948 url=catalog_items[0].get("href"),
1949 headers=headers,
1950 )
1951 catalogItem = XmlElementTree.fromstring(response.text)
1952 entity = [
1953 child
1954 for child in catalogItem
1955 if child.get("type")
1956 == "application/vnd.vmware.vcloud.vAppTemplate+xml"
1957 ][0]
1958 vapp_template_href = entity.get("href")
1959
1960 response = self.perform_request(
1961 req_type="GET", url=vapp_tempalte_href, headers=headers
1962 )
1963
1964 if response.status_code != requests.codes.ok:
1965 self.logger.debug(
1966 "REST API call {} failed. Return status code {}".format(
1967 vapp_template_href, response.status_code
1968 )
1969 )
1970 else:
1971 result = (response.text).replace("\n", " ")
1972
1973 vapp_template_tree = XmlElementTree.fromstring(response.text)
1974 children_element = [
1975 child for child in vapp_template_tree if "Children" in child.tag
1976 ][0]
1977 vm_element = [child for child in children_element if "Vm" in child.tag][
1978 0
1979 ]
1980 vm_name = vm_element.get("name")
1981 vm_id = vm_element.get("id")
1982 vm_href = vm_element.get("href")
1983
1984 # cpus = re.search('<rasd:Description>Number of Virtual CPUs</.*?>(\d+)</rasd:VirtualQuantity>',
1985 # result).group(1)
1986 memory_mb = re.search(
1987 "<rasd:Description>Memory Size</.*?>(\d+)</rasd:VirtualQuantity>",
1988 result,
1989 ).group(1)
1990 # cores = re.search('<vmw:CoresPerSocket ovf:required.*?>(\d+)</vmw:CoresPerSocket>', result).group(1)
1991
1992 headers[
1993 "Content-Type"
1994 ] = "application/vnd.vmware.vcloud.instantiateVAppTemplateParams+xml"
1995 vdc_id = vdc.get("id").split(":")[-1]
1996 instantiate_vapp_href = (
1997 "{}/api/vdc/{}/action/instantiateVAppTemplate".format(
1998 self.url, vdc_id
1999 )
2000 )
2001
2002 with open(
2003 os.path.join(
2004 os.path.dirname(__file__), "InstantiateVAppTemplateParams.xml"
2005 ),
2006 "r",
2007 ) as f:
2008 template = f.read()
2009
2010 data = template.format(
2011 vmname_andid,
2012 primary_netname,
2013 primary_net_href,
2014 vapp_template_href,
2015 vm_href,
2016 vm_id,
2017 vm_name,
2018 primary_netname,
2019 cpu=vm_cpus,
2020 core=1,
2021 memory=vm_memory,
2022 )
2023
2024 response = self.perform_request(
2025 req_type="POST",
2026 url=instantiate_vapp_href,
2027 headers=headers,
2028 data=data,
2029 )
2030
2031 if response.status_code != 201:
2032 self.logger.error(
2033 "REST call {} failed reason : {}"
2034 "status code : {}".format(
2035 instantiate_vapp_href, response.text, response.status_code
2036 )
2037 )
2038 raise vimconn.VimConnException(
2039 "new_vminstance(): Failed to create"
2040 "vAapp {}".format(vmname_andid)
2041 )
2042 else:
2043 vapptask = self.get_task_from_response(response.text)
2044
2045 if vapptask is None and retry == 1:
2046 self.get_token() # Retry getting token
2047 continue
2048 else:
2049 break
2050
2051 if vapptask is None or vapptask is False:
2052 raise vimconn.VimConnUnexpectedResponse(
2053 "new_vminstance(): failed to create vApp {}".format(vmname_andid)
2054 )
2055
2056 # wait for task to complete
2057 result = self.client.get_task_monitor().wait_for_success(task=vapptask)
2058
2059 if result.get("status") == "success":
2060 self.logger.debug(
2061 "new_vminstance(): Sucessfully created Vapp {}".format(vmname_andid)
2062 )
2063 else:
2064 raise vimconn.VimConnUnexpectedResponse(
2065 "new_vminstance(): failed to create vApp {}".format(vmname_andid)
2066 )
2067 except Exception as exp:
2068 raise vimconn.VimConnUnexpectedResponse(
2069 "new_vminstance(): failed to create vApp {} with Exception:{}".format(
2070 vmname_andid, exp
2071 )
2072 )
2073
2074 # we should have now vapp in undeployed state.
2075 try:
2076 vdc_obj = VDC(self.client, href=vdc.get("href"))
2077 vapp_resource = vdc_obj.get_vapp(vmname_andid)
2078 vapp_uuid = vapp_resource.get("id").split(":")[-1]
2079 vapp = VApp(self.client, resource=vapp_resource)
2080 except Exception as exp:
2081 raise vimconn.VimConnUnexpectedResponse(
2082 "new_vminstance(): Failed to retrieve vApp {} after creation: Exception:{}".format(
2083 vmname_andid, exp
2084 )
2085 )
2086
2087 if vapp_uuid is None:
2088 raise vimconn.VimConnUnexpectedResponse(
2089 "new_vminstance(): Failed to retrieve vApp {} after creation".format(
2090 vmname_andid
2091 )
2092 )
2093
2094 # Add PCI passthrough/SR-IOV configurations
2095 pci_devices_info = []
2096 reserve_memory = False
2097
2098 for net in net_list:
2099 if net["type"] == "PF" or net["type"] == "PCI-PASSTHROUGH":
2100 pci_devices_info.append(net)
2101 elif (
2102 net["type"] == "VF"
2103 or net["type"] == "SR-IOV"
2104 or net["type"] == "VFnotShared"
2105 ) and "net_id" in net:
2106 reserve_memory = True
2107
2108 # Add PCI
2109 if len(pci_devices_info) > 0:
2110 self.logger.info(
2111 "Need to add PCI devices {} into VM {}".format(
2112 pci_devices_info, vmname_andid
2113 )
2114 )
2115 PCI_devices_status, _, _ = self.add_pci_devices(
2116 vapp_uuid, pci_devices_info, vmname_andid
2117 )
2118
2119 if PCI_devices_status:
2120 self.logger.info(
2121 "Added PCI devives {} to VM {}".format(
2122 pci_devices_info, vmname_andid
2123 )
2124 )
2125 reserve_memory = True
2126 else:
2127 self.logger.info(
2128 "Fail to add PCI devives {} to VM {}".format(
2129 pci_devices_info, vmname_andid
2130 )
2131 )
2132
2133 # Add serial console - this allows cloud images to boot as if we are running under OpenStack
2134 self.add_serial_device(vapp_uuid)
2135
2136 if vm_disk:
2137 # Assuming there is only one disk in ovf and fast provisioning in organization vDC is disabled
2138 result = self.modify_vm_disk(vapp_uuid, vm_disk)
2139 if result:
2140 self.logger.debug("Modified Disk size of VM {} ".format(vmname_andid))
2141
2142 # Add new or existing disks to vApp
2143 if disk_list:
2144 added_existing_disk = False
2145 for disk in disk_list:
2146 if "device_type" in disk and disk["device_type"] == "cdrom":
2147 image_id = disk["image_id"]
2148 # Adding CD-ROM to VM
2149 # will revisit code once specification ready to support this feature
2150 self.insert_media_to_vm(vapp, image_id)
2151 elif "image_id" in disk and disk["image_id"] is not None:
2152 self.logger.debug(
2153 "Adding existing disk from image {} to vm {} ".format(
2154 disk["image_id"], vapp_uuid
2155 )
2156 )
2157 self.add_existing_disk(
2158 catalogs=catalogs,
2159 image_id=disk["image_id"],
2160 size=disk["size"],
2161 template_name=templateName,
2162 vapp_uuid=vapp_uuid,
2163 )
2164 added_existing_disk = True
2165 else:
2166 # Wait till added existing disk gets reflected into vCD database/API
2167 if added_existing_disk:
2168 time.sleep(5)
2169 added_existing_disk = False
2170 self.add_new_disk(vapp_uuid, disk["size"])
2171
2172 if numas:
2173 # Assigning numa affinity setting
2174 for numa in numas:
2175 if "paired-threads-id" in numa:
2176 paired_threads_id = numa["paired-threads-id"]
2177 self.set_numa_affinity(vapp_uuid, paired_threads_id)
2178
2179 # add NICs & connect to networks in netlist
2180 try:
2181 vdc_obj = VDC(self.client, href=vdc.get("href"))
2182 vapp_resource = vdc_obj.get_vapp(vmname_andid)
2183 vapp = VApp(self.client, resource=vapp_resource)
2184 vapp_id = vapp_resource.get("id").split(":")[-1]
2185
2186 self.logger.info("Removing primary NIC: ")
2187 # First remove all NICs so that NIC properties can be adjusted as needed
2188 self.remove_primary_network_adapter_from_all_vms(vapp)
2189
2190 self.logger.info("Request to connect VM to a network: {}".format(net_list))
2191 primary_nic_index = 0
2192 nicIndex = 0
2193 for net in net_list:
2194 # openmano uses network id in UUID format.
2195 # vCloud Director needs a name, so we do the reverse operation: from the provided UUID we look up a name
2196 # [{'use': 'bridge', 'net_id': '527d4bf7-566a-41e7-a9e7-ca3cdd9cef4f', 'type': 'virtual',
2197 # 'vpci': '0000:00:11.0', 'name': 'eth0'}]
2198
2199 if "net_id" not in net:
2200 continue
2201
2202 # Using net_id as the vim_id, i.e. VIM interface id, as we do not have a separate VIM interface id
2203 # Same will be returned in refresh_vms_status() as vim_interface_id
2204 net["vim_id"] = net[
2205 "net_id"
2206 ] # Provide the same VIM identifier as the VIM network
2207
2208 interface_net_id = net["net_id"]
2209 interface_net_name = self.get_network_name_by_id(
2210 network_uuid=interface_net_id
2211 )
2212 interface_network_mode = net["use"]
2213
2214 if interface_network_mode == "mgmt":
2215 primary_nic_index = nicIndex
2216
2217 """- POOL (A static IP address is allocated automatically from a pool of addresses.)
2218 - DHCP (The IP address is obtained from a DHCP service.)
2219 - MANUAL (The IP address is assigned manually in the IpAddress element.)
2220 - NONE (No IP addressing mode specified.)"""
2221
2222 if primary_netname is not None:
2223 self.logger.debug(
2224 "new_vminstance(): Filtering by net name {}".format(
2225 interface_net_name
2226 )
2227 )
2228 nets = [
2229 n
2230 for n in self.get_network_list()
2231 if n.get("name") == interface_net_name
2232 ]
2233
2234 if len(nets) == 1:
2235 self.logger.info(
2236 "new_vminstance(): Found requested network: {}".format(
2237 nets[0].get("name")
2238 )
2239 )
2240
2241 if interface_net_name != primary_netname:
2242 # connect network to VM - with all DHCP by default
2243 self.logger.info(
2244 "new_vminstance(): Attaching net {} to vapp".format(
2245 interface_net_name
2246 )
2247 )
2248 self.connect_vapp_to_org_vdc_network(
2249 vapp_id, nets[0].get("name")
2250 )
2251
2252 type_list = ("PF", "PCI-PASSTHROUGH", "VFnotShared")
2253 nic_type = "VMXNET3"
2254 if "type" in net and net["type"] not in type_list:
2255 # fetching nic type from vnf
2256 if "model" in net:
2257 if net["model"] is not None:
2258 if (
2259 net["model"].lower() == "paravirt"
2260 or net["model"].lower() == "virtio"
2261 ):
2262 nic_type = "VMXNET3"
2263 else:
2264 nic_type = net["model"]
2265
2266 self.logger.info(
2267 "new_vminstance(): adding network adapter "
2268 "to a network {}".format(nets[0].get("name"))
2269 )
2270 self.add_network_adapter_to_vms(
2271 vapp,
2272 nets[0].get("name"),
2273 primary_nic_index,
2274 nicIndex,
2275 net,
2276 nic_type=nic_type,
2277 )
2278 else:
2279 self.logger.info(
2280 "new_vminstance(): adding network adapter "
2281 "to a network {}".format(nets[0].get("name"))
2282 )
2283
2284 if net["type"] in ["SR-IOV", "VF"]:
2285 nic_type = net["type"]
2286 self.add_network_adapter_to_vms(
2287 vapp,
2288 nets[0].get("name"),
2289 primary_nic_index,
2290 nicIndex,
2291 net,
2292 nic_type=nic_type,
2293 )
2294 nicIndex += 1
2295
2296 # cloud-init for ssh-key injection
2297 if cloud_config:
2298 # Create a catalog which will be carrying the config drive ISO
2299 # This catalog is deleted during vApp deletion. The catalog name carries
2300 # the vApp UUID and that is how it gets identified during its deletion.
2301 config_drive_catalog_name = "cfg_drv-" + vapp_uuid
2302 self.logger.info(
2303 'new_vminstance(): Creating catalog "{}" to carry config drive ISO'.format(
2304 config_drive_catalog_name
2305 )
2306 )
2307 config_drive_catalog_id = self.create_vimcatalog(
2308 org, config_drive_catalog_name
2309 )
2310
2311 if config_drive_catalog_id is None:
2312 error_msg = (
2313 "new_vminstance(): Failed to create new catalog '{}' to carry the config drive "
2314 "ISO".format(config_drive_catalog_name)
2315 )
2316 raise Exception(error_msg)
2317
2318 # Create config-drive ISO
2319 _, userdata = self._create_user_data(cloud_config)
2320 # self.logger.debug('new_vminstance(): The userdata for cloud-init: {}'.format(userdata))
2321 iso_path = self.create_config_drive_iso(userdata)
2322 self.logger.debug(
2323 "new_vminstance(): The ISO is successfully created. Path: {}".format(
2324 iso_path
2325 )
2326 )
2327
2328 self.logger.info(
2329 "new_vminstance(): uploading iso to catalog {}".format(
2330 config_drive_catalog_name
2331 )
2332 )
2333 self.upload_iso_to_catalog(config_drive_catalog_id, iso_path)
2334 # Attach the config-drive ISO to the VM
2335 self.logger.info(
2336 "new_vminstance(): Attaching the config-drive ISO to the VM"
2337 )
2338 self.insert_media_to_vm(vapp, config_drive_catalog_id)
2339 shutil.rmtree(os.path.dirname(iso_path), ignore_errors=True)
2340
2341 # If VM has PCI devices or SRIOV reserve memory for VM
2342 if reserve_memory:
2343 self.reserve_memory_for_all_vms(vapp, memory_mb)
2344
2345 self.logger.debug(
2346 "new_vminstance(): starting power on vApp {} ".format(vmname_andid)
2347 )
2348
2349 poweron_task = self.power_on_vapp(vapp_id, vmname_andid)
2350 result = self.client.get_task_monitor().wait_for_success(task=poweron_task)
2351 if result.get("status") == "success":
2352 self.logger.info(
2353 "new_vminstance(): Successfully power on "
2354 "vApp {}".format(vmname_andid)
2355 )
2356 else:
2357 self.logger.error(
2358 "new_vminstance(): failed to power on vApp "
2359 "{}".format(vmname_andid)
2360 )
2361
2362 except Exception as exp:
2363 try:
2364 self.delete_vminstance(vapp_uuid)
2365 except Exception as exp2:
2366 self.logger.error("new_vminstance rollback fail {}".format(exp2))
2367 # it might be a case if specific mandatory entry in dict is empty or some other pyVcloud exception
2368 self.logger.error(
2369 "new_vminstance(): Failed create new vm instance {} with exception {}".format(
2370 name, exp
2371 )
2372 )
2373 raise vimconn.VimConnException(
2374 "new_vminstance(): Failed create new vm instance {} with exception {}".format(
2375 name, exp
2376 )
2377 )
2378 # check if vApp deployed and if that the case return vApp UUID otherwise -1
2379 wait_time = 0
2380 vapp_uuid = None
2381 while wait_time <= MAX_WAIT_TIME:
2382 try:
2383 vapp_resource = vdc_obj.get_vapp(vmname_andid)
2384 vapp = VApp(self.client, resource=vapp_resource)
2385 except Exception as exp:
2386 raise vimconn.VimConnUnexpectedResponse(
2387 "new_vminstance(): Failed to retrieve vApp {} after creation: Exception:{}".format(
2388 vmname_andid, exp
2389 )
2390 )
2391
2392 # if vapp and vapp.me.deployed:
2393 if vapp and vapp_resource.get("deployed") == "true":
2394 vapp_uuid = vapp_resource.get("id").split(":")[-1]
2395 break
2396 else:
2397 self.logger.debug(
2398 "new_vminstance(): Wait for vApp {} to deploy".format(name)
2399 )
2400 time.sleep(INTERVAL_TIME)
2401
2402 wait_time += INTERVAL_TIME
2403
2404 # SET Affinity Rule for VM
2405 # Pre-requisites: User has created Host Groups in vCenter with the respective hosts to be used
2406 # While creating VIM account user has to pass the Host Group names in availability_zone list
2407 # "availability_zone" is a part of VIM "config" parameters
2408 # For example, in VIM config: "availability_zone":["HG_170","HG_174","HG_175"]
2409 # Host groups are referred as availability zones
2410 # With following procedure, deployed VM will be added into a VM group.
2411 # Then A VM to Host Affinity rule will be created using the VM group & Host group.
2412 if availability_zone_list:
2413 self.logger.debug(
2414 "Existing Host Groups in VIM {}".format(
2415 self.config.get("availability_zone")
2416 )
2417 )
2418 # Admin access required for creating Affinity rules
2419 client = self.connect_as_admin()
2420
2421 if not client:
2422 raise vimconn.VimConnConnectionException(
2423 "Failed to connect vCD as admin"
2424 )
2425 else:
2426 self.client = client
2427
2428 if self.client:
2429 headers = {
2430 "Accept": "application/*+xml;version=27.0",
2431 "x-vcloud-authorization": self.client._session.headers[
2432 "x-vcloud-authorization"
2433 ],
2434 }
2435
2436 # Step1: Get provider vdc details from organization
2437 pvdc_href = self.get_pvdc_for_org(self.tenant_name, headers)
2438 if pvdc_href is not None:
2439 # Step2: Found required pvdc, now get resource pool information
2440 respool_href = self.get_resource_pool_details(pvdc_href, headers)
2441 if respool_href is None:
2442 # Raise error if respool_href not found
2443 msg = "new_vminstance():Error in finding resource pool details in pvdc {}".format(
2444 pvdc_href
2445 )
2446 self.log_message(msg)
2447
2448 # Step3: Verify requested availability zone(hostGroup) is present in vCD
2449 # get availability Zone
2450 vm_az = self.get_vm_availability_zone(
2451 availability_zone_index, availability_zone_list
2452 )
2453
2454 # check if provided av zone(hostGroup) is present in vCD VIM
2455 status = self.check_availibility_zone(vm_az, respool_href, headers)
2456 if status is False:
2457 msg = (
2458 "new_vminstance(): Error in finding availability zone(Host Group): {} in "
2459 "resource pool {} status: {}"
2460 ).format(vm_az, respool_href, status)
2461 self.log_message(msg)
2462 else:
2463 self.logger.debug(
2464 "new_vminstance(): Availability zone {} found in VIM".format(vm_az)
2465 )
2466
2467 # Step4: Find VM group references to create vm group
2468 vmgrp_href = self.find_vmgroup_reference(respool_href, headers)
2469 if vmgrp_href is None:
2470 msg = "new_vminstance(): No reference to VmGroup found in resource pool"
2471 self.log_message(msg)
2472
2473 # Step5: Create a VmGroup with name az_VmGroup
2474 vmgrp_name = (
2475 vm_az + "_" + name
2476 ) # Formed VM Group name = Host Group name + VM name
2477 status = self.create_vmgroup(vmgrp_name, vmgrp_href, headers)
2478 if status is not True:
2479 msg = "new_vminstance(): Error in creating VM group {}".format(
2480 vmgrp_name
2481 )
2482 self.log_message(msg)
2483
2484 # VM Group url to add vms to vm group
2485 vmgrpname_url = self.url + "/api/admin/extension/vmGroup/name/" + vmgrp_name
2486
2487 # Step6: Add VM to VM Group
2488 # Find VM uuid from vapp_uuid
2489 vm_details = self.get_vapp_details_rest(vapp_uuid)
2490 vm_uuid = vm_details["vmuuid"]
2491
2492 status = self.add_vm_to_vmgroup(vm_uuid, vmgrpname_url, vmgrp_name, headers)
2493 if status is not True:
2494 msg = "new_vminstance(): Error in adding VM to VM group {}".format(
2495 vmgrp_name
2496 )
2497 self.log_message(msg)
2498
2499 # Step7: Create VM to Host affinity rule
2500 addrule_href = self.get_add_rule_reference(respool_href, headers)
2501 if addrule_href is None:
2502 msg = "new_vminstance(): Error in finding href to add rule in resource pool: {}".format(
2503 respool_href
2504 )
2505 self.log_message(msg)
2506
2507 status = self.create_vm_to_host_affinity_rule(
2508 addrule_href, vmgrp_name, vm_az, "Affinity", headers
2509 )
2510 if status is False:
2511 msg = "new_vminstance(): Error in creating affinity rule for VM {} in Host group {}".format(
2512 name, vm_az
2513 )
2514 self.log_message(msg)
2515 else:
2516 self.logger.debug(
2517 "new_vminstance(): Affinity rule created successfully. Added {} in Host group {}".format(
2518 name, vm_az
2519 )
2520 )
2521 # Reset token to a normal user to perform other operations
2522 self.get_token()
2523
2524 if vapp_uuid is not None:
2525 return vapp_uuid, None
2526 else:
2527 raise vimconn.VimConnUnexpectedResponse(
2528 "new_vminstance(): Failed create new vm instance {}".format(name)
2529 )
2530
2531 def create_config_drive_iso(self, user_data):
2532 tmpdir = tempfile.mkdtemp()
2533 iso_path = os.path.join(tmpdir, "ConfigDrive.iso")
2534 latest_dir = os.path.join(tmpdir, "openstack", "latest")
2535 os.makedirs(latest_dir)
2536 with open(
2537 os.path.join(latest_dir, "meta_data.json"), "w"
2538 ) as meta_file_obj, open(
2539 os.path.join(latest_dir, "user_data"), "w"
2540 ) as userdata_file_obj:
2541 userdata_file_obj.write(user_data)
2542 meta_file_obj.write(
2543 json.dumps(
2544 {
2545 "availability_zone": "nova",
2546 "launch_index": 0,
2547 "name": "ConfigDrive",
2548 "uuid": str(uuid.uuid4()),
2549 }
2550 )
2551 )
2552 genisoimage_cmd = (
2553 "genisoimage -J -r -V config-2 -o {iso_path} {source_dir_path}".format(
2554 iso_path=iso_path, source_dir_path=tmpdir
2555 )
2556 )
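# The directory laid out above gives the ISO the standard OpenStack config-drive structure,
# i.e. (for this code) <tmpdir>/openstack/latest/meta_data.json and .../user_data, with the
# volume label "config-2" so that cloud-init can detect it as a config drive.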
2557 self.logger.info(
2558 'create_config_drive_iso(): Creating ISO by running command "{}"'.format(
2559 genisoimage_cmd
2560 )
2561 )
2562
2563 try:
2564 FNULL = open(os.devnull, "w")
2565 subprocess.check_call(genisoimage_cmd, shell=True, stdout=FNULL)
2566 except subprocess.CalledProcessError as e:
2567 shutil.rmtree(tmpdir, ignore_errors=True)
2568 error_msg = "create_config_drive_iso(): Exception while running genisoimage command: {}".format(
2569 e
2570 )
2571 self.logger.error(error_msg)
2572 raise Exception(error_msg)
2573
2574 return iso_path
2575
2576 def upload_iso_to_catalog(self, catalog_id, iso_file_path):
2577 if not os.path.isfile(iso_file_path):
2578 error_msg = "upload_iso_to_catalog(): Given iso file is not present. Given path: {}".format(
2579 iso_file_path
2580 )
2581 self.logger.error(error_msg)
2582 raise Exception(error_msg)
2583
2584 iso_file_stat = os.stat(iso_file_path)
2585 xml_media_elem = """<?xml version="1.0" encoding="UTF-8"?>
2586 <Media
2587 xmlns="http://www.vmware.com/vcloud/v1.5"
2588 name="{iso_name}"
2589 size="{iso_size}"
2590 imageType="iso">
2591 <Description>ISO image for config-drive</Description>
2592 </Media>""".format(
2593 iso_name=os.path.basename(iso_file_path), iso_size=iso_file_stat.st_size
2594 )
2595 headers = {
2596 "Accept": "application/*+xml;version=" + API_VERSION,
2597 "x-vcloud-authorization": self.client._session.headers[
2598 "x-vcloud-authorization"
2599 ],
2600 }
2601 headers["Content-Type"] = "application/vnd.vmware.vcloud.media+xml"
2602 catalog_href = self.url + "/api/catalog/" + catalog_id + "/action/upload"
2603 response = self.perform_request(
2604 req_type="POST", url=catalog_href, headers=headers, data=xml_media_elem
2605 )
2606
2607 if response.status_code != 201:
2608 error_msg = "upload_iso_to_catalog(): Failed to POST an action/upload request to {}".format(
2609 catalog_href
2610 )
2611 self.logger.error(error_msg)
2612 raise Exception(error_msg)
2613
2614 catalogItem = XmlElementTree.fromstring(response.text)
2615 entity = [
2616 child
2617 for child in catalogItem
2618 if child.get("type") == "application/vnd.vmware.vcloud.media+xml"
2619 ][0]
2620 entity_href = entity.get("href")
2621
2622 response = self.perform_request(
2623 req_type="GET", url=entity_href, headers=headers
2624 )
2625 if response.status_code != 200:
2626 raise Exception(
2627 "upload_iso_to_catalog(): Failed to GET entity href {}".format(
2628 entity_href
2629 )
2630 )
2631
2632 match = re.search(
2633 r'<Files>\s+?<File.+?href="(.+?)"/>\s+?</File>\s+?</Files>',
2634 response.text,
2635 re.DOTALL,
2636 )
2637 if match:
2638 media_upload_href = match.group(1)
2639 else:
2640 raise Exception(
2641 "Could not parse the upload URL for the media file from the last response"
2642 )
2643 upload_iso_task = self.get_task_from_response(response.text)
2644 headers["Content-Type"] = "application/octet-stream"
2645 response = self.perform_request(
2646 req_type="PUT",
2647 url=media_upload_href,
2648 headers=headers,
2649 data=open(iso_file_path, "rb"),
2650 )
2651
2652 if response.status_code != 200:
2653 raise Exception('PUT request to "{}" failed'.format(media_upload_href))
2654
2655 result = self.client.get_task_monitor().wait_for_success(task=upload_iso_task)
2656 if result.get("status") != "success":
2657 raise Exception(
2658 "The upload iso task failed with status {}".format(result.get("status"))
2659 )
2660
2661 def set_availability_zones(self):
2662 """
2663 Set vim availability zone
2664 """
2665 vim_availability_zones = None
2666 availability_zone = None
2667
2668 if "availability_zone" in self.config:
2669 vim_availability_zones = self.config.get("availability_zone")
2670
2671 if isinstance(vim_availability_zones, str):
2672 availability_zone = [vim_availability_zones]
2673 elif isinstance(vim_availability_zones, list):
2674 availability_zone = vim_availability_zones
2675 else:
2676 return availability_zone
2677
2678 return availability_zone
2679
2680 def get_vm_availability_zone(self, availability_zone_index, availability_zone_list):
2681 """
2682 Return the availability zone to be used by the created VM.
2683 returns: The VIM availability zone to be used or None
2684 """
2685 if availability_zone_index is None:
2686 if not self.config.get("availability_zone"):
2687 return None
2688 elif isinstance(self.config.get("availability_zone"), str):
2689 return self.config["availability_zone"]
2690 else:
2691 return self.config["availability_zone"][0]
2692
2693 vim_availability_zones = self.availability_zone
2694
2695 # check if the VIM offers enough availability zones as described in the VNFD
2696 if vim_availability_zones and len(availability_zone_list) <= len(
2697 vim_availability_zones
2698 ):
2699 # check if all the NFV availability zone names match VIM availability zone names
2700 match_by_index = False
2701 for av in availability_zone_list:
2702 if av not in vim_availability_zones:
2703 match_by_index = True
2704 break
2705
2706 if match_by_index:
2707 self.logger.debug(
2708 "Required Availability zone or Host Group not found in VIM config"
2709 )
2710 self.logger.debug(
2711 "Input Availability zone list: {}".format(availability_zone_list)
2712 )
2713 self.logger.debug(
2714 "VIM configured Availability zones: {}".format(
2715 vim_availability_zones
2716 )
2717 )
2718 self.logger.debug("VIM Availability zones will be used by index")
2719 return vim_availability_zones[availability_zone_index]
2720 else:
2721 return availability_zone_list[availability_zone_index]
2722 else:
2723 raise vimconn.VimConnConflictException(
2724 "No enough availability zones at VIM for this deployment"
2725 )
2726
2727 def create_vm_to_host_affinity_rule(
2728 self, addrule_href, vmgrpname, hostgrpname, polarity, headers
2729 ):
2730 """Method to create VM to Host Affinity rule in vCD
2731
2732 Args:
2733 addrule_href - href to make a POST request
2734 vmgrpname - name of the VM group created
2735 hostgrpname - name of the host group created earlier
2736 polarity - Affinity or Anti-affinity (default: Affinity)
2737 headers - headers to make REST call
2738
2739 Returns:
2740 True- if rule is created
2741 False- Failed to create rule due to some error
2742
2743 """
2744 task_status = False
2745 rule_name = polarity + "_" + vmgrpname
2746 payload = """<?xml version="1.0" encoding="UTF-8"?>
2747 <vmext:VMWVmHostAffinityRule
2748 xmlns:vmext="http://www.vmware.com/vcloud/extension/v1.5"
2749 xmlns:vcloud="http://www.vmware.com/vcloud/v1.5"
2750 type="application/vnd.vmware.admin.vmwVmHostAffinityRule+xml">
2751 <vcloud:Name>{}</vcloud:Name>
2752 <vcloud:IsEnabled>true</vcloud:IsEnabled>
2753 <vcloud:IsMandatory>true</vcloud:IsMandatory>
2754 <vcloud:Polarity>{}</vcloud:Polarity>
2755 <vmext:HostGroupName>{}</vmext:HostGroupName>
2756 <vmext:VmGroupName>{}</vmext:VmGroupName>
2757 </vmext:VMWVmHostAffinityRule>""".format(
2758 rule_name, polarity, hostgrpname, vmgrpname
2759 )
2760
2761 resp = self.perform_request(
2762 req_type="POST", url=addrule_href, headers=headers, data=payload
2763 )
2764
2765 if resp.status_code != requests.codes.accepted:
2766 self.logger.debug(
2767 "REST API call {} failed. Return status code {}".format(
2768 addrule_href, resp.status_code
2769 )
2770 )
2771 task_status = False
2772
2773 return task_status
2774 else:
2775 affinity_task = self.get_task_from_response(resp.content)
2776 self.logger.debug("affinity_task: {}".format(affinity_task))
2777
2778 if affinity_task is None or affinity_task is False:
2779 raise vimconn.VimConnUnexpectedResponse("failed to find affinity task")
2780 # wait for task to complete
2781 result = self.client.get_task_monitor().wait_for_success(task=affinity_task)
2782
2783 if result.get("status") == "success":
2784 self.logger.debug(
2785 "Successfully created affinity rule {}".format(rule_name)
2786 )
2787 return True
2788 else:
2789 raise vimconn.VimConnUnexpectedResponse(
2790 "failed to create affinity rule {}".format(rule_name)
2791 )
2792
2793 def get_add_rule_reference(self, respool_href, headers):
2794 """This method finds href to add vm to host affinity rule to vCD
2795
2796 Args:
2797 respool_href- href to resource pool
2798 headers- header information to make REST call
2799
2800 Returns:
2801 None - if no valid href to add rule found or
2802 addrule_href - href to add vm to host affinity rule of resource pool
2803 """
2804 addrule_href = None
2805 resp = self.perform_request(req_type="GET", url=respool_href, headers=headers)
2806
2807 if resp.status_code != requests.codes.ok:
2808 self.logger.debug(
2809 "REST API call {} failed. Return status code {}".format(
2810 respool_href, resp.status_code
2811 )
2812 )
2813 else:
2814 resp_xml = XmlElementTree.fromstring(resp.content)
2815 for child in resp_xml:
2816 if "VMWProviderVdcResourcePool" in child.tag:
2817 for schild in child:
2818 if "Link" in schild.tag:
2819 if (
2820 schild.attrib.get("type")
2821 == "application/vnd.vmware.admin.vmwVmHostAffinityRule+xml"
2822 and schild.attrib.get("rel") == "add"
2823 ):
2824 addrule_href = schild.attrib.get("href")
2825 break
2826
2827 return addrule_href
2828
2829 def add_vm_to_vmgroup(self, vm_uuid, vmGroupNameURL, vmGroup_name, headers):
2830 """Method to add deployed VM to newly created VM Group.
2831 This is required to create VM to Host affinity in vCD
2832
2833 Args:
2834 vm_uuid- newly created vm uuid
2835 vmGroupNameURL- URL to VM Group name
2836 vmGroup_name- Name of VM group created
2837 headers- Headers for REST request
2838
2839 Returns:
2840 True- if VM added to VM group successfully
2841 False - if any error is encountered
2842 """
2843 addvm_resp = self.perform_request(
2844 req_type="GET", url=vmGroupNameURL, headers=headers
2845 ) # , data=payload)
2846
2847 if addvm_resp.status_code != requests.codes.ok:
2848 self.logger.debug(
2849 "REST API call to get VM Group Name url {} failed. Return status code {}".format(
2850 vmGroupNameURL, addvm_resp.status_code
2851 )
2852 )
2853 return False
2854 else:
2855 resp_xml = XmlElementTree.fromstring(addvm_resp.content)
2856 for child in resp_xml:
2857 if child.tag.split("}")[1] == "Link":
2858 if child.attrib.get("rel") == "addVms":
2859 addvmtogrpURL = child.attrib.get("href")
2860
2861 # Get vm details
2862 url_list = [self.url, "/api/vApp/vm-", vm_uuid]
2863 vmdetailsURL = "".join(url_list)
2864
2865 resp = self.perform_request(req_type="GET", url=vmdetailsURL, headers=headers)
2866
2867 if resp.status_code != requests.codes.ok:
2868 self.logger.debug(
2869 "REST API call {} failed. Return status code {}".format(
2870 vmdetailsURL, resp.status_code
2871 )
2872 )
2873 return False
2874
2875 # Parse VM details
2876 resp_xml = XmlElementTree.fromstring(resp.content)
2877 if resp_xml.tag.split("}")[1] == "Vm":
2878 vm_id = resp_xml.attrib.get("id")
2879 vm_name = resp_xml.attrib.get("name")
2880 vm_href = resp_xml.attrib.get("href")
2881 # print vm_id, vm_name, vm_href
2882
2883 # Add VM into VMgroup
2884 payload = """<?xml version="1.0" encoding="UTF-8"?>\
2885 <ns2:Vms xmlns:ns2="http://www.vmware.com/vcloud/v1.5" \
2886 xmlns="http://www.vmware.com/vcloud/versions" \
2887 xmlns:ns3="http://schemas.dmtf.org/ovf/envelope/1" \
2888 xmlns:ns4="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData" \
2889 xmlns:ns5="http://schemas.dmtf.org/wbem/wscim/1/common" \
2890 xmlns:ns6="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData" \
2891 xmlns:ns7="http://www.vmware.com/schema/ovf" \
2892 xmlns:ns8="http://schemas.dmtf.org/ovf/environment/1" \
2893 xmlns:ns9="http://www.vmware.com/vcloud/extension/v1.5">\
2894 <ns2:VmReference href="{}" id="{}" name="{}" \
2895 type="application/vnd.vmware.vcloud.vm+xml" />\
2896 </ns2:Vms>""".format(
2897 vm_href, vm_id, vm_name
2898 )
2899
2900 addvmtogrp_resp = self.perform_request(
2901 req_type="POST", url=addvmtogrpURL, headers=headers, data=payload
2902 )
2903
2904 if addvmtogrp_resp.status_code != requests.codes.accepted:
2905 self.logger.debug(
2906 "REST API call {} failed. Return status code {}".format(
2907 addvmtogrpURL, addvmtogrp_resp.status_code
2908 )
2909 )
2910
2911 return False
2912 else:
2913 self.logger.debug(
2914 "Done adding VM {} to VMgroup {}".format(vm_name, vmGroup_name)
2915 )
2916
2917 return True
2918
2919 def create_vmgroup(self, vmgroup_name, vmgroup_href, headers):
2920 """Method to create a VM group in vCD
2921
2922 Args:
2923 vmgroup_name : Name of VM group to be created
2924 vmgroup_href : href for vmgroup
2925 headers- Headers for REST request
2926 """
2927 # POST to add URL with required data
2928 vmgroup_status = False
2929 payload = """<VMWVmGroup xmlns="http://www.vmware.com/vcloud/extension/v1.5" \
2930 xmlns:vcloud_v1.5="http://www.vmware.com/vcloud/v1.5" name="{}">\
2931 <vmCount>1</vmCount>\
2932 </VMWVmGroup>""".format(
2933 vmgroup_name
2934 )
2935 resp = self.perform_request(
2936 req_type="POST", url=vmgroup_href, headers=headers, data=payload
2937 )
2938
2939 if resp.status_code != requests.codes.accepted:
2940 self.logger.debug(
2941 "REST API call {} failed. Return status code {}".format(
2942 vmgroup_href, resp.status_code
2943 )
2944 )
2945
2946 return vmgroup_status
2947 else:
2948 vmgroup_task = self.get_task_from_response(resp.content)
2949 if vmgroup_task is None or vmgroup_task is False:
2950 raise vimconn.VimConnUnexpectedResponse(
2951 "create_vmgroup(): failed to create VM group {}".format(
2952 vmgroup_name
2953 )
2954 )
2955
2956 # wait for task to complete
2957 result = self.client.get_task_monitor().wait_for_success(task=vmgroup_task)
2958
2959 if result.get("status") == "success":
2960 self.logger.debug(
2961 "create_vmgroup(): Successfully created VM group {}".format(
2962 vmgroup_name
2963 )
2964 )
2965 # time.sleep(10)
2966 vmgroup_status = True
2967
2968 return vmgroup_status
2969 else:
2970 raise vimconn.VimConnUnexpectedResponse(
2971 "create_vmgroup(): failed to create VM group {}".format(
2972 vmgroup_name
2973 )
2974 )
2975
2976 def find_vmgroup_reference(self, url, headers):
2977 """Method to create a new VMGroup which is required to add created VM
2978 Args:
2979 url- resource pool href
2980 headers- header information
2981
2982 Returns:
2983 vmgrp_href - href used to create the VM group, or None if not found
2984 """
2985 # Perform GET on resource pool to find 'add' link to create VMGroup
2986 # https://vcd-ip/api/admin/extension/providervdc/<providervdc id>/resourcePools
2987 vmgrp_href = None
2988 resp = self.perform_request(req_type="GET", url=url, headers=headers)
2989
2990 if resp.status_code != requests.codes.ok:
2991 self.logger.debug(
2992 "REST API call {} failed. Return status code {}".format(
2993 url, resp.status_code
2994 )
2995 )
2996 else:
2997 # Get the href to add vmGroup to vCD
2998 resp_xml = XmlElementTree.fromstring(resp.content)
2999 for child in resp_xml:
3000 if "VMWProviderVdcResourcePool" in child.tag:
3001 for schild in child:
3002 if "Link" in schild.tag:
3003 # Find href with type VMGroup and rel with add
3004 if (
3005 schild.attrib.get("type")
3006 == "application/vnd.vmware.admin.vmwVmGroupType+xml"
3007 and schild.attrib.get("rel") == "add"
3008 ):
3009 vmgrp_href = schild.attrib.get("href")
3010
3011 return vmgrp_href
3012
3013 def check_availibility_zone(self, az, respool_href, headers):
3014 """Method to verify requested av zone is present or not in provided
3015 resource pool
3016
3017 Args:
3018 az - name of hostgroup (availability_zone)
3019 respool_href - Resource Pool href
3020 headers - Headers to make REST call
3021 Returns:
3022 az_found - True if availability_zone is found else False
3023 """
3024 az_found = False
3025 headers["Accept"] = "application/*+xml;version=27.0"
3026 resp = self.perform_request(req_type="GET", url=respool_href, headers=headers)
3027
3028 if resp.status_code != requests.codes.ok:
3029 self.logger.debug(
3030 "REST API call {} failed. Return status code {}".format(
3031 respool_href, resp.status_code
3032 )
3033 )
3034 else:
3035 # Get the href to hostGroups and find provided hostGroup is present in it
3036 resp_xml = XmlElementTree.fromstring(resp.content)
3037
3038 for child in resp_xml:
3039 if "VMWProviderVdcResourcePool" in child.tag:
3040 for schild in child:
3041 if "Link" in schild.tag:
3042 if (
3043 schild.attrib.get("type")
3044 == "application/vnd.vmware.admin.vmwHostGroupsType+xml"
3045 ):
3046 hostGroup_href = schild.attrib.get("href")
3047 hg_resp = self.perform_request(
3048 req_type="GET", url=hostGroup_href, headers=headers
3049 )
3050
3051 if hg_resp.status_code != requests.codes.ok:
3052 self.logger.debug(
3053 "REST API call {} failed. Return status code {}".format(
3054 hostGroup_href, hg_resp.status_code
3055 )
3056 )
3057 else:
3058 hg_resp_xml = XmlElementTree.fromstring(
3059 hg_resp.content
3060 )
3061 for hostGroup in hg_resp_xml:
3062 if "HostGroup" in hostGroup.tag:
3063 if hostGroup.attrib.get("name") == az:
3064 az_found = True
3065 break
3066
3067 return az_found
3068
3069 def get_pvdc_for_org(self, org_vdc, headers):
3070 """This method gets provider vdc references from organisation
3071
3072 Args:
3073 org_vdc - name of the organisation VDC to find pvdc
3074 headers - headers to make REST call
3075
3076 Returns:
3077 None - if no pvdc href found else
3078 pvdc_href - href to pvdc
3079 """
3080 # Get provider VDC references from vCD
3081 pvdc_href = None
3082 # url = '<vcd url>/api/admin/extension/providerVdcReferences'
3083 url_list = [self.url, "/api/admin/extension/providerVdcReferences"]
3084 url = "".join(url_list)
3085
3086 response = self.perform_request(req_type="GET", url=url, headers=headers)
3087 if response.status_code != requests.codes.ok:
3088 self.logger.debug(
3089 "REST API call {} failed. Return status code {}".format(
3090 url, response.status_code
3091 )
3092 )
3093 else:
3094 xmlroot_response = XmlElementTree.fromstring(response.text)
3095 for child in xmlroot_response:
3096 if "ProviderVdcReference" in child.tag:
3097 pvdc_href = child.attrib.get("href")
3098 # Get vdcReferences to find org
3099 pvdc_resp = self.perform_request(
3100 req_type="GET", url=pvdc_href, headers=headers
3101 )
3102
3103 if pvdc_resp.status_code != requests.codes.ok:
3104 raise vimconn.VimConnException(
3105 "REST API call {} failed. "
3106 "Return status code {}".format(url, pvdc_resp.status_code)
3107 )
3108
3109 pvdc_resp_xml = XmlElementTree.fromstring(pvdc_resp.content)
3110 for child in pvdc_resp_xml:
3111 if "Link" in child.tag:
3112 if (
3113 child.attrib.get("type")
3114 == "application/vnd.vmware.admin.vdcReferences+xml"
3115 ):
3116 vdc_href = child.attrib.get("href")
3117
3118 # Check if provided org is present in vdc
3119 vdc_resp = self.perform_request(
3120 req_type="GET", url=vdc_href, headers=headers
3121 )
3122
3123 if vdc_resp.status_code != requests.codes.ok:
3124 raise vimconn.VimConnException(
3125 "REST API call {} failed. "
3126 "Return status code {}".format(
3127 url, vdc_resp.status_code
3128 )
3129 )
3130 vdc_resp_xml = XmlElementTree.fromstring(
3131 vdc_resp.content
3132 )
3133
3134 for child in vdc_resp_xml:
3135 if "VdcReference" in child.tag:
3136 if child.attrib.get("name") == org_vdc:
3137 return pvdc_href
3138
3139 def get_resource_pool_details(self, pvdc_href, headers):
3140 """Method to get resource pool information.
3141 Host groups are a property of the resource pool.
3142 To get host groups, we need to GET details of resource pool.
3143
3144 Args:
3145 pvdc_href: href to pvdc details
3146 headers: headers
3147
3148 Returns:
3149 respool_href - Returns href link reference to resource pool
3150 """
3151 respool_href = None
3152 resp = self.perform_request(req_type="GET", url=pvdc_href, headers=headers)
3153
3154 if resp.status_code != requests.codes.ok:
3155 self.logger.debug(
3156 "REST API call {} failed. Return status code {}".format(
3157 pvdc_href, resp.status_code
3158 )
3159 )
3160 else:
3161 respool_resp_xml = XmlElementTree.fromstring(resp.content)
3162 for child in respool_resp_xml:
3163 if "Link" in child.tag:
3164 if (
3165 child.attrib.get("type")
3166 == "application/vnd.vmware.admin.vmwProviderVdcResourcePoolSet+xml"
3167 ):
3168 respool_href = child.attrib.get("href")
3169 break
3170
3171 return respool_href
3172
3173 def log_message(self, msg):
3174 """
3175 Method to log error messages related to Affinity rule creation
3176 in new_vminstance & raise Exception
3177 Args :
3178 msg - Error message to be logged
3179
3180 """
3181 # get token to connect vCD as a normal user
3182 self.get_token()
3183 self.logger.debug(msg)
3184
3185 raise vimconn.VimConnException(msg)
3186
3187 def get_vminstance(self, vim_vm_uuid=None):
3188 """Returns the VM instance information from VIM"""
3189 self.logger.debug("Client requesting vm instance {} ".format(vim_vm_uuid))
3190
3191 _, vdc = self.get_vdc_details()
3192 if vdc is None:
3193 raise vimconn.VimConnConnectionException(
3194 "Failed to get a reference of VDC for a tenant {}".format(
3195 self.tenant_name
3196 )
3197 )
3198
3199 vm_info_dict = self.get_vapp_details_rest(vapp_uuid=vim_vm_uuid)
3200 if not vm_info_dict:
3201 self.logger.debug(
3202 "get_vminstance(): Failed to get vApp name by UUID {}".format(
3203 vim_vm_uuid
3204 )
3205 )
3206 raise vimconn.VimConnNotFoundException(
3207 "Failed to get vApp name by UUID {}".format(vim_vm_uuid)
3208 )
3209
3210 status_key = vm_info_dict["status"]
3211 error = ""
3212 try:
3213 vm_dict = {
3214 "created": vm_info_dict["created"],
3215 "description": vm_info_dict["name"],
3216 "status": vcdStatusCode2manoFormat[int(status_key)],
3217 "hostId": vm_info_dict["vmuuid"],
3218 "error_msg": error,
3219 "vim_info": yaml.safe_dump(vm_info_dict),
3220 "interfaces": [],
3221 }
3222
3223 if "interfaces" in vm_info_dict:
3224 vm_dict["interfaces"] = vm_info_dict["interfaces"]
3225 else:
3226 vm_dict["interfaces"] = []
3227 except KeyError:
3228 vm_dict = {
3229 "created": "",
3230 "description": "",
3231 "status": vcdStatusCode2manoFormat[int(-1)],
3232 "hostId": vm_info_dict["vmuuid"],
3233 "error_msg": "Inconsistency state",
3234 "vim_info": yaml.safe_dump(vm_info_dict),
3235 "interfaces": [],
3236 }
3237
3238 return vm_dict
3239
3240 def delete_vminstance(self, vm_id, created_items=None, volumes_to_hold=None):
3241 """Method poweroff and remove VM instance from vcloud director network.
3242
3243 Args:
3244 vm_id: VM UUID
3245
3246 Returns:
3247 Returns the instance identifier
3248 """
3249 self.logger.debug("Client requesting delete vm instance {} ".format(vm_id))
3250
3251 _, vdc = self.get_vdc_details()
3252 vdc_obj = VDC(self.client, href=vdc.get("href"))
3253 if vdc_obj is None:
3254 self.logger.debug(
3255 "delete_vminstance(): Failed to get a reference of VDC for a tenant {}".format(
3256 self.tenant_name
3257 )
3258 )
3259 raise vimconn.VimConnException(
3260 "delete_vminstance(): Failed to get a reference of VDC for a tenant {}".format(
3261 self.tenant_name
3262 )
3263 )
3264
3265 try:
3266 vapp_name = self.get_namebyvappid(vm_id)
3267 if vapp_name is None:
3268 self.logger.debug(
3269 "delete_vminstance(): Failed to get vm by given {} vm uuid".format(
3270 vm_id
3271 )
3272 )
3273
3274 return (
3275 -1,
3276 "delete_vminstance(): Failed to get vm by given {} vm uuid".format(
3277 vm_id
3278 ),
3279 )
3280
3281 self.logger.info("Deleting vApp {} and UUID {}".format(vapp_name, vm_id))
3282 vapp_resource = vdc_obj.get_vapp(vapp_name)
3283 vapp = VApp(self.client, resource=vapp_resource)
3284
3285 # Delete vApp and wait for status change if task executed and vApp is None.
3286 if vapp:
3287 if vapp_resource.get("deployed") == "true":
3288 self.logger.info("Powering off vApp {}".format(vapp_name))
3289 # Power off vApp
3290 powered_off = False
3291 wait_time = 0
3292
3293 while wait_time <= MAX_WAIT_TIME:
3294 power_off_task = vapp.power_off()
3295 result = self.client.get_task_monitor().wait_for_success(
3296 task=power_off_task
3297 )
3298
3299 if result.get("status") == "success":
3300 powered_off = True
3301 break
3302 else:
3303 self.logger.info(
3304 "Wait for vApp {} to power off".format(vapp_name)
3305 )
3306 time.sleep(INTERVAL_TIME)
3307
3308 wait_time += INTERVAL_TIME
3309
3310 if not powered_off:
3311 self.logger.debug(
3312 "delete_vminstance(): Failed to power off VM instance {} ".format(
3313 vm_id
3314 )
3315 )
3316 else:
3317 self.logger.info(
3318 "delete_vminstance(): Powered off VM instance {} ".format(
3319 vm_id
3320 )
3321 )
3322
3323 # Undeploy vApp
3324 self.logger.info("Undeploy vApp {}".format(vapp_name))
3325 wait_time = 0
3326 undeployed = False
3327 while wait_time <= MAX_WAIT_TIME:
3328 vapp = VApp(self.client, resource=vapp_resource)
3329 if not vapp:
3330 self.logger.debug(
3331 "delete_vminstance(): Failed to get vm by given {} vm uuid".format(
3332 vm_id
3333 )
3334 )
3335
3336 return (
3337 -1,
3338 "delete_vminstance(): Failed to get vm by given {} vm uuid".format(
3339 vm_id
3340 ),
3341 )
3342
3343 undeploy_task = vapp.undeploy()
3344 result = self.client.get_task_monitor().wait_for_success(
3345 task=undeploy_task
3346 )
3347
3348 if result.get("status") == "success":
3349 undeployed = True
3350 break
3351 else:
3352 self.logger.debug(
3353 "Wait for vApp {} to undeploy".format(vapp_name)
3354 )
3355 time.sleep(INTERVAL_TIME)
3356
3357 wait_time += INTERVAL_TIME
3358
3359 if not undeployed:
3360 self.logger.debug(
3361 "delete_vminstance(): Failed to undeploy vApp {} ".format(
3362 vm_id
3363 )
3364 )
3365
3366 # delete vapp
3367 self.logger.info("Start deletion of vApp {} ".format(vapp_name))
3368 if vapp is not None:
3369 wait_time = 0
3370 result = False
3371
3372 while wait_time <= MAX_WAIT_TIME:
3373 vapp = VApp(self.client, resource=vapp_resource)
3374 if not vapp:
3375 self.logger.debug(
3376 "delete_vminstance(): Failed to get vm by given {} vm uuid".format(
3377 vm_id
3378 )
3379 )
3380
3381 return (
3382 -1,
3383 "delete_vminstance(): Failed to get vm by given {} vm uuid".format(
3384 vm_id
3385 ),
3386 )
3387
3388 delete_task = vdc_obj.delete_vapp(vapp.name, force=True)
3389 result = self.client.get_task_monitor().wait_for_success(
3390 task=delete_task
3391 )
3392 if result.get("status") == "success":
3393 break
3394 else:
3395 self.logger.debug(
3396 "Wait for vApp {} to delete".format(vapp_name)
3397 )
3398 time.sleep(INTERVAL_TIME)
3399
3400 wait_time += INTERVAL_TIME
3401
3402 if result is None:
3403 self.logger.debug(
3404 "delete_vminstance(): Failed delete uuid {} ".format(vm_id)
3405 )
3406 else:
3407 self.logger.info(
3408 "Deleted vm instance {} successfully".format(vm_id)
3409 )
3410 config_drive_catalog_name, config_drive_catalog_id = (
3411 "cfg_drv-" + vm_id,
3412 None,
3413 )
3414 catalog_list = self.get_image_list()
3415
3416 try:
3417 config_drive_catalog_id = [
3418 catalog_["id"]
3419 for catalog_ in catalog_list
3420 if catalog_["name"] == config_drive_catalog_name
3421 ][0]
3422 except IndexError:
3423 pass
3424
3425 if config_drive_catalog_id:
3426 self.logger.debug(
3427 "delete_vminstance(): Found a config drive catalog {} matching "
3428 'vapp_name"{}". Deleting it.'.format(
3429 config_drive_catalog_id, vapp_name
3430 )
3431 )
3432 self.delete_image(config_drive_catalog_id)
3433
3434 return vm_id
3435 except Exception:
3436 self.logger.debug(traceback.format_exc())
3437
3438 raise vimconn.VimConnException(
3439 "delete_vminstance(): Failed delete vm instance {}".format(vm_id)
3440 )
3441
3442 def refresh_vms_status(self, vm_list):
3443 """Get the status of the virtual machines and their interfaces/ports
3444 Params: the list of VM identifiers
3445 Returns a dictionary with:
3446 vm_id: #VIM id of this Virtual Machine
3447 status: #Mandatory. Text with one of:
3448 # DELETED (not found at vim)
3449 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
3450 # OTHER (Vim reported other status not understood)
3451 # ERROR (VIM indicates an ERROR status)
3452 # ACTIVE, PAUSED, SUSPENDED, INACTIVE (not running),
3453 # CREATING (on building process), ERROR
3454 # ACTIVE:NoMgmtIP (Active but none of its interfaces has an IP address)
3455 #
3456 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
3457 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
3458 interfaces:
3459 - vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
3460 mac_address: #Text format XX:XX:XX:XX:XX:XX
3461 vim_net_id: #network id where this interface is connected
3462 vim_interface_id: #interface/port VIM id
3463 ip_address: #null, or text with IPv4, IPv6 address
3464 """
3465 self.logger.debug("Client requesting refresh vm status for {} ".format(vm_list))
3466
3467 _, vdc = self.get_vdc_details()
3468 if vdc is None:
3469 raise vimconn.VimConnException(
3470 "Failed to get a reference of VDC for a tenant {}".format(
3471 self.tenant_name
3472 )
3473 )
3474
3475 vms_dict = {}
3476 nsx_edge_list = []
3477 for vmuuid in vm_list:
3478 vapp_name = self.get_namebyvappid(vmuuid)
3479 if vapp_name is not None:
3480 try:
3481 vm_pci_details = self.get_vm_pci_details(vmuuid)
3482 vdc_obj = VDC(self.client, href=vdc.get("href"))
3483 vapp_resource = vdc_obj.get_vapp(vapp_name)
3484 the_vapp = VApp(self.client, resource=vapp_resource)
3485
3486 vm_details = {}
3487 for vm in the_vapp.get_all_vms():
3488 headers = {
3489 "Accept": "application/*+xml;version=" + API_VERSION,
3490 "x-vcloud-authorization": self.client._session.headers[
3491 "x-vcloud-authorization"
3492 ],
3493 }
3494 response = self.perform_request(
3495 req_type="GET", url=vm.get("href"), headers=headers
3496 )
3497
3498 if response.status_code != 200:
3499 self.logger.error(
3500 "refresh_vms_status : REST call {} failed reason : {}"
3501 "status code : {}".format(
3502 vm.get("href"), response.text, response.status_code
3503 )
3504 )
3505 raise vimconn.VimConnException(
3506 "refresh_vms_status : Failed to get VM details"
3507 )
3508
3509 xmlroot = XmlElementTree.fromstring(response.text)
3510 result = response.text.replace("\n", " ")
3511 hdd_match = re.search(
3512 r'vcloud:capacity="(\d+)"\svcloud:storageProfileOverrideVmDefault=',
3513 result,
3514 )
3515
3516 if hdd_match:
3517 hdd_mb = hdd_match.group(1)
3518 vm_details["hdd_mb"] = int(hdd_mb) if hdd_mb else None
3519
3520 cpus_match = re.search(
3521 "<rasd:Description>Number of Virtual CPUs</.*?>(\d+)</rasd:VirtualQuantity>",
3522 result,
3523 )
3524
3525 if cpus_match:
3526 cpus = cpus_match.group(1)
3527 vm_details["cpus"] = int(cpus) if cpus else None
3528
3529 memory_mb = re.search(
3530 "<rasd:Description>Memory Size</.*?>(\d+)</rasd:VirtualQuantity>",
3531 result,
3532 ).group(1)
3533 vm_details["memory_mb"] = int(memory_mb) if memory_mb else None
3534 vm_details["status"] = vcdStatusCode2manoFormat[
3535 int(xmlroot.get("status"))
3536 ]
3537 vm_details["id"] = xmlroot.get("id")
3538 vm_details["name"] = xmlroot.get("name")
3539 vm_info = [vm_details]
3540
3541 if vm_pci_details:
3542 vm_info[0].update(vm_pci_details)
3543
3544 vm_dict = {
3545 "status": vcdStatusCode2manoFormat[
3546 int(vapp_resource.get("status"))
3547 ],
3548 "error_msg": vcdStatusCode2manoFormat[
3549 int(vapp_resource.get("status"))
3550 ],
3551 "vim_info": yaml.safe_dump(vm_info),
3552 "interfaces": [],
3553 }
3554
3555 # get networks
3556 vm_ip = None
3557 vm_mac = None
3558 networks = re.findall(
3559 "<NetworkConnection needsCustomization=.*?</NetworkConnection>",
3560 result,
3561 )
3562
3563 for network in networks:
3564 mac_s = re.search("<MACAddress>(.*?)</MACAddress>", network)
3565 vm_mac = mac_s.group(1) if mac_s else None
3566 ip_s = re.search("<IpAddress>(.*?)</IpAddress>", network)
3567 vm_ip = ip_s.group(1) if ip_s else None
3568
3569 if vm_ip is None:
3570 if not nsx_edge_list:
3571 nsx_edge_list = self.get_edge_details()
3572 if nsx_edge_list is None:
3573 raise vimconn.VimConnException(
3574 "refresh_vms_status:"
3575 "Failed to get edge details from NSX Manager"
3576 )
3577
3578 if vm_mac is not None:
3579 vm_ip = self.get_ipaddr_from_NSXedge(
3580 nsx_edge_list, vm_mac
3581 )
3582
3583 net_s = re.search('network="(.*?)"', network)
3584 network_name = net_s.group(1) if net_s else None
3585 vm_net_id = self.get_network_id_by_name(network_name)
3586 interface = {
3587 "mac_address": vm_mac,
3588 "vim_net_id": vm_net_id,
3589 "vim_interface_id": vm_net_id,
3590 "ip_address": vm_ip,
3591 }
3592 vm_dict["interfaces"].append(interface)
3593
3594 # add a vm to vm dict
3595 vms_dict.setdefault(vmuuid, vm_dict)
3596 self.logger.debug("refresh_vms_status : vm info {}".format(vm_dict))
3597 except Exception as exp:
3598 self.logger.debug("Error in response {}".format(exp))
3599 self.logger.debug(traceback.format_exc())
3600
3601 return vms_dict
3602
3603 def get_edge_details(self):
3604 """Get the NSX edge list from NSX Manager
3605 Returns list of NSX edges
3606 """
3607 edge_list = []
3608 rheaders = {"Content-Type": "application/xml"}
3609 nsx_api_url = "/api/4.0/edges"
3610
3611 self.logger.debug(
3612 "Get edge details from NSX Manager {} {}".format(
3613 self.nsx_manager, nsx_api_url
3614 )
3615 )
3616
3617 try:
3618 resp = requests.get(
3619 self.nsx_manager + nsx_api_url,
3620 auth=(self.nsx_user, self.nsx_password),
3621 verify=False,
3622 headers=rheaders,
3623 )
3624 if resp.status_code == requests.codes.ok:
3625 paged_Edge_List = XmlElementTree.fromstring(resp.text)
3626 for edge_pages in paged_Edge_List:
3627 if edge_pages.tag == "edgePage":
3628 for edge_summary in edge_pages:
3629 if edge_summary.tag == "pagingInfo":
3630 for element in edge_summary:
3631 if (
3632 element.tag == "totalCount"
3633 and element.text == "0"
3634 ):
3635 raise vimconn.VimConnException(
3636 "get_edge_details: No NSX edges details found: {}".format(
3637 self.nsx_manager
3638 )
3639 )
3640
3641 if edge_summary.tag == "edgeSummary":
3642 for element in edge_summary:
3643 if element.tag == "id":
3644 edge_list.append(element.text)
3645 else:
3646 raise vimconn.VimConnException(
3647 "get_edge_details: No NSX edge details found: {}".format(
3648 self.nsx_manager
3649 )
3650 )
3651
3652 if not edge_list:
3653 raise vimconn.VimConnException(
3654 "get_edge_details: "
3655 "No NSX edge details found: {}".format(self.nsx_manager)
3656 )
3657 else:
3658 self.logger.debug(
3659 "get_edge_details: Found NSX edges {}".format(edge_list)
3660 )
3661
3662 return edge_list
3663 else:
3664 self.logger.debug(
3665 "get_edge_details: "
3666 "Failed to get NSX edge details from NSX Manager: {}".format(
3667 resp.content
3668 )
3669 )
3670
3671 return None
3672
3673 except Exception as exp:
3674 self.logger.debug(
3675 "get_edge_details: "
3676 "Failed to get NSX edge details from NSX Manager: {}".format(exp)
3677 )
3678 raise vimconn.VimConnException(
3679 "get_edge_details: "
3680 "Failed to get NSX edge details from NSX Manager: {}".format(exp)
3681 )
3682
3683 def get_ipaddr_from_NSXedge(self, nsx_edges, mac_address):
3684 """Get IP address details from NSX edges, using the MAC address
3685 PARAMS: nsx_edges : List of NSX edges
3686 mac_address : Find IP address corresponding to this MAC address
3687 Returns: IP address corresponding to the provided MAC address
3688 """
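# Illustrative usage (a minimal sketch; the edge ids and MAC below are hypothetical):
#   edges = self.get_edge_details()                      # e.g. ["edge-1", "edge-7"]
#   ip = self.get_ipaddr_from_NSXedge(edges, "00:50:56:01:02:03")
# Each edge's /dhcp/leaseInfo response is scanned for a lease whose macAddress
# matches, and the corresponding ipAddress is returned (None if no lease matches).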
3689 ip_addr = None
3690 rheaders = {"Content-Type": "application/xml"}
3691
3692 self.logger.debug("get_ipaddr_from_NSXedge: Finding IP addr from NSX edge")
3693
3694 try:
3695 for edge in nsx_edges:
3696 nsx_api_url = "/api/4.0/edges/" + edge + "/dhcp/leaseInfo"
3697
3698 resp = requests.get(
3699 self.nsx_manager + nsx_api_url,
3700 auth=(self.nsx_user, self.nsx_password),
3701 verify=False,
3702 headers=rheaders,
3703 )
3704
3705 if resp.status_code == requests.codes.ok:
3706 dhcp_leases = XmlElementTree.fromstring(resp.text)
3707 for child in dhcp_leases:
3708 if child.tag == "dhcpLeaseInfo":
3709 dhcpLeaseInfo = child
3710 for leaseInfo in dhcpLeaseInfo:
3711 edge_mac_addr = None
3712 for elem in leaseInfo:
3713 if elem.tag == "macAddress":
3714 edge_mac_addr = elem.text
3715 if elem.tag == "ipAddress":
3716 ip_addr = elem.text
3717
3718 if edge_mac_addr is not None:
3719 if edge_mac_addr == mac_address:
3720 self.logger.debug(
3721 "Found ip addr {} for mac {} at NSX edge {}".format(
3722 ip_addr, mac_address, edge
3723 )
3724 )
3725
3726 return ip_addr
3727 else:
3728 self.logger.debug(
3729 "get_ipaddr_from_NSXedge: "
3730 "Error occurred while getting DHCP lease info from NSX Manager: {}".format(
3731 resp.content
3732 )
3733 )
3734
3735 self.logger.debug(
3736 "get_ipaddr_from_NSXedge: No IP addr found in any NSX edge"
3737 )
3738
3739 return None
3740
3741 except XmlElementTree.ParseError as Err:
3742 self.logger.debug(
3743 "ParseError in response from NSX Manager {}".format(Err),
3744 exc_info=True,
3745 )
3746
3747 def action_vminstance(self, vm_id=None, action_dict=None, created_items={}):
3748 """Send an action to a VM instance in the VIM.
3749 Returns the vm_id if the action was successfully sent to the VIM"""
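# Illustrative call (the vm_id is hypothetical); supported keys mirror the
# branches below: "start", "rebuild", "pause", "resume", "shutoff", "shutdown",
# "forceOff" and "reboot", e.g.:
#   self.action_vminstance(vm_id="89c146ac-...", action_dict={"start": None})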
3750
3751 self.logger.debug(
3752 "Received action for vm {} and action dict {}".format(vm_id, action_dict)
3753 )
3754
3755 if vm_id is None or action_dict is None:
3756 raise vimconn.VimConnException("Invalid request. VM id or action is None.")
3757
3758 _, vdc = self.get_vdc_details()
3759 if vdc is None:
3760 raise vimconn.VimConnException(
3761 "Failed to get a reference of VDC for a tenant {}".format(
3762 self.tenant_name
3763 )
3764 )
3765
3766 vapp_name = self.get_namebyvappid(vm_id)
3767 if vapp_name is None:
3768 self.logger.debug(
3769 "action_vminstance(): Failed to get vm by given {} vm uuid".format(
3770 vm_id
3771 )
3772 )
3773
3774 raise vimconn.VimConnException(
3775 "Failed to get vm by given {} vm uuid".format(vm_id)
3776 )
3777 else:
3778 self.logger.info(
3779 "Action_vminstance vApp {} and UUID {}".format(vapp_name, vm_id)
3780 )
3781
3782 try:
3783 vdc_obj = VDC(self.client, href=vdc.get("href"))
3784 vapp_resource = vdc_obj.get_vapp(vapp_name)
3785 vapp = VApp(self.client, resource=vapp_resource)
3786
3787 if "start" in action_dict:
3788 self.logger.info(
3789 "action_vminstance: Power on vApp: {}".format(vapp_name)
3790 )
3791 poweron_task = self.power_on_vapp(vm_id, vapp_name)
3792 result = self.client.get_task_monitor().wait_for_success(
3793 task=poweron_task
3794 )
3795 self.instance_actions_result("start", result, vapp_name)
3796 elif "rebuild" in action_dict:
3797 self.logger.info(
3798 "action_vminstance: Rebuild vApp: {}".format(vapp_name)
3799 )
3800 rebuild_task = vapp.deploy(power_on=True)
3801 result = self.client.get_task_monitor().wait_for_success(
3802 task=rebuild_task
3803 )
3804 self.instance_actions_result("rebuild", result, vapp_name)
3805 elif "pause" in action_dict:
3806 self.logger.info("action_vminstance: pause vApp: {}".format(vapp_name))
3807 pause_task = vapp.undeploy(action="suspend")
3808 result = self.client.get_task_monitor().wait_for_success(
3809 task=pause_task
3810 )
3811 self.instance_actions_result("pause", result, vapp_name)
3812 elif "resume" in action_dict:
3813 self.logger.info("action_vminstance: resume vApp: {}".format(vapp_name))
3814 poweron_task = self.power_on_vapp(vm_id, vapp_name)
3815 result = self.client.get_task_monitor().wait_for_success(
3816 task=poweron_task
3817 )
3818 self.instance_actions_result("resume", result, vapp_name)
3819 elif "shutoff" in action_dict or "shutdown" in action_dict:
3820 action_name, _ = list(action_dict.items())[0]
3821 self.logger.info(
3822 "action_vminstance: {} vApp: {}".format(action_name, vapp_name)
3823 )
3824 shutdown_task = vapp.shutdown()
3825 result = self.client.get_task_monitor().wait_for_success(
3826 task=shutdown_task
3827 )
3828 if action_name == "shutdown":
3829 self.instance_actions_result("shutdown", result, vapp_name)
3830 else:
3831 self.instance_actions_result("shutoff", result, vapp_name)
3832 elif "forceOff" in action_dict:
3833 result = vapp.undeploy(action="powerOff")
3834 self.instance_actions_result("forceOff", result, vapp_name)
3835 elif "reboot" in action_dict:
3836 self.logger.info("action_vminstance: reboot vApp: {}".format(vapp_name))
3837 reboot_task = vapp.reboot()
3838 self.client.get_task_monitor().wait_for_success(task=reboot_task)
3839 else:
3840 raise vimconn.VimConnException(
3841 "action_vminstance: Invalid action {} or action is None.".format(
3842 action_dict
3843 )
3844 )
3845
3846 return vm_id
3847 except Exception as exp:
3848 self.logger.debug("action_vminstance: Failed with Exception {}".format(exp))
3849
3850 raise vimconn.VimConnException(
3851 "action_vminstance: Failed with Exception {}".format(exp)
3852 )
3853
3854 def instance_actions_result(self, action, result, vapp_name):
3855 if result.get("status") == "success":
3856 self.logger.info(
3857 "action_vminstance: Successfully {} the vApp: {}".format(
3858 action, vapp_name
3859 )
3860 )
3861 else:
3862 self.logger.error(
3863 "action_vminstance: Failed to {} vApp: {}".format(action, vapp_name)
3864 )
3865
3866 def get_vminstance_console(self, vm_id, console_type="novnc"):
3867 """
3868 Get a console for the virtual machine
3869 Params:
3870 vm_id: uuid of the VM
3871 console_type, can be:
3872 "novnc" (by default), "xvpvnc" for VNC types,
3873 "rdp-html5" for RDP types, "spice-html5" for SPICE types
3874 Returns dict with the console parameters:
3875 protocol: ssh, ftp, http, https, ...
3876 server: usually ip address
3877 port: the http, ssh, ... port
3878 suffix: extra text, e.g. the http path and query string
3879 """
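# Illustrative return value for a MKS/novnc console (all values hypothetical):
#   {"protocol": "https", "server": "10.0.0.21", "port": "443",
#    "suffix": "vm-1234/screen/..."}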
3880 console_dict = {}
3881
3882 if console_type is None or console_type == "novnc":
3883 url_rest_call = "{}/api/vApp/vm-{}/screen/action/acquireMksTicket".format(
3884 self.url, vm_id
3885 )
3886 headers = {
3887 "Accept": "application/*+xml;version=" + API_VERSION,
3888 "x-vcloud-authorization": self.client._session.headers[
3889 "x-vcloud-authorization"
3890 ],
3891 }
3892 response = self.perform_request(
3893 req_type="POST", url=url_rest_call, headers=headers
3894 )
3895
3896 if response.status_code == 403:
3897 response = self.retry_rest("GET", url_rest_call)
3898
3899 if response.status_code != 200:
3900 self.logger.error(
3901 "REST call {} failed reason : {} "
3902 "status code : {}".format(
3903 url_rest_call, response.text, response.status_code
3904 )
3905 )
3906 raise vimconn.VimConnException(
3907 "get_vminstance_console : Failed to get " "VM Mks ticket details"
3908 )
3909
3910 s = re.search("<Host>(.*?)</Host>", response.text)
3911 console_dict["server"] = s.group(1) if s else None
3912 s1 = re.search(r"<Port>(\d+)</Port>", response.text)
3913 console_dict["port"] = s1.group(1) if s1 else None
3914 url_rest_call = "{}/api/vApp/vm-{}/screen/action/acquireTicket".format(
3915 self.url, vm_id
3916 )
3917 headers = {
3918 "Accept": "application/*+xml;version=" + API_VERSION,
3919 "x-vcloud-authorization": self.client._session.headers[
3920 "x-vcloud-authorization"
3921 ],
3922 }
3923 response = self.perform_request(
3924 req_type="POST", url=url_rest_call, headers=headers
3925 )
3926
3927 if response.status_code == 403:
3928 response = self.retry_rest("GET", url_rest_call)
3929
3930 if response.status_code != 200:
3931 self.logger.error(
3932 "REST call {} failed reason : {} "
3933 "status code : {}".format(
3934 url_rest_call, response.text, response.status_code
3935 )
3936 )
3937 raise vimconn.VimConnException(
3938 "get_vminstance_console : Failed to get " "VM console details"
3939 )
3940
3941 s = re.search(r">.*?/(vm-\d+.*)</", response.text)
3942 console_dict["suffix"] = s.group(1) if s else None
3943 console_dict["protocol"] = "https"
3944
3945 return console_dict
3946
3947 def get_hosts_info(self):
3948 """Get the information of deployed hosts
3949 Returns the hosts content"""
3950 raise vimconn.VimConnNotImplemented("Should have implemented this")
3951
3952 def get_hosts(self, vim_tenant):
3953 """Get the hosts and deployed instances
3954 Returns the hosts content"""
3955 raise vimconn.VimConnNotImplemented("Should have implemented this")
3956
3957 def get_network_name_by_id(self, network_uuid=None):
3958 """Method gets vCloud Director network name based on the supplied uuid.
3959
3960 Args:
3961 network_uuid: network_id
3962
3963 Returns:
3964 The network name, or None if not found.
3965 """
3966
3967 if not network_uuid:
3968 return None
3969
3970 try:
3971 org_dict = self.get_org(self.org_uuid)
3972 if "networks" in org_dict:
3973 org_network_dict = org_dict["networks"]
3974
3975 for net_uuid in org_network_dict:
3976 if net_uuid == network_uuid:
3977 return org_network_dict[net_uuid]
3978 except Exception:
3979 self.logger.debug("Exception in get_network_name_by_id")
3980 self.logger.debug(traceback.format_exc())
3981
3982 return None
3983
3984 def get_network_id_by_name(self, network_name=None):
3985 """Method gets vCloud Director network uuid based on the supplied name.
3986
3987 Args:
3988 network_name: network_name
3989 Returns:
3990 The network uuid (network_id), or None if not found.
3991
3992 """
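# Illustrative usage (the network name is hypothetical):
#   net_id = self.get_network_id_by_name("mgmt-net")   # -> org network UUID or None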
3993 if not network_name:
3994 self.logger.debug("get_network_id_by_name() : Network name is empty")
3995 return None
3996
3997 try:
3998 org_dict = self.get_org(self.org_uuid)
3999 if org_dict and "networks" in org_dict:
4000 org_network_dict = org_dict["networks"]
4001
4002 for net_uuid, net_name in org_network_dict.items():
4003 if net_name == network_name:
4004 return net_uuid
4005
4006 except KeyError as exp:
4007 self.logger.debug("get_network_id_by_name() : KeyError- {} ".format(exp))
4008
4009 return None
4010
4011 def get_physical_network_by_name(self, physical_network_name):
4012 """
4013 Method returns the uuid of the physical network whose name is passed.
4014 Args:
4015 physical_network_name: physical network name
4016 Returns:
4017 UUID of physical_network_name
4018 """
4019 try:
4020 client_as_admin = self.connect_as_admin()
4021
4022 if not client_as_admin:
4023 raise vimconn.VimConnConnectionException("Failed to connect vCD.")
4024
4025 url_list = [self.url, "/api/admin/vdc/", self.tenant_id]
4026 vm_list_rest_call = "".join(url_list)
4027
4028 if client_as_admin._session:
4029 headers = {
4030 "Accept": "application/*+xml;version=" + API_VERSION,
4031 "x-vcloud-authorization": client_as_admin._session.headers[
4032 "x-vcloud-authorization"
4033 ],
4034 }
4035 response = self.perform_request(
4036 req_type="GET", url=vm_list_rest_call, headers=headers
4037 )
4038 provider_network = None
4039 available_network = None
4040 # add_vdc_rest_url = None
4041
4042 if response.status_code != requests.codes.ok:
4043 self.logger.debug(
4044 "REST API call {} failed. Return status code {}".format(
4045 vm_list_rest_call, response.status_code
4046 )
4047 )
4048 return None
4049 else:
4050 try:
4051 vm_list_xmlroot = XmlElementTree.fromstring(response.text)
4052 for child in vm_list_xmlroot:
4053 if child.tag.split("}")[1] == "ProviderVdcReference":
4054 provider_network = child.attrib.get("href")
4055 # application/vnd.vmware.admin.providervdc+xml
4056
4057 if child.tag.split("}")[1] == "Link":
4058 if (
4059 child.attrib.get("type")
4060 == "application/vnd.vmware.vcloud.orgVdcNetwork+xml"
4061 and child.attrib.get("rel") == "add"
4062 ):
4063 child.attrib.get("href")
4064 except Exception:
4065 self.logger.debug(
4066 "Failed to parse response for REST API call {}".format(
4067 vm_list_rest_call
4068 )
4069 )
4070 self.logger.debug("Response body {}".format(response.text))
4071
4072 return None
4073
4074 # find pvdc provided available network
4075 response = self.perform_request(
4076 req_type="GET", url=provider_network, headers=headers
4077 )
4078
4079 if response.status_code != requests.codes.ok:
4080 self.logger.debug(
4081 "REST API call {} failed. Return status code {}".format(
4082 vm_list_rest_call, response.status_code
4083 )
4084 )
4085
4086 return None
4087
4088 try:
4089 vm_list_xmlroot = XmlElementTree.fromstring(response.text)
4090 for child in vm_list_xmlroot.iter():
4091 if child.tag.split("}")[1] == "AvailableNetworks":
4092 for networks in child.iter():
4093 if (
4094 networks.attrib.get("href") is not None
4095 and networks.attrib.get("name") is not None
4096 ):
4097 if (
4098 networks.attrib.get("name")
4099 == physical_network_name
4100 ):
4101 network_url = networks.attrib.get("href")
4102 available_network = network_url[
4103 network_url.rindex("/") + 1 :
4104 ]
4105 break
4106 except Exception:
4107 return None
4108
4109 return available_network
4110 except Exception as e:
4111 self.logger.error("Error while getting physical network: {}".format(e))
4112
4113 def list_org_action(self):
4114 """
4115 Method leverages vCloud Director to query the organizations available to the current user.
4116
4117 Args:
4118 self.client - is the active connection.
4119
4120 Returns:
4121 The XML response text, or None.
4123 """
4124 url_list = [self.url, "/api/org"]
4125 vm_list_rest_call = "".join(url_list)
4126
4127 if self.client._session:
4128 headers = {
4129 "Accept": "application/*+xml;version=" + API_VERSION,
4130 "x-vcloud-authorization": self.client._session.headers[
4131 "x-vcloud-authorization"
4132 ],
4133 }
4134
4135 response = self.perform_request(
4136 req_type="GET", url=vm_list_rest_call, headers=headers
4137 )
4138
4139 if response.status_code == 403:
4140 response = self.retry_rest("GET", vm_list_rest_call)
4141
4142 if response.status_code == requests.codes.ok:
4143 return response.text
4144
4145 return None
4146
4147 def get_org_action(self, org_uuid=None):
4148 """
4149 Method leverages vCloud Director to retrieve the available objects for an organization.
4150
4151 Args:
4152 org_uuid - vCD organization uuid
4153 self.client - is active connection.
4154
4155 Returns:
4156 The XML response text, or None.
4157 """
4158
4159 if org_uuid is None:
4160 return None
4161
4162 url_list = [self.url, "/api/org/", org_uuid]
4163 vm_list_rest_call = "".join(url_list)
4164
4165 if self.client._session:
4166 headers = {
4167 "Accept": "application/*+xml;version=" + API_VERSION,
4168 "x-vcloud-authorization": self.client._session.headers[
4169 "x-vcloud-authorization"
4170 ],
4171 }
4172
4173 # response = requests.get(vm_list_rest_call, headers=headers, verify=False)
4174 response = self.perform_request(
4175 req_type="GET", url=vm_list_rest_call, headers=headers
4176 )
4177
4178 if response.status_code == 403:
4179 response = self.retry_rest("GET", vm_list_rest_call)
4180
4181 if response.status_code == requests.codes.ok:
4182 return response.text
4183
4184 return None
4185
4186 def get_org(self, org_uuid=None):
4187 """
4188 Method retrieves available organization in vCloud Director
4189
4190 Args:
4191 org_uuid - is a organization uuid.
4192
4193 Returns:
4194 The return dictionary with the following keys:
4195 "networks" - network list under the org
4196 "catalogs" - catalog list under the org
4197 "vdcs" - vdc list under the org
4198 """
4199
4200 org_dict = {}
4201
4202 if org_uuid is None:
4203 return org_dict
4204
4205 content = self.get_org_action(org_uuid=org_uuid)
4206 try:
4207 vdc_list = {}
4208 network_list = {}
4209 catalog_list = {}
4210 vm_list_xmlroot = XmlElementTree.fromstring(content)
4211 for child in vm_list_xmlroot:
4212 if child.attrib["type"] == "application/vnd.vmware.vcloud.vdc+xml":
4213 vdc_list[child.attrib["href"].split("/")[-1:][0]] = child.attrib[
4214 "name"
4215 ]
4216 org_dict["vdcs"] = vdc_list
4217
4218 if (
4219 child.attrib["type"]
4220 == "application/vnd.vmware.vcloud.orgNetwork+xml"
4221 ):
4222 network_list[
4223 child.attrib["href"].split("/")[-1:][0]
4224 ] = child.attrib["name"]
4225 org_dict["networks"] = network_list
4226
4227 if child.attrib["type"] == "application/vnd.vmware.vcloud.catalog+xml":
4228 catalog_list[
4229 child.attrib["href"].split("/")[-1:][0]
4230 ] = child.attrib["name"]
4231 org_dict["catalogs"] = catalog_list
4232 except Exception:
4233 pass
4234
4235 return org_dict
4236
4237 def get_org_list(self):
4238 """
4239 Method retrieves the available organizations in vCloud Director
4240
4241 Args:
4242 vca - is active VCA connection.
4243
4244 Returns:
4245 A dictionary keyed by organization UUID, with the organization name as value
4246 """
4247 org_dict = {}
4248
4249 content = self.list_org_action()
4250 try:
4251 vm_list_xmlroot = XmlElementTree.fromstring(content)
4252
4253 for vm_xml in vm_list_xmlroot:
4254 if vm_xml.tag.split("}")[1] == "Org":
4255 org_uuid = vm_xml.attrib["href"].split("/")[-1:]
4256 org_dict[org_uuid[0]] = vm_xml.attrib["name"]
4257 except Exception:
4258 pass
4259
4260 return org_dict
4261
4262 def vms_view_action(self, vdc_name=None):
4263 """Method leverages vCloud director vms query call
4264
4265 Args:
4266 vca - is active VCA connection.
4267 vdc_name - is a vdc name that will be used to query vms action
4268
4269 Returns:
4270 The XML response text, or None.
4271 """
4272 vca = self.connect()
4273 if vdc_name is None:
4274 return None
4275
4276 url_list = [vca.host, "/api/vms/query"]
4277 vm_list_rest_call = "".join(url_list)
4278
4279 if not (not vca.vcloud_session or not vca.vcloud_session.organization):
4280 refs = [
4281 ref
4282 for ref in vca.vcloud_session.organization.Link
4283 if ref.name == vdc_name
4284 and ref.type_ == "application/vnd.vmware.vcloud.vdc+xml"
4285 ]
4286
4287 if len(refs) == 1:
4288 response = self.perform_request(
4289 req_type="GET",
4290 url=vm_list_rest_call,
4291 headers=vca.vcloud_session.get_vcloud_headers(),
4292 verify=vca.verify,
4293 logger=vca.logger,
4294 )
4295
4296 if response.status_code == requests.codes.ok:
4297 return response.text
4298
4299 return None
4300
4301 def get_vapp(self, vdc_name=None, vapp_name=None, isuuid=False):
4302 """
4303 Method retrieves VMs deployed in vCloud Director. It returns the VM attributes as a dictionary
4304 containing all VMs deployed for the queried VDC.
4305 The dictionary key is the VM UUID
4306
4307
4308 Args:
4309 vca - is active VCA connection.
4310 vdc_name - is a vdc name that will be used to query vms action
4311
4312 Returns:
4313 A dictionary keyed by VM UUID
4314 """
4315 vm_dict = {}
4316 vca = self.connect()
4317
4318 if not vca:
4319 raise vimconn.VimConnConnectionException("self.connect() is failed")
4320
4321 if vdc_name is None:
4322 return vm_dict
4323
4324 content = self.vms_view_action(vdc_name=vdc_name)
4325 try:
4326 vm_list_xmlroot = XmlElementTree.fromstring(content)
4327 for vm_xml in vm_list_xmlroot:
4328 if (
4329 vm_xml.tag.split("}")[1] == "VMRecord"
4330 and vm_xml.attrib["isVAppTemplate"] == "false"
4331 ):
4332 # lookup done by UUID
4333 if isuuid:
4334 if vapp_name in vm_xml.attrib["container"]:
4335 rawuuid = vm_xml.attrib["href"].split("/")[-1:]
4336 if "vm-" in rawuuid[0]:
4337 vm_dict[rawuuid[0][3:]] = vm_xml.attrib
4338 break
4339 # lookup done by Name
4340 else:
4341 if vapp_name in vm_xml.attrib["name"]:
4342 rawuuid = vm_xml.attrib["href"].split("/")[-1:]
4343 if "vm-" in rawuuid[0]:
4344 vm_dict[rawuuid[0][3:]] = vm_xml.attrib
4345 break
4346 except Exception:
4347 pass
4348
4349 return vm_dict
4350
4351 def get_network_action(self, network_uuid=None):
4352 """
4353 Method leverages vCloud Director to query a network based on its uuid
4354
4355 Args:
4356 vca - is active VCA connection.
4357 network_uuid - is a network uuid
4358
4359 Returns:
4360 The XML response text, or None.
4361 """
4362 if network_uuid is None:
4363 return None
4364
4365 url_list = [self.url, "/api/network/", network_uuid]
4366 vm_list_rest_call = "".join(url_list)
4367
4368 if self.client._session:
4369 headers = {
4370 "Accept": "application/*+xml;version=" + API_VERSION,
4371 "x-vcloud-authorization": self.client._session.headers[
4372 "x-vcloud-authorization"
4373 ],
4374 }
4375 response = self.perform_request(
4376 req_type="GET", url=vm_list_rest_call, headers=headers
4377 )
4378
4379 # Retry login if session expired & retry sending request
4380 if response.status_code == 403:
4381 response = self.retry_rest("GET", vm_list_rest_call)
4382
4383 if response.status_code == requests.codes.ok:
4384 return response.text
4385
4386 return None
4387
4388 def get_vcd_network(self, network_uuid=None):
4389 """
4390 Method retrieves available network from vCloud Director
4391
4392 Args:
4393 network_uuid - is VCD network UUID
4394
4395 Each element serialized as key : value pair
4396
4397 Following keys available for access, e.g. network_configuration['Gateway']
4398 <Configuration>
4399 <IpScopes>
4400 <IpScope>
4401 <IsInherited>true</IsInherited>
4402 <Gateway>172.16.252.100</Gateway>
4403 <Netmask>255.255.255.0</Netmask>
4404 <Dns1>172.16.254.201</Dns1>
4405 <Dns2>172.16.254.202</Dns2>
4406 <DnsSuffix>vmwarelab.edu</DnsSuffix>
4407 <IsEnabled>true</IsEnabled>
4408 <IpRanges>
4409 <IpRange>
4410 <StartAddress>172.16.252.1</StartAddress>
4411 <EndAddress>172.16.252.99</EndAddress>
4412 </IpRange>
4413 </IpRanges>
4414 </IpScope>
4415 </IpScopes>
4416 <FenceMode>bridged</FenceMode>
4417
4418 Returns:
4419 A dictionary with the network configuration (key/value pairs parsed from the XML)
4420 """
4421 network_configuration = {}
4422
4423 if network_uuid is None:
4424 return network_uuid
4425
4426 try:
4427 content = self.get_network_action(network_uuid=network_uuid)
4428 if content is not None:
4429 vm_list_xmlroot = XmlElementTree.fromstring(content)
4430 network_configuration["status"] = vm_list_xmlroot.get("status")
4431 network_configuration["name"] = vm_list_xmlroot.get("name")
4432 network_configuration["uuid"] = vm_list_xmlroot.get("id").split(":")[3]
4433
4434 for child in vm_list_xmlroot:
4435 if child.tag.split("}")[1] == "IsShared":
4436 network_configuration["isShared"] = child.text.strip()
4437
4438 if child.tag.split("}")[1] == "Configuration":
4439 for configuration in child.iter():
4440 tagKey = configuration.tag.split("}")[1].strip()
4441 if tagKey != "":
4442 network_configuration[
4443 tagKey
4444 ] = configuration.text.strip()
4445 except Exception as exp:
4446 self.logger.debug("get_vcd_network: Failed with Exception {}".format(exp))
4447
4448 raise vimconn.VimConnException(
4449 "get_vcd_network: Failed with Exception {}".format(exp)
4450 )
4451
4452 return network_configuration
4453
4454 def delete_network_action(self, network_uuid=None):
4455 """
4456 Method deletes the given network from vCloud Director
4457
4458 Args:
4459 network_uuid - is a network uuid that client wish to delete
4460
4461 Returns:
4462 True if the delete request was accepted, otherwise False
4463 """
4464 client = self.connect_as_admin()
4465
4466 if not client:
4467 raise vimconn.VimConnConnectionException("Failed to connect vCD as admin")
4468
4469 if network_uuid is None:
4470 return False
4471
4472 url_list = [self.url, "/api/admin/network/", network_uuid]
4473 vm_list_rest_call = "".join(url_list)
4474
4475 if client._session:
4476 headers = {
4477 "Accept": "application/*+xml;version=" + API_VERSION,
4478 "x-vcloud-authorization": client._session.headers[
4479 "x-vcloud-authorization"
4480 ],
4481 }
4482 response = self.perform_request(
4483 req_type="DELETE", url=vm_list_rest_call, headers=headers
4484 )
4485
4486 if response.status_code == 202:
4487 return True
4488
4489 return False
4490
4491 def create_network(
4492 self,
4493 network_name=None,
4494 net_type="bridge",
4495 parent_network_uuid=None,
4496 ip_profile=None,
4497 isshared="true",
4498 ):
4499 """
4500 Method creates a network in vCloud Director
4501
4502 Args:
4503 network_name - is network name to be created.
4504 net_type - can be 'bridge','data','ptp','mgmt'.
4505 ip_profile is a dict containing the IP parameters of the network
4506 isshared - is a boolean
4507 parent_network_uuid - is parent provider vdc network that will be used for mapping.
4508 It is an optional attribute; by default, if no parent network is indicated, the first available one is used.
4509
4510 Returns:
4511 The new network uuid, or None on failure
4512 """
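# Illustrative ip_profile (all values hypothetical); missing keys are filled in
# from DEFAULT_IP_PROFILE or derived defaults further down in create_network_rest():
#   {"subnet_address": "10.10.0.0/24", "gateway_address": "10.10.0.1",
#    "dhcp_enabled": True, "dhcp_count": 50, "dhcp_start_address": "10.10.0.3",
#    "dns_address": "10.10.0.2", "ip_version": "IPv4"}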
4513 new_network_name = [network_name, "-", str(uuid.uuid4())]
4514 content = self.create_network_rest(
4515 network_name="".join(new_network_name),
4516 ip_profile=ip_profile,
4517 net_type=net_type,
4518 parent_network_uuid=parent_network_uuid,
4519 isshared=isshared,
4520 )
4521
4522 if content is None:
4523 self.logger.debug("Failed to create network {}.".format(network_name))
4524
4525 return None
4526
4527 try:
4528 vm_list_xmlroot = XmlElementTree.fromstring(content)
4529 vcd_uuid = vm_list_xmlroot.get("id").split(":")
4530 if len(vcd_uuid) == 4:
4531 self.logger.info(
4532 "Created new network name: {} uuid: {}".format(
4533 network_name, vcd_uuid[3]
4534 )
4535 )
4536
4537 return vcd_uuid[3]
4538 except Exception:
4539 self.logger.debug("Failed to create network {}".format(network_name))
4540
4541 return None
4542
4543 def create_network_rest(
4544 self,
4545 network_name=None,
4546 net_type="bridge",
4547 parent_network_uuid=None,
4548 ip_profile=None,
4549 isshared="true",
4550 ):
4551 """
4552 Method creates a network in vCloud Director
4553
4554 Args:
4555 network_name - is network name to be created.
4556 net_type - can be 'bridge','data','ptp','mgmt'.
4557 ip_profile is a dict containing the IP parameters of the network
4558 isshared - is a boolean
4559 parent_network_uuid - is parent provider vdc network that will be used for mapping.
4560 It is an optional attribute; by default, if no parent network is indicated, the first available one is used.
4561
4562 Returns:
4563 The XML response text of the created network, or None
4564 """
4565 client_as_admin = self.connect_as_admin()
4566
4567 if not client_as_admin:
4568 raise vimconn.VimConnConnectionException("Failed to connect vCD.")
4569
4570 if network_name is None:
4571 return None
4572
4573 url_list = [self.url, "/api/admin/vdc/", self.tenant_id]
4574 vm_list_rest_call = "".join(url_list)
4575
4576 if client_as_admin._session:
4577 headers = {
4578 "Accept": "application/*+xml;version=" + API_VERSION,
4579 "x-vcloud-authorization": client_as_admin._session.headers[
4580 "x-vcloud-authorization"
4581 ],
4582 }
4583 response = self.perform_request(
4584 req_type="GET", url=vm_list_rest_call, headers=headers
4585 )
4586 provider_network = None
4587 available_networks = None
4588 add_vdc_rest_url = None
4589
4590 if response.status_code != requests.codes.ok:
4591 self.logger.debug(
4592 "REST API call {} failed. Return status code {}".format(
4593 vm_list_rest_call, response.status_code
4594 )
4595 )
4596
4597 return None
4598 else:
4599 try:
4600 vm_list_xmlroot = XmlElementTree.fromstring(response.text)
4601 for child in vm_list_xmlroot:
4602 if child.tag.split("}")[1] == "ProviderVdcReference":
4603 provider_network = child.attrib.get("href")
4604 # application/vnd.vmware.admin.providervdc+xml
4605
4606 if child.tag.split("}")[1] == "Link":
4607 if (
4608 child.attrib.get("type")
4609 == "application/vnd.vmware.vcloud.orgVdcNetwork+xml"
4610 and child.attrib.get("rel") == "add"
4611 ):
4612 add_vdc_rest_url = child.attrib.get("href")
4613 except Exception:
4614 self.logger.debug(
4615 "Failed to parse response for REST API call {}".format(
4616 vm_list_rest_call
4617 )
4618 )
4619 self.logger.debug("Response body {}".format(response.text))
4620
4621 return None
4622
4623 # find pvdc provided available network
4624 response = self.perform_request(
4625 req_type="GET", url=provider_network, headers=headers
4626 )
4627
4628 if response.status_code != requests.codes.ok:
4629 self.logger.debug(
4630 "REST API call {} failed. Return status code {}".format(
4631 vm_list_rest_call, response.status_code
4632 )
4633 )
4634
4635 return None
4636
4637 if parent_network_uuid is None:
4638 try:
4639 vm_list_xmlroot = XmlElementTree.fromstring(response.text)
4640 for child in vm_list_xmlroot.iter():
4641 if child.tag.split("}")[1] == "AvailableNetworks":
4642 for networks in child.iter():
4643 # application/vnd.vmware.admin.network+xml
4644 if networks.attrib.get("href") is not None:
4645 available_networks = networks.attrib.get("href")
4646 break
4647 except Exception:
4648 return None
4649
4650 try:
4651 # Configure IP profile of the network
4652 ip_profile = (
4653 ip_profile if ip_profile is not None else DEFAULT_IP_PROFILE
4654 )
4655
4656 if (
4657 "subnet_address" not in ip_profile
4658 or ip_profile["subnet_address"] is None
4659 ):
4660 subnet_rand = random.randint(0, 255)
4661 ip_base = "192.168.{}.".format(subnet_rand)
4662 ip_profile["subnet_address"] = ip_base + "0/24"
4663 else:
4664 ip_base = ip_profile["subnet_address"].rsplit(".", 1)[0] + "."
4665
4666 if (
4667 "gateway_address" not in ip_profile
4668 or ip_profile["gateway_address"] is None
4669 ):
4670 ip_profile["gateway_address"] = ip_base + "1"
4671
4672 if "dhcp_count" not in ip_profile or ip_profile["dhcp_count"] is None:
4673 ip_profile["dhcp_count"] = DEFAULT_IP_PROFILE["dhcp_count"]
4674
4675 if (
4676 "dhcp_enabled" not in ip_profile
4677 or ip_profile["dhcp_enabled"] is None
4678 ):
4679 ip_profile["dhcp_enabled"] = DEFAULT_IP_PROFILE["dhcp_enabled"]
4680
4681 if (
4682 "dhcp_start_address" not in ip_profile
4683 or ip_profile["dhcp_start_address"] is None
4684 ):
4685 ip_profile["dhcp_start_address"] = ip_base + "3"
4686
4687 if "ip_version" not in ip_profile or ip_profile["ip_version"] is None:
4688 ip_profile["ip_version"] = DEFAULT_IP_PROFILE["ip_version"]
4689
4690 if "dns_address" not in ip_profile or ip_profile["dns_address"] is None:
4691 ip_profile["dns_address"] = ip_base + "2"
4692
4693 gateway_address = ip_profile["gateway_address"]
4694 dhcp_count = int(ip_profile["dhcp_count"])
4695 subnet_address = self.convert_cidr_to_netmask(
4696 ip_profile["subnet_address"]
4697 )
4698
4699 if ip_profile["dhcp_enabled"] is True:
4700 dhcp_enabled = "true"
4701 else:
4702 dhcp_enabled = "false"
4703
4704 dhcp_start_address = ip_profile["dhcp_start_address"]
4705
4706 # derive dhcp_end_address from dhcp_start_address & dhcp_count
4707 end_ip_int = int(netaddr.IPAddress(dhcp_start_address))
4708 end_ip_int += dhcp_count - 1
4709 dhcp_end_address = str(netaddr.IPAddress(end_ip_int))
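# Worked example (values hypothetical): dhcp_start_address "192.168.10.3" with
# dhcp_count 50 gives end_ip_int = int(IPAddress("192.168.10.3")) + 49,
# i.e. dhcp_end_address "192.168.10.52".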
4710
4711 # ip_version = ip_profile['ip_version']
4712 dns_address = ip_profile["dns_address"]
4713 except KeyError as exp:
4714 self.logger.debug("Create Network REST: Key error {}".format(exp))
4715
4716 raise vimconn.VimConnException(
4717 "Create Network REST: Key error{}".format(exp)
4718 )
4719
4720 # either use client provided UUID or search for a first available
4721 # if both are not defined we return none
4722 if parent_network_uuid is not None:
4723 provider_network = None
4724 available_networks = None
4725 add_vdc_rest_url = None
4726 url_list = [self.url, "/api/admin/vdc/", self.tenant_id, "/networks"]
4727 add_vdc_rest_url = "".join(url_list)
4728 url_list = [self.url, "/api/admin/network/", parent_network_uuid]
4729 available_networks = "".join(url_list)
4730
4731 # Creating all networks as Direct Org VDC type networks.
4732 # Unused in case of Underlay (data/ptp) network interface.
4733 fence_mode = "isolated"
4734 is_inherited = "false"
4735 dns_list = dns_address.split(";")
4736 dns1 = dns_list[0]
4737 dns2_text = ""
4738
4739 if len(dns_list) >= 2:
4740 dns2_text = "\n <Dns2>{}</Dns2>\n".format(
4741 dns_list[1]
4742 )
4743
4744 if net_type == "isolated":
4745 fence_mode = "isolated"
4746 data = """ <OrgVdcNetwork name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5">
4747 <Description>Openmano created</Description>
4748 <Configuration>
4749 <IpScopes>
4750 <IpScope>
4751 <IsInherited>{1:s}</IsInherited>
4752 <Gateway>{2:s}</Gateway>
4753 <Netmask>{3:s}</Netmask>
4754 <Dns1>{4:s}</Dns1>{5:s}
4755 <IsEnabled>{6:s}</IsEnabled>
4756 <IpRanges>
4757 <IpRange>
4758 <StartAddress>{7:s}</StartAddress>
4759 <EndAddress>{8:s}</EndAddress>
4760 </IpRange>
4761 </IpRanges>
4762 </IpScope>
4763 </IpScopes>
4764 <FenceMode>{9:s}</FenceMode>
4765 </Configuration>
4766 <IsShared>{10:s}</IsShared>
4767 </OrgVdcNetwork> """.format(
4768 escape(network_name),
4769 is_inherited,
4770 gateway_address,
4771 subnet_address,
4772 dns1,
4773 dns2_text,
4774 dhcp_enabled,
4775 dhcp_start_address,
4776 dhcp_end_address,
4777 fence_mode,
4778 isshared,
4779 )
4780 else:
4781 fence_mode = "bridged"
4782 data = """ <OrgVdcNetwork name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5">
4783 <Description>Openmano created</Description>
4784 <Configuration>
4785 <IpScopes>
4786 <IpScope>
4787 <IsInherited>{1:s}</IsInherited>
4788 <Gateway>{2:s}</Gateway>
4789 <Netmask>{3:s}</Netmask>
4790 <Dns1>{4:s}</Dns1>{5:s}
4791 <IsEnabled>{6:s}</IsEnabled>
4792 <IpRanges>
4793 <IpRange>
4794 <StartAddress>{7:s}</StartAddress>
4795 <EndAddress>{8:s}</EndAddress>
4796 </IpRange>
4797 </IpRanges>
4798 </IpScope>
4799 </IpScopes>
4800 <ParentNetwork href="{9:s}"/>
4801 <FenceMode>{10:s}</FenceMode>
4802 </Configuration>
4803 <IsShared>{11:s}</IsShared>
4804 </OrgVdcNetwork> """.format(
4805 escape(network_name),
4806 is_inherited,
4807 gateway_address,
4808 subnet_address,
4809 dns1,
4810 dns2_text,
4811 dhcp_enabled,
4812 dhcp_start_address,
4813 dhcp_end_address,
4814 available_networks,
4815 fence_mode,
4816 isshared,
4817 )
4818
4819 headers["Content-Type"] = "application/vnd.vmware.vcloud.orgVdcNetwork+xml"
4820 try:
4821 response = self.perform_request(
4822 req_type="POST", url=add_vdc_rest_url, headers=headers, data=data
4823 )
4824
4825 if response.status_code != 201:
4826 self.logger.debug(
4827 "Create Network POST REST API call failed. "
4828 "Return status code {}, response.text: {}".format(
4829 response.status_code, response.text
4830 )
4831 )
4832 else:
4833 network_task = self.get_task_from_response(response.text)
4834 self.logger.debug(
4835 "Create Network REST : Waiting for Network creation complete"
4836 )
4837 time.sleep(5)
4838 result = self.client.get_task_monitor().wait_for_success(
4839 task=network_task
4840 )
4841
4842 if result.get("status") == "success":
4843 return response.text
4844 else:
4845 self.logger.debug(
4846 "create_network_rest task failed. Network Create response : {}".format(
4847 response.text
4848 )
4849 )
4850 except Exception as exp:
4851 self.logger.debug("create_network_rest : Exception : {} ".format(exp))
4852
4853 return None
4854
4855 def convert_cidr_to_netmask(self, cidr_ip=None):
4856 """
4857 Method converts a CIDR prefix to a dotted-decimal netmask
4858 Args:
4859 cidr_ip : CIDR IP address
4860 Returns:
4861 netmask : Converted netmask
4862 """
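# Example: convert_cidr_to_netmask("192.168.1.0/24") returns "255.255.255.0";
# an address without a "/" prefix is returned unchanged.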
4863 if cidr_ip is not None:
4864 if "/" in cidr_ip:
4865 _, net_bits = cidr_ip.split("/")
4866 netmask = socket.inet_ntoa(
4867 struct.pack(">I", (0xFFFFFFFF << (32 - int(net_bits))) & 0xFFFFFFFF)
4868 )
4869 else:
4870 netmask = cidr_ip
4871
4872 return netmask
4873
4874 return None
4875
4876 def get_provider_rest(self, vca=None):
4877 """
4878 Method gets provider vdc view from vcloud director
4879
4880 Args:
4881 vca - is the active connection.
4882
4883 Returns:
4884 The XML content of the response, or None
4887 """
4888 url_list = [self.url, "/api/admin"]
4889
4890 if vca:
4891 headers = {
4892 "Accept": "application/*+xml;version=" + API_VERSION,
4893 "x-vcloud-authorization": self.client._session.headers[
4894 "x-vcloud-authorization"
4895 ],
4896 }
4897 response = self.perform_request(
4898 req_type="GET", url="".join(url_list), headers=headers
4899 )
4900
4901 if response.status_code == requests.codes.ok:
4902 return response.text
4903
4904 return None
4905
4906 def create_vdc(self, vdc_name=None):
4907 vdc_dict = {}
4908 xml_content = self.create_vdc_from_tmpl_rest(vdc_name=vdc_name)
4909
4910 if xml_content is not None:
4911 try:
4912 task_resp_xmlroot = XmlElementTree.fromstring(xml_content)
4913 for child in task_resp_xmlroot:
4914 if child.tag.split("}")[1] == "Owner":
4915 vdc_id = child.attrib.get("href").split("/")[-1]
4916 vdc_dict[vdc_id] = task_resp_xmlroot.get("href")
4917
4918 return vdc_dict
4919 except Exception:
4920 self.logger.debug("Response body {}".format(xml_content))
4921
4922 return None
4923
4924 def create_vdc_from_tmpl_rest(self, vdc_name=None):
4925 """
4926 Method creates a vdc in vCloud Director based on a VDC template.
4927 It uses a pre-defined template.
4928
4929 Args:
4930 vdc_name - name of a new vdc.
4931
4932 Returns:
4933 The XML content of the response, or None
4934 """
4935 # prerequisite: at least one vdc template should be available in vCD
4936 self.logger.info("Creating new vdc {}".format(vdc_name))
4937 vca = self.connect_as_admin()
4938
4939 if not vca:
4940 raise vimconn.VimConnConnectionException("Failed to connect vCD")
4941
4942 if vdc_name is None:
4943 return None
4944
4945 url_list = [self.url, "/api/vdcTemplates"]
4946 vm_list_rest_call = "".join(url_list)
4947 headers = {
4948 "Accept": "application/*+xml;version=" + API_VERSION,
4949 "x-vcloud-authorization": vca._session.headers["x-vcloud-authorization"],
4950 }
4951 response = self.perform_request(
4952 req_type="GET", url=vm_list_rest_call, headers=headers
4953 )
4954
4955 # container url to a template
4956 vdc_template_ref = None
4957 try:
4958 vm_list_xmlroot = XmlElementTree.fromstring(response.text)
4959 for child in vm_list_xmlroot:
4960 # application/vnd.vmware.admin.providervdc+xml
4961 # we need to find a template from which we instantiate the VDC
4962 if child.tag.split("}")[1] == "VdcTemplate":
4963 if (
4964 child.attrib.get("type")
4965 == "application/vnd.vmware.admin.vdcTemplate+xml"
4966 ):
4967 vdc_template_ref = child.attrib.get("href")
4968 except Exception:
4969 self.logger.debug(
4970 "Failed to parse response for REST API call {}".format(vm_list_rest_call)
4971 )
4972 self.logger.debug("Response body {}".format(response.text))
4973
4974 return None
4975
4976 # if we did not find the required pre-defined template, return None
4977 if vdc_template_ref is None:
4978 return None
4979
4980 try:
4981 # instantiate vdc
4982 url_list = [self.url, "/api/org/", self.org_uuid, "/action/instantiate"]
4983 vm_list_rest_call = "".join(url_list)
4984 data = """<InstantiateVdcTemplateParams name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5">
4985 <Source href="{1:s}"></Source>
4986 <Description>openmano</Description>
4987 </InstantiateVdcTemplateParams>""".format(
4988 vdc_name, vdc_template_ref
4989 )
4990 headers[
4991 "Content-Type"
4992 ] = "application/vnd.vmware.vcloud.instantiateVdcTemplateParams+xml"
4993 response = self.perform_request(
4994 req_type="POST", url=vm_list_rest_call, headers=headers, data=data
4995 )
4996 vdc_task = self.get_task_from_response(response.text)
4997 self.client.get_task_monitor().wait_for_success(task=vdc_task)
4998
4999 # if all is OK, respond with the content; otherwise None by default
5000 if response.status_code >= 200 and response.status_code < 300:
5001 return response.text
5002
5003 return None
5004 except Exception:
5005 self.logger.debug(
5006 "Failed to parse response for REST API call {}".format(vm_list_rest_call)
5007 )
5008 self.logger.debug("Response body {}".format(response.text))
5009
5010 return None
5011
5012 def get_vapp_details_rest(self, vapp_uuid=None, need_admin_access=False):
5013 """
5014 Method retrieves vApp details from vCloud Director
5015
5016 Args:
5017 vapp_uuid - is vapp identifier.
5018
5019 Returns:
5020 A dictionary with the parsed vApp details (empty if the call fails)
5021 """
5022 parsed_respond = {}
5023 vca = None
5024
5025 if need_admin_access:
5026 vca = self.connect_as_admin()
5027 else:
5028 vca = self.client
5029
5030 if not vca:
5031 raise vimconn.VimConnConnectionException("Failed to connect vCD")
5032 if vapp_uuid is None:
5033 return None
5034
5035 url_list = [self.url, "/api/vApp/vapp-", vapp_uuid]
5036 get_vapp_restcall = "".join(url_list)
5037
5038 if vca._session:
5039 headers = {
5040 "Accept": "application/*+xml;version=" + API_VERSION,
5041 "x-vcloud-authorization": vca._session.headers[
5042 "x-vcloud-authorization"
5043 ],
5044 }
5045 response = self.perform_request(
5046 req_type="GET", url=get_vapp_restcall, headers=headers
5047 )
5048
5049 if response.status_code == 403:
5050 if need_admin_access is False:
5051 response = self.retry_rest("GET", get_vapp_restcall)
5052
5053 if response.status_code != requests.codes.ok:
5054 self.logger.debug(
5055 "REST API call {} failed. Return status code {}".format(
5056 get_vapp_restcall, response.status_code
5057 )
5058 )
5059
5060 return parsed_respond
5061
5062 try:
5063 xmlroot_respond = XmlElementTree.fromstring(response.text)
5064 parsed_respond["ovfDescriptorUploaded"] = xmlroot_respond.attrib[
5065 "ovfDescriptorUploaded"
5066 ]
5067 namespaces = {
5068 "vssd": "http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData",
5069 "ovf": "http://schemas.dmtf.org/ovf/envelope/1",
5070 "vmw": "http://www.vmware.com/schema/ovf",
5071 "vm": "http://www.vmware.com/vcloud/v1.5",
5072 "rasd": "http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData",
5073 "vmext": "http://www.vmware.com/vcloud/extension/v1.5",
5074 "xmlns": "http://www.vmware.com/vcloud/v1.5",
5075 }
5076
5077 created_section = xmlroot_respond.find("vm:DateCreated", namespaces)
5078 if created_section is not None:
5079 parsed_respond["created"] = created_section.text
5080
5081 network_section = xmlroot_respond.find(
5082 "vm:NetworkConfigSection/vm:NetworkConfig", namespaces
5083 )
5084 if (
5085 network_section is not None
5086 and "networkName" in network_section.attrib
5087 ):
5088 parsed_respond["networkname"] = network_section.attrib[
5089 "networkName"
5090 ]
5091
5092 ipscopes_section = xmlroot_respond.find(
5093 "vm:NetworkConfigSection/vm:NetworkConfig/vm:Configuration/vm:IpScopes",
5094 namespaces,
5095 )
5096 if ipscopes_section is not None:
5097 for ipscope in ipscopes_section:
5098 for scope in ipscope:
5099 tag_key = scope.tag.split("}")[1]
5100 if tag_key == "IpRanges":
5101 ip_ranges = list(scope)
5102 for ipblock in ip_ranges:
5103 for block in ipblock:
5104 parsed_respond[
5105 block.tag.split("}")[1]
5106 ] = block.text
5107 else:
5108 parsed_respond[tag_key] = scope.text
5109
5110 # parse children section for other attrib
5111 children_section = xmlroot_respond.find("vm:Children/", namespaces)
5112 if children_section is not None:
5113 parsed_respond["name"] = children_section.attrib["name"]
5114 parsed_respond["nestedHypervisorEnabled"] = (
5115 children_section.attrib["nestedHypervisorEnabled"]
5116 if "nestedHypervisorEnabled" in children_section.attrib
5117 else None
5118 )
5119 parsed_respond["deployed"] = children_section.attrib["deployed"]
5120 parsed_respond["status"] = children_section.attrib["status"]
5121 parsed_respond["vmuuid"] = children_section.attrib["id"].split(":")[
5122 -1
5123 ]
5124 network_adapter = children_section.find(
5125 "vm:NetworkConnectionSection", namespaces
5126 )
5127 nic_list = []
5128 for adapters in network_adapter:
5129 adapter_key = adapters.tag.split("}")[1]
5130 if adapter_key == "PrimaryNetworkConnectionIndex":
5131 parsed_respond["primarynetwork"] = adapters.text
5132
5133 if adapter_key == "NetworkConnection":
5134 vnic = {}
5135 if "network" in adapters.attrib:
5136 vnic["network"] = adapters.attrib["network"]
5137 for adapter in adapters:
5138 setting_key = adapter.tag.split("}")[1]
5139 vnic[setting_key] = adapter.text
5140 nic_list.append(vnic)
5141
5142 for link in children_section:
5143 if link.tag.split("}")[1] == "Link" and "rel" in link.attrib:
5144 if link.attrib["rel"] == "screen:acquireTicket":
5145 parsed_respond["acquireTicket"] = link.attrib
5146
5147 if link.attrib["rel"] == "screen:acquireMksTicket":
5148 parsed_respond["acquireMksTicket"] = link.attrib
5149
5150 parsed_respond["interfaces"] = nic_list
5151 vCloud_extension_section = children_section.find(
5152 "xmlns:VCloudExtension", namespaces
5153 )
5154 if vCloud_extension_section is not None:
5155 vm_vcenter_info = {}
5156 vim_info = vCloud_extension_section.find(
5157 "vmext:VmVimInfo", namespaces
5158 )
5159 vmext = vim_info.find("vmext:VmVimObjectRef", namespaces)
5160
5161 if vmext is not None:
5162 vm_vcenter_info["vm_moref_id"] = vmext.find(
5163 "vmext:MoRef", namespaces
5164 ).text
5165
5166 parsed_respond["vm_vcenter_info"] = vm_vcenter_info
5167
5168 virtual_hardware_section = children_section.find(
5169 "ovf:VirtualHardwareSection", namespaces
5170 )
5171 vm_virtual_hardware_info = {}
5172 if virtual_hardware_section is not None:
5173 for item in virtual_hardware_section.iterfind(
5174 "ovf:Item", namespaces
5175 ):
5176 if (
5177 item.find("rasd:Description", namespaces).text
5178 == "Hard disk"
5179 ):
5180 disk_size = item.find(
5181 "rasd:HostResource", namespaces
5182 ).attrib["{" + namespaces["vm"] + "}capacity"]
5183 vm_virtual_hardware_info["disk_size"] = disk_size
5184 break
5185
5186 for link in virtual_hardware_section:
5187 if (
5188 link.tag.split("}")[1] == "Link"
5189 and "rel" in link.attrib
5190 ):
5191 if link.attrib["rel"] == "edit" and link.attrib[
5192 "href"
5193 ].endswith("/disks"):
5194 vm_virtual_hardware_info[
5195 "disk_edit_href"
5196 ] = link.attrib["href"]
5197 break
5198
5199 parsed_respond["vm_virtual_hardware"] = vm_virtual_hardware_info
5200 except Exception as exp:
5201 self.logger.info(
5202 "Error occurred calling rest api for getting vApp details {}".format(
5203 exp
5204 )
5205 )
5206
5207 return parsed_respond
5208
5209 def modify_vm_disk(self, vapp_uuid, flavor_disk):
5210 """
5211 Method resizes the VM disk to match the flavor, if needed
5212
5213 Args:
5214 vapp_uuid - is vapp identifier.
5215 flavor_disk - disk size as specified in VNFD (flavor)
5216
5217 Returns:
5218 True on success or when no change is needed, otherwise None
5219 """
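# Illustrative (values hypothetical): a 40 GB flavor disk becomes 40 * 1024 = 40960 MB;
# the disk is grown via modify_vm_disk_rest() only if this exceeds the current size.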
5220 status = None
5221 try:
5222 # Flavor disk is in GB; convert it into MB
5223 flavor_disk = int(flavor_disk) * 1024
5224 vm_details = self.get_vapp_details_rest(vapp_uuid)
5225
5226 if vm_details:
5227 vm_name = vm_details["name"]
5228 self.logger.info("VM: {} flavor_disk :{}".format(vm_name, flavor_disk))
5229
5230 if vm_details and "vm_virtual_hardware" in vm_details:
5231 vm_disk = int(vm_details["vm_virtual_hardware"]["disk_size"])
5232 disk_edit_href = vm_details["vm_virtual_hardware"]["disk_edit_href"]
5233 self.logger.info("VM: {} VM_disk :{}".format(vm_name, vm_disk))
5234
5235 if flavor_disk > vm_disk:
5236 status = self.modify_vm_disk_rest(disk_edit_href, flavor_disk)
5237 self.logger.info(
5238 "Modify disk of VM {} from {} to {} MB".format(
5239 vm_name, vm_disk, flavor_disk
5240 )
5241 )
5242 else:
5243 status = True
5244 self.logger.info("No need to modify disk of VM {}".format(vm_name))
5245
5246 return status
5247 except Exception as exp:
5248 self.logger.info("Error occurred while modifying disk size {}".format(exp))
5249
5250 def modify_vm_disk_rest(self, disk_href, disk_size):
5251 """
5252 Method modifies the VM disk size via the vCD REST API
5253
5254 Args:
5255 disk_href - vCD API URL to GET and PUT disk data
5256 disk_size - disk size as specified in VNFD (flavor)
5257
5258 Returns:
5259 True if the resize succeeded, False if it failed, or None on error
5260 """
5261 if disk_href is None or disk_size is None:
5262 return None
5263
5264 if self.client._session:
5265 headers = {
5266 "Accept": "application/*+xml;version=" + API_VERSION,
5267 "x-vcloud-authorization": self.client._session.headers[
5268 "x-vcloud-authorization"
5269 ],
5270 }
5271 response = self.perform_request(
5272 req_type="GET", url=disk_href, headers=headers
5273 )
5274
5275 if response.status_code == 403:
5276 response = self.retry_rest("GET", disk_href)
5277
5278 if response.status_code != requests.codes.ok:
5279 self.logger.debug(
5280 "GET REST API call {} failed. Return status code {}".format(
5281 disk_href, response.status_code
5282 )
5283 )
5284
5285 return None
5286
5287 try:
5288 lxmlroot_respond = lxmlElementTree.fromstring(response.content)
5289 namespaces = {
5290 prefix: uri for prefix, uri in lxmlroot_respond.nsmap.items() if prefix
5291 }
5292 namespaces["xmlns"] = "http://www.vmware.com/vcloud/v1.5"
5293
5294 for item in lxmlroot_respond.iterfind("xmlns:Item", namespaces):
5295 if item.find("rasd:Description", namespaces).text == "Hard disk":
5296 disk_item = item.find("rasd:HostResource", namespaces)
5297 if disk_item is not None:
5298 disk_item.attrib["{" + namespaces["xmlns"] + "}capacity"] = str(
5299 disk_size
5300 )
5301 break
5302
5303 data = lxmlElementTree.tostring(
5304 lxmlroot_respond, encoding="utf8", method="xml", xml_declaration=True
5305 )
5306
5307 # Send PUT request to modify disk size
5308 headers[
5309 "Content-Type"
5310 ] = "application/vnd.vmware.vcloud.rasdItemsList+xml; charset=ISO-8859-1"
5311
5312 response = self.perform_request(
5313 req_type="PUT", url=disk_href, headers=headers, data=data
5314 )
5315 if response.status_code == 403:
5316 add_headers = {"Content-Type": headers["Content-Type"]}
5317 response = self.retry_rest("PUT", disk_href, add_headers, data)
5318
5319 if response.status_code != 202:
5320 self.logger.debug(
5321 "PUT REST API call {} failed. Return status code {}".format(
5322 disk_href, response.status_code
5323 )
5324 )
5325 else:
5326 modify_disk_task = self.get_task_from_response(response.text)
5327 result = self.client.get_task_monitor().wait_for_success(
5328 task=modify_disk_task
5329 )
5330 if result.get("status") == "success":
5331 return True
5332 else:
5333 return False
5334
5335 return None
5336 except Exception as exp:
5337 self.logger.info(
5338 "Error occurred calling rest api for modifying disk size {}".format(exp)
5339 )
5340
5341 return None
5342
5343 def add_serial_device(self, vapp_uuid):
5344 """
5345 Method to attach a serial device to a VM
5346
5347 Args:
5348 vapp_uuid - uuid of vApp/VM
5349
5350 Returns:
5351 """
5352 self.logger.info("Add serial devices into vApp {}".format(vapp_uuid))
5353 _, content = self.get_vcenter_content()
5354 vm_moref_id = self.get_vm_moref_id(vapp_uuid)
5355
5356 if vm_moref_id:
5357 try:
5358 host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
5359 self.logger.info(
5360 "VM {} is currently on host {}".format(vm_obj, host_obj)
5361 )
5362 if host_obj and vm_obj:
5363 spec = vim.vm.ConfigSpec()
5364 spec.deviceChange = []
5365 serial_spec = vim.vm.device.VirtualDeviceSpec()
5366 serial_spec.operation = "add"
5367 serial_port = vim.vm.device.VirtualSerialPort()
5368 serial_port.yieldOnPoll = True
5369 backing = serial_port.URIBackingInfo()
5370 backing.serviceURI = "tcp://:65500"
5371 backing.direction = "server"
5372 serial_port.backing = backing
5373 serial_spec.device = serial_port
5374 spec.deviceChange.append(serial_spec)
5375 vm_obj.ReconfigVM_Task(spec=spec)
5376 self.logger.info("Adding serial device to VM {}".format(vm_obj))
5377 except vmodl.MethodFault as error:
5378 self.logger.error("Error occurred while adding serial device {}".format(error))
5379
5380 def add_pci_devices(self, vapp_uuid, pci_devices, vmname_andid):
5381 """
5382 Method to attach pci devices to VM
5383
5384 Args:
5385 vapp_uuid - uuid of vApp/VM
5386 pci_devices - pci devices information as specified in VNFD (flavor)
5387
5388 Returns:
5389 The status of the add-PCI-device task, the vm object and the
5390 vcenter_conect object
5391 """
5392 vm_obj = None
5393 self.logger.info(
5394 "Add pci devices {} into vApp {}".format(pci_devices, vapp_uuid)
5395 )
5396 vcenter_conect, content = self.get_vcenter_content()
5397 vm_moref_id = self.get_vm_moref_id(vapp_uuid)
5398
5399 if vm_moref_id:
5400 try:
5401 no_of_pci_devices = len(pci_devices)
5402 if no_of_pci_devices > 0:
5403 # Get VM and its host
5404 host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
5405 self.logger.info(
5406 "VM {} is currently on host {}".format(vm_obj, host_obj)
5407 )
5408
5409 if host_obj and vm_obj:
5410 # get PCI devices from the host on which the vapp is currently installed
5411 avilable_pci_devices = self.get_pci_devices(
5412 host_obj, no_of_pci_devices
5413 )
5414
5415 if avilable_pci_devices is None:
5416 # find other hosts with active pci devices
5417 (
5418 new_host_obj,
5419 avilable_pci_devices,
5420 ) = self.get_host_and_PCIdevices(content, no_of_pci_devices)
5421
5422 if (
5423 new_host_obj is not None
5424 and avilable_pci_devices is not None
5425 and len(avilable_pci_devices) > 0
5426 ):
5427 # Migrate vm to the host where PCI devices are available
5428 self.logger.info(
5429 "Relocate VM {} on new host {}".format(
5430 vm_obj, new_host_obj
5431 )
5432 )
5433
5434 task = self.relocate_vm(new_host_obj, vm_obj)
5435 if task is not None:
5436 result = self.wait_for_vcenter_task(
5437 task, vcenter_conect
5438 )
5439 self.logger.info(
5440 "Migrate VM status: {}".format(result)
5441 )
5442 host_obj = new_host_obj
5443 else:
5444 self.logger.info(
5445 "Failed to migrate VM {} to host {}".format(vmname_andid, new_host_obj)
5446 )
5447 raise vimconn.VimConnNotFoundException(
5448 "Fail to migrate VM : {} to host {}".format(
5449 vmname_andid, new_host_obj
5450 )
5451 )
5452
5453 if (
5454 host_obj is not None
5455 and avilable_pci_devices is not None
5456 and len(avilable_pci_devices) > 0
5457 ):
5458 # Add PCI devices one by one
5459 for pci_device in avilable_pci_devices:
5460 task = self.add_pci_to_vm(host_obj, vm_obj, pci_device)
5461 if task:
5462 status = self.wait_for_vcenter_task(
5463 task, vcenter_conect
5464 )
5465
5466 if status:
5467 self.logger.info(
5468 "Added PCI device {} to VM {}".format(
5469 pci_device, str(vm_obj)
5470 )
5471 )
5472 else:
5473 self.logger.error(
5474 "Fail to add PCI device {} to VM {}".format(
5475 pci_device, str(vm_obj)
5476 )
5477 )
5478
5479 return True, vm_obj, vcenter_conect
5480 else:
5481 self.logger.error(
5482 "Currently there is no host with"
5483 " {} available PCI devices, as required for VM {}".format(
5484 no_of_pci_devices, vmname_andid
5485 )
5486 )
5487
5488 raise vimconn.VimConnNotFoundException(
5489 "Currently there is no host with {} "
5490 "available PCI devices, as required for VM {}".format(
5491 no_of_pci_devices, vmname_andid
5492 )
5493 )
5494 else:
5495 self.logger.debug(
5496 "No information about PCI devices {}".format(pci_devices)
5497 )
5498 except vmodl.MethodFault as error:
5499 self.logger.error("Error occurred while adding PCI devices {}".format(error))
5500
5501 return None, vm_obj, vcenter_conect
5502
5503 def get_vm_obj(self, content, mob_id):
5504 """
5505 Method to get the vSphere VM object associated with a given moref ID
5506 Args:
5507 vapp_uuid - uuid of vApp/VM
5508 content - vCenter content object
5509 mob_id - mob_id of VM
5510
5511 Returns:
5512 VM and host object
5513 """
5514 vm_obj = None
5515 host_obj = None
5516
5517 try:
5518 container = content.viewManager.CreateContainerView(
5519 content.rootFolder, [vim.VirtualMachine], True
5520 )
5521 for vm in container.view:
5522 mobID = vm._GetMoId()
5523
5524 if mobID == mob_id:
5525 vm_obj = vm
5526 host_obj = vm_obj.runtime.host
5527 break
5528 except Exception as exp:
5529 self.logger.error("Error occurred while finding VM object : {}".format(exp))
5530
5531 return host_obj, vm_obj
5532
5533 def get_pci_devices(self, host, need_devices):
5534 """
5535 Method to get the details of pci devices on given host
5536 Args:
5537 host - vSphere host object
5538 need_devices - number of pci devices needed on host
5539
5540 Returns:
5541 array of pci devices
5542 """
5543 all_devices = []
5544 all_device_ids = []
5545 used_devices_ids = []
5546
5547 try:
5548 if host:
5549 pciPassthruInfo = host.config.pciPassthruInfo
5550 pciDevies = host.hardware.pciDevice
5551
5552 for pci_status in pciPassthruInfo:
5553 if pci_status.passthruActive:
5554 for device in pciDevies:
5555 if device.id == pci_status.id:
5556 all_device_ids.append(device.id)
5557 all_devices.append(device)
5558
5559 # check if devices are in use
5560 avalible_devices = all_devices
5561 for vm in host.vm:
5562 if vm.runtime.powerState == vim.VirtualMachinePowerState.poweredOn:
5563 vm_devices = vm.config.hardware.device
5564 for device in vm_devices:
5565 if type(device) is vim.vm.device.VirtualPCIPassthrough:
5566 if device.backing.id in all_device_ids:
5567 for use_device in avalible_devices:
5568 if use_device.id == device.backing.id:
5569 avalible_devices.remove(use_device)
5570
5571 used_devices_ids.append(device.backing.id)
5572 self.logger.debug(
5573 "Device {} from devices {}"
5574 "is in use".format(device.backing.id, device)
5575 )
5576 if len(avalible_devices) < need_devices:
5577 self.logger.debug(
5578 "Host {} don't have {} number of active devices".format(
5579 host, need_devices
5580 )
5581 )
5582 self.logger.debug(
5583 "found only {} devices {}".format(
5584 len(avalible_devices), avalible_devices
5585 )
5586 )
5587
5588 return None
5589 else:
5590 required_devices = avalible_devices[:need_devices]
5591 self.logger.info(
5592 "Found {} PCI devices on host {} but required only {}".format(
5593 len(avalible_devices), host, need_devices
5594 )
5595 )
5596 self.logger.info(
5597 "Retruning {} devices as {}".format(need_devices, required_devices)
5598 )
5599
5600 return required_devices
5601 except Exception as exp:
5602 self.logger.error(
5603 "Error {} occurred while finding pci devices on host: {}".format(
5604 exp, host
5605 )
5606 )
5607
5608 return None
5609
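# Illustrative note (a sketch, not taken verbatim from this module): entries in
# host.config.pciPassthruInfo expose an "id" such as "0000:81:00.0" plus a
# passthruActive flag, while host.hardware.pciDevice entries carry the matching
# "id" together with vendorId/deviceId. get_pci_devices() pairs the two lists on
# "id" and then discards any device whose id already appears as a
# VirtualPCIPassthrough backing on a powered-on VM, conceptually:
#   active_ids = [p.id for p in host.config.pciPassthruInfo if p.passthruActive]
#   candidates = [d for d in host.hardware.pciDevice if d.id in active_ids]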
5610 def get_host_and_PCIdevices(self, content, need_devices):
5611 """
5612 Method to get the details of PCI devices on all hosts
5613
5614 Args:
5615 content - vSphere host object
5616 need_devices - number of pci devices needed on host
5617
5618 Returns:
5619 array of pci devices and host object
5620 """
5621 host_obj = None
5622 pci_device_objs = None
5623
5624 try:
5625 if content:
5626 container = content.viewManager.CreateContainerView(
5627 content.rootFolder, [vim.HostSystem], True
5628 )
5629 for host in container.view:
5630 devices = self.get_pci_devices(host, need_devices)
5631
5632 if devices:
5633 host_obj = host
5634 pci_device_objs = devices
5635 break
5636 except Exception as exp:
5637 self.logger.error(
5638 "Error {} occurred while finding pci devices on host: {}".format(
5639 exp, host_obj
5640 )
5641 )
5642
5643 return host_obj, pci_device_objs
5644
5645 def relocate_vm(self, dest_host, vm):
5646 """
5647 Method to relocate a VM to a new host
5648
5649 Args:
5650 dest_host - vSphere host object
5651 vm - vSphere VM object
5652
5653 Returns:
5654 task object
5655 """
5656 task = None
5657
5658 try:
5659 relocate_spec = vim.vm.RelocateSpec(host=dest_host)
5660 task = vm.Relocate(relocate_spec)
5661 self.logger.info(
5662 "Migrating {} to destination host {}".format(vm, dest_host)
5663 )
5664 except Exception as exp:
5665 self.logger.error(
5666 "Error occurred while relocate VM {} to new host {}: {}".format(
5667 dest_host, vm, exp
5668 )
5669 )
5670
5671 return task
5672
5673 def wait_for_vcenter_task(self, task, actionName="job", hideResult=False):
5674 """
5675 Waits and provides updates on a vSphere task
5676 """
5677 while task.info.state == vim.TaskInfo.State.running:
5678 time.sleep(2)
5679
5680 if task.info.state == vim.TaskInfo.State.success:
5681 if task.info.result is not None and not hideResult:
5682 self.logger.info(
5683 "{} completed successfully, result: {}".format(
5684 actionName, task.info.result
5685 )
5686 )
5687 else:
5688 self.logger.info("Task {} completed successfully.".format(actionName))
5689 else:
5690 self.logger.error(
5691 "{} did not complete successfully: {} ".format(
5692 actionName, task.info.error
5693 )
5694 )
5695
5696 return task.info.result
5697
5698 def add_pci_to_vm(self, host_object, vm_object, host_pci_dev):
5699 """
5700 Method to add pci device in given VM
5701
5702 Args:
5703 host_object - vSphere host object
5704 vm_object - vSphere VM object
5705 host_pci_dev - host_pci_dev must be one of the devices from the
5706 host_object.hardware.pciDevice list
5707 which is configured as a PCI passthrough device
5708
5709 Returns:
5710 task object
5711 """
5712 task = None
5713
5714 if vm_object and host_object and host_pci_dev:
5715 try:
5716 # Add PCI device to VM
5717 pci_passthroughs = vm_object.environmentBrowser.QueryConfigTarget(
5718 host=None
5719 ).pciPassthrough
5720 systemid_by_pciid = {
5721 item.pciDevice.id: item.systemId for item in pci_passthroughs
5722 }
5723
5724 if host_pci_dev.id not in systemid_by_pciid:
5725 self.logger.error(
5726 "Device {} is not a passthrough device ".format(host_pci_dev)
5727 )
5728 return None
5729
5730 deviceId = hex(host_pci_dev.deviceId % 2**16).lstrip("0x")
5731 backing = vim.VirtualPCIPassthroughDeviceBackingInfo(
5732 deviceId=deviceId,
5733 id=host_pci_dev.id,
5734 systemId=systemid_by_pciid[host_pci_dev.id],
5735 vendorId=host_pci_dev.vendorId,
5736 deviceName=host_pci_dev.deviceName,
5737 )
5738
5739 hba_object = vim.VirtualPCIPassthrough(key=-100, backing=backing)
5740 new_device_config = vim.VirtualDeviceConfigSpec(device=hba_object)
5741 new_device_config.operation = "add"
5742 vmConfigSpec = vim.vm.ConfigSpec()
5743 vmConfigSpec.deviceChange = [new_device_config]
5744 task = vm_object.ReconfigVM_Task(spec=vmConfigSpec)
5745 self.logger.info(
5746 "Adding PCI device {} into VM {} from host {} ".format(
5747 host_pci_dev, vm_object, host_object
5748 )
5749 )
5750 except Exception as exp:
5751 self.logger.error(
5752 "Error occurred while adding pci devive {} to VM {}: {}".format(
5753 host_pci_dev, vm_object, exp
5754 )
5755 )
5756
5757 return task
5758
5759 def get_vm_vcenter_info(self):
5760 """
5761 Method to get vCenter connection details
5762
5763 Args:
5764 None
5765
5766 Returns:
5767 dict with vCenter IP, port, user and password details
5768 """
5769 vm_vcenter_info = {}
5770
5771 if self.vcenter_ip is not None:
5772 vm_vcenter_info["vm_vcenter_ip"] = self.vcenter_ip
5773 else:
5774 raise vimconn.VimConnException(
5775 message="vCenter IP is not provided."
5776 " Please provide vCenter IP while attaching datacenter "
5777 "to tenant in --config"
5778 )
5779
5780 if self.vcenter_port is not None:
5781 vm_vcenter_info["vm_vcenter_port"] = self.vcenter_port
5782 else:
5783 raise vimconn.VimConnException(
5784 message="vCenter port is not provided."
5785 " Please provide vCenter port while attaching datacenter "
5786 "to tenant in --config"
5787 )
5788
5789 if self.vcenter_user is not None:
5790 vm_vcenter_info["vm_vcenter_user"] = self.vcenter_user
5791 else:
5792 raise vimconn.VimConnException(
5793 message="vCenter user is not provided."
5794 " Please provide vCenter user while attaching datacenter "
5795 "to tenant in --config"
5796 )
5797
5798 if self.vcenter_password is not None:
5799 vm_vcenter_info["vm_vcenter_password"] = self.vcenter_password
5800 else:
5801 raise vimconn.VimConnException(
5802 message="vCenter user password is not provided."
5803 " Please provide vCenter user password while attaching datacenter "
5804 "to tenant in --config"
5805 )
5806
5807 return vm_vcenter_info
5808
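# Illustrative sketch (hypothetical values; assumes the vcenter_* attributes are
# populated from the matching keys given in --config when the datacenter is
# attached to the tenant):
#   config:
#     vcenter_ip: 10.0.0.5
#     vcenter_port: 443
#     vcenter_user: administrator@vsphere.local
#     vcenter_password: secret
# get_vm_vcenter_info() would then return
#   {"vm_vcenter_ip": "10.0.0.5", "vm_vcenter_port": 443,
#    "vm_vcenter_user": "administrator@vsphere.local",
#    "vm_vcenter_password": "secret"}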
5809 def get_vm_pci_details(self, vmuuid):
5810 """
5811 Method to get VM PCI device details from vCenter
5812
5813 Args:
5814 vmuuid - vApp UUID of the VM
5815
5816 Returns:
5817 dict of PCI devices attached to VM
5818
5819 """
5820 vm_pci_devices_info = {}
5821
5822 try:
5823 _, content = self.get_vcenter_content()
5824 vm_moref_id = self.get_vm_moref_id(vmuuid)
5825 if vm_moref_id:
5826 # Get VM and its host
5827 if content:
5828 host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
5829 if host_obj and vm_obj:
5830 vm_pci_devices_info["host_name"] = host_obj.name
5831 vm_pci_devices_info["host_ip"] = host_obj.config.network.vnic[
5832 0
5833 ].spec.ip.ipAddress
5834
5835 for device in vm_obj.config.hardware.device:
5836 if type(device) == vim.vm.device.VirtualPCIPassthrough:
5837 device_details = {
5838 "devide_id": device.backing.id,
5839 "pciSlotNumber": device.slotInfo.pciSlotNumber,
5840 }
5841 vm_pci_devices_info[
5842 device.deviceInfo.label
5843 ] = device_details
5844 else:
5845 self.logger.error(
5846 "Can not connect to vCenter while getting "
5847 "PCI devices infromationn"
5848 )
5849
5850 return vm_pci_devices_info
5851 except Exception as exp:
5852 self.logger.error(
5853 "Error occurred while getting VM information" " for VM : {}".format(exp)
5854 )
5855
5856 raise vimconn.VimConnException(message=exp)
5857
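# Illustrative sketch (hypothetical values): get_vm_pci_details() returns a dict
# keyed by the vSphere device label plus host information, e.g.
#   {"host_name": "esxi-01", "host_ip": "10.0.0.11",
#    "PCI device 0": {"devide_id": "0000:81:00.0", "pciSlotNumber": 160}}
# ("devide_id" mirrors the key spelling actually set above).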
5858 def reserve_memory_for_all_vms(self, vapp, memory_mb):
5859 """
5860 Method to reserve memory for all VMs
5861 Args :
5862 vapp - VApp
5863 memory_mb - Memory in MB
5864 Returns:
5865 None
5866 """
5867 self.logger.info("Reserve memory for all VMs")
5868
5869 for vms in vapp.get_all_vms():
5870 vm_id = vms.get("id").split(":")[-1]
5871 url_rest_call = "{}/api/vApp/vm-{}/virtualHardwareSection/memory".format(
5872 self.url, vm_id
5873 )
5874 headers = {
5875 "Accept": "application/*+xml;version=" + API_VERSION,
5876 "x-vcloud-authorization": self.client._session.headers[
5877 "x-vcloud-authorization"
5878 ],
5879 }
5880 headers["Content-Type"] = "application/vnd.vmware.vcloud.rasdItem+xml"
5881 response = self.perform_request(
5882 req_type="GET", url=url_rest_call, headers=headers
5883 )
5884
5885 if response.status_code == 403:
5886 response = self.retry_rest("GET", url_rest_call)
5887
5888 if response.status_code != 200:
5889 self.logger.error(
5890 "REST call {} failed reason : {}"
5891 "status code : {}".format(
5892 url_rest_call, response.text, response.status_code
5893 )
5894 )
5895 raise vimconn.VimConnException(
5896 "reserve_memory_for_all_vms : Failed to get " "memory"
5897 )
5898
5899 bytexml = bytes(bytearray(response.text, encoding="utf-8"))
5900 contentelem = lxmlElementTree.XML(bytexml)
5901 namespaces = {
5902 prefix: uri for prefix, uri in contentelem.nsmap.items() if prefix
5903 }
5904 namespaces["xmlns"] = "http://www.vmware.com/vcloud/v1.5"
5905
5906 # Find the reservation element in the response
5907 memelem_list = contentelem.findall(".//rasd:Reservation", namespaces)
5908 for memelem in memelem_list:
5909 memelem.text = str(memory_mb)
5910
5911 newdata = lxmlElementTree.tostring(contentelem, pretty_print=True)
5912
5913 response = self.perform_request(
5914 req_type="PUT", url=url_rest_call, headers=headers, data=newdata
5915 )
5916
5917 if response.status_code == 403:
5918 add_headers = {"Content-Type": headers["Content-Type"]}
5919 response = self.retry_rest("PUT", url_rest_call, add_headers, newdata)
5920
5921 if response.status_code != 202:
5922 self.logger.error(
5923 "REST call {} failed reason : {}"
5924 "status code : {} ".format(
5925 url_rest_call, response.text, response.status_code
5926 )
5927 )
5928 raise vimconn.VimConnException(
5929 "reserve_memory_for_all_vms : Failed to update "
5930 "virtual hardware memory section"
5931 )
5932 else:
5933 mem_task = self.get_task_from_response(response.text)
5934 result = self.client.get_task_monitor().wait_for_success(task=mem_task)
5935
5936 if result.get("status") == "success":
5937 self.logger.info(
5938 "reserve_memory_for_all_vms(): VM {} succeeded ".format(vm_id)
5939 )
5940 else:
5941 self.logger.error(
5942 "reserve_memory_for_all_vms(): VM {} failed ".format(vm_id)
5943 )
5944
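# Illustrative sketch (abridged, hypothetical values): the memory item rewritten
# above is an OVF RASD item; only rasd:Reservation is changed, e.g.
#   <Item xmlns:rasd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData">
#     <rasd:AllocationUnits>byte * 2^20</rasd:AllocationUnits>
#     <rasd:Reservation>4096</rasd:Reservation>
#     <rasd:VirtualQuantity>4096</rasd:VirtualQuantity>
#   </Item>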
5945 def connect_vapp_to_org_vdc_network(self, vapp_id, net_name):
5946 """
5947 Configure VApp network config with org vdc network
5948 Args :
5949 vapp_id - vApp UUID; net_name - name of the org VDC network
5950 Returns:
5951 None
5952 """
5953
5954 self.logger.info(
5955 "Connecting vapp {} to org vdc network {}".format(vapp_id, net_name)
5956 )
5957
5958 url_rest_call = "{}/api/vApp/vapp-{}/networkConfigSection/".format(
5959 self.url, vapp_id
5960 )
5961
5962 headers = {
5963 "Accept": "application/*+xml;version=" + API_VERSION,
5964 "x-vcloud-authorization": self.client._session.headers[
5965 "x-vcloud-authorization"
5966 ],
5967 }
5968 response = self.perform_request(
5969 req_type="GET", url=url_rest_call, headers=headers
5970 )
5971
5972 if response.status_code == 403:
5973 response = self.retry_rest("GET", url_rest_call)
5974
5975 if response.status_code != 200:
5976 self.logger.error(
5977 "REST call {} failed reason : {}"
5978 "status code : {}".format(
5979 url_rest_call, response.text, response.status_code
5980 )
5981 )
5982 raise vimconn.VimConnException(
5983 "connect_vapp_to_org_vdc_network : Failed to get "
5984 "network config section"
5985 )
5986
5987 data = response.text
5988 headers[
5989 "Content-Type"
5990 ] = "application/vnd.vmware.vcloud.networkConfigSection+xml"
5991 net_id = self.get_network_id_by_name(net_name)
5992 if not net_id:
5993 raise vimconn.VimConnException(
5994 "connect_vapp_to_org_vdc_network : Failed to find " "existing network"
5995 )
5996
5997 bytexml = bytes(bytearray(data, encoding="utf-8"))
5998 newelem = lxmlElementTree.XML(bytexml)
5999 namespaces = {prefix: uri for prefix, uri in newelem.nsmap.items() if prefix}
6000 namespaces["xmlns"] = "http://www.vmware.com/vcloud/v1.5"
6001 nwcfglist = newelem.findall(".//xmlns:NetworkConfig", namespaces)
6002
6003 # VCD 9.7 returns an incorrect parentnetwork element. Fix it before PUT operation
6004 parentnetworklist = newelem.findall(".//xmlns:ParentNetwork", namespaces)
6005 if parentnetworklist:
6006 for pn in parentnetworklist:
6007 if "href" not in pn.keys():
6008 id_val = pn.get("id")
6009 href_val = "{}/api/network/{}".format(self.url, id_val)
6010 pn.set("href", href_val)
6011
6012 newstr = """<NetworkConfig networkName="{}">
6013 <Configuration>
6014 <ParentNetwork href="{}/api/network/{}"/>
6015 <FenceMode>bridged</FenceMode>
6016 </Configuration>
6017 </NetworkConfig>
6018 """.format(
6019 net_name, self.url, net_id
6020 )
6021 newcfgelem = lxmlElementTree.fromstring(newstr)
6022 if nwcfglist:
6023 nwcfglist[0].addnext(newcfgelem)
6024
6025 newdata = lxmlElementTree.tostring(newelem, pretty_print=True)
6026
6027 response = self.perform_request(
6028 req_type="PUT", url=url_rest_call, headers=headers, data=newdata
6029 )
6030
6031 if response.status_code == 403:
6032 add_headers = {"Content-Type": headers["Content-Type"]}
6033 response = self.retry_rest("PUT", url_rest_call, add_headers, newdata)
6034
6035 if response.status_code != 202:
6036 self.logger.error(
6037 "REST call {} failed reason : {}"
6038 "status code : {} ".format(
6039 url_rest_call, response.text, response.status_code
6040 )
6041 )
6042 raise vimconn.VimConnException(
6043 "connect_vapp_to_org_vdc_network : Failed to update "
6044 "network config section"
6045 )
6046 else:
6047 vapp_task = self.get_task_from_response(response.text)
6048 result = self.client.get_task_monitor().wait_for_success(task=vapp_task)
6049 if result.get("status") == "success":
6050 self.logger.info(
6051 "connect_vapp_to_org_vdc_network(): Vapp {} connected to "
6052 "network {}".format(vapp_id, net_name)
6053 )
6054 else:
6055 self.logger.error(
6056 "connect_vapp_to_org_vdc_network(): Vapp {} failed to "
6057 "connect to network {}".format(vapp_id, net_name)
6058 )
6059
6060 def remove_primary_network_adapter_from_all_vms(self, vapp):
6061 """
6062 Method to remove the primary network adapter from all VMs
6063 Args :
6064 vapp - VApp
6065 Returns:
6066 None
6067 """
6068 self.logger.info("Removing network adapter from all VMs")
6069
6070 for vms in vapp.get_all_vms():
6071 vm_id = vms.get("id").split(":")[-1]
6072
6073 url_rest_call = "{}/api/vApp/vm-{}/networkConnectionSection/".format(
6074 self.url, vm_id
6075 )
6076
6077 headers = {
6078 "Accept": "application/*+xml;version=" + API_VERSION,
6079 "x-vcloud-authorization": self.client._session.headers[
6080 "x-vcloud-authorization"
6081 ],
6082 }
6083 response = self.perform_request(
6084 req_type="GET", url=url_rest_call, headers=headers
6085 )
6086
6087 if response.status_code == 403:
6088 response = self.retry_rest("GET", url_rest_call)
6089
6090 if response.status_code != 200:
6091 self.logger.error(
6092 "REST call {} failed reason : {}"
6093 "status code : {}".format(
6094 url_rest_call, response.text, response.status_code
6095 )
6096 )
6097 raise vimconn.VimConnException(
6098 "remove_primary_network_adapter : Failed to get "
6099 "network connection section"
6100 )
6101
6102 data = response.text
6103 data = data.split('<Link rel="edit"')[0]
6104
6105 headers[
6106 "Content-Type"
6107 ] = "application/vnd.vmware.vcloud.networkConnectionSection+xml"
6108
6109 newdata = """<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
6110 <NetworkConnectionSection xmlns="http://www.vmware.com/vcloud/v1.5"
6111 xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1"
6112 xmlns:vssd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData"
6113 xmlns:common="http://schemas.dmtf.org/wbem/wscim/1/common"
6114 xmlns:rasd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData"
6115 xmlns:vmw="http://www.vmware.com/schema/ovf"
6116 xmlns:ovfenv="http://schemas.dmtf.org/ovf/environment/1"
6117 xmlns:vmext="http://www.vmware.com/vcloud/extension/v1.5"
6118 xmlns:ns9="http://www.vmware.com/vcloud/versions"
6119 href="{url}" type="application/vnd.vmware.vcloud.networkConnectionSection+xml"
6120 ovf:required="false">
6121 <ovf:Info>Specifies the available VM network connections</ovf:Info>
6122 <PrimaryNetworkConnectionIndex>0</PrimaryNetworkConnectionIndex>
6123 <Link rel="edit" href="{url}"
6124 type="application/vnd.vmware.vcloud.networkConnectionSection+xml"/>
6125 </NetworkConnectionSection>""".format(
6126 url=url_rest_call
6127 )
6128 response = self.perform_request(
6129 req_type="PUT", url=url_rest_call, headers=headers, data=newdata
6130 )
6131
6132 if response.status_code == 403:
6133 add_headers = {"Content-Type": headers["Content-Type"]}
6134 response = self.retry_rest("PUT", url_rest_call, add_headers, newdata)
6135
6136 if response.status_code != 202:
6137 self.logger.error(
6138 "REST call {} failed reason : {}"
6139 "status code : {} ".format(
6140 url_rest_call, response.text, response.status_code
6141 )
6142 )
6143 raise vimconn.VimConnException(
6144 "remove_primary_network_adapter : Failed to update "
6145 "network connection section"
6146 )
6147 else:
6148 nic_task = self.get_task_from_response(response.text)
6149 result = self.client.get_task_monitor().wait_for_success(task=nic_task)
6150 if result.get("status") == "success":
6151 self.logger.info(
6152 "remove_primary_network_adapter(): VM {} conneced to "
6153 "default NIC type".format(vm_id)
6154 )
6155 else:
6156 self.logger.error(
6157 "remove_primary_network_adapter(): VM {} failed to "
6158 "connect NIC type".format(vm_id)
6159 )
6160
6161 def add_network_adapter_to_vms(
6162 self, vapp, network_name, primary_nic_index, nicIndex, net, nic_type=None
6163 ):
6164 """
6165 Method to add a network adapter to all VMs in the vApp
6166 Args :
6167 network_name - name of network
6168 primary_nic_index - int value for primary nic index
6169 nicIndex - int value for nic index
6170 nic_type - network adapter model to attach to the VM
6171 Returns:
6172 None
6173 """
6174
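        # Illustrative note: the IP allocation mode written into the NetworkConnection
        # element below is derived from the net dict: floating_ip -> "POOL", explicit
        # ip_address -> "MANUAL", otherwise "DHCP". A hypothetical net argument such as
        #   {"ip_address": "192.168.10.5", "mac_address": "00:50:56:aa:bb:cc"}
        # therefore yields IpAddressAllocationMode MANUAL plus the IpAddress and
        # MACAddress tags inserted further down.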
6175 self.logger.info(
6176 "Add network adapter to VM: network_name {} nicIndex {} nic_type {}".format(
6177 network_name, nicIndex, nic_type
6178 )
6179 )
6180 try:
6181 ip_address = None
6182 floating_ip = False
6183 mac_address = None
6184 if "floating_ip" in net:
6185 floating_ip = net["floating_ip"]
6186
6187 # Stub for ip_address feature
6188 if "ip_address" in net:
6189 ip_address = net["ip_address"]
6190
6191 if "mac_address" in net:
6192 mac_address = net["mac_address"]
6193
6194 if floating_ip:
6195 allocation_mode = "POOL"
6196 elif ip_address:
6197 allocation_mode = "MANUAL"
6198 else:
6199 allocation_mode = "DHCP"
6200
6201 if not nic_type:
6202 for vms in vapp.get_all_vms():
6203 vm_id = vms.get("id").split(":")[-1]
6204
6205 url_rest_call = (
6206 "{}/api/vApp/vm-{}/networkConnectionSection/".format(
6207 self.url, vm_id
6208 )
6209 )
6210
6211 headers = {
6212 "Accept": "application/*+xml;version=" + API_VERSION,
6213 "x-vcloud-authorization": self.client._session.headers[
6214 "x-vcloud-authorization"
6215 ],
6216 }
6217 response = self.perform_request(
6218 req_type="GET", url=url_rest_call, headers=headers
6219 )
6220
6221 if response.status_code == 403:
6222 response = self.retry_rest("GET", url_rest_call)
6223
6224 if response.status_code != 200:
6225 self.logger.error(
6226 "REST call {} failed reason : {}"
6227 "status code : {}".format(
6228 url_rest_call, response.text, response.status_code
6229 )
6230 )
6231 raise vimconn.VimConnException(
6232 "add_network_adapter_to_vms : Failed to get "
6233 "network connection section"
6234 )
6235
6236 data = response.text
6237 data = data.split('<Link rel="edit"')[0]
6238 if "<PrimaryNetworkConnectionIndex>" not in data:
6239 self.logger.debug("add_network_adapter PrimaryNIC not in data")
6240 item = """<PrimaryNetworkConnectionIndex>{}</PrimaryNetworkConnectionIndex>
6241 <NetworkConnection network="{}">
6242 <NetworkConnectionIndex>{}</NetworkConnectionIndex>
6243 <IsConnected>true</IsConnected>
6244 <IpAddressAllocationMode>{}</IpAddressAllocationMode>
6245 </NetworkConnection>""".format(
6246 primary_nic_index, network_name, nicIndex, allocation_mode
6247 )
6248
6249 # Stub for ip_address feature
6250 if ip_address:
6251 ip_tag = "<IpAddress>{}</IpAddress>".format(ip_address)
6252 item = item.replace(
6253 "</NetworkConnectionIndex>\n",
6254 "</NetworkConnectionIndex>\n{}\n".format(ip_tag),
6255 )
6256
6257 if mac_address:
6258 mac_tag = "<MACAddress>{}</MACAddress>".format(mac_address)
6259 item = item.replace(
6260 "</IsConnected>\n",
6261 "</IsConnected>\n{}\n".format(mac_tag),
6262 )
6263
6264 data = data.replace(
6265 "</ovf:Info>\n",
6266 "</ovf:Info>\n{}\n</NetworkConnectionSection>".format(item),
6267 )
6268 else:
6269 self.logger.debug("add_network_adapter PrimaryNIC in data")
6270 new_item = """<NetworkConnection network="{}">
6271 <NetworkConnectionIndex>{}</NetworkConnectionIndex>
6272 <IsConnected>true</IsConnected>
6273 <IpAddressAllocationMode>{}</IpAddressAllocationMode>
6274 </NetworkConnection>""".format(
6275 network_name, nicIndex, allocation_mode
6276 )
6277
6278 # Stub for ip_address feature
6279 if ip_address:
6280 ip_tag = "<IpAddress>{}</IpAddress>".format(ip_address)
6281 new_item = new_item.replace(
6282 "</NetworkConnectionIndex>\n",
6283 "</NetworkConnectionIndex>\n{}\n".format(ip_tag),
6284 )
6285
6286 if mac_address:
6287 mac_tag = "<MACAddress>{}</MACAddress>".format(mac_address)
6288 new_item = new_item.replace(
6289 "</IsConnected>\n",
6290 "</IsConnected>\n{}\n".format(mac_tag),
6291 )
6292
6293 data = data + new_item + "</NetworkConnectionSection>"
6294
6295 headers[
6296 "Content-Type"
6297 ] = "application/vnd.vmware.vcloud.networkConnectionSection+xml"
6298
6299 response = self.perform_request(
6300 req_type="PUT", url=url_rest_call, headers=headers, data=data
6301 )
6302
6303 if response.status_code == 403:
6304 add_headers = {"Content-Type": headers["Content-Type"]}
6305 response = self.retry_rest(
6306 "PUT", url_rest_call, add_headers, data
6307 )
6308
6309 if response.status_code != 202:
6310 self.logger.error(
6311 "REST call {} failed reason : {}"
6312 "status code : {} ".format(
6313 url_rest_call, response.text, response.status_code
6314 )
6315 )
6316 raise vimconn.VimConnException(
6317 "add_network_adapter_to_vms : Failed to update "
6318 "network connection section"
6319 )
6320 else:
6321 nic_task = self.get_task_from_response(response.text)
6322 result = self.client.get_task_monitor().wait_for_success(
6323 task=nic_task
6324 )
6325
6326 if result.get("status") == "success":
6327 self.logger.info(
6328 "add_network_adapter_to_vms(): VM {} conneced to "
6329 "default NIC type".format(vm_id)
6330 )
6331 else:
6332 self.logger.error(
6333 "add_network_adapter_to_vms(): VM {} failed to "
6334 "connect NIC type".format(vm_id)
6335 )
6336 else:
6337 for vms in vapp.get_all_vms():
6338 vm_id = vms.get("id").split(":")[-1]
6339
6340 url_rest_call = (
6341 "{}/api/vApp/vm-{}/networkConnectionSection/".format(
6342 self.url, vm_id
6343 )
6344 )
6345
6346 headers = {
6347 "Accept": "application/*+xml;version=" + API_VERSION,
6348 "x-vcloud-authorization": self.client._session.headers[
6349 "x-vcloud-authorization"
6350 ],
6351 }
6352 response = self.perform_request(
6353 req_type="GET", url=url_rest_call, headers=headers
6354 )
6355
6356 if response.status_code == 403:
6357 response = self.retry_rest("GET", url_rest_call)
6358
6359 if response.status_code != 200:
6360 self.logger.error(
6361 "REST call {} failed reason : {}"
6362 "status code : {}".format(
6363 url_rest_call, response.text, response.status_code
6364 )
6365 )
6366 raise vimconn.VimConnException(
6367 "add_network_adapter_to_vms : Failed to get "
6368 "network connection section"
6369 )
6370 data = response.text
6371 data = data.split('<Link rel="edit"')[0]
6372 vcd_netadapter_type = nic_type
6373
6374 if nic_type in ["SR-IOV", "VF"]:
6375 vcd_netadapter_type = "SRIOVETHERNETCARD"
6376
6377 if "<PrimaryNetworkConnectionIndex>" not in data:
6378 self.logger.debug(
6379 "add_network_adapter PrimaryNIC not in data nic_type {}".format(
6380 nic_type
6381 )
6382 )
6383 item = """<PrimaryNetworkConnectionIndex>{}</PrimaryNetworkConnectionIndex>
6384 <NetworkConnection network="{}">
6385 <NetworkConnectionIndex>{}</NetworkConnectionIndex>
6386 <IsConnected>true</IsConnected>
6387 <IpAddressAllocationMode>{}</IpAddressAllocationMode>
6388 <NetworkAdapterType>{}</NetworkAdapterType>
6389 </NetworkConnection>""".format(
6390 primary_nic_index,
6391 network_name,
6392 nicIndex,
6393 allocation_mode,
6394 vcd_netadapter_type,
6395 )
6396
6397 # Stub for ip_address feature
6398 if ip_address:
6399 ip_tag = "<IpAddress>{}</IpAddress>".format(ip_address)
6400 item = item.replace(
6401 "</NetworkConnectionIndex>\n",
6402 "</NetworkConnectionIndex>\n{}\n".format(ip_tag),
6403 )
6404
6405 if mac_address:
6406 mac_tag = "<MACAddress>{}</MACAddress>".format(mac_address)
6407 item = item.replace(
6408 "</IsConnected>\n",
6409 "</IsConnected>\n{}\n".format(mac_tag),
6410 )
6411
6412 data = data.replace(
6413 "</ovf:Info>\n",
6414 "</ovf:Info>\n{}\n</NetworkConnectionSection>".format(item),
6415 )
6416 else:
6417 self.logger.debug(
6418 "add_network_adapter PrimaryNIC in data nic_type {}".format(
6419 nic_type
6420 )
6421 )
6422 new_item = """<NetworkConnection network="{}">
6423 <NetworkConnectionIndex>{}</NetworkConnectionIndex>
6424 <IsConnected>true</IsConnected>
6425 <IpAddressAllocationMode>{}</IpAddressAllocationMode>
6426 <NetworkAdapterType>{}</NetworkAdapterType>
6427 </NetworkConnection>""".format(
6428 network_name, nicIndex, allocation_mode, vcd_netadapter_type
6429 )
6430
6431 # Stub for ip_address feature
6432 if ip_address:
6433 ip_tag = "<IpAddress>{}</IpAddress>".format(ip_address)
6434 new_item = new_item.replace(
6435 "</NetworkConnectionIndex>\n",
6436 "</NetworkConnectionIndex>\n{}\n".format(ip_tag),
6437 )
6438
6439 if mac_address:
6440 mac_tag = "<MACAddress>{}</MACAddress>".format(mac_address)
6441 new_item = new_item.replace(
6442 "</IsConnected>\n",
6443 "</IsConnected>\n{}\n".format(mac_tag),
6444 )
6445
6446 data = data + new_item + "</NetworkConnectionSection>"
6447
6448 headers[
6449 "Content-Type"
6450 ] = "application/vnd.vmware.vcloud.networkConnectionSection+xml"
6451
6452 response = self.perform_request(
6453 req_type="PUT", url=url_rest_call, headers=headers, data=data
6454 )
6455
6456 if response.status_code == 403:
6457 add_headers = {"Content-Type": headers["Content-Type"]}
6458 response = self.retry_rest(
6459 "PUT", url_rest_call, add_headers, data
6460 )
6461
6462 if response.status_code != 202:
6463 self.logger.error(
6464 "REST call {} failed reason : {}"
6465 "status code : {}".format(
6466 url_rest_call, response.text, response.status_code
6467 )
6468 )
6469 raise vimconn.VimConnException(
6470 "add_network_adapter_to_vms : Failed to update "
6471 "network connection section"
6472 )
6473 else:
6474 nic_task = self.get_task_from_response(response.text)
6475 result = self.client.get_task_monitor().wait_for_success(
6476 task=nic_task
6477 )
6478
6479 if result.get("status") == "success":
6480 self.logger.info(
6481 "add_network_adapter_to_vms(): VM {} "
6482 "conneced to NIC type {}".format(vm_id, nic_type)
6483 )
6484 else:
6485 self.logger.error(
6486 "add_network_adapter_to_vms(): VM {} "
6487 "failed to connect NIC type {}".format(vm_id, nic_type)
6488 )
6489 except Exception as exp:
6490 self.logger.error(
6491 "add_network_adapter_to_vms() : exception occurred "
6492 "while adding Network adapter"
6493 )
6494
6495 raise vimconn.VimConnException(message=exp)
6496
6497 def set_numa_affinity(self, vmuuid, paired_threads_id):
6498 """
6499 Method to assign numa affinity in vm configuration parameters
6500 Args :
6501 vmuuid - vm uuid
6502 paired_threads_id - one or more virtual processor
6503 numbers
6504 Returns:
6505 None (returns after numa affinity is successfully assigned)
6506 """
6507 try:
6508 vcenter_conect, content = self.get_vcenter_content()
6509 vm_moref_id = self.get_vm_moref_id(vmuuid)
6510 _, vm_obj = self.get_vm_obj(content, vm_moref_id)
6511
6512 if vm_obj:
6513 config_spec = vim.vm.ConfigSpec()
6514 config_spec.extraConfig = []
6515 opt = vim.option.OptionValue()
6516 opt.key = "numa.nodeAffinity"
6517 opt.value = str(paired_threads_id)
6518 config_spec.extraConfig.append(opt)
6519 task = vm_obj.ReconfigVM_Task(config_spec)
6520
6521 if task:
6522 self.wait_for_vcenter_task(task, vcenter_conect)
6523 extra_config = vm_obj.config.extraConfig
6524 flag = False
6525
6526 for opts in extra_config:
6527 if "numa.nodeAffinity" in opts.key:
6528 flag = True
6529 self.logger.info(
6530 "set_numa_affinity: Sucessfully assign numa affinity "
6531 "value {} for vm {}".format(opt.value, vm_obj)
6532 )
6533
6534 if flag:
6535 return
6536 else:
6537 self.logger.error("set_numa_affinity: Failed to assign numa affinity")
6538 except Exception as exp:
6539 self.logger.error(
6540 "set_numa_affinity : exception occurred while setting numa affinity "
6541 "for VM {} : {}".format(vm_obj, vm_moref_id)
6542 )
6543
6544 raise vimconn.VimConnException(
6545 "set_numa_affinity : Error {} failed to assign numa "
6546 "affinity".format(exp)
6547 )
6548
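# Illustrative sketch (hypothetical value): the extraConfig option written by
# set_numa_affinity() is the standard vSphere advanced setting, e.g.
#   numa.nodeAffinity = "0,1"
# which constrains the VM to NUMA nodes 0 and 1; paired_threads_id is passed
# through str() unchanged, so the caller is expected to supply that format.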
6549 def add_new_disk(self, vapp_uuid, disk_size):
6550 """
6551 Method to create an empty vm disk
6552
6553 Args:
6554 vapp_uuid - is vapp identifier.
6555 disk_size - size of disk to be created in GB
6556
6557 Returns:
6558 None
6559 """
6560 status = False
6561 vm_details = None
6562 try:
6563 # Disk size in GB, convert it into MB
6564 if disk_size is not None:
6565 disk_size_mb = int(disk_size) * 1024
6566 vm_details = self.get_vapp_details_rest(vapp_uuid)
6567
6568 if vm_details and "vm_virtual_hardware" in vm_details:
6569 self.logger.info(
6570 "Adding disk to VM: {} disk size:{}GB".format(
6571 vm_details["name"], disk_size
6572 )
6573 )
6574 disk_href = vm_details["vm_virtual_hardware"]["disk_edit_href"]
6575 status = self.add_new_disk_rest(disk_href, disk_size_mb)
6576 except Exception as exp:
6577 msg = "Error occurred while creating new disk {}.".format(exp)
6578 self.rollback_newvm(vapp_uuid, msg)
6579
6580 if status:
6581 self.logger.info(
6582 "Added new disk to VM: {} disk size:{}GB".format(
6583 vm_details["name"], disk_size
6584 )
6585 )
6586 else:
6587 # If failed to add disk, delete VM
6588 msg = "add_new_disk: Failed to add new disk to {}".format(
6589 vm_details["name"]
6590 )
6591 self.rollback_newvm(vapp_uuid, msg)
6592
6593 def add_new_disk_rest(self, disk_href, disk_size_mb):
6594 """
6595 Retrieves vApp Disks section & adds a new empty disk
6596
6597 Args:
6598 disk_href: Disk section href to add disk
6599 disk_size_mb: Disk size in MB
6600
6601 Returns: Status of add new disk task
6602 """
6603 status = False
6604 if self.client._session:
6605 headers = {
6606 "Accept": "application/*+xml;version=" + API_VERSION,
6607 "x-vcloud-authorization": self.client._session.headers[
6608 "x-vcloud-authorization"
6609 ],
6610 }
6611 response = self.perform_request(
6612 req_type="GET", url=disk_href, headers=headers
6613 )
6614
6615 if response.status_code == 403:
6616 response = self.retry_rest("GET", disk_href)
6617
6618 if response.status_code != requests.codes.ok:
6619 self.logger.error(
6620 "add_new_disk_rest: GET REST API call {} failed. Return status code {}".format(
6621 disk_href, response.status_code
6622 )
6623 )
6624
6625 return status
6626
6627 try:
6628 # Find bus type & max of instance IDs assigned to disks
6629 lxmlroot_respond = lxmlElementTree.fromstring(response.content)
6630 namespaces = {
6631 prefix: uri for prefix, uri in lxmlroot_respond.nsmap.items() if prefix
6632 }
6633 namespaces["xmlns"] = "http://www.vmware.com/vcloud/v1.5"
6634 instance_id = 0
6635
6636 for item in lxmlroot_respond.iterfind("xmlns:Item", namespaces):
6637 if item.find("rasd:Description", namespaces).text == "Hard disk":
6638 inst_id = int(item.find("rasd:InstanceID", namespaces).text)
6639
6640 if inst_id > instance_id:
6641 instance_id = inst_id
6642 disk_item = item.find("rasd:HostResource", namespaces)
6643 bus_subtype = disk_item.attrib[
6644 "{" + namespaces["xmlns"] + "}busSubType"
6645 ]
6646 bus_type = disk_item.attrib[
6647 "{" + namespaces["xmlns"] + "}busType"
6648 ]
6649
6650 instance_id = instance_id + 1
6651 new_item = """<Item>
6652 <rasd:Description>Hard disk</rasd:Description>
6653 <rasd:ElementName>New disk</rasd:ElementName>
6654 <rasd:HostResource
6655 xmlns:vcloud="http://www.vmware.com/vcloud/v1.5"
6656 vcloud:capacity="{}"
6657 vcloud:busSubType="{}"
6658 vcloud:busType="{}"></rasd:HostResource>
6659 <rasd:InstanceID>{}</rasd:InstanceID>
6660 <rasd:ResourceType>17</rasd:ResourceType>
6661 </Item>""".format(
6662 disk_size_mb, bus_subtype, bus_type, instance_id
6663 )
6664
6665 new_data = response.text
6666 # Add new item at the bottom
6667 new_data = new_data.replace(
6668 "</Item>\n</RasdItemsList>",
6669 "</Item>\n{}\n</RasdItemsList>".format(new_item),
6670 )
6671
6672 # Send PUT request to modify virtual hardware section with new disk
6673 headers[
6674 "Content-Type"
6675 ] = "application/vnd.vmware.vcloud.rasdItemsList+xml; charset=ISO-8859-1"
6676
6677 response = self.perform_request(
6678 req_type="PUT", url=disk_href, data=new_data, headers=headers
6679 )
6680
6681 if response.status_code == 403:
6682 add_headers = {"Content-Type": headers["Content-Type"]}
6683 response = self.retry_rest("PUT", disk_href, add_headers, new_data)
6684
6685 if response.status_code != 202:
6686 self.logger.error(
6687 "PUT REST API call {} failed. Return status code {}. response.text:{}".format(
6688 disk_href, response.status_code, response.text
6689 )
6690 )
6691 else:
6692 add_disk_task = self.get_task_from_response(response.text)
6693 result = self.client.get_task_monitor().wait_for_success(
6694 task=add_disk_task
6695 )
6696
6697 if result.get("status") == "success":
6698 status = True
6699 else:
6700 self.logger.error(
6701 "Add new disk REST task failed to add {} MB disk".format(
6702 disk_size_mb
6703 )
6704 )
6705 except Exception as exp:
6706 self.logger.error(
6707 "Error occurred calling rest api for creating new disk {}".format(exp)
6708 )
6709
6710 return status
6711
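# Illustrative sketch (hypothetical bus values): for a VM whose existing hard
# disk reports busType="6" (SCSI) with busSubType="lsilogic", a 10 GB request
# appends an Item whose HostResource looks like
#   <rasd:HostResource vcloud:capacity="10240"
#        vcloud:busSubType="lsilogic" vcloud:busType="6"/>
# after the last existing disk Item; the actual values are copied from the
# highest-numbered disk found in the GET response above.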
6712 def add_existing_disk(
6713 self,
6714 catalogs=None,
6715 image_id=None,
6716 size=None,
6717 template_name=None,
6718 vapp_uuid=None,
6719 ):
6720 """
6721 Method to add existing disk to vm
6722 Args :
6723 catalogs - List of VDC catalogs
6724 image_id - Catalog ID
6725 template_name - Name of template in catalog
6726 vapp_uuid - UUID of vApp
6727 Returns:
6728 None
6729 """
6730 disk_info = None
6731 vcenter_conect, content = self.get_vcenter_content()
6732 # find moref-id of vm in image
6733 catalog_vm_info = self.get_vapp_template_details(
6734 catalogs=catalogs,
6735 image_id=image_id,
6736 )
6737
6738 if catalog_vm_info and "vm_vcenter_info" in catalog_vm_info:
6739 if "vm_moref_id" in catalog_vm_info["vm_vcenter_info"]:
6740 catalog_vm_moref_id = catalog_vm_info["vm_vcenter_info"].get(
6741 "vm_moref_id", None
6742 )
6743
6744 if catalog_vm_moref_id:
6745 self.logger.info(
6746 "Moref_id of VM in catalog : {}".format(catalog_vm_moref_id)
6747 )
6748 _, catalog_vm_obj = self.get_vm_obj(content, catalog_vm_moref_id)
6749
6750 if catalog_vm_obj:
6751 # find existing disk
6752 disk_info = self.find_disk(catalog_vm_obj)
6753 else:
6754 exp_msg = "No VM with image id {} found".format(image_id)
6755 self.rollback_newvm(vapp_uuid, exp_msg, exp_type="NotFound")
6756 else:
6757 exp_msg = "No Image found with image ID {} ".format(image_id)
6758 self.rollback_newvm(vapp_uuid, exp_msg, exp_type="NotFound")
6759
6760 if disk_info:
6761 self.logger.info("Existing disk_info : {}".format(disk_info))
6762 # get VM
6763 vm_moref_id = self.get_vm_moref_id(vapp_uuid)
6764 _, vm_obj = self.get_vm_obj(content, vm_moref_id)
6765
6766 if vm_obj:
6767 status = self.add_disk(
6768 vcenter_conect=vcenter_conect,
6769 vm=vm_obj,
6770 disk_info=disk_info,
6771 size=size,
6772 vapp_uuid=vapp_uuid,
6773 )
6774
6775 if status:
6776 self.logger.info(
6777 "Disk from image id {} added to {}".format(
6778 image_id, vm_obj.config.name
6779 )
6780 )
6781 else:
6782 msg = "No disk found with image id {} to add in VM {}".format(
6783 image_id, vm_obj.config.name
6784 )
6785 self.rollback_newvm(vapp_uuid, msg, exp_type="NotFound")
6786
6787 def find_disk(self, vm_obj):
6788 """
6789 Method to find details of existing disk in VM
6790 Args:
6791 vm_obj - vCenter object of VM
6792 Returns:
6793 disk_info : dict of disk details
6794 """
6795 disk_info = {}
6796 if vm_obj:
6797 try:
6798 devices = vm_obj.config.hardware.device
6799
6800 for device in devices:
6801 if type(device) is vim.vm.device.VirtualDisk:
6802 if isinstance(
6803 device.backing,
6804 vim.vm.device.VirtualDisk.FlatVer2BackingInfo,
6805 ) and hasattr(device.backing, "fileName"):
6806 disk_info["full_path"] = device.backing.fileName
6807 disk_info["datastore"] = device.backing.datastore
6808 disk_info["capacityKB"] = device.capacityInKB
6809 break
6810 except Exception as exp:
6811 self.logger.error(
6812 "find_disk() : exception occurred while "
6813 "getting existing disk details :{}".format(exp)
6814 )
6815
6816 return disk_info
6817
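# Illustrative sketch (hypothetical values): find_disk() returns something like
#   {"full_path": "[datastore1] template-vm/template-vm.vmdk",
#    "datastore": <vim.Datastore managed object>,
#    "capacityKB": 10485760}
# i.e. the backing file name, datastore object and size in KB of the first
# flat-backed virtual disk found on the catalog VM.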
6818 def add_disk(
6819 self, vcenter_conect=None, vm=None, size=None, vapp_uuid=None, disk_info={}
6820 ):
6821 """
6822 Method to add existing disk in VM
6823 Args :
6824 vcenter_conect - vCenter content object
6825 vm - vCenter vm object
6826 disk_info : dict of disk details
6827 Returns:
6828 status : status of add disk task
6829 """
6830 datastore = disk_info["datastore"] if "datastore" in disk_info else None
6831 fullpath = disk_info["full_path"] if "full_path" in disk_info else None
6832 capacityKB = disk_info["capacityKB"] if "capacityKB" in disk_info else None
6833 if size is not None:
6834 # Convert size from GB to KB
6835 sizeKB = int(size) * 1024 * 1024
6836 # Compare size of existing disk with user-given size and assign whichever is greater
6837 self.logger.info(
6838 "Add Existing disk : sizeKB {} , capacityKB {}".format(
6839 sizeKB, capacityKB
6840 )
6841 )
6842
6843 if sizeKB > capacityKB:
6844 capacityKB = sizeKB
6845
6846 if datastore and fullpath and capacityKB:
6847 try:
6848 spec = vim.vm.ConfigSpec()
6849 # get all disks on a VM, set unit_number to the next available
6850 unit_number = 0
6851 for dev in vm.config.hardware.device:
6852 if hasattr(dev.backing, "fileName"):
6853 unit_number = int(dev.unitNumber) + 1
6854 # unit_number 7 reserved for scsi controller
6855
6856 if unit_number == 7:
6857 unit_number += 1
6858
6859 if isinstance(dev, vim.vm.device.VirtualDisk):
6860 # vim.vm.device.VirtualSCSIController
6861 controller_key = dev.controllerKey
6862
6863 self.logger.info(
6864 "Add Existing disk : unit number {} , controller key {}".format(
6865 unit_number, controller_key
6866 )
6867 )
6868 # add disk here
6869 dev_changes = []
6870 disk_spec = vim.vm.device.VirtualDeviceSpec()
6871 disk_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
6872 disk_spec.device = vim.vm.device.VirtualDisk()
6873 disk_spec.device.backing = (
6874 vim.vm.device.VirtualDisk.FlatVer2BackingInfo()
6875 )
6876 disk_spec.device.backing.thinProvisioned = True
6877 disk_spec.device.backing.diskMode = "persistent"
6878 disk_spec.device.backing.datastore = datastore
6879 disk_spec.device.backing.fileName = fullpath
6880
6881 disk_spec.device.unitNumber = unit_number
6882 disk_spec.device.capacityInKB = capacityKB
6883 disk_spec.device.controllerKey = controller_key
6884 dev_changes.append(disk_spec)
6885 spec.deviceChange = dev_changes
6886 task = vm.ReconfigVM_Task(spec=spec)
6887 status = self.wait_for_vcenter_task(task, vcenter_conect)
6888
6889 return status
6890 except Exception as exp:
6891 exp_msg = (
6892 "add_disk() : exception {} occurred while adding disk "
6893 "{} to vm {}".format(exp, fullpath, vm.config.name)
6894 )
6895 self.rollback_newvm(vapp_uuid, exp_msg)
6896 else:
6897 msg = "add_disk() : Can not add disk to VM with disk info {} ".format(
6898 disk_info
6899 )
6900 self.rollback_newvm(vapp_uuid, msg)
6901
6902 def get_vcenter_content(self):
6903 """
6904 Get the vsphere content object
6905 """
6906 try:
6907 vm_vcenter_info = self.get_vm_vcenter_info()
6908 except Exception as exp:
6909 self.logger.error(
6910 "Error occurred while getting vCenter infromationn"
6911 " for VM : {}".format(exp)
6912 )
6913
6914 raise vimconn.VimConnException(message=exp)
6915
6916 context = None
6917 if hasattr(ssl, "_create_unverified_context"):
6918 context = ssl._create_unverified_context()
6919
6920 vcenter_conect = SmartConnect(
6921 host=vm_vcenter_info["vm_vcenter_ip"],
6922 user=vm_vcenter_info["vm_vcenter_user"],
6923 pwd=vm_vcenter_info["vm_vcenter_password"],
6924 port=int(vm_vcenter_info["vm_vcenter_port"]),
6925 sslContext=context,
6926 )
6927 atexit.register(Disconnect, vcenter_conect)
6928 content = vcenter_conect.RetrieveContent()
6929
6930 return vcenter_conect, content
6931
6932 def get_vm_moref_id(self, vapp_uuid):
6933 """
6934 Get the moref_id of given VM
6935 """
6936 try:
6937 if vapp_uuid:
6938 vm_details = self.get_vapp_details_rest(
6939 vapp_uuid, need_admin_access=True
6940 )
6941
6942 if vm_details and "vm_vcenter_info" in vm_details:
6943 vm_moref_id = vm_details["vm_vcenter_info"].get("vm_moref_id", None)
6944
6945 return vm_moref_id
6946 except Exception as exp:
6947 self.logger.error(
6948 "Error occurred while getting VM moref ID " " for VM : {}".format(exp)
6949 )
6950
6951 return None
6952
6953 def get_vapp_template_details(
6954 self, catalogs=None, image_id=None, template_name=None
6955 ):
6956 """
6957 Method to get vApp template details
6958 Args :
6959 catalogs - list of VDC catalogs
6960 image_id - Catalog ID to find
6961 template_name : template name in catalog
6962 Returns:
6963 parsed_response : dict of vApp template details
6964 """
6965 parsed_response = {}
6966
6967 vca = self.connect_as_admin()
6968 if not vca:
6969 raise vimconn.VimConnConnectionException("Failed to connect vCD")
6970
6971 try:
6972 org, _ = self.get_vdc_details()
6973 catalog = self.get_catalog_obj(image_id, catalogs)
6974 if catalog:
6975 items = org.get_catalog_item(catalog.get("name"), catalog.get("name"))
6976 catalog_items = [items.attrib]
6977
6978 if len(catalog_items) == 1:
6979 headers = {
6980 "Accept": "application/*+xml;version=" + API_VERSION,
6981 "x-vcloud-authorization": vca._session.headers[
6982 "x-vcloud-authorization"
6983 ],
6984 }
6985 response = self.perform_request(
6986 req_type="GET",
6987 url=catalog_items[0].get("href"),
6988 headers=headers,
6989 )
6990 catalogItem = XmlElementTree.fromstring(response.text)
6991 entity = [
6992 child
6993 for child in catalogItem
6994 if child.get("type")
6995 == "application/vnd.vmware.vcloud.vAppTemplate+xml"
6996 ][0]
6997 vapp_tempalte_href = entity.get("href")
6998 # get vapp details and parse moref id
6999
7000 namespaces = {
7001 "vssd": "http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData",
7002 "ovf": "http://schemas.dmtf.org/ovf/envelope/1",
7003 "vmw": "http://www.vmware.com/schema/ovf",
7004 "vm": "http://www.vmware.com/vcloud/v1.5",
7005 "rasd": "http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData",
7006 "vmext": "http://www.vmware.com/vcloud/extension/v1.5",
7007 "xmlns": "http://www.vmware.com/vcloud/v1.5",
7008 }
7009
7010 if vca._session:
7011 response = self.perform_request(
7012 req_type="GET", url=vapp_tempalte_href, headers=headers
7013 )
7014
7015 if response.status_code != requests.codes.ok:
7016 self.logger.debug(
7017 "REST API call {} failed. Return status code {}".format(
7018 vapp_tempalte_href, response.status_code
7019 )
7020 )
7021 else:
7022 xmlroot_respond = XmlElementTree.fromstring(response.text)
7023 children_section = xmlroot_respond.find(
7024 "vm:Children/", namespaces
7025 )
7026
7027 if children_section is not None:
7028 vCloud_extension_section = children_section.find(
7029 "xmlns:VCloudExtension", namespaces
7030 )
7031
7032 if vCloud_extension_section is not None:
7033 vm_vcenter_info = {}
7034 vim_info = vCloud_extension_section.find(
7035 "vmext:VmVimInfo", namespaces
7036 )
7037 vmext = vim_info.find(
7038 "vmext:VmVimObjectRef", namespaces
7039 )
7040
7041 if vmext is not None:
7042 vm_vcenter_info["vm_moref_id"] = vmext.find(
7043 "vmext:MoRef", namespaces
7044 ).text
7045
7046 parsed_response["vm_vcenter_info"] = vm_vcenter_info
7047 except Exception as exp:
7048 self.logger.info(
7049 "Error occurred calling rest api for getting vApp details {}".format(
7050 exp
7051 )
7052 )
7053
7054 return parsed_response
7055
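# Illustrative sketch (hypothetical moref): when the catalog item carries the
# VCloudExtension section, get_vapp_template_details() returns e.g.
#   {"vm_vcenter_info": {"vm_moref_id": "vm-1234"}}
# and an empty dict when the extension data is absent or the REST call fails.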
7056 def rollback_newvm(self, vapp_uuid, msg, exp_type="Genric"):
7057 """
7058 Method to delete vApp
7059 Args :
7060 vapp_uuid - vApp UUID
7061 msg - Error message to be logged
7062 exp_type : Exception type
7063 Returns:
7064 None
7065 """
7066 if vapp_uuid:
7067 self.delete_vminstance(vapp_uuid)
7068 else:
7069 msg = "No vApp ID"
7070
7071 self.logger.error(msg)
7072
7073 if exp_type == "Genric":
7074 raise vimconn.VimConnException(msg)
7075 elif exp_type == "NotFound":
7076 raise vimconn.VimConnNotFoundException(message=msg)
7077
7078 def get_sriov_devices(self, host, no_of_vfs):
7079 """
7080 Method to get the details of SRIOV devices on given host
7081 Args:
7082 host - vSphere host object
7083 no_of_vfs - number of VFs needed on host
7084
7085 Returns:
7086 array of SRIOV devices
7087 """
7088 sriovInfo = []
7089
7090 if host:
7091 for device in host.config.pciPassthruInfo:
7092 if isinstance(device, vim.host.SriovInfo) and device.sriovActive:
7093 if device.numVirtualFunction >= no_of_vfs:
7094 sriovInfo.append(device)
7095 break
7096
7097 return sriovInfo
7098
7099 def reconfig_portgroup(self, content, dvPort_group_name, config_info={}):
7100 """
7101 Method to reconfigure distributed virtual portgroup
7102
7103 Args:
7104 dvPort_group_name - name of distributed virtual portgroup
7105 content - vCenter content object
7106 config_info - distributed virtual portgroup configuration
7107
7108 Returns:
7109 task object
7110 """
7111 try:
7112 dvPort_group = self.get_dvport_group(dvPort_group_name)
7113
7114 if dvPort_group:
7115 dv_pg_spec = vim.dvs.DistributedVirtualPortgroup.ConfigSpec()
7116 dv_pg_spec.configVersion = dvPort_group.config.configVersion
7117 dv_pg_spec.defaultPortConfig = (
7118 vim.dvs.VmwareDistributedVirtualSwitch.VmwarePortConfigPolicy()
7119 )
7120
7121 if "vlanID" in config_info:
7122 dv_pg_spec.defaultPortConfig.vlan = (
7123 vim.dvs.VmwareDistributedVirtualSwitch.VlanIdSpec()
7124 )
7125 dv_pg_spec.defaultPortConfig.vlan.vlanId = config_info.get("vlanID")
7126
7127 task = dvPort_group.ReconfigureDVPortgroup_Task(spec=dv_pg_spec)
7128
7129 return task
7130 else:
7131 return None
7132 except Exception as exp:
7133 self.logger.error(
7134 "Error occurred while reconfiguraing disributed virtaul port group {}"
7135 " : {}".format(dvPort_group_name, exp)
7136 )
7137
7138 return None
7139
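# Usage sketch (hypothetical portgroup name and VLAN): assign VLAN 100 to a
# portgroup and wait for the reconfiguration task, reusing helpers from this class:
#   _, content = self.get_vcenter_content()
#   task = self.reconfig_portgroup(content, "dvportgroup-osm-net", {"vlanID": 100})
#   if task:
#       self.wait_for_vcenter_task(task, "reconfig_portgroup")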
7140 def get_dvport_group(self, dvPort_group_name):
7141 """
7142 Method to get distributed virtual portgroup
7143
7144 Args:
7145 dvPort_group_name - name of network/portgroup
7146
7147 Returns:
7148 portgroup object
7149 """
7150 _, content = self.get_vcenter_content()
7151 dvPort_group = None
7152
7153 try:
7154 container = content.viewManager.CreateContainerView(
7155 content.rootFolder, [vim.dvs.DistributedVirtualPortgroup], True
7156 )
7157
7158 for item in container.view:
7159 if item.key == dvPort_group_name:
7160 dvPort_group = item
7161 break
7162
7163 return dvPort_group
7164 except vmodl.MethodFault as exp:
7165 self.logger.error(
7166 "Caught vmodl fault {} for disributed virtual port group {}".format(
7167 exp, dvPort_group_name
7168 )
7169 )
7170
7171 return None
7172
7173 def get_vlanID_from_dvs_portgr(self, dvPort_group_name):
7174 """
7175 Method to get distributed virtual portgroup vlanID
7176
7177 Args:
7178 dvPort_group_name - name of network/portgroup
7179
7180 Returns:
7181 vlan ID
7182 """
7183 vlanId = None
7184
7185 try:
7186 dvPort_group = self.get_dvport_group(dvPort_group_name)
7187
7188 if dvPort_group:
7189 vlanId = dvPort_group.config.defaultPortConfig.vlan.vlanId
7190 except vmodl.MethodFault as exp:
7191 self.logger.error(
7192 "Caught vmodl fault {} for disributed virtaul port group {}".format(
7193 exp, dvPort_group_name
7194 )
7195 )
7196
7197 return vlanId
7198
7199 def insert_media_to_vm(self, vapp, image_id):
7200 """
7201 Method to insert media CD-ROM (ISO image) from catalog to vm.
7202 vapp - vapp object to get vm id
7203 image_id - image id of the CD-ROM (ISO) to be inserted into the vm
7204 """
7205 # create connection object
7206 vca = self.connect()
7207 try:
7208 # fetching catalog details
7209 rest_url = "{}/api/catalog/{}".format(self.url, image_id)
7210
7211 if vca._session:
7212 headers = {
7213 "Accept": "application/*+xml;version=" + API_VERSION,
7214 "x-vcloud-authorization": vca._session.headers[
7215 "x-vcloud-authorization"
7216 ],
7217 }
7218 response = self.perform_request(
7219 req_type="GET", url=rest_url, headers=headers
7220 )
7221
7222 if response.status_code != 200:
7223 self.logger.error(
7224 "REST call {} failed reason : {}"
7225 "status code : {}".format(
7226 rest_url, response.text, response.status_code
7227 )
7228 )
7229
7230 raise vimconn.VimConnException(
7231 "insert_media_to_vm(): Failed to get " "catalog details"
7232 )
7233
7234 # searching iso name and id
7235 iso_name, media_id = self.get_media_details(vca, response.text)
7236
7237 if iso_name and media_id:
7238 data = """<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
7239 <ns6:MediaInsertOrEjectParams
7240 xmlns="http://www.vmware.com/vcloud/versions" xmlns:ns2="http://schemas.dmtf.org/ovf/envelope/1"
7241 xmlns:ns3="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData"
7242 xmlns:ns4="http://schemas.dmtf.org/wbem/wscim/1/common"
7243 xmlns:ns5="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData"
7244 xmlns:ns6="http://www.vmware.com/vcloud/v1.5"
7245 xmlns:ns7="http://www.vmware.com/schema/ovf"
7246 xmlns:ns8="http://schemas.dmtf.org/ovf/environment/1"
7247 xmlns:ns9="http://www.vmware.com/vcloud/extension/v1.5">
7248 <ns6:Media
7249 type="application/vnd.vmware.vcloud.media+xml"
7250 name="{}"
7251 id="urn:vcloud:media:{}"
7252 href="https://{}/api/media/{}"/>
7253 </ns6:MediaInsertOrEjectParams>""".format(
7254 iso_name, media_id, self.url, media_id
7255 )
7256
7257 for vms in vapp.get_all_vms():
7258 vm_id = vms.get("id").split(":")[-1]
7259
7260 headers[
7261 "Content-Type"
7262 ] = "application/vnd.vmware.vcloud.mediaInsertOrEjectParams+xml"
7263 rest_url = "{}/api/vApp/vm-{}/media/action/insertMedia".format(
7264 self.url, vm_id
7265 )
7266
7267 response = self.perform_request(
7268 req_type="POST", url=rest_url, data=data, headers=headers
7269 )
7270
7271 if response.status_code != 202:
7272 error_msg = (
7273 "insert_media_to_vm() : Failed to insert CD-ROM to vm. Reason {}. "
7274 "Status code {}".format(response.text, response.status_code)
7275 )
7276 self.logger.error(error_msg)
7277
7278 raise vimconn.VimConnException(error_msg)
7279 else:
7280 task = self.get_task_from_response(response.text)
7281 result = self.client.get_task_monitor().wait_for_success(
7282 task=task
7283 )
7284
7285 if result.get("status") == "success":
7286 self.logger.info(
7287 "insert_media_to_vm(): Sucessfully inserted media ISO"
7288 " image to vm {}".format(vm_id)
7289 )
7290 except Exception as exp:
7291 self.logger.error(
7292 "insert_media_to_vm() : exception occurred "
7293 "while inserting media CD-ROM"
7294 )
7295
7296 raise vimconn.VimConnException(message=exp)
7297
7298 def get_media_details(self, vca, content):
7299 """
7300 Method to get catalog item details
7301 vca - connection object
7302 content - Catalog details
7303 Return - Media name, media id
7304 """
7305 cataloghref_list = []
7306 try:
7307 if content:
7308 vm_list_xmlroot = XmlElementTree.fromstring(content)
7309
7310 for child in vm_list_xmlroot.iter():
7311 if "CatalogItem" in child.tag:
7312 cataloghref_list.append(child.attrib.get("href"))
7313
7314 if cataloghref_list is not None:
7315 for href in cataloghref_list:
7316 if href:
7317 headers = {
7318 "Accept": "application/*+xml;version=" + API_VERSION,
7319 "x-vcloud-authorization": vca._session.headers[
7320 "x-vcloud-authorization"
7321 ],
7322 }
7323 response = self.perform_request(
7324 req_type="GET", url=href, headers=headers
7325 )
7326
7327 if response.status_code != 200:
7328 self.logger.error(
7329 "REST call {} failed reason : {}"
7330 "status code : {}".format(
7331 href, response.text, response.status_code
7332 )
7333 )
7334
7335 raise vimconn.VimConnException(
7336 "get_media_details : Failed to get "
7337 "catalogitem details"
7338 )
7339
7340 list_xmlroot = XmlElementTree.fromstring(response.text)
7341
7342 for child in list_xmlroot.iter():
7343 if "Entity" in child.tag:
7344 if "media" in child.attrib.get("href"):
7345 name = child.attrib.get("name")
7346 media_id = (
7347 child.attrib.get("href").split("/").pop()
7348 )
7349
7350 return name, media_id
7351 else:
7352 self.logger.debug("Media name and id not found")
7353
7354 return False, False
7355 except Exception as exp:
7356 self.logger.error(
7357 "get_media_details : exception occurred " "getting media details"
7358 )
7359
7360 raise vimconn.VimConnException(message=exp)
7361
7362 def retry_rest(self, method, url, add_headers=None, data=None):
7363 """Method to get Token & retry respective REST request
7364 Args:
7365 method - HTTP method: one of 'GET', 'PUT', 'POST' or 'DELETE'
7366 url - request url to be used
7367 add_headers - Additional headers (optional)
7368 data - Request payload data to be passed in request
7369 Returns:
7370 response - Response of request
7371 """
7372 response = None
7373
7374 # Get token
7375 self.get_token()
7376
7377 if self.client._session:
7378 headers = {
7379 "Accept": "application/*+xml;version=" + API_VERSION,
7380 "x-vcloud-authorization": self.client._session.headers[
7381 "x-vcloud-authorization"
7382 ],
7383 }
7384
7385 if add_headers:
7386 headers.update(add_headers)
7387
7388 if method == "GET":
7389 response = self.perform_request(req_type="GET", url=url, headers=headers)
7390 elif method == "PUT":
7391 response = self.perform_request(
7392 req_type="PUT", url=url, headers=headers, data=data
7393 )
7394 elif method == "POST":
7395 response = self.perform_request(
7396 req_type="POST", url=url, headers=headers, data=data
7397 )
7398 elif method == "DELETE":
7399 response = self.perform_request(req_type="DELETE", url=url, headers=headers)
7400
7401 return response
7402
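# Usage sketch: the pattern used throughout this class is to issue a request and,
# on a 403 (expired x-vcloud-authorization token), fetch a fresh token via
# retry_rest(), which repeats the same call with the new session header:
#   response = self.perform_request(req_type="GET", url=some_href, headers=headers)
#   if response.status_code == 403:
#       response = self.retry_rest("GET", some_href)
# (some_href stands for any vCloud API URL.)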
7403 def get_token(self):
7404         """Refresh the authentication token by re-connecting to vCloud Director
7405 
7406         Sets:
7407             self.client - a new client object that can later be used to connect to vCloud Director as admin for the VDC
7408         """
7409 self.client = self.connect()
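        # connect() re-authenticates with vCloud Director, so the
        # x-vcloud-authorization header held by the new client session is
        # refreshed for subsequent REST calls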
7410
7411 def get_vdc_details(self):
7412 """Get VDC details using pyVcloud Lib
7413
7414 Returns org and vdc object
7415 """
7416 vdc = None
7417
7418 try:
7419 org = Org(self.client, resource=self.client.get_org())
7420 vdc = org.get_vdc(self.tenant_name)
7421 except Exception as e:
7422             # pyvcloud does not raise a specific exception; refresh the token and retry
7423 self.logger.debug("Received exception {}, refreshing token ".format(str(e)))
7424
7425 # Retry once, if failed by refreshing token
7426 if vdc is None:
7427 self.get_token()
7428 org = Org(self.client, resource=self.client.get_org())
7429 vdc = org.get_vdc(self.tenant_name)
7430
7431 return org, vdc
7432
7433 def perform_request(self, req_type, url, headers=None, data=None):
7434 """Perform the POST/PUT/GET/DELETE request."""
7435 # Log REST request details
7436 self.log_request(req_type, url=url, headers=headers, data=data)
7437 # perform request and return its result
7438
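        # NOTE: all requests below are sent with verify=False, i.e. the
        # vCloud Director TLS certificate is not validated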
7439 if req_type == "GET":
7440 response = requests.get(url=url, headers=headers, verify=False)
7441 elif req_type == "PUT":
7442 response = requests.put(url=url, headers=headers, data=data, verify=False)
7443 elif req_type == "POST":
7444 response = requests.post(url=url, headers=headers, data=data, verify=False)
7445 elif req_type == "DELETE":
7446 response = requests.delete(url=url, headers=headers, verify=False)
7447
7448 # Log the REST response
7449 self.log_response(response)
7450
7451 return response
7452
7453 def log_request(self, req_type, url=None, headers=None, data=None):
7454 """Logs REST request details"""
7455
7456 if req_type is not None:
7457 self.logger.debug("Request type: {}".format(req_type))
7458
7459 if url is not None:
7460 self.logger.debug("Request url: {}".format(url))
7461
7462 if headers is not None:
7463 for header in headers:
7464 self.logger.debug(
7465 "Request header: {}: {}".format(header, headers[header])
7466 )
7467
7468 if data is not None:
7469 self.logger.debug("Request data: {}".format(data))
7470
7471 def log_response(self, response):
7472 """Logs REST response details"""
7473
7474 self.logger.debug("Response status code: {} ".format(response.status_code))
7475
7476 def get_task_from_response(self, content):
7477 """
7478         content - API response body (response.text)
7479         return - Task XML element extracted from the response
7480 """
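        # The response is either a <Task> document itself or a parent element
        # whose <Tasks> child holds the relevant task as its first entry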
7481 xmlroot = XmlElementTree.fromstring(content)
7482
7483 if xmlroot.tag.split("}")[1] == "Task":
7484 return xmlroot
7485         else:
7486             for ele in xmlroot:
7487                 if ele.tag.split("}")[1] == "Tasks":
7488                     task = ele[0]
7489                     return task
7490 
7491             return None
7492
7493 def power_on_vapp(self, vapp_id, vapp_name):
7494 """
7495 vapp_id - vApp uuid
7496         vapp_name - vApp name
7497 return - Task object
7498 """
7499 headers = {
7500 "Accept": "application/*+xml;version=" + API_VERSION,
7501 "x-vcloud-authorization": self.client._session.headers[
7502 "x-vcloud-authorization"
7503 ],
7504 }
7505
7506 poweron_href = "{}/api/vApp/vapp-{}/power/action/powerOn".format(
7507 self.url, vapp_id
7508 )
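        # vCloud Director answers the powerOn action with 202 Accepted and a Task
        # element in the body, which is extracted below and returned to the caller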
7509 response = self.perform_request(
7510 req_type="POST", url=poweron_href, headers=headers
7511 )
7512
7513 if response.status_code != 202:
7514 self.logger.error(
7515                 "REST call {} failed reason : {} "
7516                 "status code : {} ".format(
7517 poweron_href, response.text, response.status_code
7518 )
7519 )
7520
7521 raise vimconn.VimConnException(
7522                 "power_on_vapp() : Failed to power on vApp {}".format(vapp_name)
7523 )
7524 else:
7525 poweron_task = self.get_task_from_response(response.text)
7526
7527 return poweron_task
7528
7529 def migrate_instance(self, vm_id, compute_host=None):
7530 """
7531 Migrate a vdu
7532 param:
7533 vm_id: ID of an instance
7534 compute_host: Host to migrate the vdu to
7535 """
7536 # TODO: Add support for migration
7537         raise vimconn.VimConnNotImplemented("migrate_instance is not implemented for the vCloud Director connector")
7538
7539 def resize_instance(self, vm_id, flavor_id=None):
7540 """
7541 resize a vdu
7542 param:
7543 vm_id: ID of an instance
7544 flavor_id: flavor_id to resize the vdu to
7545 """
7546 # TODO: Add support for resize
7547         raise vimconn.VimConnNotImplemented("resize_instance is not implemented for the vCloud Director connector")