RO-VIM-vmware / osm_rovim_vmware / vimconn_vmware.py
1 # -*- coding: utf-8 -*-
2
3 # #
4 # Copyright 2016-2019 VMware Inc.
5 # This file is part of ETSI OSM
6 # All Rights Reserved.
7 #
8 # Licensed under the Apache License, Version 2.0 (the "License"); you may
9 # not use this file except in compliance with the License. You may obtain
10 # a copy of the License at
11 #
12 # http://www.apache.org/licenses/LICENSE-2.0
13 #
14 # Unless required by applicable law or agreed to in writing, software
15 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
16 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
17 # License for the specific language governing permissions and limitations
18 # under the License.
19 #
20 # For those usages not covered by the Apache License, Version 2.0 please
21 # contact: osslegalrouting@vmware.com
22 # #
23
24 """
25 vimconn_vmware implements the vimconn abstract class in order to interact with VMware vCloud Director.
26 """
27
28 import atexit
29 import hashlib
30 import json
31 import logging
32 import os
33 import random
34 import re
35 import shutil
36 import socket
37 import ssl
38 import struct
39 import subprocess
40 import tempfile
41 import time
42 import traceback
43 import uuid
44 from xml.etree import ElementTree as XmlElementTree
45 from xml.sax.saxutils import escape
46
47 from lxml import etree as lxmlElementTree
48 import netaddr
49 from osm_ro_plugin import vimconn
50 from progressbar import Bar, ETA, FileTransferSpeed, Percentage, ProgressBar
51 from pyvcloud.vcd.client import BasicLoginCredentials, Client
52 from pyvcloud.vcd.org import Org
53 from pyvcloud.vcd.vapp import VApp
54 from pyvcloud.vcd.vdc import VDC
55 from pyVim.connect import Disconnect, SmartConnect
56 from pyVmomi import vim, vmodl # @UnresolvedImport
57 import requests
58 import yaml
59
60 # global variable for vcd connector type
61 STANDALONE = "standalone"
62
63 # key for flavor dicts
64 FLAVOR_RAM_KEY = "ram"
65 FLAVOR_VCPUS_KEY = "vcpus"
66 FLAVOR_DISK_KEY = "disk"
67 DEFAULT_IP_PROFILE = {"dhcp_count": 50, "dhcp_enabled": True, "ip_version": "IPv4"}
68 # global variable for wait time
69 INTERVAL_TIME = 5
70 MAX_WAIT_TIME = 1800
71
72 API_VERSION = "27.0"
73
74 # -1: "Could not be created",
75 # 0: "Unresolved",
76 # 1: "Resolved",
77 # 2: "Deployed",
78 # 3: "Suspended",
79 # 4: "Powered on",
80 # 5: "Waiting for user input",
81 # 6: "Unknown state",
82 # 7: "Unrecognized state",
83 # 8: "Powered off",
84 # 9: "Inconsistent state",
85 # 10: "Children do not all have the same status",
86 # 11: "Upload initiated, OVF descriptor pending",
87 # 12: "Upload initiated, copying contents",
88 # 13: "Upload initiated , disk contents pending",
89 # 14: "Upload has been quarantined",
90 # 15: "Upload quarantine period has expired"
91
92 # mapping vCD status to MANO
93 vcdStatusCode2manoFormat = {
94 4: "ACTIVE",
95 7: "PAUSED",
96 3: "SUSPENDED",
97 8: "INACTIVE",
98 12: "BUILD",
99 -1: "ERROR",
100 14: "DELETED",
101 }
102
103 # mapping vCD network status to MANO
104 netStatus2manoFormat = {
105 "ACTIVE": "ACTIVE",
106 "PAUSED": "PAUSED",
107 "INACTIVE": "INACTIVE",
108 "BUILD": "BUILD",
109 "ERROR": "ERROR",
110 "DELETED": "DELETED",
111 }
112
113
114 class vimconnector(vimconn.VimConnector):
115 # dict used to store flavor in memory
116 flavorlist = {}
117
118 def __init__(
119 self,
120 uuid=None,
121 name=None,
122 tenant_id=None,
123 tenant_name=None,
124 url=None,
125 url_admin=None,
126 user=None,
127 passwd=None,
128 log_level=None,
129 config={},
130 persistent_info={},
131 ):
132 """
133 Constructor creates a VMware connector to vCloud Director.
134
135 By default the constructor does not validate the connection state, so a client can create the object with None arguments.
136 If the client specifies username, password, host and VDC name, the connector initializes the other missing attributes:
137
138 a) It initializes the organization UUID.
139 b) It initializes tenant_id / VDC ID (this information is derived from the tenant name).
140
141 Args:
142 uuid - organization uuid.
143 name - organization name; it must be present in vCloud Director.
144 tenant_id - VDC uuid; it must be present in vCloud Director.
145 tenant_name - VDC name.
146 url - hostname or IP address of vCloud Director.
147 url_admin - same as above.
148 user - user that administers the organization. The caller must make sure that
149 the username has the right privileges.
150
151 passwd - password for the user.
152
153 The VMware connector also requires PVDC administrative privileges and a separate account.
154 These credentials must be passed via the config argument, a dict containing the keys
155
156 dict['admin_username']
157 dict['admin_password']
158 config - provides NSX and vCenter information.
159
160 Returns:
161 Nothing.
162 """
163
164 vimconn.VimConnector.__init__(
165 self,
166 uuid,
167 name,
168 tenant_id,
169 tenant_name,
170 url,
171 url_admin,
172 user,
173 passwd,
174 log_level,
175 config,
176 )
177
178 self.logger = logging.getLogger("ro.vim.vmware")
179 self.logger.setLevel(10)
180 self.persistent_info = persistent_info
181
182 self.name = name
183 self.id = uuid
184 self.url = url
185 self.url_admin = url_admin
186 self.tenant_id = tenant_id
187 self.tenant_name = tenant_name
188 self.user = user
189 self.passwd = passwd
190 self.config = config
191 self.admin_password = None
192 self.admin_user = None
193 self.org_name = ""
194 self.nsx_manager = None
195 self.nsx_user = None
196 self.nsx_password = None
197 self.availability_zone = None
198
199 # Disable warnings from self-signed certificates.
200 requests.packages.urllib3.disable_warnings()
201
202 if tenant_name is not None:
203 orgnameandtenant = tenant_name.split(":")
204
205 if len(orgnameandtenant) == 2:
206 self.tenant_name = orgnameandtenant[1]
207 self.org_name = orgnameandtenant[0]
208 else:
209 self.tenant_name = tenant_name
210
211 if "orgname" in config:
212 self.org_name = config["orgname"]
213
214 if log_level:
215 self.logger.setLevel(getattr(logging, log_level))
216
217 try:
218 self.admin_user = config["admin_username"]
219 self.admin_password = config["admin_password"]
220 except KeyError:
221 raise vimconn.VimConnException(
222 message="Error admin username or admin password is empty."
223 )
224
225 try:
226 self.nsx_manager = config["nsx_manager"]
227 self.nsx_user = config["nsx_user"]
228 self.nsx_password = config["nsx_password"]
229 except KeyError:
230 raise vimconn.VimConnException(
231 message="Error: nsx manager or nsx user or nsx password is empty in Config"
232 )
233
234 self.vcenter_ip = config.get("vcenter_ip", None)
235 self.vcenter_port = config.get("vcenter_port", None)
236 self.vcenter_user = config.get("vcenter_user", None)
237 self.vcenter_password = config.get("vcenter_password", None)
238
239 # Set availability zone for Affinity rules
240 self.availability_zone = self.set_availability_zones()
241
242 # ############# Stub code for SRIOV #################
243 # try:
244 # self.dvs_name = config['dv_switch_name']
245 # except KeyError:
246 # raise vimconn.VimConnException(message="Error:
247 # distributed virtual switch name is empty in Config")
248 #
249 # self.vlanID_range = config.get("vlanID_range", None)
250
251 self.org_uuid = None
252 self.client = None
253
254 if not url:
255 raise vimconn.VimConnException("url param can not be NoneType")
256
257 if not self.url_admin: # try to use normal url
258 self.url_admin = self.url
259
260 logging.debug(
261 "UUID: {} name: {} tenant_id: {} tenant name {}".format(
262 self.id, self.org_name, self.tenant_id, self.tenant_name
263 )
264 )
265 logging.debug(
266 "vcd url {} vcd username: {} vcd password: {}".format(
267 self.url, self.user, self.passwd
268 )
269 )
270 logging.debug(
271 "vcd admin username {} vcd admin passowrd {}".format(
272 self.admin_user, self.admin_password
273 )
274 )
275
276 # initialize organization
277 if self.user is not None and self.passwd is not None and self.url:
278 self.init_organization()
279
280 def __getitem__(self, index):
281 if index == "name":
282 return self.name
283
284 if index == "tenant_id":
285 return self.tenant_id
286
287 if index == "tenant_name":
288 return self.tenant_name
289 elif index == "id":
290 return self.id
291 elif index == "org_name":
292 return self.org_name
293 elif index == "org_uuid":
294 return self.org_uuid
295 elif index == "user":
296 return self.user
297 elif index == "passwd":
298 return self.passwd
299 elif index == "url":
300 return self.url
301 elif index == "url_admin":
302 return self.url_admin
303 elif index == "config":
304 return self.config
305 else:
306 raise KeyError("Invalid key '{}'".format(index))
307
308 def __setitem__(self, index, value):
309 if index == "name":
310 self.name = value
311
312 if index == "tenant_id":
313 self.tenant_id = value
314
315 if index == "tenant_name":
316 self.tenant_name = value
317 elif index == "id":
318 self.id = value
319 elif index == "org_name":
320 self.org_name = value
321 elif index == "org_uuid":
322 self.org_uuid = value
323 elif index == "user":
324 self.user = value
325 elif index == "passwd":
326 self.passwd = value
327 elif index == "url":
328 self.url = value
329 elif index == "url_admin":
330 self.url_admin = value
331 else:
332 raise KeyError("Invalid key '{}'".format(index))
333
334 def connect_as_admin(self):
335 """Method connect as pvdc admin user to vCloud director.
336 There are certain action that can be done only by provider vdc admin user.
337 Organization creation / provider network creation etc.
338
339 Returns:
340 The return client object that latter can be used to connect to vcloud director as admin for provider vdc
341 """
342 self.logger.debug("Logging into vCD {} as admin.".format(self.org_name))
343
344 try:
345 host = self.url
346 org = "System"
347 client_as_admin = Client(
348 host, verify_ssl_certs=False, api_version=API_VERSION
349 )
350 client_as_admin.set_credentials(
351 BasicLoginCredentials(self.admin_user, org, self.admin_password)
352 )
353 except Exception as e:
354 raise vimconn.VimConnException(
355 "Can't connect to vCloud director as: {} with exception {}".format(
356 self.admin_user, e
357 )
358 )
359
360 return client_as_admin
361
362 def connect(self):
363 """Method connect as normal user to vCloud director.
364
365 Returns:
366 The return client object that latter can be used to connect to vCloud director as admin for VDC
367 """
368 try:
369 self.logger.debug(
370 "Logging into vCD {} as {} to datacenter {}.".format(
371 self.org_name, self.user, self.org_name
372 )
373 )
374 host = self.url
375 client = Client(host, verify_ssl_certs=False, api_version=API_VERSION)
376 client.set_credentials(
377 BasicLoginCredentials(self.user, self.org_name, self.passwd)
378 )
379 except Exception as e:
380 raise vimconn.VimConnConnectionException(
381 "Can't connect to vCloud director org: "
382 "{} as user {} with exception: {}".format(self.org_name, self.user, e)
383 )
384
385 return client
386
387 def init_organization(self):
388 """Method initialize organization UUID and VDC parameters.
389
390 At bare minimum client must provide organization name that present in vCloud director and VDC.
391
392 The VDC - UUID ( tenant_id) will be initialized at the run time if client didn't call constructor.
393 The Org - UUID will be initialized at the run time if data center present in vCloud director.
394
395 Returns:
396 The return vca object that letter can be used to connect to vcloud direct as admin
397 """
398 client = self.connect()
399
400 if not client:
401 raise vimconn.VimConnConnectionException("Failed to connect vCD.")
402
403 self.client = client
404 try:
405 if self.org_uuid is None:
406 org_list = client.get_org_list()
407 for org in org_list.Org:
408 # we set org UUID at the init phase but we can do it only when we have valid credential.
409 if org.get("name") == self.org_name:
410 self.org_uuid = org.get("href").split("/")[-1]
411 self.logger.debug(
412 "Setting organization UUID {}".format(self.org_uuid)
413 )
414 break
415 else:
416 raise vimconn.VimConnException(
417 "Vcloud director organization {} not found".format(
418 self.org_name
419 )
420 )
421
422 # if all went well, request the org details
423 org_details_dict = self.get_org(org_uuid=self.org_uuid)
424
425 # there are two cases when initializing the VDC ID or VDC name at run time:
426 # tenant_name provided but no tenant id
427 if (
428 self.tenant_id is None
429 and self.tenant_name is not None
430 and "vdcs" in org_details_dict
431 ):
432 vdcs_dict = org_details_dict["vdcs"]
433 for vdc in vdcs_dict:
434 if vdcs_dict[vdc] == self.tenant_name:
435 self.tenant_id = vdc
436 self.logger.debug(
437 "Setting vdc uuid {} for organization UUID {}".format(
438 self.tenant_id, self.org_name
439 )
440 )
441 break
442 else:
443 raise vimconn.VimConnException(
444 "Tenant name indicated but not present in vcloud director."
445 )
446
447 # case two: we have tenant_id but we don't have tenant_name, so we find and set it.
448 if (
449 self.tenant_id is not None
450 and self.tenant_name is None
451 and "vdcs" in org_details_dict
452 ):
453 vdcs_dict = org_details_dict["vdcs"]
454 for vdc in vdcs_dict:
455 if vdc == self.tenant_id:
456 self.tenant_name = vdcs_dict[vdc]
457 self.logger.debug(
458 "Setting vdc uuid {} for organization UUID {}".format(
459 self.tenant_id, self.org_name
460 )
461 )
462 break
463 else:
464 raise vimconn.VimConnException(
465 "Tenant id indicated but not present in vcloud director"
466 )
467
468 self.logger.debug("Setting organization uuid {}".format(self.org_uuid))
469 except Exception as e:
470 self.logger.debug(
471 "Failed initialize organization UUID for org {}: {}".format(
472 self.org_name, e
473 ),
474 )
475 self.logger.debug(traceback.format_exc())
476 self.org_uuid = None
477
478 def new_tenant(self, tenant_name=None, tenant_description=None):
479 """Method adds a new tenant to VIM with this name.
480 This action requires access to create VDC action in vCloud director.
481
482 Args:
483 tenant_name is tenant_name to be created.
484 tenant_description not used for this call
485
486 Return:
487 returns the tenant identifier in UUID format.
488 If action is failed method will throw vimconn.VimConnException method
489 """
490 vdc_task = self.create_vdc(vdc_name=tenant_name)
491 if vdc_task is not None:
492 vdc_uuid, _ = vdc_task.popitem()
493 self.logger.info(
494 "Created new vdc {} and uuid: {}".format(tenant_name, vdc_uuid)
495 )
496
497 return vdc_uuid
498 else:
499 raise vimconn.VimConnException(
500 "Failed create tenant {}".format(tenant_name)
501 )
502
503 def delete_tenant(self, tenant_id=None):
504 """Delete a tenant from VIM
505 Args:
506 tenant_id is tenant_id to be deleted.
507
508 Return:
509 returns the tenant identifier in UUID format.
510 If action is failed method will throw exception
511 """
512 vca = self.connect_as_admin()
513 if not vca:
514 raise vimconn.VimConnConnectionException("Failed to connect vCD")
515
516 if tenant_id is not None:
517 if vca._session:
518 # Get OrgVDC
519 url_list = [self.url, "/api/vdc/", tenant_id]
520 orgvdc_herf = "".join(url_list)
521
522 headers = {
523 "Accept": "application/*+xml;version=" + API_VERSION,
524 "x-vcloud-authorization": vca._session.headers[
525 "x-vcloud-authorization"
526 ],
527 }
528 response = self.perform_request(
529 req_type="GET", url=orgvdc_herf, headers=headers
530 )
531
532 if response.status_code != requests.codes.ok:
533 self.logger.debug(
534 "delete_tenant():GET REST API call {} failed. "
535 "Return status code {}".format(
536 orgvdc_herf, response.status_code
537 )
538 )
539
540 raise vimconn.VimConnNotFoundException(
541 "Fail to get tenant {}".format(tenant_id)
542 )
543
544 lxmlroot_respond = lxmlElementTree.fromstring(response.content)
545 namespaces = {
546 prefix: uri
547 for prefix, uri in lxmlroot_respond.nsmap.items()
548 if prefix
549 }
550 namespaces["xmlns"] = "http://www.vmware.com/vcloud/v1.5"
551 vdc_remove_href = lxmlroot_respond.find(
552 "xmlns:Link[@rel='remove']", namespaces
553 ).attrib["href"]
554 vdc_remove_href = vdc_remove_href + "?recursive=true&force=true"
555
556 response = self.perform_request(
557 req_type="DELETE", url=vdc_remove_href, headers=headers
558 )
559
560 if response.status_code == 202:
561 time.sleep(5)
562
563 return tenant_id
564 else:
565 self.logger.debug(
566 "delete_tenant(): DELETE REST API call {} failed. "
567 "Return status code {}".format(
568 vdc_remove_href, response.status_code
569 )
570 )
571
572 raise vimconn.VimConnException(
573 "Fail to delete tenant with ID {}".format(tenant_id)
574 )
575 else:
576 self.logger.debug(
577 "delete_tenant():Incorrect tenant ID {}".format(tenant_id)
578 )
579
580 raise vimconn.VimConnNotFoundException(
581 "Fail to get tenant {}".format(tenant_id)
582 )
583
584 def get_tenant_list(self, filter_dict={}):
585 """Obtain tenants of VIM
586 filter_dict can contain the following keys:
587 name: filter by tenant name
588 id: filter by tenant uuid/id
589 <other VIM specific>
590 Returns the tenant list of dictionaries:
591 [{'name': '<name>', 'id': '<id>', ...}, ...]
592
593 """
594 org_dict = self.get_org(self.org_uuid)
595 vdcs_dict = org_dict["vdcs"]
596
597 vdclist = []
598 try:
599 for k in vdcs_dict:
600 entry = {"name": vdcs_dict[k], "id": k}
601 # if caller didn't specify dictionary we return all tenants.
602
603 if filter_dict is not None and filter_dict:
604 filtered_entry = entry.copy()
605 filtered_dict = set(entry.keys()) - set(filter_dict)
606
607 for unwanted_key in filtered_dict:
608 del entry[unwanted_key]
609
610 if filter_dict == entry:
611 vdclist.append(filtered_entry)
612 else:
613 vdclist.append(entry)
614 except Exception as exp:
615 self.logger.debug("Error in get_tenant_list()")
616 self.logger.debug(traceback.format_exc())
617
618 raise vimconn.VimConnException("Incorrect state. {}".format(exp))
619
620 return vdclist
621
622 def new_network(
623 self,
624 net_name,
625 net_type,
626 ip_profile=None,
627 shared=False,
628 provider_network_profile=None,
629 ):
630 """Adds a tenant network to VIM
631 Params:
632 'net_name': name of the network
633 'net_type': one of:
634 'bridge': overlay isolated network
635 'data': underlay E-LAN network for Passthrough and SRIOV interfaces
636 'ptp': underlay E-LINE network for Passthrough and SRIOV interfaces.
637 'ip_profile': is a dict containing the IP parameters of the network
638 'ip_version': can be "IPv4" or "IPv6" (Currently only IPv4 is implemented)
639 'subnet_address': ip_prefix_schema, that is X.X.X.X/Y
640 'gateway_address': (Optional) ip_schema, that is X.X.X.X
641 'dns_address': (Optional) comma separated list of ip_schema, e.g. X.X.X.X[,X,X,X,X]
642 'dhcp_enabled': True or False
643 'dhcp_start_address': ip_schema, first IP to grant
644 'dhcp_count': number of IPs to grant.
645 'shared': if this network can be seen/use by other tenants/organization
646 'provider_network_profile': (optional) contains {segmentation-id: vlan, provider-network: vim_network}
647 Returns a tuple with the network identifier and created_items, or raises an exception on error
648 created_items can be None or a dictionary where this method can include key-values that will be passed to
649 the method delete_network. Can be used to store created segments, created l2gw connections, etc.
650 Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
651 as not present.
652 """
653
654 self.logger.debug(
655 "new_network tenant {} net_type {} ip_profile {} shared {} provider_network_profile {}".format(
656 net_name, net_type, ip_profile, shared, provider_network_profile
657 )
658 )
659 # vlan = None
660 # if provider_network_profile:
661 # vlan = provider_network_profile.get("segmentation-id")
662
663 created_items = {}
664 isshared = "false"
665
666 if shared:
667 isshared = "true"
668
669 # ############# Stub code for SRIOV #################
670 # if net_type == "data" or net_type == "ptp":
671 # if self.config.get('dv_switch_name') == None:
672 # raise vimconn.VimConnConflictException("You must provide 'dv_switch_name' at config value")
673 # network_uuid = self.create_dvPort_group(net_name)
674 parent_network_uuid = None
675
676 if provider_network_profile is not None:
677 for k, v in provider_network_profile.items():
678 if k == "physical_network":
679 parent_network_uuid = self.get_physical_network_by_name(v)
680
681 network_uuid = self.create_network(
682 network_name=net_name,
683 net_type=net_type,
684 ip_profile=ip_profile,
685 isshared=isshared,
686 parent_network_uuid=parent_network_uuid,
687 )
688
689 if network_uuid is not None:
690 return network_uuid, created_items
691 else:
692 raise vimconn.VimConnUnexpectedResponse(
693 "Failed create a new network {}".format(net_name)
694 )
695
696 def get_network_list(self, filter_dict={}):
697 """Obtain tenant networks of VIM
698 Filter_dict can be:
699 name: network name OR/AND
700 id: network uuid OR/AND
701 shared: boolean OR/AND
702 tenant_id: tenant OR/AND
703 admin_state_up: boolean
704 status: 'ACTIVE'
705
706 [{key : value , key : value}]
707
708 Returns the network list of dictionaries:
709 [{<the fields at Filter_dict plus some VIM specific>}, ...]
710 List can be empty
711 """
712
713 self.logger.debug(
714 "get_network_list(): retrieving network list for vcd {}".format(
715 self.tenant_name
716 )
717 )
718
719 if not self.tenant_name:
720 raise vimconn.VimConnConnectionException("Tenant name is empty.")
721
722 _, vdc = self.get_vdc_details()
723 if vdc is None:
724 raise vimconn.VimConnConnectionException(
725 "Can't retrieve information for a VDC {}.".format(self.tenant_name)
726 )
727
728 try:
729 vdcid = vdc.get("id").split(":")[3]
730
731 if self.client._session:
732 headers = {
733 "Accept": "application/*+xml;version=" + API_VERSION,
734 "x-vcloud-authorization": self.client._session.headers[
735 "x-vcloud-authorization"
736 ],
737 }
738 response = self.perform_request(
739 req_type="GET", url=vdc.get("href"), headers=headers
740 )
741
742 if response.status_code != 200:
743 self.logger.error("Failed to get vdc content")
744 raise vimconn.VimConnNotFoundException("Failed to get vdc content")
745 else:
746 content = XmlElementTree.fromstring(response.text)
747
748 network_list = []
749 for item in content:
750 if item.tag.split("}")[-1] == "AvailableNetworks":
751 for net in item:
752 response = self.perform_request(
753 req_type="GET", url=net.get("href"), headers=headers
754 )
755
756 if response.status_code != 200:
757 self.logger.error("Failed to get network content")
758 raise vimconn.VimConnNotFoundException(
759 "Failed to get network content"
760 )
761 else:
762 net_details = XmlElementTree.fromstring(response.text)
763
764 filter_entry = {}
765 net_uuid = net_details.get("id").split(":")
766
767 if len(net_uuid) != 4:
768 continue
769 else:
770 net_uuid = net_uuid[3]
771 # create dict entry
772 self.logger.debug(
773 "get_network_list(): Adding net {}"
774 " to a list vcd id {} network {}".format(
775 net_uuid, vdcid, net_details.get("name")
776 )
777 )
778 filter_entry["name"] = net_details.get("name")
779 filter_entry["id"] = net_uuid
780
781 if [
782 i.text
783 for i in net_details
784 if i.tag.split("}")[-1] == "IsShared"
785 ][0] == "true":
786 shared = True
787 else:
788 shared = False
789
790 filter_entry["shared"] = shared
791 filter_entry["tenant_id"] = vdcid
792
793 if int(net_details.get("status")) == 1:
794 filter_entry["admin_state_up"] = True
795 else:
796 filter_entry["admin_state_up"] = False
797
798 filter_entry["status"] = "ACTIVE"
799 filter_entry["type"] = "bridge"
800 filtered_entry = filter_entry.copy()
801
802 if filter_dict is not None and filter_dict:
803 # we remove all the key:value pairs we don't care about and match only
804 # the requested fields
805 filtered_dict = set(filter_entry.keys()) - set(
806 filter_dict
807 )
808
809 for unwanted_key in filtered_dict:
810 del filter_entry[unwanted_key]
811
812 if filter_dict == filter_entry:
813 network_list.append(filtered_entry)
814 else:
815 network_list.append(filtered_entry)
816 except Exception as e:
817 self.logger.debug("Error in get_network_list", exc_info=True)
818
819 if isinstance(e, vimconn.VimConnException):
820 raise
821 else:
822 raise vimconn.VimConnNotFoundException(
823 "Failed : Networks list not found {} ".format(e)
824 )
825
826 self.logger.debug("Returning {}".format(network_list))
827
828 return network_list
829
830 def get_network(self, net_id):
831 """Method obtains network details of net_id VIM network
832 Returns a dict with the fields at filter_dict (see get_network_list) plus some VIM specific fields.
833 """
834 try:
835 _, vdc = self.get_vdc_details()
836 vdc_id = vdc.get("id").split(":")[3]
837
838 if self.client._session:
839 headers = {
840 "Accept": "application/*+xml;version=" + API_VERSION,
841 "x-vcloud-authorization": self.client._session.headers[
842 "x-vcloud-authorization"
843 ],
844 }
845 response = self.perform_request(
846 req_type="GET", url=vdc.get("href"), headers=headers
847 )
848
849 if response.status_code != 200:
850 self.logger.error("Failed to get vdc content")
851 raise vimconn.VimConnNotFoundException("Failed to get vdc content")
852 else:
853 content = XmlElementTree.fromstring(response.text)
854
855 filter_dict = {}
856
857 for item in content:
858 if item.tag.split("}")[-1] == "AvailableNetworks":
859 for net in item:
860 response = self.perform_request(
861 req_type="GET", url=net.get("href"), headers=headers
862 )
863
864 if response.status_code != 200:
865 self.logger.error("Failed to get network content")
866 raise vimconn.VimConnNotFoundException(
867 "Failed to get network content"
868 )
869 else:
870 net_details = XmlElementTree.fromstring(response.text)
871
872 vdc_network_id = net_details.get("id").split(":")
873 if len(vdc_network_id) == 4 and vdc_network_id[3] == net_id:
874 filter_dict["name"] = net_details.get("name")
875 filter_dict["id"] = vdc_network_id[3]
876
877 if [
878 i.text
879 for i in net_details
880 if i.tag.split("}")[-1] == "IsShared"
881 ][0] == "true":
882 shared = True
883 else:
884 shared = False
885
886 filter_dict["shared"] = shared
887 filter_dict["tenant_id"] = vdc_id
888
889 if int(net_details.get("status")) == 1:
890 filter_dict["admin_state_up"] = True
891 else:
892 filter_dict["admin_state_up"] = False
893
894 filter_dict["status"] = "ACTIVE"
895 filter_dict["type"] = "bridge"
896 self.logger.debug("Returning {}".format(filter_dict))
897
898 return filter_dict
899 else:
900 raise vimconn.VimConnNotFoundException(
901 "Network {} not found".format(net_id)
902 )
903 except Exception as e:
904 self.logger.debug("Error in get_network")
905 self.logger.debug(traceback.format_exc())
906
907 if isinstance(e, vimconn.VimConnException):
908 raise
909 else:
910 raise vimconn.VimConnNotFoundException(
911 "Failed : Network not found {} ".format(e)
912 )
913
914 return filter_dict
915
916 def delete_network(self, net_id, created_items=None):
917 """
918 Removes a tenant network from VIM and its associated elements
919 :param net_id: VIM identifier of the network, provided by method new_network
920 :param created_items: dictionary with extra items to be deleted. provided by method new_network
921 Returns the network identifier or raises an exception upon error or when network is not found
922 """
923 vcd_network = self.get_vcd_network(network_uuid=net_id)
924 if vcd_network is not None and vcd_network:
925 if self.delete_network_action(network_uuid=net_id):
926 return net_id
927 else:
928 raise vimconn.VimConnNotFoundException(
929 "Network {} not found".format(net_id)
930 )
931
932 def refresh_nets_status(self, net_list):
933 """Get the status of the networks
934 Params: the list of network identifiers
935 Returns a dictionary with:
936 net_id: #VIM id of this network
937 status: #Mandatory. Text with one of:
938 # DELETED (not found at vim)
939 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
940 # OTHER (Vim reported other status not understood)
941 # ERROR (VIM indicates an ERROR status)
942 # ACTIVE, INACTIVE, DOWN (admin down),
943 # BUILD (on building process)
944 #
945 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
946 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
947
948 """
949 dict_entry = {}
950 try:
951 for net in net_list:
952 errormsg = ""
953 vcd_network = self.get_vcd_network(network_uuid=net)
954 if vcd_network is not None and vcd_network:
955 if vcd_network["status"] == "1":
956 status = "ACTIVE"
957 else:
958 status = "DOWN"
959 else:
960 status = "DELETED"
961 errormsg = "Network not found."
962
963 dict_entry[net] = {
964 "status": status,
965 "error_msg": errormsg,
966 "vim_info": yaml.safe_dump(vcd_network),
967 }
968 except Exception:
969 self.logger.debug("Error in refresh_nets_status")
970 self.logger.debug(traceback.format_exc())
971
972 return dict_entry
973
974 def get_flavor(self, flavor_id):
975 """Obtain flavor details from the VIM
976 Returns the flavor dict details {'id':<>, 'name':<>, other vim specific } #TODO to concrete
977 """
978 if flavor_id not in vimconnector.flavorlist:
979 raise vimconn.VimConnNotFoundException("Flavor not found.")
980
981 return vimconnector.flavorlist[flavor_id]
982
983 def new_flavor(self, flavor_data):
984 """Adds a tenant flavor to VIM
985 flavor_data contains a dictionary with information, keys:
986 name: flavor name
987 ram: memory (cloud type) in MBytes
988 vcpus: cpus (cloud type)
989 extended: EPA parameters
990 - numas: #items requested in same NUMA
991 memory: number of 1G huge pages memory
992 paired-threads|cores|threads: number of paired hyperthreads, complete cores OR individual
993 threads
994 interfaces: # passthrough(PT) or SRIOV interfaces attached to this numa
995 - name: interface name
996 dedicated: yes|no|yes:sriov; for PT, SRIOV or only one SRIOV for the physical NIC
997 bandwidth: X Gbps; requested guarantee bandwidth
998 vpci: requested virtual PCI address
999 disk: disk size
1000 is_public:
1001 #TODO to concrete
1002 Returns the flavor identifier"""
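# Illustrative only (assuming a connector instance named conn): a flavor_data dict with
# EPA extensions. When "numas" is present, the ram/vcpus values below are overwritten as
# described in the loop that follows (memory in GiB -> MiB, paired-threads doubled).
#
#   example_flavor = {
#       "name": "small-epa",
#       "ram": 2048,          # MiB
#       "vcpus": 2,
#       "disk": 10,           # GB
#       "extended": {
#           "numas": [
#               {"memory": 4, "paired-threads": 2}   # -> ram=4096 MiB, vcpus=4
#           ]
#       },
#   }
#   flavor_id = conn.new_flavor(example_flavor)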
1003
1004 # generate a new uuid put to internal dict and return it.
1005 self.logger.debug("Creating new flavor - flavor_data: {}".format(flavor_data))
1006 new_flavor = flavor_data
1007 ram = flavor_data.get(FLAVOR_RAM_KEY, 1024)
1008 cpu = flavor_data.get(FLAVOR_VCPUS_KEY, 1)
1009 disk = flavor_data.get(FLAVOR_DISK_KEY, 0)
1010
1011 if not isinstance(ram, int):
1012 raise vimconn.VimConnException("Non-integer value for ram")
1013 elif not isinstance(cpu, int):
1014 raise vimconn.VimConnException("Non-integer value for cpu")
1015 elif not isinstance(disk, int):
1016 raise vimconn.VimConnException("Non-integer value for disk")
1017
1018 extended_flv = flavor_data.get("extended")
1019 if extended_flv:
1020 numas = extended_flv.get("numas")
1021 if numas:
1022 for numa in numas:
1023 # overwrite ram and vcpus
1024 if "memory" in numa:
1025 ram = numa["memory"] * 1024
1026
1027 if "paired-threads" in numa:
1028 cpu = numa["paired-threads"] * 2
1029 elif "cores" in numa:
1030 cpu = numa["cores"]
1031 elif "threads" in numa:
1032 cpu = numa["threads"]
1033
1034 new_flavor[FLAVOR_RAM_KEY] = ram
1035 new_flavor[FLAVOR_VCPUS_KEY] = cpu
1036 new_flavor[FLAVOR_DISK_KEY] = disk
1037 # generate a new uuid put to internal dict and return it.
1038 flavor_id = uuid.uuid4()
1039 vimconnector.flavorlist[str(flavor_id)] = new_flavor
1040 self.logger.debug("Created flavor - {} : {}".format(flavor_id, new_flavor))
1041
1042 return str(flavor_id)
1043
1044 def delete_flavor(self, flavor_id):
1045 """Deletes a tenant flavor from VIM identify by its id
1046
1047 Returns the used id or raise an exception
1048 """
1049 if flavor_id not in vimconnector.flavorlist:
1050 raise vimconn.VimConnNotFoundException("Flavor not found.")
1051
1052 vimconnector.flavorlist.pop(flavor_id, None)
1053
1054 return flavor_id
1055
1056 def new_image(self, image_dict):
1057 """
1058 Adds a tenant image to VIM
1059 Returns:
1060 200, image-id if the image is created
1061 <0, message if there is an error
1062 """
1063 return self.get_image_id_from_path(image_dict["location"])
1064
1065 def delete_image(self, image_id):
1066 """
1067 Deletes a tenant image from VIM
1068 Args:
1069 image_id is ID of Image to be deleted
1070 Return:
1071 returns the image identifier in UUID format or raises an exception on error
1072 """
1073 conn = self.connect_as_admin()
1074
1075 if not conn:
1076 raise vimconn.VimConnConnectionException("Failed to connect vCD")
1077
1078 # Get Catalog details
1079 url_list = [self.url, "/api/catalog/", image_id]
1080 catalog_herf = "".join(url_list)
1081
1082 headers = {
1083 "Accept": "application/*+xml;version=" + API_VERSION,
1084 "x-vcloud-authorization": conn._session.headers["x-vcloud-authorization"],
1085 }
1086
1087 response = self.perform_request(
1088 req_type="GET", url=catalog_herf, headers=headers
1089 )
1090
1091 if response.status_code != requests.codes.ok:
1092 self.logger.debug(
1093 "delete_image():GET REST API call {} failed. "
1094 "Return status code {}".format(catalog_herf, response.status_code)
1095 )
1096
1097 raise vimconn.VimConnNotFoundException(
1098 "Fail to get image {}".format(image_id)
1099 )
1100
1101 lxmlroot_respond = lxmlElementTree.fromstring(response.content)
1102 namespaces = {
1103 prefix: uri for prefix, uri in lxmlroot_respond.nsmap.items() if prefix
1104 }
1105 namespaces["xmlns"] = "http://www.vmware.com/vcloud/v1.5"
1106
1107 catalogItems_section = lxmlroot_respond.find("xmlns:CatalogItems", namespaces)
1108 catalogItems = catalogItems_section.iterfind("xmlns:CatalogItem", namespaces)
1109
1110 for catalogItem in catalogItems:
1111 catalogItem_href = catalogItem.attrib["href"]
1112
1113 response = self.perform_request(
1114 req_type="GET", url=catalogItem_href, headers=headers
1115 )
1116
1117 if response.status_code != requests.codes.ok:
1118 self.logger.debug(
1119 "delete_image():GET REST API call {} failed. "
1120 "Return status code {}".format(catalog_herf, response.status_code)
1121 )
1122 raise vimconn.VimConnNotFoundException(
1123 "Fail to get catalogItem {} for catalog {}".format(
1124 catalogItem, image_id
1125 )
1126 )
1127
1128 lxmlroot_respond = lxmlElementTree.fromstring(response.content)
1129 namespaces = {
1130 prefix: uri for prefix, uri in lxmlroot_respond.nsmap.items() if prefix
1131 }
1132 namespaces["xmlns"] = "http://www.vmware.com/vcloud/v1.5"
1133 catalogitem_remove_href = lxmlroot_respond.find(
1134 "xmlns:Link[@rel='remove']", namespaces
1135 ).attrib["href"]
1136
1137 # Remove catalogItem
1138 response = self.perform_request(
1139 req_type="DELETE", url=catalogitem_remove_href, headers=headers
1140 )
1141
1142 if response.status_code == requests.codes.no_content:
1143 self.logger.debug("Deleted Catalog item {}".format(catalogItem))
1144 else:
1145 raise vimconn.VimConnException(
1146 "Fail to delete Catalog Item {}".format(catalogItem)
1147 )
1148
1149 # Remove catalog
1150 url_list = [self.url, "/api/admin/catalog/", image_id]
1151 catalog_remove_herf = "".join(url_list)
1152 response = self.perform_request(
1153 req_type="DELETE", url=catalog_remove_herf, headers=headers
1154 )
1155
1156 if response.status_code == requests.codes.no_content:
1157 self.logger.debug("Deleted Catalog {}".format(image_id))
1158
1159 return image_id
1160 else:
1161 raise vimconn.VimConnException("Fail to delete Catalog {}".format(image_id))
1162
1163 def catalog_exists(self, catalog_name, catalogs):
1164 """
1165
1166 :param catalog_name:
1167 :param catalogs:
1168 :return:
1169 """
1170 for catalog in catalogs:
1171 if catalog["name"] == catalog_name:
1172 return catalog["id"]
1173
1174 def create_vimcatalog(self, vca=None, catalog_name=None):
1175 """Create new catalog entry in vCloud director.
1176
1177 Args
1178 vca: vCloud director.
1179 catalog_name: catalog that the client wishes to create. Note that no validation is done on the name;
1180 the client must make sure to provide a valid string representation.
1181
1182 Returns the catalog id if the catalog is created, else None.
1183
1184 """
1185 try:
1186 lxml_catalog_element = vca.create_catalog(catalog_name, catalog_name)
1187
1188 if lxml_catalog_element:
1189 id_attr_value = lxml_catalog_element.get("id")
1190 return id_attr_value.split(":")[-1]
1191
1192 catalogs = vca.list_catalogs()
1193 except Exception as ex:
1194 self.logger.error(
1195 'create_vimcatalog(): Creation of catalog "{}" failed with error: {}'.format(
1196 catalog_name, ex
1197 )
1198 )
1199 raise
1200 return self.catalog_exists(catalog_name, catalogs)
1201
1202 # noinspection PyIncorrectDocstring
1203 def upload_ovf(
1204 self,
1205 vca=None,
1206 catalog_name=None,
1207 image_name=None,
1208 media_file_name=None,
1209 description="",
1210 progress=False,
1211 chunk_bytes=128 * 1024,
1212 ):
1213 """
1214 Uploads a OVF file to a vCloud catalog
1215
1216 :param chunk_bytes:
1217 :param progress:
1218 :param description:
1219 :param image_name:
1220 :param vca:
1221 :param catalog_name: (str): The name of the catalog to upload the media.
1222 :param media_file_name: (str): The name of the local media file to upload.
1223 :return: (bool) True if the media file was successfully uploaded, false otherwise.
1224 """
1225 os.path.isfile(media_file_name)
1226 statinfo = os.stat(media_file_name)
1227
1228 # find a catalog entry where we upload OVF.
1229 # create vApp Template and check the status: if vCD is able to read the OVF it will respond with the appropriate
1230 # status change.
1231 # if VCD can parse OVF we upload VMDK file
1232 try:
1233 for catalog in vca.list_catalogs():
1234 if catalog_name != catalog["name"]:
1235 continue
1236 catalog_href = "{}/api/catalog/{}/action/upload".format(
1237 self.url, catalog["id"]
1238 )
1239 data = """
1240 <UploadVAppTemplateParams name="{}"
1241 xmlns="http://www.vmware.com/vcloud/v1.5"
1242 xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1">
1243 <Description>{} vApp Template</Description>
1244 </UploadVAppTemplateParams>
1245 """.format(
1246 catalog_name, description
1247 )
1248
1249 if self.client:
1250 headers = {
1251 "Accept": "application/*+xml;version=" + API_VERSION,
1252 "x-vcloud-authorization": self.client._session.headers[
1253 "x-vcloud-authorization"
1254 ],
1255 }
1256 headers[
1257 "Content-Type"
1258 ] = "application/vnd.vmware.vcloud.uploadVAppTemplateParams+xml"
1259
1260 response = self.perform_request(
1261 req_type="POST", url=catalog_href, headers=headers, data=data
1262 )
1263
1264 if response.status_code == requests.codes.created:
1265 catalogItem = XmlElementTree.fromstring(response.text)
1266 entity = [
1267 child
1268 for child in catalogItem
1269 if child.get("type")
1270 == "application/vnd.vmware.vcloud.vAppTemplate+xml"
1271 ][0]
1272 href = entity.get("href")
1273 template = href
1274
1275 response = self.perform_request(
1276 req_type="GET", url=href, headers=headers
1277 )
1278
1279 if response.status_code == requests.codes.ok:
1280 headers["Content-Type"] = "Content-Type text/xml"
1281 result = re.search(
1282 'rel="upload:default"\shref="(.*?\/descriptor.ovf)"',
1283 response.text,
1284 )
1285
1286 if result:
1287 transfer_href = result.group(1)
1288
1289 response = self.perform_request(
1290 req_type="PUT",
1291 url=transfer_href,
1292 headers=headers,
1293 data=open(media_file_name, "rb"),
1294 )
1295
1296 if response.status_code != requests.codes.ok:
1297 self.logger.debug(
1298 "Failed create vApp template for catalog name {} and image {}".format(
1299 catalog_name, media_file_name
1300 )
1301 )
1302 return False
1303
1304 # TODO fix this with an async block
1305 time.sleep(5)
1306
1307 self.logger.debug(
1308 "vApp template for catalog name {} and image {}".format(
1309 catalog_name, media_file_name
1310 )
1311 )
1312
1313 # uploading VMDK file
1314 # check status of OVF upload and upload remaining files.
1315 response = self.perform_request(
1316 req_type="GET", url=template, headers=headers
1317 )
1318
1319 if response.status_code == requests.codes.ok:
1320 result = re.search(
1321 'rel="upload:default"\s*href="(.*?vmdk)"', response.text
1322 )
1323
1324 if result:
1325 link_href = result.group(1)
1326
1327 # we skip ovf since it already uploaded.
1328 if "ovf" in link_href:
1329 continue
1330
1331 # The OVF file and the VMDK must be in the same directory
1332 head, _ = os.path.split(media_file_name)
1333 file_vmdk = head + "/" + link_href.split("/")[-1]
1334
1335 if not os.path.isfile(file_vmdk):
1336 return False
1337
1338 statinfo = os.stat(file_vmdk)
1339 if statinfo.st_size == 0:
1340 return False
1341
1342 hrefvmdk = link_href
1343
1344 if progress:
1345 widgets = [
1346 "Uploading file: ",
1347 Percentage(),
1348 " ",
1349 Bar(),
1350 " ",
1351 ETA(),
1352 " ",
1353 FileTransferSpeed(),
1354 ]
1355 progress_bar = ProgressBar(
1356 widgets=widgets, maxval=statinfo.st_size
1357 ).start()
1358
1359 bytes_transferred = 0
1360 f = open(file_vmdk, "rb")
1361
1362 while bytes_transferred < statinfo.st_size:
1363 my_bytes = f.read(chunk_bytes)
1364 if len(my_bytes) <= chunk_bytes:
1365 headers["Content-Range"] = "bytes {}-{}/{}".format(
1366 bytes_transferred,
1367 bytes_transferred + len(my_bytes) - 1,
1368 statinfo.st_size,
1369 )
1370 headers["Content-Length"] = str(len(my_bytes))
1371 response = requests.put(
1372 url=hrefvmdk,
1373 headers=headers,
1374 data=my_bytes,
1375 verify=False,
1376 )
1377
1378 if response.status_code == requests.codes.ok:
1379 bytes_transferred += len(my_bytes)
1380 if progress:
1381 progress_bar.update(bytes_transferred)
1382 else:
1383 self.logger.debug(
1384 "file upload failed with error: [{}] {}".format(
1385 response.status_code, response.text
1386 )
1387 )
1388
1389 f.close()
1390
1391 return False
1392
1393 f.close()
1394 if progress:
1395 progress_bar.finish()
1396 time.sleep(10)
1397
1398 return True
1399 else:
1400 self.logger.debug(
1401 "Failed retrieve vApp template for catalog name {} for OVF {}".format(
1402 catalog_name, media_file_name
1403 )
1404 )
1405 return False
1406 except Exception as exp:
1407 self.logger.debug(
1408 "Failed while uploading OVF to catalog {} for OVF file {} with Exception {}".format(
1409 catalog_name, media_file_name, exp
1410 )
1411 )
1412
1413 raise vimconn.VimConnException(
1414 "Failed while uploading OVF to catalog {} for OVF file {} with Exception {}".format(
1415 catalog_name, media_file_name, exp
1416 )
1417 )
1418
1419 self.logger.debug(
1420 "Failed retrieve catalog name {} for OVF file {}".format(
1421 catalog_name, media_file_name
1422 )
1423 )
1424
1425 return False
1426
1427 def upload_vimimage(
1428 self,
1429 vca=None,
1430 catalog_name=None,
1431 media_name=None,
1432 medial_file_name=None,
1433 progress=False,
1434 ):
1435 """Upload media file"""
1436 # TODO add named parameters for readability
1437 return self.upload_ovf(
1438 vca=vca,
1439 catalog_name=catalog_name,
1440 image_name=media_name.split(".")[0],
1441 media_file_name=medial_file_name,
1442 description="medial_file_name",
1443 progress=progress,
1444 )
1445
1446 def validate_uuid4(self, uuid_string=None):
1447 """Method validate correct format of UUID.
1448
1449 Return: true if string represent valid uuid
1450 """
1451 try:
1452 uuid.UUID(uuid_string, version=4)
1453 except ValueError:
1454 return False
1455
1456 return True
1457
1458 def get_catalogid(self, catalog_name=None, catalogs=None):
1459 """Method check catalog and return catalog ID in UUID format.
1460
1461 Args
1462 catalog_name: catalog name as string
1463 catalogs: list of catalogs.
1464
1465 Return: catalogs uuid
1466 """
1467 for catalog in catalogs:
1468 if catalog["name"] == catalog_name:
1469 catalog_id = catalog["id"]
1470 return catalog_id
1471
1472 return None
1473
1474 def get_catalogbyid(self, catalog_uuid=None, catalogs=None):
1475 """Method check catalog and return catalog name lookup done by catalog UUID.
1476
1477 Args
1478 catalog_name: catalog name as string
1479 catalogs: list of catalogs.
1480
1481 Return: catalogs name or None
1482 """
1483 if not self.validate_uuid4(uuid_string=catalog_uuid):
1484 return None
1485
1486 for catalog in catalogs:
1487 catalog_id = catalog.get("id")
1488
1489 if catalog_id == catalog_uuid:
1490 return catalog.get("name")
1491
1492 return None
1493
1494 def get_catalog_obj(self, catalog_uuid=None, catalogs=None):
1495 """Method check catalog and return catalog name lookup done by catalog UUID.
1496
1497 Args
1498 catalog_name: catalog name as string
1499 catalogs: list of catalogs.
1500
1501 Return: catalogs name or None
1502 """
1503 if not self.validate_uuid4(uuid_string=catalog_uuid):
1504 return None
1505
1506 for catalog in catalogs:
1507 catalog_id = catalog.get("id")
1508
1509 if catalog_id == catalog_uuid:
1510 return catalog
1511
1512 return None
1513
1514 def get_image_id_from_path(self, path=None, progress=False):
1515 """Method upload OVF image to vCloud director.
1516
1517 Each OVF image represented as single catalog entry in vcloud director.
1518 The method check for existing catalog entry. The check done by file name without file extension.
1519
1520 if given catalog name already present method will respond with existing catalog uuid otherwise
1521 it will create new catalog entry and upload OVF file to newly created catalog.
1522
1523 If method can't create catalog entry or upload a file it will throw exception.
1524
1525 Method accept boolean flag progress that will output progress bar. It useful method
1526 for standalone upload use case. In case to test large file upload.
1527
1528 Args
1529 path: - valid path to OVF file.
1530 progress - boolean progress bar show progress bar.
1531
1532 Return: if image uploaded correct method will provide image catalog UUID.
1533 """
1534 if not path:
1535 raise vimconn.VimConnException("Image path can't be None.")
1536
1537 if not os.path.isfile(path):
1538 raise vimconn.VimConnException("Can't read file. File not found.")
1539
1540 if not os.access(path, os.R_OK):
1541 raise vimconn.VimConnException(
1542 "Can't read file. Check file permission to read."
1543 )
1544
1545 self.logger.debug("get_image_id_from_path() client requesting {} ".format(path))
1546
1547 _, filename = os.path.split(path)
1548 _, file_extension = os.path.splitext(path)
1549 if file_extension != ".ovf":
1550 self.logger.debug(
1551 "Wrong file extension {} connector support only OVF container.".format(
1552 file_extension
1553 )
1554 )
1555
1556 raise vimconn.VimConnException(
1557 "Wrong container. vCloud director supports only OVF."
1558 )
1559
1560 catalog_name = os.path.splitext(filename)[0]
1561 catalog_md5_name = hashlib.md5(path.encode("utf-8")).hexdigest()
1562 self.logger.debug(
1563 "File name {} Catalog Name {} file path {} "
1564 "vdc catalog name {}".format(filename, catalog_name, path, catalog_md5_name)
1565 )
1566
1567 try:
1568 org, _ = self.get_vdc_details()
1569 catalogs = org.list_catalogs()
1570 except Exception as exp:
1571 self.logger.debug("Failed get catalogs() with Exception {} ".format(exp))
1572
1573 raise vimconn.VimConnException(
1574 "Failed get catalogs() with Exception {} ".format(exp)
1575 )
1576
1577 if len(catalogs) == 0:
1578 self.logger.info(
1579 "Creating a new catalog entry {} in vcloud director".format(
1580 catalog_name
1581 )
1582 )
1583
1584 if self.create_vimcatalog(org, catalog_md5_name) is None:
1585 raise vimconn.VimConnException(
1586 "Failed create new catalog {} ".format(catalog_md5_name)
1587 )
1588
1589 result = self.upload_vimimage(
1590 vca=org,
1591 catalog_name=catalog_md5_name,
1592 media_name=filename,
1593 medial_file_name=path,
1594 progress=progress,
1595 )
1596
1597 if not result:
1598 raise vimconn.VimConnException(
1599 "Failed create vApp template for catalog {} ".format(catalog_name)
1600 )
1601
1602 return self.get_catalogid(catalog_name, catalogs)
1603 else:
1604 for catalog in catalogs:
1605 # search for existing catalog if we find same name we return ID
1606 # TODO optimize this
1607 if catalog["name"] == catalog_md5_name:
1608 self.logger.debug(
1609 "Found existing catalog entry for {} "
1610 "catalog id {}".format(
1611 catalog_name, self.get_catalogid(catalog_md5_name, catalogs)
1612 )
1613 )
1614
1615 return self.get_catalogid(catalog_md5_name, catalogs)
1616
1617 # if we didn't find existing catalog we create a new one and upload image.
1618 self.logger.debug(
1619 "Creating new catalog entry {} - {}".format(catalog_name, catalog_md5_name)
1620 )
1621 if self.create_vimcatalog(org, catalog_md5_name) is None:
1622 raise vimconn.VimConnException(
1623 "Failed create new catalog {} ".format(catalog_md5_name)
1624 )
1625
1626 result = self.upload_vimimage(
1627 vca=org,
1628 catalog_name=catalog_md5_name,
1629 media_name=filename,
1630 medial_file_name=path,
1631 progress=progress,
1632 )
1633 if not result:
1634 raise vimconn.VimConnException(
1635 "Failed create vApp template for catalog {} ".format(catalog_md5_name)
1636 )
1637
1638 return self.get_catalogid(catalog_md5_name, org.list_catalogs())
1639
1640 def get_image_list(self, filter_dict={}):
1641 """Obtain tenant images from VIM
1642 Filter_dict can be:
1643 name: image name
1644 id: image uuid
1645 checksum: image checksum
1646 location: image path
1647 Returns the image list of dictionaries:
1648 [{<the fields at Filter_dict plus some VIM specific>}, ...]
1649 List can be empty
1650 """
1651 try:
1652 org, _ = self.get_vdc_details()
1653 image_list = []
1654 catalogs = org.list_catalogs()
1655
1656 if len(catalogs) == 0:
1657 return image_list
1658 else:
1659 for catalog in catalogs:
1660 catalog_uuid = catalog.get("id")
1661 name = catalog.get("name")
1662 filtered_dict = {}
1663
1664 if filter_dict.get("name") and filter_dict["name"] != name:
1665 continue
1666
1667 if filter_dict.get("id") and filter_dict["id"] != catalog_uuid:
1668 continue
1669
1670 filtered_dict["name"] = name
1671 filtered_dict["id"] = catalog_uuid
1672 image_list.append(filtered_dict)
1673
1674 self.logger.debug(
1675 "List of already created catalog items: {}".format(image_list)
1676 )
1677
1678 return image_list
1679 except Exception as exp:
1680 raise vimconn.VimConnException(
1681 "Exception occured while retriving catalog items {}".format(exp)
1682 )
1683
1684 def get_namebyvappid(self, vapp_uuid=None):
1685 """Method returns vApp name from vCD and lookup done by vapp_id.
1686
1687 Args:
1688 vapp_uuid: vappid is application identifier
1689
1690 Returns:
1691 The return vApp name otherwise None
1692 """
1693 try:
1694 if self.client and vapp_uuid:
1695 vapp_call = "{}/api/vApp/vapp-{}".format(self.url, vapp_uuid)
1696 headers = {
1697 "Accept": "application/*+xml;version=" + API_VERSION,
1698 "x-vcloud-authorization": self.client._session.headers[
1699 "x-vcloud-authorization"
1700 ],
1701 }
1702
1703 response = self.perform_request(
1704 req_type="GET", url=vapp_call, headers=headers
1705 )
1706
1707 # Retry login if session expired & retry sending request
1708 if response.status_code == 403:
1709 response = self.retry_rest("GET", vapp_call)
1710
1711 tree = XmlElementTree.fromstring(response.text)
1712
1713 return tree.attrib["name"] if "name" in tree.attrib else None
1714 except Exception as e:
1715 self.logger.exception(e)
1716
1717 return None
1718
1719 return None
1720
1721 def new_vminstance(
1722 self,
1723 name=None,
1724 description="",
1725 start=False,
1726 image_id=None,
1727 flavor_id=None,
1728 affinity_group_list=[],
1729 net_list=[],
1730 cloud_config=None,
1731 disk_list=None,
1732 availability_zone_index=None,
1733 availability_zone_list=None,
1734 ):
1735 """Adds a VM instance to VIM
1736 Params:
1737 'start': (boolean) indicates if VM must start or created in pause mode.
1738 'image_id','flavor_id': image and flavor VIM id to use for the VM
1739 'net_list': list of interfaces, each one is a dictionary with:
1740 'name': (optional) name for the interface.
1741 'net_id': VIM network id where this interface must be connect to. Mandatory for type==virtual
1742 'vpci': (optional) virtual vPCI address to assign at the VM. Can be ignored depending on VIM
1743 capabilities
1744 'model': (optional and only have sense for type==virtual) interface model: virtio, e1000, ...
1745 'mac_address': (optional) mac address to assign to this interface
1746 #TODO: CHECK if an optional 'vlan' parameter is needed for VIMs when type if VF and net_id is not
1747 provided, the VLAN tag to be used. In case net_id is provided, the internal network vlan is used
1748 for tagging VF
1749 'type': (mandatory) can be one of:
1750 'virtual', in this case always connected to a network of type 'net_type=bridge'
1751 'PCI-PASSTHROUGH' or 'PF' (passthrough): depending on VIM capabilities it can be connected to a
1752 data/ptp network or it can created unconnected
1753 'SR-IOV' or 'VF' (SRIOV with VLAN tag): same as PF for network connectivity.
1754 'VFnotShared'(SRIOV without VLAN tag) same as PF for network connectivity. VF where no other VFs
1755 are allocated on the same physical NIC
1756 'bw': (optional) only for PF/VF/VFnotShared. Minimal Bandwidth required for the interface in GBPS
1757 'port_security': (optional) If False it must avoid any traffic filtering at this interface. If missing
1758 or True, it must apply the default VIM behaviour
1759 After execution the method will add the key:
1760 'vim_id': must be filled/added by this method with the VIM identifier generated by the VIM for this
1761 interface. 'net_list' is modified
1762 'cloud_config': (optional) dictionary with:
1763 'key-pairs': (optional) list of strings with the public key to be inserted to the default user
1764 'users': (optional) list of users to be inserted, each item is a dict with:
1765 'name': (mandatory) user name,
1766 'key-pairs': (optional) list of strings with the public key to be inserted to the user
1767 'user-data': (optional) can be a string with the text script to be passed directly to cloud-init,
1768 or a list of strings, each one contains a script to be passed, usually with a MIMEmultipart file
1769 'config-files': (optional). List of files to be transferred. Each item is a dict with:
1770 'dest': (mandatory) string with the destination absolute path
1771 'encoding': (optional, by default text). Can be one of:
1772 'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
1773 'content' (mandatory): string with the content of the file
1774 'permissions': (optional) string with file permissions, typically octal notation '0644'
1775 'owner': (optional) file owner, string with the format 'owner:group'
1776 'boot-data-drive': boolean to indicate if user-data must be passed using a boot drive (hard disk)
1777 'disk_list': (optional) list with additional disks to the VM. Each item is a dict with:
1778 'image_id': (optional). VIM id of an existing image. If not provided an empty disk must be mounted
1779 'size': (mandatory) string with the size of the disk in GB
1780 availability_zone_index: Index of availability_zone_list to use for this this VM. None if not AV required
1781 availability_zone_list: list of availability zones given by user in the VNFD descriptor. Ignore if
1782 availability_zone_index is None
1783 Returns a tuple with the instance identifier and created_items or raises an exception on error
1784 created_items can be None or a dictionary where this method can include key-values that will be passed to
1785 the method delete_vminstance and action_vminstance. Can be used to store created ports, volumes, etc.
1786 Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
1787 as not present.
1788 """
1789 self.logger.info("Creating new instance for entry {}".format(name))
1790 self.logger.debug(
1791 "desc {} boot {} image_id: {} flavor_id: {} net_list: {} cloud_config {} disk_list {} "
1792 "availability_zone_index {} availability_zone_list {}".format(
1793 description,
1794 start,
1795 image_id,
1796 flavor_id,
1797 net_list,
1798 cloud_config,
1799 disk_list,
1800 availability_zone_index,
1801 availability_zone_list,
1802 )
1803 )
1804
1805 # new vm name = vmname + "-" + uuid
1806 new_vm_name = [name, "-", str(uuid.uuid4())]
1807 vmname_andid = "".join(new_vm_name)
1808
1809 for net in net_list:
1810 if net["type"] == "PCI-PASSTHROUGH":
1811 raise vimconn.VimConnNotSupportedException(
1812 "Current vCD version does not support type : {}".format(net["type"])
1813 )
1814
1815 if len(net_list) > 10:
1816 raise vimconn.VimConnNotSupportedException(
1817 "The VM hardware versions 7 and above support upto 10 NICs only"
1818 )
1819
1820 # if vm already deployed we return existing uuid
1821 # we check for presence of VDC, Catalog entry and Flavor.
1822 org, vdc = self.get_vdc_details()
1823 if vdc is None:
1824 raise vimconn.VimConnNotFoundException(
1825 "new_vminstance(): Failed create vApp {}: (Failed retrieve VDC information)".format(
1826 name
1827 )
1828 )
1829
1830 catalogs = org.list_catalogs()
1831 if catalogs is None:
1832 # Retry once, if failed by refreshing token
1833 self.get_token()
1834 org = Org(self.client, resource=self.client.get_org())
1835 catalogs = org.list_catalogs()
1836
1837 if catalogs is None:
1838 raise vimconn.VimConnNotFoundException(
1839 "new_vminstance(): Failed to create vApp {}: (failed to retrieve catalogs list)".format(
1840 name
1841 )
1842 )
1843
1844 catalog_hash_name = self.get_catalogbyid(
1845 catalog_uuid=image_id, catalogs=catalogs
1846 )
1847 if catalog_hash_name:
1848 self.logger.info(
1849 "Found catalog entry {} for image id {}".format(
1850 catalog_hash_name, image_id
1851 )
1852 )
1853 else:
1854 raise vimconn.VimConnNotFoundException(
1855 "new_vminstance(): Failed to create vApp {}: "
1856 "(failed to retrieve catalog information {})".format(name, image_id)
1857 )
1858
1859 # Set vCPU and Memory based on flavor.
1860 vm_cpus = None
1861 vm_memory = None
1862 vm_disk = None
1863 numas = None
1864
1865 if flavor_id is not None:
1866 if flavor_id not in vimconnector.flavorlist:
1867 raise vimconn.VimConnNotFoundException(
1868 "new_vminstance(): Failed to create vApp {}: "
1869 "failed to retrieve flavor information "
1870 "for flavor id {}".format(name, flavor_id)
1871 )
1872 else:
1873 try:
1874 flavor = vimconnector.flavorlist[flavor_id]
1875 vm_cpus = flavor[FLAVOR_VCPUS_KEY]
1876 vm_memory = flavor[FLAVOR_RAM_KEY]
1877 vm_disk = flavor[FLAVOR_DISK_KEY]
1878 extended = flavor.get("extended", None)
1879
1880 if extended:
1881 numas = extended.get("numas", None)
1882 except Exception as exp:
1883 raise vimconn.VimConnException(
1884 "Corrupted flavor {}. Exception: {}".format(flavor_id, exp)
1885 )
1886
1887 # image upload creates the template name as '<catalog name> Template'.
1888 templateName = self.get_catalogbyid(catalog_uuid=image_id, catalogs=catalogs)
1889 # power_on = 'false'
1890 # if start:
1891 # power_on = 'true'
1892
1893 # client must provide at least one entry in net_list; if not, we report an error
1894 # If net type is mgmt, then configure it as primary net & use its NIC index as primary NIC
1895 # If there is no mgmt net, the first net in net_list is considered the primary net.
1896 primary_net = None
1897 primary_netname = None
1898 primary_net_href = None
1899 # network_mode = 'bridged'
1900 if net_list is not None and len(net_list) > 0:
1901 for net in net_list:
1902 if "use" in net and net["use"] == "mgmt" and not primary_net:
1903 primary_net = net
1904
1905 if primary_net is None:
1906 primary_net = net_list[0]
1907
1908 try:
1909 primary_net_id = primary_net["net_id"]
1910 url_list = [self.url, "/api/network/", primary_net_id]
1911 primary_net_href = "".join(url_list)
1912 network_dict = self.get_vcd_network(network_uuid=primary_net_id)
1913
1914 if "name" in network_dict:
1915 primary_netname = network_dict["name"]
1916 except KeyError:
1917 raise vimconn.VimConnException(
1918 "Corrupted net_list entry (missing 'net_id'): {}".format(primary_net)
1919 )
1920 else:
1921 raise vimconn.VimConnUnexpectedResponse(
1922 "new_vminstance(): Failed network list is empty."
1923 )
1924
1925 # use: 'data', 'bridge', 'mgmt'
1926 # create vApp. Set vcpu and ram based on flavor id.
1927 try:
1928 vdc_obj = VDC(self.client, resource=org.get_vdc(self.tenant_name))
1929 if not vdc_obj:
1930 raise vimconn.VimConnNotFoundException(
1931 "new_vminstance(): Failed to get VDC object"
1932 )
1933
1934 for retry in (1, 2):
1935 items = org.get_catalog_item(catalog_hash_name, catalog_hash_name)
1936 catalog_items = [items.attrib]
1937
1938 if len(catalog_items) == 1:
1939 if self.client:
1940 headers = {
1941 "Accept": "application/*+xml;version=" + API_VERSION,
1942 "x-vcloud-authorization": self.client._session.headers[
1943 "x-vcloud-authorization"
1944 ],
1945 }
1946
1947 response = self.perform_request(
1948 req_type="GET",
1949 url=catalog_items[0].get("href"),
1950 headers=headers,
1951 )
1952 catalogItem = XmlElementTree.fromstring(response.text)
1953 entity = [
1954 child
1955 for child in catalogItem
1956 if child.get("type")
1957 == "application/vnd.vmware.vcloud.vAppTemplate+xml"
1958 ][0]
1959 vapp_template_href = entity.get("href")
1960
1961 response = self.perform_request(
1962 req_type="GET", url=vapp_template_href, headers=headers
1963 )
1964
1965 if response.status_code != requests.codes.ok:
1966 self.logger.debug(
1967 "REST API call {} failed. Return status code {}".format(
1968 vapp_template_href, response.status_code
1969 )
1970 )
1971 else:
1972 result = (response.text).replace("\n", " ")
1973
1974 vapp_template_tree = XmlElementTree.fromstring(response.text)
1975 children_element = [
1976 child for child in vapp_template_tree if "Children" in child.tag
1977 ][0]
1978 vm_element = [child for child in children_element if "Vm" in child.tag][
1979 0
1980 ]
1981 vm_name = vm_element.get("name")
1982 vm_id = vm_element.get("id")
1983 vm_href = vm_element.get("href")
1984
1985 # cpus = re.search('<rasd:Description>Number of Virtual CPUs</.*?>(\d+)</rasd:VirtualQuantity>',
1986 # result).group(1)
1987 memory_mb = re.search(
1988 "<rasd:Description>Memory Size</.*?>(\d+)</rasd:VirtualQuantity>",
1989 result,
1990 ).group(1)
1991 # cores = re.search('<vmw:CoresPerSocket ovf:required.*?>(\d+)</vmw:CoresPerSocket>', result).group(1)
1992
1993 headers[
1994 "Content-Type"
1995 ] = "application/vnd.vmware.vcloud.instantiateVAppTemplateParams+xml"
1996 vdc_id = vdc.get("id").split(":")[-1]
1997 instantiate_vapp_href = (
1998 "{}/api/vdc/{}/action/instantiateVAppTemplate".format(
1999 self.url, vdc_id
2000 )
2001 )
2002
2003 with open(
2004 os.path.join(
2005 os.path.dirname(__file__), "InstantiateVAppTemplateParams.xml"
2006 ),
2007 "r",
2008 ) as f:
2009 template = f.read()
2010
2011 data = template.format(
2012 vmname_andid,
2013 primary_netname,
2014 primary_net_href,
2015 vapp_template_href,
2016 vm_href,
2017 vm_id,
2018 vm_name,
2019 primary_netname,
2020 cpu=vm_cpus,
2021 core=1,
2022 memory=vm_memory,
2023 )
2024
2025 response = self.perform_request(
2026 req_type="POST",
2027 url=instantiate_vapp_href,
2028 headers=headers,
2029 data=data,
2030 )
2031
2032 if response.status_code != 201:
2033 self.logger.error(
2034 "REST call {} failed, reason: {} "
2035 "status code: {}".format(
2036 instantiate_vapp_href, response.text, response.status_code
2037 )
2038 )
2039 raise vimconn.VimConnException(
2040 "new_vminstance(): Failed to create "
2041 "vApp {}".format(vmname_andid)
2042 )
2043 else:
2044 vapptask = self.get_task_from_response(response.text)
2045
2046 if vapptask is None and retry == 1:
2047 self.get_token() # Retry getting token
2048 continue
2049 else:
2050 break
2051
2052 if vapptask is None or vapptask is False:
2053 raise vimconn.VimConnUnexpectedResponse(
2054 "new_vminstance(): failed to create vApp {}".format(vmname_andid)
2055 )
2056
2057 # wait for task to complete
2058 result = self.client.get_task_monitor().wait_for_success(task=vapptask)
2059
2060 if result.get("status") == "success":
2061 self.logger.debug(
2062 "new_vminstance(): Successfully created vApp {}".format(vmname_andid)
2063 )
2064 else:
2065 raise vimconn.VimConnUnexpectedResponse(
2066 "new_vminstance(): failed to create vApp {}".format(vmname_andid)
2067 )
2068 except Exception as exp:
2069 raise vimconn.VimConnUnexpectedResponse(
2070 "new_vminstance(): failed to create vApp {} with Exception:{}".format(
2071 vmname_andid, exp
2072 )
2073 )
2074
2075 # we should now have the vApp in an undeployed state.
2076 try:
2077 vdc_obj = VDC(self.client, href=vdc.get("href"))
2078 vapp_resource = vdc_obj.get_vapp(vmname_andid)
2079 vapp_uuid = vapp_resource.get("id").split(":")[-1]
2080 vapp = VApp(self.client, resource=vapp_resource)
2081 except Exception as exp:
2082 raise vimconn.VimConnUnexpectedResponse(
2083 "new_vminstance(): Failed to retrieve vApp {} after creation: Exception:{}".format(
2084 vmname_andid, exp
2085 )
2086 )
2087
2088 if vapp_uuid is None:
2089 raise vimconn.VimConnUnexpectedResponse(
2090 "new_vminstance(): Failed to retrieve vApp {} after creation".format(
2091 vmname_andid
2092 )
2093 )
2094
2095 # Add PCI passthrough/SR-IOV configurations
2096 pci_devices_info = []
2097 reserve_memory = False
2098
2099 for net in net_list:
2100 if net["type"] == "PF" or net["type"] == "PCI-PASSTHROUGH":
2101 pci_devices_info.append(net)
2102 elif (
2103 net["type"] == "VF"
2104 or net["type"] == "SR-IOV"
2105 or net["type"] == "VFnotShared"
2106 ) and "net_id" in net:
2107 reserve_memory = True
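# Note (assumption based on vSphere behaviour, not verified here): VMs using PCI
# passthrough or SR-IOV devices need their memory fully reserved, so the flag set
# above is applied later through reserve_memory_for_all_vms() once the NICs exist.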
2108
2109 # Add PCI
2110 if len(pci_devices_info) > 0:
2111 self.logger.info(
2112 "Need to add PCI devices {} into VM {}".format(
2113 pci_devices_info, vmname_andid
2114 )
2115 )
2116 PCI_devices_status, _, _ = self.add_pci_devices(
2117 vapp_uuid, pci_devices_info, vmname_andid
2118 )
2119
2120 if PCI_devices_status:
2121 self.logger.info(
2122 "Added PCI devices {} to VM {}".format(
2123 pci_devices_info, vmname_andid
2124 )
2125 )
2126 reserve_memory = True
2127 else:
2128 self.logger.info(
2129 "Failed to add PCI devices {} to VM {}".format(
2130 pci_devices_info, vmname_andid
2131 )
2132 )
2133
2134 # Add serial console - this allows cloud images to boot as if we are running under OpenStack
2135 self.add_serial_device(vapp_uuid)
2136
2137 if vm_disk:
2138 # Assuming there is only one disk in ovf and fast provisioning in organization vDC is disabled
2139 result = self.modify_vm_disk(vapp_uuid, vm_disk)
2140 if result:
2141 self.logger.debug("Modified Disk size of VM {} ".format(vmname_andid))
2142
2143 # Add new or existing disks to vApp
2144 if disk_list:
2145 added_existing_disk = False
2146 for disk in disk_list:
2147 if "device_type" in disk and disk["device_type"] == "cdrom":
2148 image_id = disk["image_id"]
2149 # Adding CD-ROM to VM
2150 # will revisit code once specification ready to support this feature
2151 self.insert_media_to_vm(vapp, image_id)
2152 elif "image_id" in disk and disk["image_id"] is not None:
2153 self.logger.debug(
2154 "Adding existing disk from image {} to vm {} ".format(
2155 disk["image_id"], vapp_uuid
2156 )
2157 )
2158 self.add_existing_disk(
2159 catalogs=catalogs,
2160 image_id=disk["image_id"],
2161 size=disk["size"],
2162 template_name=templateName,
2163 vapp_uuid=vapp_uuid,
2164 )
2165 added_existing_disk = True
2166 else:
2167 # Wait till added existing disk gets reflected into vCD database/API
2168 if added_existing_disk:
2169 time.sleep(5)
2170 added_existing_disk = False
2171 self.add_new_disk(vapp_uuid, disk["size"])
2172
2173 if numas:
2174 # Assigning numa affinity setting
2175 for numa in numas:
2176 if "paired-threads-id" in numa:
2177 paired_threads_id = numa["paired-threads-id"]
2178 self.set_numa_affinity(vapp_uuid, paired_threads_id)
2179
2180 # add NICs & connect to networks in netlist
2181 try:
2182 vdc_obj = VDC(self.client, href=vdc.get("href"))
2183 vapp_resource = vdc_obj.get_vapp(vmname_andid)
2184 vapp = VApp(self.client, resource=vapp_resource)
2185 vapp_id = vapp_resource.get("id").split(":")[-1]
2186
2187 self.logger.info("Removing primary NIC: ")
2188 # First remove all NICs so that NIC properties can be adjusted as needed
2189 self.remove_primary_network_adapter_from_all_vms(vapp)
2190
2191 self.logger.info("Request to connect VM to a network: {}".format(net_list))
2192 primary_nic_index = 0
2193 nicIndex = 0
2194 for net in net_list:
2195 # openmano uses network id in UUID format.
2196 # vCloud Director needs a name, so we do the reverse operation: from the provided UUID we look up the name
2197 # [{'use': 'bridge', 'net_id': '527d4bf7-566a-41e7-a9e7-ca3cdd9cef4f', 'type': 'virtual',
2198 # 'vpci': '0000:00:11.0', 'name': 'eth0'}]
2199
2200 if "net_id" not in net:
2201 continue
2202
2203 # Using net_id as the vim_id, i.e. the vim interface id, as we do not have a separate vim interface id
2204 # Same will be returned in refresh_vms_status() as vim_interface_id
2205 net["vim_id"] = net[
2206 "net_id"
2207 ] # Provide the same VIM identifier as the VIM network
2208
2209 interface_net_id = net["net_id"]
2210 interface_net_name = self.get_network_name_by_id(
2211 network_uuid=interface_net_id
2212 )
2213 interface_network_mode = net["use"]
2214
2215 if interface_network_mode == "mgmt":
2216 primary_nic_index = nicIndex
2217
2218 """- POOL (A static IP address is allocated automatically from a pool of addresses.)
2219 - DHCP (The IP address is obtained from a DHCP service.)
2220 - MANUAL (The IP address is assigned manually in the IpAddress element.)
2221 - NONE (No IP addressing mode specified.)"""
2222
2223 if primary_netname is not None:
2224 self.logger.debug(
2225 "new_vminstance(): Filtering by net name {}".format(
2226 interface_net_name
2227 )
2228 )
2229 nets = [
2230 n
2231 for n in self.get_network_list()
2232 if n.get("name") == interface_net_name
2233 ]
2234
2235 if len(nets) == 1:
2236 self.logger.info(
2237 "new_vminstance(): Found requested network: {}".format(
2238 nets[0].get("name")
2239 )
2240 )
2241
2242 if interface_net_name != primary_netname:
2243 # connect network to VM - with all DHCP by default
2244 self.logger.info(
2245 "new_vminstance(): Attaching net {} to vapp".format(
2246 interface_net_name
2247 )
2248 )
2249 self.connect_vapp_to_org_vdc_network(
2250 vapp_id, nets[0].get("name")
2251 )
2252
2253 type_list = ("PF", "PCI-PASSTHROUGH", "VFnotShared")
2254 nic_type = "VMXNET3"
2255 if "type" in net and net["type"] not in type_list:
2256 # fetching nic type from vnf
2257 if "model" in net:
2258 if net["model"] is not None:
2259 if (
2260 net["model"].lower() == "paravirt"
2261 or net["model"].lower() == "virtio"
2262 ):
2263 nic_type = "VMXNET3"
2264 else:
2265 nic_type = net["model"]
2266
2267 self.logger.info(
2268 "new_vminstance(): adding network adapter "
2269 "to a network {}".format(nets[0].get("name"))
2270 )
2271 self.add_network_adapter_to_vms(
2272 vapp,
2273 nets[0].get("name"),
2274 primary_nic_index,
2275 nicIndex,
2276 net,
2277 nic_type=nic_type,
2278 )
2279 else:
2280 self.logger.info(
2281 "new_vminstance(): adding network adapter "
2282 "to a network {}".format(nets[0].get("name"))
2283 )
2284
2285 if net["type"] in ["SR-IOV", "VF"]:
2286 nic_type = net["type"]
2287 self.add_network_adapter_to_vms(
2288 vapp,
2289 nets[0].get("name"),
2290 primary_nic_index,
2291 nicIndex,
2292 net,
2293 nic_type=nic_type,
2294 )
2295 nicIndex += 1
2296
2297 # cloud-init for ssh-key injection
2298 if cloud_config:
2299 # Create a catalog which will be carrying the config drive ISO
2300 # This catalog is deleted during vApp deletion. The catalog name carries
2301 # the vApp UUID and that's how it gets identified during its deletion.
2302 config_drive_catalog_name = "cfg_drv-" + vapp_uuid
2303 self.logger.info(
2304 'new_vminstance(): Creating catalog "{}" to carry config drive ISO'.format(
2305 config_drive_catalog_name
2306 )
2307 )
2308 config_drive_catalog_id = self.create_vimcatalog(
2309 org, config_drive_catalog_name
2310 )
2311
2312 if config_drive_catalog_id is None:
2313 error_msg = (
2314 "new_vminstance(): Failed to create new catalog '{}' to carry the config drive "
2315 "ISO".format(config_drive_catalog_name)
2316 )
2317 raise Exception(error_msg)
2318
2319 # Create config-drive ISO
2320 _, userdata = self._create_user_data(cloud_config)
2321 # self.logger.debug('new_vminstance(): The userdata for cloud-init: {}'.format(userdata))
2322 iso_path = self.create_config_drive_iso(userdata)
2323 self.logger.debug(
2324 "new_vminstance(): The ISO is successfully created. Path: {}".format(
2325 iso_path
2326 )
2327 )
2328
2329 self.logger.info(
2330 "new_vminstance(): uploading iso to catalog {}".format(
2331 config_drive_catalog_name
2332 )
2333 )
2334 self.upload_iso_to_catalog(config_drive_catalog_id, iso_path)
2335 # Attach the config-drive ISO to the VM
2336 self.logger.info(
2337 "new_vminstance(): Attaching the config-drive ISO to the VM"
2338 )
2339 self.insert_media_to_vm(vapp, config_drive_catalog_id)
2340 shutil.rmtree(os.path.dirname(iso_path), ignore_errors=True)
2341
2342 # If the VM has PCI or SR-IOV devices, reserve memory for the VM
2343 if reserve_memory:
2344 self.reserve_memory_for_all_vms(vapp, memory_mb)
2345
2346 self.logger.debug(
2347 "new_vminstance(): starting power on vApp {} ".format(vmname_andid)
2348 )
2349
2350 poweron_task = self.power_on_vapp(vapp_id, vmname_andid)
2351 result = self.client.get_task_monitor().wait_for_success(task=poweron_task)
2352 if result.get("status") == "success":
2353 self.logger.info(
2354 "new_vminstance(): Successfully powered on "
2355 "vApp {}".format(vmname_andid)
2356 )
2357 else:
2358 self.logger.error(
2359 "new_vminstance(): failed to power on vApp "
2360 "{}".format(vmname_andid)
2361 )
2362
2363 except Exception as exp:
2364 try:
2365 self.delete_vminstance(vapp_uuid)
2366 except Exception as exp2:
2367 self.logger.error("new_vminstance rollback fail {}".format(exp2))
2368 # it might be a case if specific mandatory entry in dict is empty or some other pyVcloud exception
2369 self.logger.error(
2370 "new_vminstance(): Failed to create new vm instance {} with exception {}".format(
2371 name, exp
2372 )
2373 )
2374 raise vimconn.VimConnException(
2375 "new_vminstance(): Failed to create new vm instance {} with exception {}".format(
2376 name, exp
2377 )
2378 )
2379 # check if the vApp is deployed; if that is the case return the vApp UUID, otherwise -1
2380 wait_time = 0
2381 vapp_uuid = None
2382 while wait_time <= MAX_WAIT_TIME:
2383 try:
2384 vapp_resource = vdc_obj.get_vapp(vmname_andid)
2385 vapp = VApp(self.client, resource=vapp_resource)
2386 except Exception as exp:
2387 raise vimconn.VimConnUnexpectedResponse(
2388 "new_vminstance(): Failed to retrieve vApp {} after creation: Exception:{}".format(
2389 vmname_andid, exp
2390 )
2391 )
2392
2393 # if vapp and vapp.me.deployed:
2394 if vapp and vapp_resource.get("deployed") == "true":
2395 vapp_uuid = vapp_resource.get("id").split(":")[-1]
2396 break
2397 else:
2398 self.logger.debug(
2399 "new_vminstance(): Wait for vApp {} to deploy".format(name)
2400 )
2401 time.sleep(INTERVAL_TIME)
2402
2403 wait_time += INTERVAL_TIME
2404
2405 # SET Affinity Rule for VM
2406 # Pre-requisites: User has created Host Groups in vCenter with the respective Hosts to be used
2407 # While creating VIM account user has to pass the Host Group names in availability_zone list
2408 # "availability_zone" is a part of VIM "config" parameters
2409 # For example, in VIM config: "availability_zone":["HG_170","HG_174","HG_175"]
2410 # Host groups are referred as availability zones
2411 # With following procedure, deployed VM will be added into a VM group.
2412 # Then a VM-to-Host affinity rule will be created using the VM group & Host group.
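# Naming used by the steps below (the host group value is only an illustrative example):
#   VM group name : "<host group>_<vm name>"      e.g. "HG_170_myvm"
#   Rule name     : "Affinity_<VM group name>"    (built in create_vm_to_host_affinity_rule)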
2413 if availability_zone_list:
2414 self.logger.debug(
2415 "Existing Host Groups in VIM {}".format(
2416 self.config.get("availability_zone")
2417 )
2418 )
2419 # Admin access required for creating Affinity rules
2420 client = self.connect_as_admin()
2421
2422 if not client:
2423 raise vimconn.VimConnConnectionException(
2424 "Failed to connect vCD as admin"
2425 )
2426 else:
2427 self.client = client
2428
2429 if self.client:
2430 headers = {
2431 "Accept": "application/*+xml;version=27.0",
2432 "x-vcloud-authorization": self.client._session.headers[
2433 "x-vcloud-authorization"
2434 ],
2435 }
2436
2437 # Step1: Get provider vdc details from organization
2438 pvdc_href = self.get_pvdc_for_org(self.tenant_name, headers)
2439 if pvdc_href is not None:
2440 # Step2: Found required pvdc, now get resource pool information
2441 respool_href = self.get_resource_pool_details(pvdc_href, headers)
2442 if respool_href is None:
2443 # Raise error if respool_href not found
2444 msg = "new_vminstance(): Error in finding resource pool details in pvdc {}".format(
2445 pvdc_href
2446 )
2447 self.log_message(msg)
2448
2449 # Step3: Verify requested availability zone(hostGroup) is present in vCD
2450 # get availability Zone
2451 vm_az = self.get_vm_availability_zone(
2452 availability_zone_index, availability_zone_list
2453 )
2454
2455 # check if provided av zone(hostGroup) is present in vCD VIM
2456 status = self.check_availibility_zone(vm_az, respool_href, headers)
2457 if status is False:
2458 msg = (
2459 "new_vminstance(): Error in finding availability zone(Host Group): {} in "
2460 "resource pool {} status: {}"
2461 ).format(vm_az, respool_href, status)
2462 self.log_message(msg)
2463 else:
2464 self.logger.debug(
2465 "new_vminstance(): Availability zone {} found in VIM".format(vm_az)
2466 )
2467
2468 # Step4: Find VM group references to create vm group
2469 vmgrp_href = self.find_vmgroup_reference(respool_href, headers)
2470 if vmgrp_href is None:
2471 msg = "new_vminstance(): No reference to VmGroup found in resource pool"
2472 self.log_message(msg)
2473
2474 # Step5: Create a VmGroup named '<host group>_<vm name>'
2475 vmgrp_name = (
2476 vm_az + "_" + name
2477 ) # Formed VM Group name = Host Group name + VM name
2478 status = self.create_vmgroup(vmgrp_name, vmgrp_href, headers)
2479 if status is not True:
2480 msg = "new_vminstance(): Error in creating VM group {}".format(
2481 vmgrp_name
2482 )
2483 self.log_message(msg)
2484
2485 # VM Group url to add vms to vm group
2486 vmgrpname_url = self.url + "/api/admin/extension/vmGroup/name/" + vmgrp_name
2487
2488 # Step6: Add VM to VM Group
2489 # Find VM uuid from vapp_uuid
2490 vm_details = self.get_vapp_details_rest(vapp_uuid)
2491 vm_uuid = vm_details["vmuuid"]
2492
2493 status = self.add_vm_to_vmgroup(vm_uuid, vmgrpname_url, vmgrp_name, headers)
2494 if status is not True:
2495 msg = "new_vminstance(): Error in adding VM to VM group {}".format(
2496 vmgrp_name
2497 )
2498 self.log_message(msg)
2499
2500 # Step7: Create VM to Host affinity rule
2501 addrule_href = self.get_add_rule_reference(respool_href, headers)
2502 if addrule_href is None:
2503 msg = "new_vminstance(): Error in finding href to add rule in resource pool: {}".format(
2504 respool_href
2505 )
2506 self.log_message(msg)
2507
2508 status = self.create_vm_to_host_affinity_rule(
2509 addrule_href, vmgrp_name, vm_az, "Affinity", headers
2510 )
2511 if status is False:
2512 msg = "new_vminstance(): Error in creating affinity rule for VM {} in Host group {}".format(
2513 name, vm_az
2514 )
2515 self.log_message(msg)
2516 else:
2517 self.logger.debug(
2518 "new_vminstance(): Affinity rule created successfully. Added {} in Host group {}".format(
2519 name, vm_az
2520 )
2521 )
2522 # Reset token to a normal user to perform other operations
2523 self.get_token()
2524
2525 if vapp_uuid is not None:
2526 return vapp_uuid, None
2527 else:
2528 raise vimconn.VimConnUnexpectedResponse(
2529 "new_vminstance(): Failed to create new vm instance {}".format(name)
2530 )
2531
2532 def create_config_drive_iso(self, user_data):
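"""Build an OpenStack-style config-drive ISO for cloud-init.

The ISO (volume label "config-2") contains /openstack/latest/meta_data.json and
/openstack/latest/user_data and is generated with the external genisoimage tool
inside a temporary directory. Returns the path to the generated ISO; the caller
is expected to remove the containing directory (new_vminstance does so after
uploading the ISO to a catalog).
"""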
2533 tmpdir = tempfile.mkdtemp()
2534 iso_path = os.path.join(tmpdir, "ConfigDrive.iso")
2535 latest_dir = os.path.join(tmpdir, "openstack", "latest")
2536 os.makedirs(latest_dir)
2537 with open(
2538 os.path.join(latest_dir, "meta_data.json"), "w"
2539 ) as meta_file_obj, open(
2540 os.path.join(latest_dir, "user_data"), "w"
2541 ) as userdata_file_obj:
2542 userdata_file_obj.write(user_data)
2543 meta_file_obj.write(
2544 json.dumps(
2545 {
2546 "availability_zone": "nova",
2547 "launch_index": 0,
2548 "name": "ConfigDrive",
2549 "uuid": str(uuid.uuid4()),
2550 }
2551 )
2552 )
2553 genisoimage_cmd = (
2554 "genisoimage -J -r -V config-2 -o {iso_path} {source_dir_path}".format(
2555 iso_path=iso_path, source_dir_path=tmpdir
2556 )
2557 )
2558 self.logger.info(
2559 'create_config_drive_iso(): Creating ISO by running command "{}"'.format(
2560 genisoimage_cmd
2561 )
2562 )
2563
2564 try:
2565 FNULL = open(os.devnull, "w")
2566 subprocess.check_call(genisoimage_cmd, shell=True, stdout=FNULL)
2567 except subprocess.CalledProcessError as e:
2568 shutil.rmtree(tmpdir, ignore_errors=True)
2569 error_msg = "create_config_drive_iso(): Exception while running genisoimage command: {}".format(
2570 e
2571 )
2572 self.logger.error(error_msg)
2573 raise Exception(error_msg)
2574
2575 return iso_path
2576
2577 def upload_iso_to_catalog(self, catalog_id, iso_file_path):
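"""Upload a config-drive ISO file to a vCD catalog.

Flow, as implemented below: POST a Media descriptor to the catalog
action/upload link, GET the resulting media entity to find the file upload URL,
PUT the raw ISO bytes to that URL and wait for the upload task to complete.
Raises an Exception if any step fails.
"""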
2578 if not os.path.isfile(iso_file_path):
2579 error_msg = "upload_iso_to_catalog(): Given iso file is not present. Given path: {}".format(
2580 iso_file_path
2581 )
2582 self.logger.error(error_msg)
2583 raise Exception(error_msg)
2584
2585 iso_file_stat = os.stat(iso_file_path)
2586 xml_media_elem = """<?xml version="1.0" encoding="UTF-8"?>
2587 <Media
2588 xmlns="http://www.vmware.com/vcloud/v1.5"
2589 name="{iso_name}"
2590 size="{iso_size}"
2591 imageType="iso">
2592 <Description>ISO image for config-drive</Description>
2593 </Media>""".format(
2594 iso_name=os.path.basename(iso_file_path), iso_size=iso_file_stat.st_size
2595 )
2596 headers = {
2597 "Accept": "application/*+xml;version=" + API_VERSION,
2598 "x-vcloud-authorization": self.client._session.headers[
2599 "x-vcloud-authorization"
2600 ],
2601 }
2602 headers["Content-Type"] = "application/vnd.vmware.vcloud.media+xml"
2603 catalog_href = self.url + "/api/catalog/" + catalog_id + "/action/upload"
2604 response = self.perform_request(
2605 req_type="POST", url=catalog_href, headers=headers, data=xml_media_elem
2606 )
2607
2608 if response.status_code != 201:
2609 error_msg = "upload_iso_to_catalog(): Failed to POST an action/upload request to {}".format(
2610 catalog_href
2611 )
2612 self.logger.error(error_msg)
2613 raise Exception(error_msg)
2614
2615 catalogItem = XmlElementTree.fromstring(response.text)
2616 entity = [
2617 child
2618 for child in catalogItem
2619 if child.get("type") == "application/vnd.vmware.vcloud.media+xml"
2620 ][0]
2621 entity_href = entity.get("href")
2622
2623 response = self.perform_request(
2624 req_type="GET", url=entity_href, headers=headers
2625 )
2626 if response.status_code != 200:
2627 raise Exception(
2628 "upload_iso_to_catalog(): Failed to GET entity href {}".format(
2629 entity_href
2630 )
2631 )
2632
2633 match = re.search(
2634 r'<Files>\s+?<File.+?href="(.+?)"/>\s+?</File>\s+?</Files>',
2635 response.text,
2636 re.DOTALL,
2637 )
2638 if match:
2639 media_upload_href = match.group(1)
2640 else:
2641 raise Exception(
2642 "Could not parse the upload URL for the media file from the last response"
2643 )
2644 upload_iso_task = self.get_task_from_response(response.text)
2645 headers["Content-Type"] = "application/octet-stream"
2646 response = self.perform_request(
2647 req_type="PUT",
2648 url=media_upload_href,
2649 headers=headers,
2650 data=open(iso_file_path, "rb"),
2651 )
2652
2653 if response.status_code != 200:
2654 raise Exception('PUT request to "{}" failed'.format(media_upload_href))
2655
2656 result = self.client.get_task_monitor().wait_for_success(task=upload_iso_task)
2657 if result.get("status") != "success":
2658 raise Exception(
2659 "The upload iso task failed with status {}".format(result.get("status"))
2660 )
2661
2662 def set_availability_zones(self):
2663 """
2664 Set vim availability zone
2665 """
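# Example VIM config handled here (host group names are illustrative):
#   "availability_zone": ["HG_170", "HG_174", "HG_175"]   # list of host group names
#   "availability_zone": "HG_170"                          # a single string is also accepted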
2666 vim_availability_zones = None
2667 availability_zone = None
2668
2669 if "availability_zone" in self.config:
2670 vim_availability_zones = self.config.get("availability_zone")
2671
2672 if isinstance(vim_availability_zones, str):
2673 availability_zone = [vim_availability_zones]
2674 elif isinstance(vim_availability_zones, list):
2675 availability_zone = vim_availability_zones
2676 else:
2677 return availability_zone
2678
2679 return availability_zone
2680
2681 def get_vm_availability_zone(self, availability_zone_index, availability_zone_list):
2682 """
2683 Return the availability zone to be used by the created VM.
2684 returns: The VIM availability zone to be used or None
2685 """
2686 if availability_zone_index is None:
2687 if not self.config.get("availability_zone"):
2688 return None
2689 elif isinstance(self.config.get("availability_zone"), str):
2690 return self.config["availability_zone"]
2691 else:
2692 return self.config["availability_zone"][0]
2693
2694 vim_availability_zones = self.availability_zone
2695
2696 # check if the VIM offers enough availability zones as described in the VNFD
2697 if vim_availability_zones and len(availability_zone_list) <= len(
2698 vim_availability_zones
2699 ):
2700 # check if all the NFV availability zone names match the VIM availability zone names
2701 match_by_index = False
2702 for av in availability_zone_list:
2703 if av not in vim_availability_zones:
2704 match_by_index = True
2705 break
2706
2707 if match_by_index:
2708 self.logger.debug(
2709 "Required Availability zone or Host Group not found in VIM config"
2710 )
2711 self.logger.debug(
2712 "Input Availability zone list: {}".format(availability_zone_list)
2713 )
2714 self.logger.debug(
2715 "VIM configured Availability zones: {}".format(
2716 vim_availability_zones
2717 )
2718 )
2719 self.logger.debug("VIM Availability zones will be used by index")
2720 return vim_availability_zones[availability_zone_index]
2721 else:
2722 return availability_zone_list[availability_zone_index]
2723 else:
2724 raise vimconn.VimConnConflictException(
2725 "Not enough availability zones at VIM for this deployment"
2726 )
2727
2728 def create_vm_to_host_affinity_rule(
2729 self, addrule_href, vmgrpname, hostgrpname, polarity, headers
2730 ):
2731 """Method to create VM to Host Affinity rule in vCD
2732
2733 Args:
2734 addrule_href - href to make a POST request
2735 vmgrpname - name of the VM group created
2736 hostgrpname - name of the host group created earlier
2737 polarity - Affinity or Anti-affinity (default: Affinity)
2738 headers - headers to make REST call
2739
2740 Returns:
2741 True- if rule is created
2742 False- Failed to create rule due to some error
2743
2744 """
2745 task_status = False
2746 rule_name = polarity + "_" + vmgrpname
2747 payload = """<?xml version="1.0" encoding="UTF-8"?>
2748 <vmext:VMWVmHostAffinityRule
2749 xmlns:vmext="http://www.vmware.com/vcloud/extension/v1.5"
2750 xmlns:vcloud="http://www.vmware.com/vcloud/v1.5"
2751 type="application/vnd.vmware.admin.vmwVmHostAffinityRule+xml">
2752 <vcloud:Name>{}</vcloud:Name>
2753 <vcloud:IsEnabled>true</vcloud:IsEnabled>
2754 <vcloud:IsMandatory>true</vcloud:IsMandatory>
2755 <vcloud:Polarity>{}</vcloud:Polarity>
2756 <vmext:HostGroupName>{}</vmext:HostGroupName>
2757 <vmext:VmGroupName>{}</vmext:VmGroupName>
2758 </vmext:VMWVmHostAffinityRule>""".format(
2759 rule_name, polarity, hostgrpname, vmgrpname
2760 )
2761
2762 resp = self.perform_request(
2763 req_type="POST", url=addrule_href, headers=headers, data=payload
2764 )
2765
2766 if resp.status_code != requests.codes.accepted:
2767 self.logger.debug(
2768 "REST API call {} failed. Return status code {}".format(
2769 addrule_href, resp.status_code
2770 )
2771 )
2772 task_status = False
2773
2774 return task_status
2775 else:
2776 affinity_task = self.get_task_from_response(resp.content)
2777 self.logger.debug("affinity_task: {}".format(affinity_task))
2778
2779 if affinity_task is None or affinity_task is False:
2780 raise vimconn.VimConnUnexpectedResponse("failed to find affinity task")
2781 # wait for task to complete
2782 result = self.client.get_task_monitor().wait_for_success(task=affinity_task)
2783
2784 if result.get("status") == "success":
2785 self.logger.debug(
2786 "Successfully created affinity rule {}".format(rule_name)
2787 )
2788 return True
2789 else:
2790 raise vimconn.VimConnUnexpectedResponse(
2791 "failed to create affinity rule {}".format(rule_name)
2792 )
2793
2794 def get_add_rule_reference(self, respool_href, headers):
2795 """This method finds the href used to add a VM-to-Host affinity rule in vCD
2796
2797 Args:
2798 respool_href- href to resource pool
2799 headers- header information to make REST call
2800
2801 Returns:
2802 None - if no valid href to add rule found or
2803 addrule_href - href to add vm to host affinity rule of resource pool
2804 """
2805 addrule_href = None
2806 resp = self.perform_request(req_type="GET", url=respool_href, headers=headers)
2807
2808 if resp.status_code != requests.codes.ok:
2809 self.logger.debug(
2810 "REST API call {} failed. Return status code {}".format(
2811 respool_href, resp.status_code
2812 )
2813 )
2814 else:
2815 resp_xml = XmlElementTree.fromstring(resp.content)
2816 for child in resp_xml:
2817 if "VMWProviderVdcResourcePool" in child.tag:
2818 for schild in child:
2819 if "Link" in schild.tag:
2820 if (
2821 schild.attrib.get("type")
2822 == "application/vnd.vmware.admin.vmwVmHostAffinityRule+xml"
2823 and schild.attrib.get("rel") == "add"
2824 ):
2825 addrule_href = schild.attrib.get("href")
2826 break
2827
2828 return addrule_href
2829
2830 def add_vm_to_vmgroup(self, vm_uuid, vmGroupNameURL, vmGroup_name, headers):
2831 """Method to add deployed VM to newly created VM Group.
2832 This is required to create VM to Host affinity in vCD
2833
2834 Args:
2835 vm_uuid- newly created vm uuid
2836 vmGroupNameURL- URL to VM Group name
2837 vmGroup_name- Name of VM group created
2838 headers- Headers for REST request
2839
2840 Returns:
2841 True- if VM added to VM group successfully
2842 False- if any error is encountered
2843 """
2844 addvm_resp = self.perform_request(
2845 req_type="GET", url=vmGroupNameURL, headers=headers
2846 ) # , data=payload)
2847
2848 if addvm_resp.status_code != requests.codes.ok:
2849 self.logger.debug(
2850 "REST API call to get VM Group Name url {} failed. Return status code {}".format(
2851 vmGroupNameURL, addvm_resp.status_code
2852 )
2853 )
2854 return False
2855 else:
2856 resp_xml = XmlElementTree.fromstring(addvm_resp.content)
2857 for child in resp_xml:
2858 if child.tag.split("}")[1] == "Link":
2859 if child.attrib.get("rel") == "addVms":
2860 addvmtogrpURL = child.attrib.get("href")
2861
2862 # Get vm details
2863 url_list = [self.url, "/api/vApp/vm-", vm_uuid]
2864 vmdetailsURL = "".join(url_list)
2865
2866 resp = self.perform_request(req_type="GET", url=vmdetailsURL, headers=headers)
2867
2868 if resp.status_code != requests.codes.ok:
2869 self.logger.debug(
2870 "REST API call {} failed. Return status code {}".format(
2871 vmdetailsURL, resp.status_code
2872 )
2873 )
2874 return False
2875
2876 # Parse VM details
2877 resp_xml = XmlElementTree.fromstring(resp.content)
2878 if resp_xml.tag.split("}")[1] == "Vm":
2879 vm_id = resp_xml.attrib.get("id")
2880 vm_name = resp_xml.attrib.get("name")
2881 vm_href = resp_xml.attrib.get("href")
2882 # print vm_id, vm_name, vm_href
2883
2884 # Add VM into VMgroup
2885 payload = """<?xml version="1.0" encoding="UTF-8"?>\
2886 <ns2:Vms xmlns:ns2="http://www.vmware.com/vcloud/v1.5" \
2887 xmlns="http://www.vmware.com/vcloud/versions" \
2888 xmlns:ns3="http://schemas.dmtf.org/ovf/envelope/1" \
2889 xmlns:ns4="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData" \
2890 xmlns:ns5="http://schemas.dmtf.org/wbem/wscim/1/common" \
2891 xmlns:ns6="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData" \
2892 xmlns:ns7="http://www.vmware.com/schema/ovf" \
2893 xmlns:ns8="http://schemas.dmtf.org/ovf/environment/1" \
2894 xmlns:ns9="http://www.vmware.com/vcloud/extension/v1.5">\
2895 <ns2:VmReference href="{}" id="{}" name="{}" \
2896 type="application/vnd.vmware.vcloud.vm+xml" />\
2897 </ns2:Vms>""".format(
2898 vm_href, vm_id, vm_name
2899 )
2900
2901 addvmtogrp_resp = self.perform_request(
2902 req_type="POST", url=addvmtogrpURL, headers=headers, data=payload
2903 )
2904
2905 if addvmtogrp_resp.status_code != requests.codes.accepted:
2906 self.logger.debug(
2907 "REST API call {} failed. Return status code {}".format(
2908 addvmtogrpURL, addvmtogrp_resp.status_code
2909 )
2910 )
2911
2912 return False
2913 else:
2914 self.logger.debug(
2915 "Done adding VM {} to VMgroup {}".format(vm_name, vmGroup_name)
2916 )
2917
2918 return True
2919
2920 def create_vmgroup(self, vmgroup_name, vmgroup_href, headers):
2921 """Method to create a VM group in vCD
2922
2923 Args:
2924 vmgroup_name : Name of VM group to be created
2925 vmgroup_href : href for vmgroup
2926 headers- Headers for REST request
2927 """
2928 # POST to add URL with required data
2929 vmgroup_status = False
2930 payload = """<VMWVmGroup xmlns="http://www.vmware.com/vcloud/extension/v1.5" \
2931 xmlns:vcloud_v1.5="http://www.vmware.com/vcloud/v1.5" name="{}">\
2932 <vmCount>1</vmCount>\
2933 </VMWVmGroup>""".format(
2934 vmgroup_name
2935 )
2936 resp = self.perform_request(
2937 req_type="POST", url=vmgroup_href, headers=headers, data=payload
2938 )
2939
2940 if resp.status_code != requests.codes.accepted:
2941 self.logger.debug(
2942 "REST API call {} failed. Return status code {}".format(
2943 vmgroup_href, resp.status_code
2944 )
2945 )
2946
2947 return vmgroup_status
2948 else:
2949 vmgroup_task = self.get_task_from_response(resp.content)
2950 if vmgroup_task is None or vmgroup_task is False:
2951 raise vimconn.VimConnUnexpectedResponse(
2952 "create_vmgroup(): failed to create VM group {}".format(
2953 vmgroup_name
2954 )
2955 )
2956
2957 # wait for task to complete
2958 result = self.client.get_task_monitor().wait_for_success(task=vmgroup_task)
2959
2960 if result.get("status") == "success":
2961 self.logger.debug(
2962 "create_vmgroup(): Successfully created VM group {}".format(
2963 vmgroup_name
2964 )
2965 )
2966 # time.sleep(10)
2967 vmgroup_status = True
2968
2969 return vmgroup_status
2970 else:
2971 raise vimconn.VimConnUnexpectedResponse(
2972 "create_vmgroup(): failed to create VM group {}".format(
2973 vmgroup_name
2974 )
2975 )
2976
2977 def find_vmgroup_reference(self, url, headers):
2978 """Method to find the href used to create a new VMGroup (required for adding the created VM)
2979 Args:
2980 url- resource pool href
2981 headers- header information
2982
2983 Returns:
2984 vmgrp_href - href used to create the VM group, or None if not found
2985 """
2986 # Perform GET on resource pool to find 'add' link to create VMGroup
2987 # https://vcd-ip/api/admin/extension/providervdc/<providervdc id>/resourcePools
2988 vmgrp_href = None
2989 resp = self.perform_request(req_type="GET", url=url, headers=headers)
2990
2991 if resp.status_code != requests.codes.ok:
2992 self.logger.debug(
2993 "REST API call {} failed. Return status code {}".format(
2994 url, resp.status_code
2995 )
2996 )
2997 else:
2998 # Get the href to add vmGroup to vCD
2999 resp_xml = XmlElementTree.fromstring(resp.content)
3000 for child in resp_xml:
3001 if "VMWProviderVdcResourcePool" in child.tag:
3002 for schild in child:
3003 if "Link" in schild.tag:
3004 # Find href with type VMGroup and rel with add
3005 if (
3006 schild.attrib.get("type")
3007 == "application/vnd.vmware.admin.vmwVmGroupType+xml"
3008 and schild.attrib.get("rel") == "add"
3009 ):
3010 vmgrp_href = schild.attrib.get("href")
3011
3012 return vmgrp_href
3013
3014 def check_availibility_zone(self, az, respool_href, headers):
3015 """Method to verify whether the requested availability zone is present in the provided
3016 resource pool
3017
3018 Args:
3019 az - name of hostgroup (availability_zone)
3020 respool_href - Resource Pool href
3021 headers - Headers to make REST call
3022 Returns:
3023 az_found - True if availability_zone is found else False
3024 """
3025 az_found = False
3026 headers["Accept"] = "application/*+xml;version=27.0"
3027 resp = self.perform_request(req_type="GET", url=respool_href, headers=headers)
3028
3029 if resp.status_code != requests.codes.ok:
3030 self.logger.debug(
3031 "REST API call {} failed. Return status code {}".format(
3032 respool_href, resp.status_code
3033 )
3034 )
3035 else:
3036 # Get the href to hostGroups and find provided hostGroup is present in it
3037 resp_xml = XmlElementTree.fromstring(resp.content)
3038
3039 for child in resp_xml:
3040 if "VMWProviderVdcResourcePool" in child.tag:
3041 for schild in child:
3042 if "Link" in schild.tag:
3043 if (
3044 schild.attrib.get("type")
3045 == "application/vnd.vmware.admin.vmwHostGroupsType+xml"
3046 ):
3047 hostGroup_href = schild.attrib.get("href")
3048 hg_resp = self.perform_request(
3049 req_type="GET", url=hostGroup_href, headers=headers
3050 )
3051
3052 if hg_resp.status_code != requests.codes.ok:
3053 self.logger.debug(
3054 "REST API call {} failed. Return status code {}".format(
3055 hostGroup_href, hg_resp.status_code
3056 )
3057 )
3058 else:
3059 hg_resp_xml = XmlElementTree.fromstring(
3060 hg_resp.content
3061 )
3062 for hostGroup in hg_resp_xml:
3063 if "HostGroup" in hostGroup.tag:
3064 if hostGroup.attrib.get("name") == az:
3065 az_found = True
3066 break
3067
3068 return az_found
3069
3070 def get_pvdc_for_org(self, org_vdc, headers):
3071 """This method gets provider vdc references from organisation
3072
3073 Args:
3074 org_vdc - name of the organisation VDC to find pvdc
3075 headers - headers to make REST call
3076
3077 Returns:
3078 None - if no pvdc href found else
3079 pvdc_href - href to pvdc
3080 """
3081 # Get provider VDC references from vCD
3082 pvdc_href = None
3083 # url = '<vcd url>/api/admin/extension/providerVdcReferences'
3084 url_list = [self.url, "/api/admin/extension/providerVdcReferences"]
3085 url = "".join(url_list)
3086
3087 response = self.perform_request(req_type="GET", url=url, headers=headers)
3088 if response.status_code != requests.codes.ok:
3089 self.logger.debug(
3090 "REST API call {} failed. Return status code {}".format(
3091 url, response.status_code
3092 )
3093 )
3094 else:
3095 xmlroot_response = XmlElementTree.fromstring(response.text)
3096 for child in xmlroot_response:
3097 if "ProviderVdcReference" in child.tag:
3098 pvdc_href = child.attrib.get("href")
3099 # Get vdcReferences to find org
3100 pvdc_resp = self.perform_request(
3101 req_type="GET", url=pvdc_href, headers=headers
3102 )
3103
3104 if pvdc_resp.status_code != requests.codes.ok:
3105 raise vimconn.VimConnException(
3106 "REST API call {} failed. "
3107 "Return status code {}".format(url, pvdc_resp.status_code)
3108 )
3109
3110 pvdc_resp_xml = XmlElementTree.fromstring(pvdc_resp.content)
3111 for child in pvdc_resp_xml:
3112 if "Link" in child.tag:
3113 if (
3114 child.attrib.get("type")
3115 == "application/vnd.vmware.admin.vdcReferences+xml"
3116 ):
3117 vdc_href = child.attrib.get("href")
3118
3119 # Check if provided org is present in vdc
3120 vdc_resp = self.perform_request(
3121 req_type="GET", url=vdc_href, headers=headers
3122 )
3123
3124 if vdc_resp.status_code != requests.codes.ok:
3125 raise vimconn.VimConnException(
3126 "REST API call {} failed. "
3127 "Return status code {}".format(
3128 url, vdc_resp.status_code
3129 )
3130 )
3131 vdc_resp_xml = XmlElementTree.fromstring(
3132 vdc_resp.content
3133 )
3134
3135 for child in vdc_resp_xml:
3136 if "VdcReference" in child.tag:
3137 if child.attrib.get("name") == org_vdc:
3138 return pvdc_href
3139
3140 def get_resource_pool_details(self, pvdc_href, headers):
3141 """Method to get resource pool information.
3142 Host groups are a property of the resource pool.
3143 To get host groups, we need to GET details of resource pool.
3144
3145 Args:
3146 pvdc_href: href to pvdc details
3147 headers: headers
3148
3149 Returns:
3150 respool_href - Returns href link reference to resource pool
3151 """
3152 respool_href = None
3153 resp = self.perform_request(req_type="GET", url=pvdc_href, headers=headers)
3154
3155 if resp.status_code != requests.codes.ok:
3156 self.logger.debug(
3157 "REST API call {} failed. Return status code {}".format(
3158 pvdc_href, resp.status_code
3159 )
3160 )
3161 else:
3162 respool_resp_xml = XmlElementTree.fromstring(resp.content)
3163 for child in respool_resp_xml:
3164 if "Link" in child.tag:
3165 if (
3166 child.attrib.get("type")
3167 == "application/vnd.vmware.admin.vmwProviderVdcResourcePoolSet+xml"
3168 ):
3169 respool_href = child.attrib.get("href")
3170 break
3171
3172 return respool_href
3173
3174 def log_message(self, msg):
3175 """
3176 Method to log error messages related to Affinity rule creation
3177 in new_vminstance & raise Exception
3178 Args :
3179 msg - Error message to be logged
3180
3181 """
3182 # get token to connect vCD as a normal user
3183 self.get_token()
3184 self.logger.debug(msg)
3185
3186 raise vimconn.VimConnException(msg)
3187
3188 def get_vminstance(self, vim_vm_uuid=None):
3189 """Returns the VM instance information from VIM"""
3190 self.logger.debug("Client requesting vm instance {} ".format(vim_vm_uuid))
3191
3192 _, vdc = self.get_vdc_details()
3193 if vdc is None:
3194 raise vimconn.VimConnConnectionException(
3195 "Failed to get a reference of VDC for a tenant {}".format(
3196 self.tenant_name
3197 )
3198 )
3199
3200 vm_info_dict = self.get_vapp_details_rest(vapp_uuid=vim_vm_uuid)
3201 if not vm_info_dict:
3202 self.logger.debug(
3203 "get_vminstance(): Failed to get vApp name by UUID {}".format(
3204 vim_vm_uuid
3205 )
3206 )
3207 raise vimconn.VimConnNotFoundException(
3208 "Failed to get vApp name by UUID {}".format(vim_vm_uuid)
3209 )
3210
3211 status_key = vm_info_dict["status"]
3212 error = ""
3213 try:
3214 vm_dict = {
3215 "created": vm_info_dict["created"],
3216 "description": vm_info_dict["name"],
3217 "status": vcdStatusCode2manoFormat[int(status_key)],
3218 "hostId": vm_info_dict["vmuuid"],
3219 "error_msg": error,
3220 "vim_info": yaml.safe_dump(vm_info_dict),
3221 "interfaces": [],
3222 }
3223
3224 if "interfaces" in vm_info_dict:
3225 vm_dict["interfaces"] = vm_info_dict["interfaces"]
3226 else:
3227 vm_dict["interfaces"] = []
3228 except KeyError:
3229 vm_dict = {
3230 "created": "",
3231 "description": "",
3232 "status": vcdStatusCode2manoFormat[int(-1)],
3233 "hostId": vm_info_dict["vmuuid"],
3234 "error_msg": "Inconsistent state",
3235 "vim_info": yaml.safe_dump(vm_info_dict),
3236 "interfaces": [],
3237 }
3238
3239 return vm_dict
3240
3241 def delete_vminstance(self, vm_id, created_items=None, volumes_to_hold=None):
3242 """Method to power off and remove a VM instance from the vCloud Director network.
3243
3244 Args:
3245 vm_id: VM UUID
3246
3247 Returns:
3248 Returns the instance identifier
3249 """
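# Deletion flow implemented below: power off the vApp if it is deployed, undeploy
# it, delete it from the VDC, and finally remove the config-drive catalog
# "cfg_drv-<vm_id>" that new_vminstance() may have created for cloud-init.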
3250 self.logger.debug("Client requesting delete vm instance {} ".format(vm_id))
3251
3252 _, vdc = self.get_vdc_details()
3253 vdc_obj = VDC(self.client, href=vdc.get("href"))
3254 if vdc_obj is None:
3255 self.logger.debug(
3256 "delete_vminstance(): Failed to get a reference of VDC for a tenant {}".format(
3257 self.tenant_name
3258 )
3259 )
3260 raise vimconn.VimConnException(
3261 "delete_vminstance(): Failed to get a reference of VDC for a tenant {}".format(
3262 self.tenant_name
3263 )
3264 )
3265
3266 try:
3267 vapp_name = self.get_namebyvappid(vm_id)
3268 if vapp_name is None:
3269 self.logger.debug(
3270 "delete_vminstance(): Failed to get vm by given {} vm uuid".format(
3271 vm_id
3272 )
3273 )
3274
3275 return (
3276 -1,
3277 "delete_vminstance(): Failed to get vm by given {} vm uuid".format(
3278 vm_id
3279 ),
3280 )
3281
3282 self.logger.info("Deleting vApp {} and UUID {}".format(vapp_name, vm_id))
3283 vapp_resource = vdc_obj.get_vapp(vapp_name)
3284 vapp = VApp(self.client, resource=vapp_resource)
3285
3286 # Power off, undeploy and delete the vApp, waiting for each task to complete.
3287 if vapp:
3288 if vapp_resource.get("deployed") == "true":
3289 self.logger.info("Powering off vApp {}".format(vapp_name))
3290 # Power off vApp
3291 powered_off = False
3292 wait_time = 0
3293
3294 while wait_time <= MAX_WAIT_TIME:
3295 power_off_task = vapp.power_off()
3296 result = self.client.get_task_monitor().wait_for_success(
3297 task=power_off_task
3298 )
3299
3300 if result.get("status") == "success":
3301 powered_off = True
3302 break
3303 else:
3304 self.logger.info(
3305 "Wait for vApp {} to power off".format(vapp_name)
3306 )
3307 time.sleep(INTERVAL_TIME)
3308
3309 wait_time += INTERVAL_TIME
3310
3311 if not powered_off:
3312 self.logger.debug(
3313 "delete_vminstance(): Failed to power off VM instance {} ".format(
3314 vm_id
3315 )
3316 )
3317 else:
3318 self.logger.info(
3319 "delete_vminstance(): Powered off VM instance {} ".format(
3320 vm_id
3321 )
3322 )
3323
3324 # Undeploy vApp
3325 self.logger.info("Undeploy vApp {}".format(vapp_name))
3326 wait_time = 0
3327 undeployed = False
3328 while wait_time <= MAX_WAIT_TIME:
3329 vapp = VApp(self.client, resource=vapp_resource)
3330 if not vapp:
3331 self.logger.debug(
3332 "delete_vminstance(): Failed to get vm by given {} vm uuid".format(
3333 vm_id
3334 )
3335 )
3336
3337 return (
3338 -1,
3339 "delete_vminstance(): Failed to get vm by given {} vm uuid".format(
3340 vm_id
3341 ),
3342 )
3343
3344 undeploy_task = vapp.undeploy()
3345 result = self.client.get_task_monitor().wait_for_success(
3346 task=undeploy_task
3347 )
3348
3349 if result.get("status") == "success":
3350 undeployed = True
3351 break
3352 else:
3353 self.logger.debug(
3354 "Wait for vApp {} to undeploy".format(vapp_name)
3355 )
3356 time.sleep(INTERVAL_TIME)
3357
3358 wait_time += INTERVAL_TIME
3359
3360 if not undeployed:
3361 self.logger.debug(
3362 "delete_vminstance(): Failed to undeploy vApp {} ".format(
3363 vm_id
3364 )
3365 )
3366
3367 # delete vapp
3368 self.logger.info("Start deletion of vApp {} ".format(vapp_name))
3369 if vapp is not None:
3370 wait_time = 0
3371 result = False
3372
3373 while wait_time <= MAX_WAIT_TIME:
3374 vapp = VApp(self.client, resource=vapp_resource)
3375 if not vapp:
3376 self.logger.debug(
3377 "delete_vminstance(): Failed to get vm by given {} vm uuid".format(
3378 vm_id
3379 )
3380 )
3381
3382 return (
3383 -1,
3384 "delete_vminstance(): Failed to get vm by given {} vm uuid".format(
3385 vm_id
3386 ),
3387 )
3388
3389 delete_task = vdc_obj.delete_vapp(vapp.name, force=True)
3390 result = self.client.get_task_monitor().wait_for_success(
3391 task=delete_task
3392 )
3393 if result.get("status") == "success":
3394 break
3395 else:
3396 self.logger.debug(
3397 "Wait for vApp {} to delete".format(vapp_name)
3398 )
3399 time.sleep(INTERVAL_TIME)
3400
3401 wait_time += INTERVAL_TIME
3402
3403 if result is None:
3404 self.logger.debug(
3405 "delete_vminstance(): Failed delete uuid {} ".format(vm_id)
3406 )
3407 else:
3408 self.logger.info(
3409 "Deleted vm instance {} successfully".format(vm_id)
3410 )
3411 config_drive_catalog_name, config_drive_catalog_id = (
3412 "cfg_drv-" + vm_id,
3413 None,
3414 )
3415 catalog_list = self.get_image_list()
3416
3417 try:
3418 config_drive_catalog_id = [
3419 catalog_["id"]
3420 for catalog_ in catalog_list
3421 if catalog_["name"] == config_drive_catalog_name
3422 ][0]
3423 except IndexError:
3424 pass
3425
3426 if config_drive_catalog_id:
3427 self.logger.debug(
3428 "delete_vminstance(): Found a config drive catalog {} matching "
3429 'vapp_name"{}". Deleting it.'.format(
3430 config_drive_catalog_id, vapp_name
3431 )
3432 )
3433 self.delete_image(config_drive_catalog_id)
3434
3435 return vm_id
3436 except Exception:
3437 self.logger.debug(traceback.format_exc())
3438
3439 raise vimconn.VimConnException(
3440 "delete_vminstance(): Failed delete vm instance {}".format(vm_id)
3441 )
3442
3443 def refresh_vms_status(self, vm_list):
3444 """Get the status of the virtual machines and their interfaces/ports
3445 Params: the list of VM identifiers
3446 Returns a dictionary with:
3447 vm_id: #VIM id of this Virtual Machine
3448 status: #Mandatory. Text with one of:
3449 # DELETED (not found at vim)
3450 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
3451 # OTHER (Vim reported other status not understood)
3452 # ERROR (VIM indicates an ERROR status)
3453 # ACTIVE, PAUSED, SUSPENDED, INACTIVE (not running),
3454 # CREATING (on building process), ERROR
3455 # ACTIVE:NoMgmtIP (active but without a management IP address)
3456 #
3457 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
3458 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
3459 interfaces:
3460 - vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
3461 mac_address: #Text format XX:XX:XX:XX:XX:XX
3462 vim_net_id: #network id where this interface is connected
3463 vim_interface_id: #interface/port VIM id
3464 ip_address: #null, or text with IPv4, IPv6 address
3465 """
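# Illustrative shape of the returned dictionary (all values are hypothetical):
# {
#     "<vm uuid>": {
#         "status": "ACTIVE",
#         "error_msg": "ACTIVE",
#         "vim_info": "<yaml dump of VM details>",
#         "interfaces": [
#             {"mac_address": "00:50:56:aa:bb:cc",
#              "vim_net_id": "<net uuid>",
#              "vim_interface_id": "<net uuid>",
#              "ip_address": "10.10.0.5"},
#         ],
#     },
# }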
3466 self.logger.debug("Client requesting refresh vm status for {} ".format(vm_list))
3467
3468 _, vdc = self.get_vdc_details()
3469 if vdc is None:
3470 raise vimconn.VimConnException(
3471 "Failed to get a reference of VDC for a tenant {}".format(
3472 self.tenant_name
3473 )
3474 )
3475
3476 vms_dict = {}
3477 nsx_edge_list = []
3478 for vmuuid in vm_list:
3479 vapp_name = self.get_namebyvappid(vmuuid)
3480 if vapp_name is not None:
3481 try:
3482 vm_pci_details = self.get_vm_pci_details(vmuuid)
3483 vdc_obj = VDC(self.client, href=vdc.get("href"))
3484 vapp_resource = vdc_obj.get_vapp(vapp_name)
3485 the_vapp = VApp(self.client, resource=vapp_resource)
3486
3487 vm_details = {}
3488 for vm in the_vapp.get_all_vms():
3489 headers = {
3490 "Accept": "application/*+xml;version=" + API_VERSION,
3491 "x-vcloud-authorization": self.client._session.headers[
3492 "x-vcloud-authorization"
3493 ],
3494 }
3495 response = self.perform_request(
3496 req_type="GET", url=vm.get("href"), headers=headers
3497 )
3498
3499 if response.status_code != 200:
3500 self.logger.error(
3501 "refresh_vms_status : REST call {} failed, reason: {} "
3502 "status code: {}".format(
3503 vm.get("href"), response.text, response.status_code
3504 )
3505 )
3506 raise vimconn.VimConnException(
3507 "refresh_vms_status : Failed to get VM details"
3508 )
3509
3510 xmlroot = XmlElementTree.fromstring(response.text)
3511 result = response.text.replace("\n", " ")
3512 hdd_match = re.search(
3513 'vcloud:capacity="(\d+)"\svcloud:storageProfileOverrideVmDefault=',
3514 result,
3515 )
3516
3517 if hdd_match:
3518 hdd_mb = hdd_match.group(1)
3519 vm_details["hdd_mb"] = int(hdd_mb) if hdd_mb else None
3520
3521 cpus_match = re.search(
3522 "<rasd:Description>Number of Virtual CPUs</.*?>(\d+)</rasd:VirtualQuantity>",
3523 result,
3524 )
3525
3526 if cpus_match:
3527 cpus = cpus_match.group(1)
3528 vm_details["cpus"] = int(cpus) if cpus else None
3529
3530 memory_mb = re.search(
3531 "<rasd:Description>Memory Size</.*?>(\d+)</rasd:VirtualQuantity>",
3532 result,
3533 ).group(1)
3534 vm_details["memory_mb"] = int(memory_mb) if memory_mb else None
3535 vm_details["status"] = vcdStatusCode2manoFormat[
3536 int(xmlroot.get("status"))
3537 ]
3538 vm_details["id"] = xmlroot.get("id")
3539 vm_details["name"] = xmlroot.get("name")
3540 vm_info = [vm_details]
3541
3542 if vm_pci_details:
3543 vm_info[0].update(vm_pci_details)
3544
3545 vm_dict = {
3546 "status": vcdStatusCode2manoFormat[
3547 int(vapp_resource.get("status"))
3548 ],
3549 "error_msg": vcdStatusCode2manoFormat[
3550 int(vapp_resource.get("status"))
3551 ],
3552 "vim_info": yaml.safe_dump(vm_info),
3553 "interfaces": [],
3554 }
3555
3556 # get networks
3557 vm_ip = None
3558 vm_mac = None
3559 networks = re.findall(
3560 "<NetworkConnection needsCustomization=.*?</NetworkConnection>",
3561 result,
3562 )
3563
3564 for network in networks:
3565 mac_s = re.search("<MACAddress>(.*?)</MACAddress>", network)
3566 vm_mac = mac_s.group(1) if mac_s else None
3567 ip_s = re.search("<IpAddress>(.*?)</IpAddress>", network)
3568 vm_ip = ip_s.group(1) if ip_s else None
3569
3570 if vm_ip is None:
3571 if not nsx_edge_list:
3572 nsx_edge_list = self.get_edge_details()
3573 if nsx_edge_list is None:
3574 raise vimconn.VimConnException(
3575 "refresh_vms_status:"
3576 "Failed to get edge details from NSX Manager"
3577 )
3578
3579 if vm_mac is not None:
3580 vm_ip = self.get_ipaddr_from_NSXedge(
3581 nsx_edge_list, vm_mac
3582 )
3583
3584 net_s = re.search('network="(.*?)"', network)
3585 network_name = net_s.group(1) if net_s else None
3586 vm_net_id = self.get_network_id_by_name(network_name)
3587 interface = {
3588 "mac_address": vm_mac,
3589 "vim_net_id": vm_net_id,
3590 "vim_interface_id": vm_net_id,
3591 "ip_address": vm_ip,
3592 }
3593 vm_dict["interfaces"].append(interface)
3594
3595 # add a vm to vm dict
3596 vms_dict.setdefault(vmuuid, vm_dict)
3597 self.logger.debug("refresh_vms_status : vm info {}".format(vm_dict))
3598 except Exception as exp:
3599 self.logger.debug("Error in response {}".format(exp))
3600 self.logger.debug(traceback.format_exc())
3601
3602 return vms_dict
3603
3604 def get_edge_details(self):
3605 """Get the NSX edge list from NSX Manager
3606 Returns list of NSX edges
3607 """
3608 edge_list = []
3609 rheaders = {"Content-Type": "application/xml"}
3610 nsx_api_url = "/api/4.0/edges"
3611
3612 self.logger.debug(
3613 "Get edge details from NSX Manager {} {}".format(
3614 self.nsx_manager, nsx_api_url
3615 )
3616 )
3617
3618 try:
3619 resp = requests.get(
3620 self.nsx_manager + nsx_api_url,
3621 auth=(self.nsx_user, self.nsx_password),
3622 verify=False,
3623 headers=rheaders,
3624 )
3625 if resp.status_code == requests.codes.ok:
3626 paged_Edge_List = XmlElementTree.fromstring(resp.text)
3627 for edge_pages in paged_Edge_List:
3628 if edge_pages.tag == "edgePage":
3629 for edge_summary in edge_pages:
3630 if edge_summary.tag == "pagingInfo":
3631 for element in edge_summary:
3632 if (
3633 element.tag == "totalCount"
3634 and element.text == "0"
3635 ):
3636 raise vimconn.VimConnException(
3637 "get_edge_details: No NSX edges details found: {}".format(
3638 self.nsx_manager
3639 )
3640 )
3641
3642 if edge_summary.tag == "edgeSummary":
3643 for element in edge_summary:
3644 if element.tag == "id":
3645 edge_list.append(element.text)
3646 else:
3647 raise vimconn.VimConnException(
3648 "get_edge_details: No NSX edge details found: {}".format(
3649 self.nsx_manager
3650 )
3651 )
3652
3653 if not edge_list:
3654 raise vimconn.VimConnException(
3655 "get_edge_details: "
3656 "No NSX edge details found: {}".format(self.nsx_manager)
3657 )
3658 else:
3659 self.logger.debug(
3660 "get_edge_details: Found NSX edges {}".format(edge_list)
3661 )
3662
3663 return edge_list
3664 else:
3665 self.logger.debug(
3666 "get_edge_details: "
3667 "Failed to get NSX edge details from NSX Manager: {}".format(
3668 resp.content
3669 )
3670 )
3671
3672 return None
3673
3674 except Exception as exp:
3675 self.logger.debug(
3676 "get_edge_details: "
3677 "Failed to get NSX edge details from NSX Manager: {}".format(exp)
3678 )
3679 raise vimconn.VimConnException(
3680 "get_edge_details: "
3681 "Failed to get NSX edge details from NSX Manager: {}".format(exp)
3682 )
3683
3684 def get_ipaddr_from_NSXedge(self, nsx_edges, mac_address):
3685 """Get IP address details from NSX edges, using the MAC address
3686 PARAMS: nsx_edges : List of NSX edges
3687 mac_address : Find IP address corresponding to this MAC address
3688 Returns: IP address corresponding to the provided MAC address
3689 """
3690 ip_addr = None
edge_mac_addr = None
3691 rheaders = {"Content-Type": "application/xml"}
3692
3693 self.logger.debug("get_ipaddr_from_NSXedge: Finding IP addr from NSX edge")
3694
3695 try:
3696 for edge in nsx_edges:
3697 nsx_api_url = "/api/4.0/edges/" + edge + "/dhcp/leaseInfo"
3698
3699 resp = requests.get(
3700 self.nsx_manager + nsx_api_url,
3701 auth=(self.nsx_user, self.nsx_password),
3702 verify=False,
3703 headers=rheaders,
3704 )
3705
3706 if resp.status_code == requests.codes.ok:
3707 dhcp_leases = XmlElementTree.fromstring(resp.text)
3708 for child in dhcp_leases:
3709 if child.tag == "dhcpLeaseInfo":
3710 dhcpLeaseInfo = child
3711 for leaseInfo in dhcpLeaseInfo:
3712 for elem in leaseInfo:
3713 if (elem.tag) == "macAddress":
3714 edge_mac_addr = elem.text
3715
3716 if (elem.tag) == "ipAddress":
3717 ip_addr = elem.text
3718
3719 if edge_mac_addr is not None:
3720 if edge_mac_addr == mac_address:
3721 self.logger.debug(
3722 "Found ip addr {} for mac {} at NSX edge {}".format(
3723 ip_addr, mac_address, edge
3724 )
3725 )
3726
3727 return ip_addr
3728 else:
3729 self.logger.debug(
3730 "get_ipaddr_from_NSXedge: "
3731 "Error occurred while getting DHCP lease info from NSX Manager: {}".format(
3732 resp.content
3733 )
3734 )
3735
3736 self.logger.debug(
3737 "get_ipaddr_from_NSXedge: No IP addr found in any NSX edge"
3738 )
3739
3740 return None
3741
3742 except XmlElementTree.ParseError as Err:
3743 self.logger.debug(
3744 "ParseError in response from NSX Manager {}".format(Err.message),
3745 exc_info=True,
3746 )
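# Hedged usage sketch for the two NSX helpers above. "vim" stands for a
# connected vimconnector instance (name assumed for illustration) and the MAC
# value is a placeholder taken from a vApp NetworkConnection section:
#
#     edges = vim.get_edge_details()                      # list of NSX edge ids
#     ip = vim.get_ipaddr_from_NSXedge(edges, "00:50:56:01:02:03")
#     # ip holds the DHCP-leased address, or None when no lease matches the MAC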
3747
3748 def action_vminstance(self, vm_id=None, action_dict=None, created_items={}):
3749 """Send and action over a VM instance from VIM
3750 Returns the vm_id if the action was successfully sent to the VIM"""
3751
3752 self.logger.debug(
3753 "Received action for vm {} and action dict {}".format(vm_id, action_dict)
3754 )
3755
3756 if vm_id is None or action_dict is None:
3757 raise vimconn.VimConnException("Invalid request. VM id or action is None.")
3758
3759 _, vdc = self.get_vdc_details()
3760 if vdc is None:
3761 raise vimconn.VimConnException(
3762 "Failed to get a reference of VDC for a tenant {}".format(
3763 self.tenant_name
3764 )
3765 )
3766
3767 vapp_name = self.get_namebyvappid(vm_id)
3768 if vapp_name is None:
3769 self.logger.debug(
3770 "action_vminstance(): Failed to get vm by given {} vm uuid".format(
3771 vm_id
3772 )
3773 )
3774
3775 raise vimconn.VimConnException(
3776 "Failed to get vm by given {} vm uuid".format(vm_id)
3777 )
3778 else:
3779 self.logger.info(
3780 "Action_vminstance vApp {} and UUID {}".format(vapp_name, vm_id)
3781 )
3782
3783 try:
3784 vdc_obj = VDC(self.client, href=vdc.get("href"))
3785 vapp_resource = vdc_obj.get_vapp(vapp_name)
3786 vapp = VApp(self.client, resource=vapp_resource)
3787
3788 if "start" in action_dict:
3789 self.logger.info(
3790 "action_vminstance: Power on vApp: {}".format(vapp_name)
3791 )
3792 poweron_task = self.power_on_vapp(vm_id, vapp_name)
3793 result = self.client.get_task_monitor().wait_for_success(
3794 task=poweron_task
3795 )
3796 self.instance_actions_result("start", result, vapp_name)
3797 elif "rebuild" in action_dict:
3798 self.logger.info(
3799 "action_vminstance: Rebuild vApp: {}".format(vapp_name)
3800 )
3801 rebuild_task = vapp.deploy(power_on=True)
3802 result = self.client.get_task_monitor().wait_for_success(
3803 task=rebuild_task
3804 )
3805 self.instance_actions_result("rebuild", result, vapp_name)
3806 elif "pause" in action_dict:
3807 self.logger.info("action_vminstance: pause vApp: {}".format(vapp_name))
3808 pause_task = vapp.undeploy(action="suspend")
3809 result = self.client.get_task_monitor().wait_for_success(
3810 task=pause_task
3811 )
3812 self.instance_actions_result("pause", result, vapp_name)
3813 elif "resume" in action_dict:
3814 self.logger.info("action_vminstance: resume vApp: {}".format(vapp_name))
3815 poweron_task = self.power_on_vapp(vm_id, vapp_name)
3816 result = self.client.get_task_monitor().wait_for_success(
3817 task=poweron_task
3818 )
3819 self.instance_actions_result("resume", result, vapp_name)
3820 elif "shutoff" in action_dict or "shutdown" in action_dict:
3821 action_name, _ = list(action_dict.items())[0]
3822 self.logger.info(
3823 "action_vminstance: {} vApp: {}".format(action_name, vapp_name)
3824 )
3825 shutdown_task = vapp.shutdown()
3826 result = self.client.get_task_monitor().wait_for_success(
3827 task=shutdown_task
3828 )
3829 if action_name == "shutdown":
3830 self.instance_actions_result("shutdown", result, vapp_name)
3831 else:
3832 self.instance_actions_result("shutoff", result, vapp_name)
3833 elif "forceOff" in action_dict:
3834 result = vapp.undeploy(action="powerOff")
3835 self.instance_actions_result("forceOff", result, vapp_name)
3836 elif "reboot" in action_dict:
3837 self.logger.info("action_vminstance: reboot vApp: {}".format(vapp_name))
3838 reboot_task = vapp.reboot()
3839 self.client.get_task_monitor().wait_for_success(task=reboot_task)
3840 else:
3841 raise vimconn.VimConnException(
3842 "action_vminstance: Invalid action {} or action is None.".format(
3843 action_dict
3844 )
3845 )
3846
3847 return vm_id
3848 except Exception as exp:
3849 self.logger.debug("action_vminstance: Failed with Exception {}".format(exp))
3850
3851 raise vimconn.VimConnException(
3852 "action_vminstance: Failed with Exception {}".format(exp)
3853 )
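# Hedged usage sketch for action_vminstance(). Only the keys of action_dict are
# inspected; the handled keys are "start", "rebuild", "pause", "resume",
# "shutdown", "shutoff", "forceOff" and "reboot". "vim" and vm_uuid are
# illustrative names:
#
#     vim.action_vminstance(vm_id=vm_uuid, action_dict={"start": None})
#     vim.action_vminstance(vm_id=vm_uuid, action_dict={"shutdown": None})
#     # returns vm_uuid when the action was accepted by the VIM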
3854
3855 def instance_actions_result(self, action, result, vapp_name):
3856 if result.get("status") == "success":
3857 self.logger.info(
3858 "action_vminstance: Sucessfully {} the vApp: {}".format(
3859 action, vapp_name
3860 )
3861 )
3862 else:
3863 self.logger.error(
3864 "action_vminstance: Failed to {} vApp: {}".format(action, vapp_name)
3865 )
3866
3867 def get_vminstance_console(self, vm_id, console_type="novnc"):
3868 """
3869 Get a console for the virtual machine
3870 Params:
3871 vm_id: uuid of the VM
3872 console_type, can be:
3873 "novnc" (by default), "xvpvnc" for VNC types,
3874 "rdp-html5" for RDP types, "spice-html5" for SPICE types
3875 Returns dict with the console parameters:
3876 protocol: ssh, ftp, http, https, ...
3877 server: usually ip address
3878 port: the http, ssh, ... port
3879 suffix: extra text, e.g. the http path and query string
3880 """
3881 console_dict = {}
3882
3883 if console_type is None or console_type == "novnc":
3884 url_rest_call = "{}/api/vApp/vm-{}/screen/action/acquireMksTicket".format(
3885 self.url, vm_id
3886 )
3887 headers = {
3888 "Accept": "application/*+xml;version=" + API_VERSION,
3889 "x-vcloud-authorization": self.client._session.headers[
3890 "x-vcloud-authorization"
3891 ],
3892 }
3893 response = self.perform_request(
3894 req_type="POST", url=url_rest_call, headers=headers
3895 )
3896
3897 if response.status_code == 403:
3898 response = self.retry_rest("GET", url_rest_call)
3899
3900 if response.status_code != 200:
3901 self.logger.error(
3902 "REST call {} failed reason : {}"
3903 "status code : {}".format(
3904 url_rest_call, response.text, response.status_code
3905 )
3906 )
3907 raise vimconn.VimConnException(
3908 "get_vminstance_console : Failed to get " "VM Mks ticket details"
3909 )
3910
3911 s = re.search("<Host>(.*?)</Host>", response.text)
3912 console_dict["server"] = s.group(1) if s else None
3913 s1 = re.search(r"<Port>(\d+)</Port>", response.text)
3914 console_dict["port"] = s1.group(1) if s1 else None
3915 url_rest_call = "{}/api/vApp/vm-{}/screen/action/acquireTicket".format(
3916 self.url, vm_id
3917 )
3918 headers = {
3919 "Accept": "application/*+xml;version=" + API_VERSION,
3920 "x-vcloud-authorization": self.client._session.headers[
3921 "x-vcloud-authorization"
3922 ],
3923 }
3924 response = self.perform_request(
3925 req_type="POST", url=url_rest_call, headers=headers
3926 )
3927
3928 if response.status_code == 403:
3929 response = self.retry_rest("GET", url_rest_call)
3930
3931 if response.status_code != 200:
3932 self.logger.error(
3933 "REST call {} failed reason : {}"
3934 "status code : {}".format(
3935 url_rest_call, response.text, response.status_code
3936 )
3937 )
3938 raise vimconn.VimConnException(
3939 "get_vminstance_console : Failed to get " "VM console details"
3940 )
3941
3942 s = re.search(r">.*?/(vm-\d+.*)</", response.text)
3943 console_dict["suffix"] = s.group(1) if s else None
3944 console_dict["protocol"] = "https"
3945
3946 return console_dict
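# Illustrative (not real) result of get_vminstance_console(); server, port and
# suffix are parsed from the MKS/console ticket responses above:
#
#     {"protocol": "https", "server": "vcenter.example.local",
#      "port": "7331", "suffix": "vm-1234/some-ticket"}
#
# A client could then assemble "https://<server>:<port>/<suffix>" to reach the console.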
3947
3948 def get_hosts_info(self):
3949 """Get the information of deployed hosts
3950 Returns the hosts content"""
3951 raise vimconn.VimConnNotImplemented("Should have implemented this")
3952
3953 def get_hosts(self, vim_tenant):
3954 """Get the hosts and deployed instances
3955 Returns the hosts content"""
3956 raise vimconn.VimConnNotImplemented("Should have implemented this")
3957
3958 def get_network_name_by_id(self, network_uuid=None):
3959 """Method gets vcloud director network named based on supplied uuid.
3960
3961 Args:
3962 network_uuid: network_id
3963
3964 Returns:
3965 The network name, or None if not found.
3966 """
3967
3968 if not network_uuid:
3969 return None
3970
3971 try:
3972 org_dict = self.get_org(self.org_uuid)
3973 if "networks" in org_dict:
3974 org_network_dict = org_dict["networks"]
3975
3976 for net_uuid in org_network_dict:
3977 if net_uuid == network_uuid:
3978 return org_network_dict[net_uuid]
3979 except Exception:
3980 self.logger.debug("Exception in get_network_name_by_id")
3981 self.logger.debug(traceback.format_exc())
3982
3983 return None
3984
3985 def get_network_id_by_name(self, network_name=None):
3986 """Method gets vcloud director network uuid based on supplied name.
3987
3988 Args:
3989 network_name: network_name
3990 Returns:
3991 The network uuid of the matching network.
3992 Returns None if no network with that name exists.
3993 """
3994 if not network_name:
3995 self.logger.debug("get_network_id_by_name() : Network name is empty")
3996 return None
3997
3998 try:
3999 org_dict = self.get_org(self.org_uuid)
4000 if org_dict and "networks" in org_dict:
4001 org_network_dict = org_dict["networks"]
4002
4003 for net_uuid, net_name in org_network_dict.items():
4004 if net_name == network_name:
4005 return net_uuid
4006
4007 except KeyError as exp:
4008 self.logger.debug("get_network_id_by_name() : KeyError- {} ".format(exp))
4009
4010 return None
4011
4012 def get_physical_network_by_name(self, physical_network_name):
4013 """
4014 Method returns uuid of the physical network whose name is passed
4015 Args:
4016 physical_network_name: physical network name
4017 Returns:
4018 UUID of physical_network_name
4019 """
4020 try:
4021 client_as_admin = self.connect_as_admin()
4022
4023 if not client_as_admin:
4024 raise vimconn.VimConnConnectionException("Failed to connect vCD.")
4025
4026 url_list = [self.url, "/api/admin/vdc/", self.tenant_id]
4027 vm_list_rest_call = "".join(url_list)
4028
4029 if client_as_admin._session:
4030 headers = {
4031 "Accept": "application/*+xml;version=" + API_VERSION,
4032 "x-vcloud-authorization": client_as_admin._session.headers[
4033 "x-vcloud-authorization"
4034 ],
4035 }
4036 response = self.perform_request(
4037 req_type="GET", url=vm_list_rest_call, headers=headers
4038 )
4039 provider_network = None
4040 available_network = None
4041 # add_vdc_rest_url = None
4042
4043 if response.status_code != requests.codes.ok:
4044 self.logger.debug(
4045 "REST API call {} failed. Return status code {}".format(
4046 vm_list_rest_call, response.status_code
4047 )
4048 )
4049 return None
4050 else:
4051 try:
4052 vm_list_xmlroot = XmlElementTree.fromstring(response.text)
4053 for child in vm_list_xmlroot:
4054 if child.tag.split("}")[1] == "ProviderVdcReference":
4055 provider_network = child.attrib.get("href")
4056 # application/vnd.vmware.admin.providervdc+xml
4057
4058 if child.tag.split("}")[1] == "Link":
4059 if (
4060 child.attrib.get("type")
4061 == "application/vnd.vmware.vcloud.orgVdcNetwork+xml"
4062 and child.attrib.get("rel") == "add"
4063 ):
4064 child.attrib.get("href")
4065 except Exception:
4066 self.logger.debug(
4067 "Failed parse respond for rest api call {}".format(
4068 vm_list_rest_call
4069 )
4070 )
4071 self.logger.debug("Respond body {}".format(response.text))
4072
4073 return None
4074
4075 # find pvdc provided available network
4076 response = self.perform_request(
4077 req_type="GET", url=provider_network, headers=headers
4078 )
4079
4080 if response.status_code != requests.codes.ok:
4081 self.logger.debug(
4082 "REST API call {} failed. Return status code {}".format(
4083 vm_list_rest_call, response.status_code
4084 )
4085 )
4086
4087 return None
4088
4089 try:
4090 vm_list_xmlroot = XmlElementTree.fromstring(response.text)
4091 for child in vm_list_xmlroot.iter():
4092 if child.tag.split("}")[1] == "AvailableNetworks":
4093 for networks in child.iter():
4094 if (
4095 networks.attrib.get("href") is not None
4096 and networks.attrib.get("name") is not None
4097 ):
4098 if (
4099 networks.attrib.get("name")
4100 == physical_network_name
4101 ):
4102 network_url = networks.attrib.get("href")
4103 available_network = network_url[
4104 network_url.rindex("/") + 1 :
4105 ]
4106 break
4107 except Exception:
4108 return None
4109
4110 return available_network
4111 except Exception as e:
4112 self.logger.error("Error while getting physical network: {}".format(e))
4113
4114 def list_org_action(self):
4115 """
4116 Method leverages vCloud Director and queries the available organizations for the current user
4117
4118 Args:
4119 vca - is active VCA connection.
4120 vdc_name - is a vdc name that will be used to query vms action
4121
4122 Returns:
4123 The return XML respond
4124 """
4125 url_list = [self.url, "/api/org"]
4126 vm_list_rest_call = "".join(url_list)
4127
4128 if self.client._session:
4129 headers = {
4130 "Accept": "application/*+xml;version=" + API_VERSION,
4131 "x-vcloud-authorization": self.client._session.headers[
4132 "x-vcloud-authorization"
4133 ],
4134 }
4135
4136 response = self.perform_request(
4137 req_type="GET", url=vm_list_rest_call, headers=headers
4138 )
4139
4140 if response.status_code == 403:
4141 response = self.retry_rest("GET", vm_list_rest_call)
4142
4143 if response.status_code == requests.codes.ok:
4144 return response.text
4145
4146 return None
4147
4148 def get_org_action(self, org_uuid=None):
4149 """
4150 Method leverages vCloud Director and retrieves the available objects for an organization.
4151
4152 Args:
4153 org_uuid - vCD organization uuid
4154 self.client - is active connection.
4155
4156 Returns:
4157 The XML response text, or None
4158 """
4159
4160 if org_uuid is None:
4161 return None
4162
4163 url_list = [self.url, "/api/org/", org_uuid]
4164 vm_list_rest_call = "".join(url_list)
4165
4166 if self.client._session:
4167 headers = {
4168 "Accept": "application/*+xml;version=" + API_VERSION,
4169 "x-vcloud-authorization": self.client._session.headers[
4170 "x-vcloud-authorization"
4171 ],
4172 }
4173
4174 # response = requests.get(vm_list_rest_call, headers=headers, verify=False)
4175 response = self.perform_request(
4176 req_type="GET", url=vm_list_rest_call, headers=headers
4177 )
4178
4179 if response.status_code == 403:
4180 response = self.retry_rest("GET", vm_list_rest_call)
4181
4182 if response.status_code == requests.codes.ok:
4183 return response.text
4184
4185 return None
4186
4187 def get_org(self, org_uuid=None):
4188 """
4189 Method retrieves available organization in vCloud Director
4190
4191 Args:
4192 org_uuid - is a organization uuid.
4193
4194 Returns:
4195 The return dictionary has the following keys:
4196 "networks" - network list under the org
4197 "catalogs" - catalog list under the org
4198 "vdcs" - vdc list under the org
4199 """
4200
4201 org_dict = {}
4202
4203 if org_uuid is None:
4204 return org_dict
4205
4206 content = self.get_org_action(org_uuid=org_uuid)
4207 try:
4208 vdc_list = {}
4209 network_list = {}
4210 catalog_list = {}
4211 vm_list_xmlroot = XmlElementTree.fromstring(content)
4212 for child in vm_list_xmlroot:
4213 if child.attrib["type"] == "application/vnd.vmware.vcloud.vdc+xml":
4214 vdc_list[child.attrib["href"].split("/")[-1:][0]] = child.attrib[
4215 "name"
4216 ]
4217 org_dict["vdcs"] = vdc_list
4218
4219 if (
4220 child.attrib["type"]
4221 == "application/vnd.vmware.vcloud.orgNetwork+xml"
4222 ):
4223 network_list[
4224 child.attrib["href"].split("/")[-1:][0]
4225 ] = child.attrib["name"]
4226 org_dict["networks"] = network_list
4227
4228 if child.attrib["type"] == "application/vnd.vmware.vcloud.catalog+xml":
4229 catalog_list[
4230 child.attrib["href"].split("/")[-1:][0]
4231 ] = child.attrib["name"]
4232 org_dict["catalogs"] = catalog_list
4233 except Exception:
4234 pass
4235
4236 return org_dict
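# Shape of the dictionary returned by get_org(); uuids and names below are
# placeholders, the three keys mirror the branches parsed above:
#
#     {"vdcs":     {"<vdc-uuid>": "<vdc-name>"},
#      "networks": {"<network-uuid>": "<network-name>"},
#      "catalogs": {"<catalog-uuid>": "<catalog-name>"}}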
4237
4238 def get_org_list(self):
4239 """
4240 Method retrieves the available organizations in vCloud Director
4241
4242 Args:
4243 vca - is active VCA connection.
4244
4245 Returns:
4246 A dictionary keyed by organization UUID, with the organization name as value
4247 """
4248 org_dict = {}
4249
4250 content = self.list_org_action()
4251 try:
4252 vm_list_xmlroot = XmlElementTree.fromstring(content)
4253
4254 for vm_xml in vm_list_xmlroot:
4255 if vm_xml.tag.split("}")[1] == "Org":
4256 org_uuid = vm_xml.attrib["href"].split("/")[-1:]
4257 org_dict[org_uuid[0]] = vm_xml.attrib["name"]
4258 except Exception:
4259 pass
4260
4261 return org_dict
4262
4263 def vms_view_action(self, vdc_name=None):
4264 """Method leverages vCloud director vms query call
4265
4266 Args:
4267 vca - is active VCA connection.
4268 vdc_name - is a vdc name that will be used to query vms action
4269
4270 Returns:
4271 The return XML respond
4272 """
4273 vca = self.connect()
4274 if vdc_name is None:
4275 return None
4276
4277 url_list = [vca.host, "/api/vms/query"]
4278 vm_list_rest_call = "".join(url_list)
4279
4280 if not (not vca.vcloud_session or not vca.vcloud_session.organization):
4281 refs = [
4282 ref
4283 for ref in vca.vcloud_session.organization.Link
4284 if ref.name == vdc_name
4285 and ref.type_ == "application/vnd.vmware.vcloud.vdc+xml"
4286 ]
4287
4288 if len(refs) == 1:
4289 response = self.perform_request(
4290 req_type="GET",
4291 url=vm_list_rest_call,
4292 headers=vca.vcloud_session.get_vcloud_headers(),
4293 verify=vca.verify,
4294 logger=vca.logger,
4295 )
4296
4297 if response.status_code == requests.codes.ok:
4298 return response.text
4299
4300 return None
4301
4302 def get_vapp(self, vdc_name=None, vapp_name=None, isuuid=False):
4303 """
4304 Method retrieves VMs deployed in vCloud Director. It returns the VM attributes as a dictionary
4305 containing all VMs deployed for the queried VDC.
4306 The dictionary key is the VM UUID
4307
4308
4309 Args:
4310 vca - is active VCA connection.
4311 vdc_name - is a vdc name that will be used to query vms action
4312
4313 Returns:
4314 A dictionary keyed by vApp/VM UUID
4315 """
4316 vm_dict = {}
4317 vca = self.connect()
4318
4319 if not vca:
4320 raise vimconn.VimConnConnectionException("self.connect() is failed")
4321
4322 if vdc_name is None:
4323 return vm_dict
4324
4325 content = self.vms_view_action(vdc_name=vdc_name)
4326 try:
4327 vm_list_xmlroot = XmlElementTree.fromstring(content)
4328 for vm_xml in vm_list_xmlroot:
4329 if (
4330 vm_xml.tag.split("}")[1] == "VMRecord"
4331 and vm_xml.attrib["isVAppTemplate"] == "false"
4332 ):
4333 # lookup done by UUID
4334 if isuuid:
4335 if vapp_name in vm_xml.attrib["container"]:
4336 rawuuid = vm_xml.attrib["href"].split("/")[-1:]
4337 if "vm-" in rawuuid[0]:
4338 vm_dict[rawuuid[0][3:]] = vm_xml.attrib
4339 break
4340 # lookup done by Name
4341 else:
4342 if vapp_name in vm_xml.attrib["name"]:
4343 rawuuid = vm_xml.attrib["href"].split("/")[-1:]
4344 if "vm-" in rawuuid[0]:
4345 vm_dict[rawuuid[0][3:]] = vm_xml.attrib
4346 break
4347 except Exception:
4348 pass
4349
4350 return vm_dict
4351
4352 def get_network_action(self, network_uuid=None):
4353 """
4354 Method leverages vCloud Director and queries a network based on its uuid
4355
4356 Args:
4357 vca - is active VCA connection.
4358 network_uuid - is a network uuid
4359
4360 Returns:
4361 The return XML respond
4362 """
4363 if network_uuid is None:
4364 return None
4365
4366 url_list = [self.url, "/api/network/", network_uuid]
4367 vm_list_rest_call = "".join(url_list)
4368
4369 if self.client._session:
4370 headers = {
4371 "Accept": "application/*+xml;version=" + API_VERSION,
4372 "x-vcloud-authorization": self.client._session.headers[
4373 "x-vcloud-authorization"
4374 ],
4375 }
4376 response = self.perform_request(
4377 req_type="GET", url=vm_list_rest_call, headers=headers
4378 )
4379
4380 # Retry login if session expired & retry sending request
4381 if response.status_code == 403:
4382 response = self.retry_rest("GET", vm_list_rest_call)
4383
4384 if response.status_code == requests.codes.ok:
4385 return response.text
4386
4387 return None
4388
4389 def get_vcd_network(self, network_uuid=None):
4390 """
4391 Method retrieves available network from vCloud Director
4392
4393 Args:
4394 network_uuid - is VCD network UUID
4395
4396 Each element serialized as key : value pair
4397
4398 Following keys are available for access, e.g. network_configuration['Gateway']
4399 <Configuration>
4400 <IpScopes>
4401 <IpScope>
4402 <IsInherited>true</IsInherited>
4403 <Gateway>172.16.252.100</Gateway>
4404 <Netmask>255.255.255.0</Netmask>
4405 <Dns1>172.16.254.201</Dns1>
4406 <Dns2>172.16.254.202</Dns2>
4407 <DnsSuffix>vmwarelab.edu</DnsSuffix>
4408 <IsEnabled>true</IsEnabled>
4409 <IpRanges>
4410 <IpRange>
4411 <StartAddress>172.16.252.1</StartAddress>
4412 <EndAddress>172.16.252.99</EndAddress>
4413 </IpRange>
4414 </IpRanges>
4415 </IpScope>
4416 </IpScopes>
4417 <FenceMode>bridged</FenceMode>
4418
4419 Returns:
4420 A dictionary with the network configuration elements
4421 """
4422 network_configuration = {}
4423
4424 if network_uuid is None:
4425 return network_uuid
4426
4427 try:
4428 content = self.get_network_action(network_uuid=network_uuid)
4429 if content is not None:
4430 vm_list_xmlroot = XmlElementTree.fromstring(content)
4431 network_configuration["status"] = vm_list_xmlroot.get("status")
4432 network_configuration["name"] = vm_list_xmlroot.get("name")
4433 network_configuration["uuid"] = vm_list_xmlroot.get("id").split(":")[3]
4434
4435 for child in vm_list_xmlroot:
4436 if child.tag.split("}")[1] == "IsShared":
4437 network_configuration["isShared"] = child.text.strip()
4438
4439 if child.tag.split("}")[1] == "Configuration":
4440 for configuration in child.iter():
4441 tagKey = configuration.tag.split("}")[1].strip()
4442 if tagKey != "":
4443 network_configuration[
4444 tagKey
4445 ] = configuration.text.strip()
4446 except Exception as exp:
4447 self.logger.debug("get_vcd_network: Failed with Exception {}".format(exp))
4448
4449 raise vimconn.VimConnException(
4450 "get_vcd_network: Failed with Exception {}".format(exp)
4451 )
4452
4453 return network_configuration
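# Illustrative result of get_vcd_network() for the sample XML in the docstring;
# besides "status", "name", "uuid" and "isShared", the keys come straight from
# the <Configuration> sub-element tags:
#
#     {"status": "1", "name": "net-xyz", "uuid": "<network-uuid>",
#      "isShared": "false", "Gateway": "172.16.252.100",
#      "Netmask": "255.255.255.0", "Dns1": "172.16.254.201",
#      "FenceMode": "bridged", ...}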
4454
4455 def delete_network_action(self, network_uuid=None):
4456 """
4457 Method deletes the given network from vCloud Director
4458
4459 Args:
4460 network_uuid - is a network uuid that client wish to delete
4461
4462 Returns:
4463 True if the delete request was accepted, otherwise False
4464 """
4465 client = self.connect_as_admin()
4466
4467 if not client:
4468 raise vimconn.VimConnConnectionException("Failed to connect vCD as admin")
4469
4470 if network_uuid is None:
4471 return False
4472
4473 url_list = [self.url, "/api/admin/network/", network_uuid]
4474 vm_list_rest_call = "".join(url_list)
4475
4476 if client._session:
4477 headers = {
4478 "Accept": "application/*+xml;version=" + API_VERSION,
4479 "x-vcloud-authorization": client._session.headers[
4480 "x-vcloud-authorization"
4481 ],
4482 }
4483 response = self.perform_request(
4484 req_type="DELETE", url=vm_list_rest_call, headers=headers
4485 )
4486
4487 if response.status_code == 202:
4488 return True
4489
4490 return False
4491
4492 def create_network(
4493 self,
4494 network_name=None,
4495 net_type="bridge",
4496 parent_network_uuid=None,
4497 ip_profile=None,
4498 isshared="true",
4499 ):
4500 """
4501 Method create network in vCloud director
4502
4503 Args:
4504 network_name - is network name to be created.
4505 net_type - can be 'bridge','data','ptp','mgmt'.
4506 ip_profile is a dict containing the IP parameters of the network
4507 isshared - is a boolean
4508 parent_network_uuid - is parent provider vdc network that will be used for mapping.
4509 It is an optional attribute; by default, if no parent network is indicated, the first available one is used.
4510
4511 Returns:
4512 The new network uuid, or None on failure
4513 """
4514 new_network_name = [network_name, "-", str(uuid.uuid4())]
4515 content = self.create_network_rest(
4516 network_name="".join(new_network_name),
4517 ip_profile=ip_profile,
4518 net_type=net_type,
4519 parent_network_uuid=parent_network_uuid,
4520 isshared=isshared,
4521 )
4522
4523 if content is None:
4524 self.logger.debug("Failed create network {}.".format(network_name))
4525
4526 return None
4527
4528 try:
4529 vm_list_xmlroot = XmlElementTree.fromstring(content)
4530 vcd_uuid = vm_list_xmlroot.get("id").split(":")
4531 if len(vcd_uuid) == 4:
4532 self.logger.info(
4533 "Created new network name: {} uuid: {}".format(
4534 network_name, vcd_uuid[3]
4535 )
4536 )
4537
4538 return vcd_uuid[3]
4539 except Exception:
4540 self.logger.debug("Failed create network {}".format(network_name))
4541
4542 return None
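# Hedged example of the ip_profile dict consumed by create_network() and
# create_network_rest(); any missing key falls back to DEFAULT_IP_PROFILE or to
# a value derived from subnet_address, and the connector appends a uuid suffix
# to the network name. "vim" and all values are illustrative:
#
#     ip_profile = {"subnet_address": "192.168.10.0/24",
#                   "gateway_address": "192.168.10.1",
#                   "dns_address": "192.168.10.2;8.8.8.8",
#                   "dhcp_enabled": True, "dhcp_count": 50,
#                   "dhcp_start_address": "192.168.10.3", "ip_version": "IPv4"}
#     net_id = vim.create_network("mgmt-net", net_type="bridge", ip_profile=ip_profile)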
4543
4544 def create_network_rest(
4545 self,
4546 network_name=None,
4547 net_type="bridge",
4548 parent_network_uuid=None,
4549 ip_profile=None,
4550 isshared="true",
4551 ):
4552 """
4553 Method create network in vCloud director
4554
4555 Args:
4556 network_name - is network name to be created.
4557 net_type - can be 'bridge','data','ptp','mgmt'.
4558 ip_profile is a dict containing the IP parameters of the network
4559 isshared - is a boolean
4560 parent_network_uuid - is parent provider vdc network that will be used for mapping.
4561 It is an optional attribute; by default, if no parent network is indicated, the first available one is used.
4562
4563 Returns:
4564 The XML response of the network creation request, or None on failure
4565 """
4566 client_as_admin = self.connect_as_admin()
4567
4568 if not client_as_admin:
4569 raise vimconn.VimConnConnectionException("Failed to connect vCD.")
4570
4571 if network_name is None:
4572 return None
4573
4574 url_list = [self.url, "/api/admin/vdc/", self.tenant_id]
4575 vm_list_rest_call = "".join(url_list)
4576
4577 if client_as_admin._session:
4578 headers = {
4579 "Accept": "application/*+xml;version=" + API_VERSION,
4580 "x-vcloud-authorization": client_as_admin._session.headers[
4581 "x-vcloud-authorization"
4582 ],
4583 }
4584 response = self.perform_request(
4585 req_type="GET", url=vm_list_rest_call, headers=headers
4586 )
4587 provider_network = None
4588 available_networks = None
4589 add_vdc_rest_url = None
4590
4591 if response.status_code != requests.codes.ok:
4592 self.logger.debug(
4593 "REST API call {} failed. Return status code {}".format(
4594 vm_list_rest_call, response.status_code
4595 )
4596 )
4597
4598 return None
4599 else:
4600 try:
4601 vm_list_xmlroot = XmlElementTree.fromstring(response.text)
4602 for child in vm_list_xmlroot:
4603 if child.tag.split("}")[1] == "ProviderVdcReference":
4604 provider_network = child.attrib.get("href")
4605 # application/vnd.vmware.admin.providervdc+xml
4606
4607 if child.tag.split("}")[1] == "Link":
4608 if (
4609 child.attrib.get("type")
4610 == "application/vnd.vmware.vcloud.orgVdcNetwork+xml"
4611 and child.attrib.get("rel") == "add"
4612 ):
4613 add_vdc_rest_url = child.attrib.get("href")
4614 except Exception:
4615 self.logger.debug(
4616 "Failed parse respond for rest api call {}".format(
4617 vm_list_rest_call
4618 )
4619 )
4620 self.logger.debug("Respond body {}".format(response.text))
4621
4622 return None
4623
4624 # find pvdc provided available network
4625 response = self.perform_request(
4626 req_type="GET", url=provider_network, headers=headers
4627 )
4628
4629 if response.status_code != requests.codes.ok:
4630 self.logger.debug(
4631 "REST API call {} failed. Return status code {}".format(
4632 vm_list_rest_call, response.status_code
4633 )
4634 )
4635
4636 return None
4637
4638 if parent_network_uuid is None:
4639 try:
4640 vm_list_xmlroot = XmlElementTree.fromstring(response.text)
4641 for child in vm_list_xmlroot.iter():
4642 if child.tag.split("}")[1] == "AvailableNetworks":
4643 for networks in child.iter():
4644 # application/vnd.vmware.admin.network+xml
4645 if networks.attrib.get("href") is not None:
4646 available_networks = networks.attrib.get("href")
4647 break
4648 except Exception:
4649 return None
4650
4651 try:
4652 # Configure IP profile of the network
4653 ip_profile = (
4654 ip_profile if ip_profile is not None else DEFAULT_IP_PROFILE
4655 )
4656
4657 if (
4658 "subnet_address" not in ip_profile
4659 or ip_profile["subnet_address"] is None
4660 ):
4661 subnet_rand = random.randint(0, 255)
4662 ip_base = "192.168.{}.".format(subnet_rand)
4663 ip_profile["subnet_address"] = ip_base + "0/24"
4664 else:
4665 ip_base = ip_profile["subnet_address"].rsplit(".", 1)[0] + "."
4666
4667 if (
4668 "gateway_address" not in ip_profile
4669 or ip_profile["gateway_address"] is None
4670 ):
4671 ip_profile["gateway_address"] = ip_base + "1"
4672
4673 if "dhcp_count" not in ip_profile or ip_profile["dhcp_count"] is None:
4674 ip_profile["dhcp_count"] = DEFAULT_IP_PROFILE["dhcp_count"]
4675
4676 if (
4677 "dhcp_enabled" not in ip_profile
4678 or ip_profile["dhcp_enabled"] is None
4679 ):
4680 ip_profile["dhcp_enabled"] = DEFAULT_IP_PROFILE["dhcp_enabled"]
4681
4682 if (
4683 "dhcp_start_address" not in ip_profile
4684 or ip_profile["dhcp_start_address"] is None
4685 ):
4686 ip_profile["dhcp_start_address"] = ip_base + "3"
4687
4688 if "ip_version" not in ip_profile or ip_profile["ip_version"] is None:
4689 ip_profile["ip_version"] = DEFAULT_IP_PROFILE["ip_version"]
4690
4691 if "dns_address" not in ip_profile or ip_profile["dns_address"] is None:
4692 ip_profile["dns_address"] = ip_base + "2"
4693
4694 gateway_address = ip_profile["gateway_address"]
4695 dhcp_count = int(ip_profile["dhcp_count"])
4696 subnet_address = self.convert_cidr_to_netmask(
4697 ip_profile["subnet_address"]
4698 )
4699
4700 if ip_profile["dhcp_enabled"] is True:
4701 dhcp_enabled = "true"
4702 else:
4703 dhcp_enabled = "false"
4704
4705 dhcp_start_address = ip_profile["dhcp_start_address"]
4706
4707 # derive dhcp_end_address from dhcp_start_address & dhcp_count
4708 end_ip_int = int(netaddr.IPAddress(dhcp_start_address))
4709 end_ip_int += dhcp_count - 1
4710 dhcp_end_address = str(netaddr.IPAddress(end_ip_int))
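# e.g. (illustration only) dhcp_start_address="192.168.5.3" with dhcp_count=50
# yields dhcp_end_address="192.168.5.52"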
4711
4712 # ip_version = ip_profile['ip_version']
4713 dns_address = ip_profile["dns_address"]
4714 except KeyError as exp:
4715 self.logger.debug("Create Network REST: Key error {}".format(exp))
4716
4717 raise vimconn.VimConnException(
4718 "Create Network REST: Key error{}".format(exp)
4719 )
4720
4721 # either use client provided UUID or search for a first available
4722 # if both are not defined we return none
4723 if parent_network_uuid is not None:
4724 provider_network = None
4725 available_networks = None
4726 add_vdc_rest_url = None
4727 url_list = [self.url, "/api/admin/vdc/", self.tenant_id, "/networks"]
4728 add_vdc_rest_url = "".join(url_list)
4729 url_list = [self.url, "/api/admin/network/", parent_network_uuid]
4730 available_networks = "".join(url_list)
4731
4732 # Creating all networks as Direct Org VDC type networks.
4733 # Unused in case of Underlay (data/ptp) network interface.
4734 fence_mode = "isolated"
4735 is_inherited = "false"
4736 dns_list = dns_address.split(";")
4737 dns1 = dns_list[0]
4738 dns2_text = ""
4739
4740 if len(dns_list) >= 2:
4741 dns2_text = "\n <Dns2>{}</Dns2>\n".format(
4742 dns_list[1]
4743 )
4744
4745 if net_type == "isolated":
4746 fence_mode = "isolated"
4747 data = """ <OrgVdcNetwork name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5">
4748 <Description>Openmano created</Description>
4749 <Configuration>
4750 <IpScopes>
4751 <IpScope>
4752 <IsInherited>{1:s}</IsInherited>
4753 <Gateway>{2:s}</Gateway>
4754 <Netmask>{3:s}</Netmask>
4755 <Dns1>{4:s}</Dns1>{5:s}
4756 <IsEnabled>{6:s}</IsEnabled>
4757 <IpRanges>
4758 <IpRange>
4759 <StartAddress>{7:s}</StartAddress>
4760 <EndAddress>{8:s}</EndAddress>
4761 </IpRange>
4762 </IpRanges>
4763 </IpScope>
4764 </IpScopes>
4765 <FenceMode>{9:s}</FenceMode>
4766 </Configuration>
4767 <IsShared>{10:s}</IsShared>
4768 </OrgVdcNetwork> """.format(
4769 escape(network_name),
4770 is_inherited,
4771 gateway_address,
4772 subnet_address,
4773 dns1,
4774 dns2_text,
4775 dhcp_enabled,
4776 dhcp_start_address,
4777 dhcp_end_address,
4778 fence_mode,
4779 isshared,
4780 )
4781 else:
4782 fence_mode = "bridged"
4783 data = """ <OrgVdcNetwork name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5">
4784 <Description>Openmano created</Description>
4785 <Configuration>
4786 <IpScopes>
4787 <IpScope>
4788 <IsInherited>{1:s}</IsInherited>
4789 <Gateway>{2:s}</Gateway>
4790 <Netmask>{3:s}</Netmask>
4791 <Dns1>{4:s}</Dns1>{5:s}
4792 <IsEnabled>{6:s}</IsEnabled>
4793 <IpRanges>
4794 <IpRange>
4795 <StartAddress>{7:s}</StartAddress>
4796 <EndAddress>{8:s}</EndAddress>
4797 </IpRange>
4798 </IpRanges>
4799 </IpScope>
4800 </IpScopes>
4801 <ParentNetwork href="{9:s}"/>
4802 <FenceMode>{10:s}</FenceMode>
4803 </Configuration>
4804 <IsShared>{11:s}</IsShared>
4805 </OrgVdcNetwork> """.format(
4806 escape(network_name),
4807 is_inherited,
4808 gateway_address,
4809 subnet_address,
4810 dns1,
4811 dns2_text,
4812 dhcp_enabled,
4813 dhcp_start_address,
4814 dhcp_end_address,
4815 available_networks,
4816 fence_mode,
4817 isshared,
4818 )
4819
4820 headers["Content-Type"] = "application/vnd.vmware.vcloud.orgVdcNetwork+xml"
4821 try:
4822 response = self.perform_request(
4823 req_type="POST", url=add_vdc_rest_url, headers=headers, data=data
4824 )
4825
4826 if response.status_code != 201:
4827 self.logger.debug(
4828 "Create Network POST REST API call failed. "
4829 "Return status code {}, response.text: {}".format(
4830 response.status_code, response.text
4831 )
4832 )
4833 else:
4834 network_task = self.get_task_from_response(response.text)
4835 self.logger.debug(
4836 "Create Network REST : Waiting for Network creation complete"
4837 )
4838 time.sleep(5)
4839 result = self.client.get_task_monitor().wait_for_success(
4840 task=network_task
4841 )
4842
4843 if result.get("status") == "success":
4844 return response.text
4845 else:
4846 self.logger.debug(
4847 "create_network_rest task failed. Network Create response : {}".format(
4848 response.text
4849 )
4850 )
4851 except Exception as exp:
4852 self.logger.debug("create_network_rest : Exception : {} ".format(exp))
4853
4854 return None
4855
4856 def convert_cidr_to_netmask(self, cidr_ip=None):
4857 """
4858 Method converts a CIDR prefix into a dotted-decimal netmask
4859 Args:
4860 cidr_ip : CIDR IP address
4861 Returns:
4862 netmask : Converted netmask
4863 """
4864 if cidr_ip is not None:
4865 if "/" in cidr_ip:
4866 _, net_bits = cidr_ip.split("/")
4867 netmask = socket.inet_ntoa(
4868 struct.pack(">I", (0xFFFFFFFF << (32 - int(net_bits))) & 0xFFFFFFFF)
4869 )
4870 else:
4871 netmask = cidr_ip
4872
4873 return netmask
4874
4875 return None
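# Quick illustration of convert_cidr_to_netmask() (values computed from the
# bit-shift above):
#     "10.10.0.0/24" -> "255.255.255.0"
#     "10.10.0.0/28" -> "255.255.255.240"
#     "255.255.0.0"  -> "255.255.0.0"   (no "/" means the value is returned unchanged)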
4876
4877 def get_provider_rest(self, vca=None):
4878 """
4879 Method gets provider vdc view from vcloud director
4880
4881 Args:
4882 vca - is an active client connection used to authenticate the request.
4885
4886 Returns:
4887 The XML content of the response, or None
4888 """
4889 url_list = [self.url, "/api/admin"]
4890
4891 if vca:
4892 headers = {
4893 "Accept": "application/*+xml;version=" + API_VERSION,
4894 "x-vcloud-authorization": self.client._session.headers[
4895 "x-vcloud-authorization"
4896 ],
4897 }
4898 response = self.perform_request(
4899 req_type="GET", url="".join(url_list), headers=headers
4900 )
4901
4902 if response.status_code == requests.codes.ok:
4903 return response.text
4904
4905 return None
4906
4907 def create_vdc(self, vdc_name=None):
4908 vdc_dict = {}
4909 xml_content = self.create_vdc_from_tmpl_rest(vdc_name=vdc_name)
4910
4911 if xml_content is not None:
4912 try:
4913 task_resp_xmlroot = XmlElementTree.fromstring(xml_content)
4914 for child in task_resp_xmlroot:
4915 if child.tag.split("}")[1] == "Owner":
4916 vdc_id = child.attrib.get("href").split("/")[-1]
4917 vdc_dict[vdc_id] = task_resp_xmlroot.get("href")
4918
4919 return vdc_dict
4920 except Exception:
4921 self.logger.debug("Respond body {}".format(xml_content))
4922
4923 return None
4924
4925 def create_vdc_from_tmpl_rest(self, vdc_name=None):
4926 """
4927 Method create vdc in vCloud director based on VDC template.
4928 it uses pre-defined template.
4929
4930 Args:
4931 vdc_name - name of a new vdc.
4932
4933 Returns:
4934 The XML content of the response, or None
4935 """
4936 # prerequisite: at least one vdc template should be available in vCD
4937 self.logger.info("Creating new vdc {}".format(vdc_name))
4938 vca = self.connect_as_admin()
4939
4940 if not vca:
4941 raise vimconn.VimConnConnectionException("Failed to connect vCD")
4942
4943 if vdc_name is None:
4944 return None
4945
4946 url_list = [self.url, "/api/vdcTemplates"]
4947 vm_list_rest_call = "".join(url_list)
4948 headers = {
4949 "Accept": "application/*+xml;version=" + API_VERSION,
4950 "x-vcloud-authorization": vca._session.headers["x-vcloud-authorization"],
4951 }
4952 response = self.perform_request(
4953 req_type="GET", url=vm_list_rest_call, headers=headers
4954 )
4955
4956 # container url to a template
4957 vdc_template_ref = None
4958 try:
4959 vm_list_xmlroot = XmlElementTree.fromstring(response.text)
4960 for child in vm_list_xmlroot:
4961 # application/vnd.vmware.admin.providervdc+xml
4962 # we need to find a template from which we instantiate the VDC
4963 if child.tag.split("}")[1] == "VdcTemplate":
4964 if (
4965 child.attrib.get("type")
4966 == "application/vnd.vmware.admin.vdcTemplate+xml"
4967 ):
4968 vdc_template_ref = child.attrib.get("href")
4969 except Exception:
4970 self.logger.debug(
4971 "Failed parse respond for rest api call {}".format(vm_list_rest_call)
4972 )
4973 self.logger.debug("Respond body {}".format(response.text))
4974
4975 return None
4976
4977 # if we did not find the required predefined template we return None
4978 if vdc_template_ref is None:
4979 return None
4980
4981 try:
4982 # instantiate vdc
4983 url_list = [self.url, "/api/org/", self.org_uuid, "/action/instantiate"]
4984 vm_list_rest_call = "".join(url_list)
4985 data = """<InstantiateVdcTemplateParams name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5">
4986 <Source href="{1:s}"></Source>
4987 <Description>openmano</Description>
4988 </InstantiateVdcTemplateParams>""".format(
4989 vdc_name, vdc_template_ref
4990 )
4991 headers[
4992 "Content-Type"
4993 ] = "application/vnd.vmware.vcloud.instantiateVdcTemplateParams+xml"
4994 response = self.perform_request(
4995 req_type="POST", url=vm_list_rest_call, headers=headers, data=data
4996 )
4997 vdc_task = self.get_task_from_response(response.text)
4998 self.client.get_task_monitor().wait_for_success(task=vdc_task)
4999
5000 # if all is ok we return the response content, otherwise None by default
5001 if response.status_code >= 200 and response.status_code < 300:
5002 return response.text
5003
5004 return None
5005 except Exception:
5006 self.logger.debug(
5007 "Failed parse respond for rest api call {}".format(vm_list_rest_call)
5008 )
5009 self.logger.debug("Respond body {}".format(response.text))
5010
5011 return None
5012
5013 def get_vapp_details_rest(self, vapp_uuid=None, need_admin_access=False):
5014 """
5015 Method retrieves vApp details from vCloud Director
5016
5017 Args:
5018 vapp_uuid - is vapp identifier.
5019
5020 Returns:
5021 A dictionary with the parsed vApp details (empty on failure)
5022 """
5023 parsed_respond = {}
5024 vca = None
5025
5026 if need_admin_access:
5027 vca = self.connect_as_admin()
5028 else:
5029 vca = self.client
5030
5031 if not vca:
5032 raise vimconn.VimConnConnectionException("Failed to connect vCD")
5033 if vapp_uuid is None:
5034 return None
5035
5036 url_list = [self.url, "/api/vApp/vapp-", vapp_uuid]
5037 get_vapp_restcall = "".join(url_list)
5038
5039 if vca._session:
5040 headers = {
5041 "Accept": "application/*+xml;version=" + API_VERSION,
5042 "x-vcloud-authorization": vca._session.headers[
5043 "x-vcloud-authorization"
5044 ],
5045 }
5046 response = self.perform_request(
5047 req_type="GET", url=get_vapp_restcall, headers=headers
5048 )
5049
5050 if response.status_code == 403:
5051 if need_admin_access is False:
5052 response = self.retry_rest("GET", get_vapp_restcall)
5053
5054 if response.status_code != requests.codes.ok:
5055 self.logger.debug(
5056 "REST API call {} failed. Return status code {}".format(
5057 get_vapp_restcall, response.status_code
5058 )
5059 )
5060
5061 return parsed_respond
5062
5063 try:
5064 xmlroot_respond = XmlElementTree.fromstring(response.text)
5065 parsed_respond["ovfDescriptorUploaded"] = xmlroot_respond.attrib[
5066 "ovfDescriptorUploaded"
5067 ]
5068 namespaces = {
5069 "vssd": "http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData",
5070 "ovf": "http://schemas.dmtf.org/ovf/envelope/1",
5071 "vmw": "http://www.vmware.com/schema/ovf",
5072 "vm": "http://www.vmware.com/vcloud/v1.5",
5073 "rasd": "http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData",
5074 "vmext": "http://www.vmware.com/vcloud/extension/v1.5",
5075 "xmlns": "http://www.vmware.com/vcloud/v1.5",
5076 }
5077
5078 created_section = xmlroot_respond.find("vm:DateCreated", namespaces)
5079 if created_section is not None:
5080 parsed_respond["created"] = created_section.text
5081
5082 network_section = xmlroot_respond.find(
5083 "vm:NetworkConfigSection/vm:NetworkConfig", namespaces
5084 )
5085 if (
5086 network_section is not None
5087 and "networkName" in network_section.attrib
5088 ):
5089 parsed_respond["networkname"] = network_section.attrib[
5090 "networkName"
5091 ]
5092
5093 ipscopes_section = xmlroot_respond.find(
5094 "vm:NetworkConfigSection/vm:NetworkConfig/vm:Configuration/vm:IpScopes",
5095 namespaces,
5096 )
5097 if ipscopes_section is not None:
5098 for ipscope in ipscopes_section:
5099 for scope in ipscope:
5100 tag_key = scope.tag.split("}")[1]
5101 if tag_key == "IpRanges":
5102 ip_ranges = list(scope)
5103 for ipblock in ip_ranges:
5104 for block in ipblock:
5105 parsed_respond[
5106 block.tag.split("}")[1]
5107 ] = block.text
5108 else:
5109 parsed_respond[tag_key] = scope.text
5110
5111 # parse children section for other attrib
5112 children_section = xmlroot_respond.find("vm:Children/", namespaces)
5113 if children_section is not None:
5114 parsed_respond["name"] = children_section.attrib["name"]
5115 parsed_respond["nestedHypervisorEnabled"] = (
5116 children_section.attrib["nestedHypervisorEnabled"]
5117 if "nestedHypervisorEnabled" in children_section.attrib
5118 else None
5119 )
5120 parsed_respond["deployed"] = children_section.attrib["deployed"]
5121 parsed_respond["status"] = children_section.attrib["status"]
5122 parsed_respond["vmuuid"] = children_section.attrib["id"].split(":")[
5123 -1
5124 ]
5125 network_adapter = children_section.find(
5126 "vm:NetworkConnectionSection", namespaces
5127 )
5128 nic_list = []
5129 for adapters in network_adapter:
5130 adapter_key = adapters.tag.split("}")[1]
5131 if adapter_key == "PrimaryNetworkConnectionIndex":
5132 parsed_respond["primarynetwork"] = adapters.text
5133
5134 if adapter_key == "NetworkConnection":
5135 vnic = {}
5136 if "network" in adapters.attrib:
5137 vnic["network"] = adapters.attrib["network"]
5138 for adapter in adapters:
5139 setting_key = adapter.tag.split("}")[1]
5140 vnic[setting_key] = adapter.text
5141 nic_list.append(vnic)
5142
5143 for link in children_section:
5144 if link.tag.split("}")[1] == "Link" and "rel" in link.attrib:
5145 if link.attrib["rel"] == "screen:acquireTicket":
5146 parsed_respond["acquireTicket"] = link.attrib
5147
5148 if link.attrib["rel"] == "screen:acquireMksTicket":
5149 parsed_respond["acquireMksTicket"] = link.attrib
5150
5151 parsed_respond["interfaces"] = nic_list
5152 vCloud_extension_section = children_section.find(
5153 "xmlns:VCloudExtension", namespaces
5154 )
5155 if vCloud_extension_section is not None:
5156 vm_vcenter_info = {}
5157 vim_info = vCloud_extension_section.find(
5158 "vmext:VmVimInfo", namespaces
5159 )
5160 vmext = vim_info.find("vmext:VmVimObjectRef", namespaces)
5161
5162 if vmext is not None:
5163 vm_vcenter_info["vm_moref_id"] = vmext.find(
5164 "vmext:MoRef", namespaces
5165 ).text
5166
5167 parsed_respond["vm_vcenter_info"] = vm_vcenter_info
5168
5169 virtual_hardware_section = children_section.find(
5170 "ovf:VirtualHardwareSection", namespaces
5171 )
5172 vm_virtual_hardware_info = {}
5173 if virtual_hardware_section is not None:
5174 for item in virtual_hardware_section.iterfind(
5175 "ovf:Item", namespaces
5176 ):
5177 if (
5178 item.find("rasd:Description", namespaces).text
5179 == "Hard disk"
5180 ):
5181 disk_size = item.find(
5182 "rasd:HostResource", namespaces
5183 ).attrib["{" + namespaces["vm"] + "}capacity"]
5184 vm_virtual_hardware_info["disk_size"] = disk_size
5185 break
5186
5187 for link in virtual_hardware_section:
5188 if (
5189 link.tag.split("}")[1] == "Link"
5190 and "rel" in link.attrib
5191 ):
5192 if link.attrib["rel"] == "edit" and link.attrib[
5193 "href"
5194 ].endswith("/disks"):
5195 vm_virtual_hardware_info[
5196 "disk_edit_href"
5197 ] = link.attrib["href"]
5198 break
5199
5200 parsed_respond["vm_virtual_hardware"] = vm_virtual_hardware_info
5201 except Exception as exp:
5202 self.logger.info(
5203 "Error occurred calling rest api for getting vApp details {}".format(
5204 exp
5205 )
5206 )
5207
5208 return parsed_respond
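# Illustrative subset of the keys that get_vapp_details_rest() populates in
# parsed_respond (values are placeholders, not real data):
#
#     {"ovfDescriptorUploaded": "true", "created": "<timestamp>",
#      "networkname": "<net-name>", "name": "<vm-name>", "status": "4",
#      "vmuuid": "<vm-uuid>", "interfaces": [{"network": "<net-name>", ...}],
#      "vm_vcenter_info": {"vm_moref_id": "vm-123"},
#      "vm_virtual_hardware": {"disk_size": "40960", "disk_edit_href": "<href>"}}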
5209
5210 def modify_vm_disk(self, vapp_uuid, flavor_disk):
5211 """
5212 Method resizes the VM disk when the flavor disk is larger than the current disk
5213
5214 Args:
5215 vapp_uuid - is vapp identifier.
5216 flavor_disk - disk size as specified in VNFD (flavor)
5217
5218 Returns:
5219 The resize status (True on success or when no resize is needed), otherwise False or None
5220 """
5221 status = None
5222 try:
5223 # Flavor disk is in GB convert it into MB
5224 flavor_disk = int(flavor_disk) * 1024
5225 vm_details = self.get_vapp_details_rest(vapp_uuid)
5226
5227 if vm_details:
5228 vm_name = vm_details["name"]
5229 self.logger.info("VM: {} flavor_disk :{}".format(vm_name, flavor_disk))
5230
5231 if vm_details and "vm_virtual_hardware" in vm_details:
5232 vm_disk = int(vm_details["vm_virtual_hardware"]["disk_size"])
5233 disk_edit_href = vm_details["vm_virtual_hardware"]["disk_edit_href"]
5234 self.logger.info("VM: {} VM_disk :{}".format(vm_name, vm_disk))
5235
5236 if flavor_disk > vm_disk:
5237 status = self.modify_vm_disk_rest(disk_edit_href, flavor_disk)
5238 self.logger.info(
5239 "Modify disk of VM {} from {} to {} MB".format(
5240 vm_name, vm_disk, flavor_disk
5241 )
5242 )
5243 else:
5244 status = True
5245 self.logger.info("No need to modify disk of VM {}".format(vm_name))
5246
5247 return status
5248 except Exception as exp:
5249 self.logger.info("Error occurred while modifing disk size {}".format(exp))
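# Hedged usage sketch: a 40 GB flavor disk is converted to 40960 MB above and a
# resize is only requested when it exceeds the current virtual disk size. "vim"
# and vapp_uuid are illustrative names:
#
#     vim.modify_vm_disk(vapp_uuid, flavor_disk=40)   # grows the disk to 40960 MB if smaller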
5250
5251 def modify_vm_disk_rest(self, disk_href, disk_size):
5252 """
5253 Method modifies the VM disk size through the vCD REST API
5254
5255 Args:
5256 disk_href - vCD API URL to GET and PUT disk data
5257 disk_size - disk size as specified in VNFD (flavor)
5258
5259 Returns:
5260 True on success, False if the resize task fails, or None on error
5261 """
5262 if disk_href is None or disk_size is None:
5263 return None
5264
5265 if self.client._session:
5266 headers = {
5267 "Accept": "application/*+xml;version=" + API_VERSION,
5268 "x-vcloud-authorization": self.client._session.headers[
5269 "x-vcloud-authorization"
5270 ],
5271 }
5272 response = self.perform_request(
5273 req_type="GET", url=disk_href, headers=headers
5274 )
5275
5276 if response.status_code == 403:
5277 response = self.retry_rest("GET", disk_href)
5278
5279 if response.status_code != requests.codes.ok:
5280 self.logger.debug(
5281 "GET REST API call {} failed. Return status code {}".format(
5282 disk_href, response.status_code
5283 )
5284 )
5285
5286 return None
5287
5288 try:
5289 lxmlroot_respond = lxmlElementTree.fromstring(response.content)
5290 namespaces = {
5291 prefix: uri for prefix, uri in lxmlroot_respond.nsmap.items() if prefix
5292 }
5293 namespaces["xmlns"] = "http://www.vmware.com/vcloud/v1.5"
5294
5295 for item in lxmlroot_respond.iterfind("xmlns:Item", namespaces):
5296 if item.find("rasd:Description", namespaces).text == "Hard disk":
5297 disk_item = item.find("rasd:HostResource", namespaces)
5298 if disk_item is not None:
5299 disk_item.attrib["{" + namespaces["xmlns"] + "}capacity"] = str(
5300 disk_size
5301 )
5302 break
5303
5304 data = lxmlElementTree.tostring(
5305 lxmlroot_respond, encoding="utf8", method="xml", xml_declaration=True
5306 )
5307
5308 # Send PUT request to modify disk size
5309 headers[
5310 "Content-Type"
5311 ] = "application/vnd.vmware.vcloud.rasdItemsList+xml; charset=ISO-8859-1"
5312
5313 response = self.perform_request(
5314 req_type="PUT", url=disk_href, headers=headers, data=data
5315 )
5316 if response.status_code == 403:
5317 add_headers = {"Content-Type": headers["Content-Type"]}
5318 response = self.retry_rest("PUT", disk_href, add_headers, data)
5319
5320 if response.status_code != 202:
5321 self.logger.debug(
5322 "PUT REST API call {} failed. Return status code {}".format(
5323 disk_href, response.status_code
5324 )
5325 )
5326 else:
5327 modify_disk_task = self.get_task_from_response(response.text)
5328 result = self.client.get_task_monitor().wait_for_success(
5329 task=modify_disk_task
5330 )
5331 if result.get("status") == "success":
5332 return True
5333 else:
5334 return False
5335
5336 return None
5337 except Exception as exp:
5338 self.logger.info(
5339 "Error occurred calling rest api for modifing disk size {}".format(exp)
5340 )
5341
5342 return None
5343
5344 def add_serial_device(self, vapp_uuid):
5345 """
5346 Method to attach a serial device to a VM
5347
5348 Args:
5349 vapp_uuid - uuid of vApp/VM
5350
5351 Returns:
5352 """
5353 self.logger.info("Add serial devices into vApp {}".format(vapp_uuid))
5354 _, content = self.get_vcenter_content()
5355 vm_moref_id = self.get_vm_moref_id(vapp_uuid)
5356
5357 if vm_moref_id:
5358 try:
5359 host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
5360 self.logger.info(
5361 "VM {} is currently on host {}".format(vm_obj, host_obj)
5362 )
5363 if host_obj and vm_obj:
5364 spec = vim.vm.ConfigSpec()
5365 spec.deviceChange = []
5366 serial_spec = vim.vm.device.VirtualDeviceSpec()
5367 serial_spec.operation = "add"
5368 serial_port = vim.vm.device.VirtualSerialPort()
5369 serial_port.yieldOnPoll = True
5370 backing = serial_port.URIBackingInfo()
5371 backing.serviceURI = "tcp://:65500"
5372 backing.direction = "server"
5373 serial_port.backing = backing
5374 serial_spec.device = serial_port
5375 spec.deviceChange.append(serial_spec)
5376 vm_obj.ReconfigVM_Task(spec=spec)
5377 self.logger.info("Adding serial device to VM {}".format(vm_obj))
5378 except vmodl.MethodFault as error:
5379 self.logger.error("Error occurred while adding PCI devices {} ", error)
5380
5381 def add_pci_devices(self, vapp_uuid, pci_devices, vmname_andid):
5382 """
5383 Method to attach pci devices to VM
5384
5385 Args:
5386 vapp_uuid - uuid of vApp/VM
5387 pci_devices - pci devices information as specified in VNFD (flavor)
5388
5389 Returns:
5390 The status of the add pci device task, the vm object and
5391 the vcenter connection object
5392 """
5393 vm_obj = None
5394 self.logger.info(
5395 "Add pci devices {} into vApp {}".format(pci_devices, vapp_uuid)
5396 )
5397 vcenter_conect, content = self.get_vcenter_content()
5398 vm_moref_id = self.get_vm_moref_id(vapp_uuid)
5399
5400 if vm_moref_id:
5401 try:
5402 no_of_pci_devices = len(pci_devices)
5403 if no_of_pci_devices > 0:
5404 # Get VM and its host
5405 host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
5406 self.logger.info(
5407 "VM {} is currently on host {}".format(vm_obj, host_obj)
5408 )
5409
5410 if host_obj and vm_obj:
5411 # get PCI devices from host on which vapp is currently installed
5412 avilable_pci_devices = self.get_pci_devices(
5413 host_obj, no_of_pci_devices
5414 )
5415
5416 if avilable_pci_devices is None:
5417 # find other hosts with active pci devices
5418 (
5419 new_host_obj,
5420 avilable_pci_devices,
5421 ) = self.get_host_and_PCIdevices(content, no_of_pci_devices)
5422
5423 if (
5424 new_host_obj is not None
5425 and avilable_pci_devices is not None
5426 and len(avilable_pci_devices) > 0
5427 ):
5428 # Migrate vm to the host where PCI devices are available
5429 self.logger.info(
5430 "Relocate VM {} on new host {}".format(
5431 vm_obj, new_host_obj
5432 )
5433 )
5434
5435 task = self.relocate_vm(new_host_obj, vm_obj)
5436 if task is not None:
5437 result = self.wait_for_vcenter_task(
5438 task, vcenter_conect
5439 )
5440 self.logger.info(
5441 "Migrate VM status: {}".format(result)
5442 )
5443 host_obj = new_host_obj
5444 else:
5445 self.logger.info(
5446 "Fail to migrate VM : {}".format(result)
5447 )
5448 raise vimconn.VimConnNotFoundException(
5449 "Fail to migrate VM : {} to host {}".format(
5450 vmname_andid, new_host_obj
5451 )
5452 )
5453
5454 if (
5455 host_obj is not None
5456 and avilable_pci_devices is not None
5457 and len(avilable_pci_devices) > 0
5458 ):
5459 # Add PCI devices one by one
5460 for pci_device in avilable_pci_devices:
5461 task = self.add_pci_to_vm(host_obj, vm_obj, pci_device)
5462 if task:
5463 status = self.wait_for_vcenter_task(
5464 task, vcenter_conect
5465 )
5466
5467 if status:
5468 self.logger.info(
5469 "Added PCI device {} to VM {}".format(
5470 pci_device, str(vm_obj)
5471 )
5472 )
5473 else:
5474 self.logger.error(
5475 "Fail to add PCI device {} to VM {}".format(
5476 pci_device, str(vm_obj)
5477 )
5478 )
5479
5480 return True, vm_obj, vcenter_conect
5481 else:
5482 self.logger.error(
5483 "Currently there is no host with"
5484 " {} number of avaialble PCI devices required for VM {}".format(
5485 no_of_pci_devices, vmname_andid
5486 )
5487 )
5488
5489 raise vimconn.VimConnNotFoundException(
5490 "Currently there is no host with {} "
5491 "number of avaialble PCI devices required for VM {}".format(
5492 no_of_pci_devices, vmname_andid
5493 )
5494 )
5495 else:
5496 self.logger.debug(
5497 "No infromation about PCI devices {} ", pci_devices
5498 )
5499 except vmodl.MethodFault as error:
5500 self.logger.error("Error occurred while adding PCI devices {} ", error)
5501
5502 return None, vm_obj, vcenter_conect
5503
5504 def get_vm_obj(self, content, mob_id):
5505 """
5506 Method to get the vsphere VM object associated with a given morf ID
5507 Args:
5508 vapp_uuid - uuid of vApp/VM
5509 content - vCenter content object
5510 mob_id - mob_id of VM
5511
5512 Returns:
5513 VM and host object
5514 """
5515 vm_obj = None
5516 host_obj = None
5517
5518 try:
5519 container = content.viewManager.CreateContainerView(
5520 content.rootFolder, [vim.VirtualMachine], True
5521 )
5522 for vm in container.view:
5523 mobID = vm._GetMoId()
5524
5525 if mobID == mob_id:
5526 vm_obj = vm
5527 host_obj = vm_obj.runtime.host
5528 break
5529 except Exception as exp:
5530 self.logger.error("Error occurred while finding VM object : {}".format(exp))
5531
5532 return host_obj, vm_obj
5533
5534 def get_pci_devices(self, host, need_devices):
5535 """
5536         Method to get the details of PCI devices on a given host
5537 Args:
5538 host - vSphere host object
5539 need_devices - number of pci devices needed on host
5540
5541 Returns:
5542 array of pci devices
5543 """
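        # Availability is computed in two passes below: first collect every device
        # whose entry in host.config.pciPassthruInfo has passthruActive set, then
        # drop any device already claimed (VirtualPCIPassthrough backing) by a
        # powered-on VM on the same host. Only if at least need_devices remain is a
        # slice of that list returned.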
5544 all_devices = []
5545 all_device_ids = []
5546 used_devices_ids = []
5547
5548 try:
5549 if host:
5550 pciPassthruInfo = host.config.pciPassthruInfo
5551 pciDevies = host.hardware.pciDevice
5552
5553 for pci_status in pciPassthruInfo:
5554 if pci_status.passthruActive:
5555 for device in pciDevies:
5556 if device.id == pci_status.id:
5557 all_device_ids.append(device.id)
5558 all_devices.append(device)
5559
5560 # check if devices are in use
5561 avalible_devices = all_devices
5562 for vm in host.vm:
5563 if vm.runtime.powerState == vim.VirtualMachinePowerState.poweredOn:
5564 vm_devices = vm.config.hardware.device
5565 for device in vm_devices:
5566 if type(device) is vim.vm.device.VirtualPCIPassthrough:
5567 if device.backing.id in all_device_ids:
5568                                     for use_device in list(avalible_devices):
5569 if use_device.id == device.backing.id:
5570 avalible_devices.remove(use_device)
5571
5572 used_devices_ids.append(device.backing.id)
5573 self.logger.debug(
5574                                         "Device {} ({}) "
5575                                         "is in use".format(device.backing.id, device)
5576 )
5577 if len(avalible_devices) < need_devices:
5578 self.logger.debug(
5579                     "Host {} does not have {} active devices".format(
5580 host, need_devices
5581 )
5582 )
5583 self.logger.debug(
5584                     "Found only {} available devices: {}".format(
5585 len(avalible_devices), avalible_devices
5586 )
5587 )
5588
5589 return None
5590 else:
5591 required_devices = avalible_devices[:need_devices]
5592 self.logger.info(
5593 "Found {} PCI devices on host {} but required only {}".format(
5594 len(avalible_devices), host, need_devices
5595 )
5596 )
5597 self.logger.info(
5598                     "Returning {} devices as {}".format(need_devices, required_devices)
5599 )
5600
5601 return required_devices
5602 except Exception as exp:
5603 self.logger.error(
5604 "Error {} occurred while finding pci devices on host: {}".format(
5605 exp, host
5606 )
5607 )
5608
5609 return None
5610
5611 def get_host_and_PCIdevices(self, content, need_devices):
5612 """
5613         Method to get the details of PCI devices on all hosts
5614
5615 Args:
5616             content - vCenter content object
5617 need_devices - number of pci devices needed on host
5618
5619 Returns:
5620 array of pci devices and host object
5621 """
5622 host_obj = None
5623 pci_device_objs = None
5624
5625 try:
5626 if content:
5627 container = content.viewManager.CreateContainerView(
5628 content.rootFolder, [vim.HostSystem], True
5629 )
5630 for host in container.view:
5631 devices = self.get_pci_devices(host, need_devices)
5632
5633 if devices:
5634 host_obj = host
5635 pci_device_objs = devices
5636 break
5637 except Exception as exp:
5638 self.logger.error(
5639 "Error {} occurred while finding pci devices on host: {}".format(
5640 exp, host_obj
5641 )
5642 )
5643
5644 return host_obj, pci_device_objs
5645
5646 def relocate_vm(self, dest_host, vm):
5647 """
5648         Method to relocate a VM to a new host
5649
5650 Args:
5651 dest_host - vSphere host object
5652 vm - vSphere VM object
5653
5654 Returns:
5655 task object
5656 """
5657 task = None
5658
5659 try:
5660 relocate_spec = vim.vm.RelocateSpec(host=dest_host)
5661 task = vm.Relocate(relocate_spec)
5662 self.logger.info(
5663 "Migrating {} to destination host {}".format(vm, dest_host)
5664 )
5665 except Exception as exp:
5666 self.logger.error(
5667                 "Error occurred while relocating VM {} to new host {}: {}".format(
5668                     vm, dest_host, exp
5669 )
5670 )
5671
5672 return task
5673
5674 def wait_for_vcenter_task(self, task, actionName="job", hideResult=False):
5675 """
5676 Waits and provides updates on a vSphere task
5677 """
5678 while task.info.state == vim.TaskInfo.State.running:
5679 time.sleep(2)
5680
5681 if task.info.state == vim.TaskInfo.State.success:
5682 if task.info.result is not None and not hideResult:
5683 self.logger.info(
5684 "{} completed successfully, result: {}".format(
5685 actionName, task.info.result
5686 )
5687 )
5688 else:
5689 self.logger.info("Task {} completed successfully.".format(actionName))
5690 else:
5691 self.logger.error(
5692 "{} did not complete successfully: {} ".format(
5693 actionName, task.info.error
5694 )
5695 )
5696
5697 return task.info.result
5698
5699 def add_pci_to_vm(self, host_object, vm_object, host_pci_dev):
5700 """
5701 Method to add pci device in given VM
5702
5703 Args:
5704 host_object - vSphere host object
5705 vm_object - vSphere VM object
5706 host_pci_dev - host_pci_dev must be one of the devices from the
5707 host_object.hardware.pciDevice list
5708 which is configured as a PCI passthrough device
5709
5710 Returns:
5711 task object
5712 """
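        # Reconfigure flow implemented below:
        #   1. vm_object.environmentBrowser.QueryConfigTarget() lists the host's
        #      passthrough-capable devices, giving the systemId for each PCI id.
        #   2. A VirtualPCIPassthroughDeviceBackingInfo is built from the host
        #      device and wrapped in a VirtualPCIPassthrough with a temporary key.
        #   3. The device change is applied with ReconfigVM_Task(), whose task is
        #      returned for the caller to wait on.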
5713 task = None
5714
5715 if vm_object and host_object and host_pci_dev:
5716 try:
5717 # Add PCI device to VM
5718 pci_passthroughs = vm_object.environmentBrowser.QueryConfigTarget(
5719 host=None
5720 ).pciPassthrough
5721 systemid_by_pciid = {
5722 item.pciDevice.id: item.systemId for item in pci_passthroughs
5723 }
5724
5725 if host_pci_dev.id not in systemid_by_pciid:
5726 self.logger.error(
5727 "Device {} is not a passthrough device ".format(host_pci_dev)
5728 )
5729 return None
5730
5731                 deviceId = hex(host_pci_dev.deviceId % 2**16)[2:]
5732 backing = vim.VirtualPCIPassthroughDeviceBackingInfo(
5733 deviceId=deviceId,
5734 id=host_pci_dev.id,
5735 systemId=systemid_by_pciid[host_pci_dev.id],
5736 vendorId=host_pci_dev.vendorId,
5737 deviceName=host_pci_dev.deviceName,
5738 )
5739
5740 hba_object = vim.VirtualPCIPassthrough(key=-100, backing=backing)
5741 new_device_config = vim.VirtualDeviceConfigSpec(device=hba_object)
5742 new_device_config.operation = "add"
5743 vmConfigSpec = vim.vm.ConfigSpec()
5744 vmConfigSpec.deviceChange = [new_device_config]
5745 task = vm_object.ReconfigVM_Task(spec=vmConfigSpec)
5746 self.logger.info(
5747 "Adding PCI device {} into VM {} from host {} ".format(
5748 host_pci_dev, vm_object, host_object
5749 )
5750 )
5751 except Exception as exp:
5752 self.logger.error(
5753                     "Error occurred while adding PCI device {} to VM {}: {}".format(
5754 host_pci_dev, vm_object, exp
5755 )
5756 )
5757
5758 return task
5759
5760 def get_vm_vcenter_info(self):
5761 """
5762         Method to get vCenter connection details configured for this VIM
5763 
5764         Returns:
5765             dict with the vCenter ip, port, user and password
5769 """
5770 vm_vcenter_info = {}
5771
5772 if self.vcenter_ip is not None:
5773 vm_vcenter_info["vm_vcenter_ip"] = self.vcenter_ip
5774 else:
5775 raise vimconn.VimConnException(
5776 message="vCenter IP is not provided."
5777 " Please provide vCenter IP while attaching datacenter "
5778 "to tenant in --config"
5779 )
5780
5781 if self.vcenter_port is not None:
5782 vm_vcenter_info["vm_vcenter_port"] = self.vcenter_port
5783 else:
5784 raise vimconn.VimConnException(
5785 message="vCenter port is not provided."
5786 " Please provide vCenter port while attaching datacenter "
5787 "to tenant in --config"
5788 )
5789
5790 if self.vcenter_user is not None:
5791 vm_vcenter_info["vm_vcenter_user"] = self.vcenter_user
5792 else:
5793 raise vimconn.VimConnException(
5794 message="vCenter user is not provided."
5795 " Please provide vCenter user while attaching datacenter "
5796 "to tenant in --config"
5797 )
5798
5799 if self.vcenter_password is not None:
5800 vm_vcenter_info["vm_vcenter_password"] = self.vcenter_password
5801 else:
5802 raise vimconn.VimConnException(
5803 message="vCenter user password is not provided."
5804 " Please provide vCenter user password while attaching datacenter "
5805 "to tenant in --config"
5806 )
5807
5808 return vm_vcenter_info
5809
5810 def get_vm_pci_details(self, vmuuid):
5811 """
5812 Method to get VM PCI device details from vCenter
5813
5814 Args:
5815             vmuuid - uuid of the vApp/VM
5816
5817 Returns:
5818             dict of PCI devices attached to the VM
5819
5820 """
5821 vm_pci_devices_info = {}
5822
5823 try:
5824 _, content = self.get_vcenter_content()
5825 vm_moref_id = self.get_vm_moref_id(vmuuid)
5826 if vm_moref_id:
5827 # Get VM and its host
5828 if content:
5829 host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
5830 if host_obj and vm_obj:
5831 vm_pci_devices_info["host_name"] = host_obj.name
5832 vm_pci_devices_info["host_ip"] = host_obj.config.network.vnic[
5833 0
5834 ].spec.ip.ipAddress
5835
5836 for device in vm_obj.config.hardware.device:
5837 if type(device) == vim.vm.device.VirtualPCIPassthrough:
5838 device_details = {
5839 "devide_id": device.backing.id,
5840 "pciSlotNumber": device.slotInfo.pciSlotNumber,
5841 }
5842 vm_pci_devices_info[
5843 device.deviceInfo.label
5844 ] = device_details
5845 else:
5846 self.logger.error(
5847 "Can not connect to vCenter while getting "
5848                         "PCI devices information"
5849 )
5850
5851 return vm_pci_devices_info
5852 except Exception as exp:
5853 self.logger.error(
5854                 "Error occurred while getting PCI information for VM: {}".format(exp)
5855 )
5856
5857 raise vimconn.VimConnException(message=exp)
5858
5859 def reserve_memory_for_all_vms(self, vapp, memory_mb):
5860 """
5861 Method to reserve memory for all VMs
5862 Args :
5863 vapp - VApp
5864 memory_mb - Memory in MB
5865 Returns:
5866 None
5867 """
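        # REST flow per VM (illustrative summary of the calls below):
        #   GET  {self.url}/api/vApp/vm-<id>/virtualHardwareSection/memory  -> RASD XML
        #   set every <rasd:Reservation> element to the requested amount in MB
        #   PUT  the modified XML back; HTTP 202 returns a vCD task that is awaited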
5868 self.logger.info("Reserve memory for all VMs")
5869
5870 for vms in vapp.get_all_vms():
5871 vm_id = vms.get("id").split(":")[-1]
5872 url_rest_call = "{}/api/vApp/vm-{}/virtualHardwareSection/memory".format(
5873 self.url, vm_id
5874 )
5875 headers = {
5876 "Accept": "application/*+xml;version=" + API_VERSION,
5877 "x-vcloud-authorization": self.client._session.headers[
5878 "x-vcloud-authorization"
5879 ],
5880 }
5881 headers["Content-Type"] = "application/vnd.vmware.vcloud.rasdItem+xml"
5882 response = self.perform_request(
5883 req_type="GET", url=url_rest_call, headers=headers
5884 )
5885
5886 if response.status_code == 403:
5887 response = self.retry_rest("GET", url_rest_call)
5888
5889 if response.status_code != 200:
5890 self.logger.error(
5891                     "REST call {} failed reason : {} "
5892 "status code : {}".format(
5893 url_rest_call, response.text, response.status_code
5894 )
5895 )
5896 raise vimconn.VimConnException(
5897 "reserve_memory_for_all_vms : Failed to get " "memory"
5898 )
5899
5900 bytexml = bytes(bytearray(response.text, encoding="utf-8"))
5901 contentelem = lxmlElementTree.XML(bytexml)
5902 namespaces = {
5903 prefix: uri for prefix, uri in contentelem.nsmap.items() if prefix
5904 }
5905 namespaces["xmlns"] = "http://www.vmware.com/vcloud/v1.5"
5906
5907 # Find the reservation element in the response
5908 memelem_list = contentelem.findall(".//rasd:Reservation", namespaces)
5909 for memelem in memelem_list:
5910 memelem.text = str(memory_mb)
5911
5912 newdata = lxmlElementTree.tostring(contentelem, pretty_print=True)
5913
5914 response = self.perform_request(
5915 req_type="PUT", url=url_rest_call, headers=headers, data=newdata
5916 )
5917
5918 if response.status_code == 403:
5919 add_headers = {"Content-Type": headers["Content-Type"]}
5920 response = self.retry_rest("PUT", url_rest_call, add_headers, newdata)
5921
5922 if response.status_code != 202:
5923 self.logger.error(
5924                     "REST call {} failed reason : {} "
5925 "status code : {} ".format(
5926 url_rest_call, response.text, response.status_code
5927 )
5928 )
5929 raise vimconn.VimConnException(
5930 "reserve_memory_for_all_vms : Failed to update "
5931 "virtual hardware memory section"
5932 )
5933 else:
5934 mem_task = self.get_task_from_response(response.text)
5935 result = self.client.get_task_monitor().wait_for_success(task=mem_task)
5936
5937 if result.get("status") == "success":
5938 self.logger.info(
5939 "reserve_memory_for_all_vms(): VM {} succeeded ".format(vm_id)
5940 )
5941 else:
5942 self.logger.error(
5943 "reserve_memory_for_all_vms(): VM {} failed ".format(vm_id)
5944 )
5945
5946 def connect_vapp_to_org_vdc_network(self, vapp_id, net_name):
5947 """
5948 Configure VApp network config with org vdc network
5949 Args :
5950             vapp_id - vApp id, net_name - name of the org VDC network
5951 Returns:
5952 None
5953 """
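        # The vApp is bridged to the org VDC network by appending a <NetworkConfig>
        # whose <ParentNetwork> href points at the org VDC network and whose
        # <FenceMode> is "bridged", then PUTting the whole networkConfigSection back.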
5954
5955 self.logger.info(
5956 "Connecting vapp {} to org vdc network {}".format(vapp_id, net_name)
5957 )
5958
5959 url_rest_call = "{}/api/vApp/vapp-{}/networkConfigSection/".format(
5960 self.url, vapp_id
5961 )
5962
5963 headers = {
5964 "Accept": "application/*+xml;version=" + API_VERSION,
5965 "x-vcloud-authorization": self.client._session.headers[
5966 "x-vcloud-authorization"
5967 ],
5968 }
5969 response = self.perform_request(
5970 req_type="GET", url=url_rest_call, headers=headers
5971 )
5972
5973 if response.status_code == 403:
5974 response = self.retry_rest("GET", url_rest_call)
5975
5976 if response.status_code != 200:
5977 self.logger.error(
5978                 "REST call {} failed reason : {} "
5979 "status code : {}".format(
5980 url_rest_call, response.text, response.status_code
5981 )
5982 )
5983 raise vimconn.VimConnException(
5984 "connect_vapp_to_org_vdc_network : Failed to get "
5985 "network config section"
5986 )
5987
5988 data = response.text
5989 headers[
5990 "Content-Type"
5991 ] = "application/vnd.vmware.vcloud.networkConfigSection+xml"
5992 net_id = self.get_network_id_by_name(net_name)
5993 if not net_id:
5994 raise vimconn.VimConnException(
5995 "connect_vapp_to_org_vdc_network : Failed to find " "existing network"
5996 )
5997
5998 bytexml = bytes(bytearray(data, encoding="utf-8"))
5999 newelem = lxmlElementTree.XML(bytexml)
6000 namespaces = {prefix: uri for prefix, uri in newelem.nsmap.items() if prefix}
6001 namespaces["xmlns"] = "http://www.vmware.com/vcloud/v1.5"
6002 nwcfglist = newelem.findall(".//xmlns:NetworkConfig", namespaces)
6003
6004 # VCD 9.7 returns an incorrect parentnetwork element. Fix it before PUT operation
6005 parentnetworklist = newelem.findall(".//xmlns:ParentNetwork", namespaces)
6006 if parentnetworklist:
6007 for pn in parentnetworklist:
6008 if "href" not in pn.keys():
6009 id_val = pn.get("id")
6010 href_val = "{}/api/network/{}".format(self.url, id_val)
6011 pn.set("href", href_val)
6012
6013 newstr = """<NetworkConfig networkName="{}">
6014 <Configuration>
6015 <ParentNetwork href="{}/api/network/{}"/>
6016 <FenceMode>bridged</FenceMode>
6017 </Configuration>
6018 </NetworkConfig>
6019 """.format(
6020 net_name, self.url, net_id
6021 )
6022 newcfgelem = lxmlElementTree.fromstring(newstr)
6023 if nwcfglist:
6024 nwcfglist[0].addnext(newcfgelem)
6025
6026 newdata = lxmlElementTree.tostring(newelem, pretty_print=True)
6027
6028 response = self.perform_request(
6029 req_type="PUT", url=url_rest_call, headers=headers, data=newdata
6030 )
6031
6032 if response.status_code == 403:
6033 add_headers = {"Content-Type": headers["Content-Type"]}
6034 response = self.retry_rest("PUT", url_rest_call, add_headers, newdata)
6035
6036 if response.status_code != 202:
6037 self.logger.error(
6038                 "REST call {} failed reason : {} "
6039 "status code : {} ".format(
6040 url_rest_call, response.text, response.status_code
6041 )
6042 )
6043 raise vimconn.VimConnException(
6044 "connect_vapp_to_org_vdc_network : Failed to update "
6045 "network config section"
6046 )
6047 else:
6048 vapp_task = self.get_task_from_response(response.text)
6049 result = self.client.get_task_monitor().wait_for_success(task=vapp_task)
6050 if result.get("status") == "success":
6051 self.logger.info(
6052 "connect_vapp_to_org_vdc_network(): Vapp {} connected to "
6053 "network {}".format(vapp_id, net_name)
6054 )
6055 else:
6056 self.logger.error(
6057 "connect_vapp_to_org_vdc_network(): Vapp {} failed to "
6058 "connect to network {}".format(vapp_id, net_name)
6059 )
6060
6061 def remove_primary_network_adapter_from_all_vms(self, vapp):
6062 """
6063         Method to remove the primary network adapter from all VMs in the vApp
6064 Args :
6065 vapp - VApp
6066 Returns:
6067 None
6068 """
6069 self.logger.info("Removing network adapter from all VMs")
6070
6071 for vms in vapp.get_all_vms():
6072 vm_id = vms.get("id").split(":")[-1]
6073
6074 url_rest_call = "{}/api/vApp/vm-{}/networkConnectionSection/".format(
6075 self.url, vm_id
6076 )
6077
6078 headers = {
6079 "Accept": "application/*+xml;version=" + API_VERSION,
6080 "x-vcloud-authorization": self.client._session.headers[
6081 "x-vcloud-authorization"
6082 ],
6083 }
6084 response = self.perform_request(
6085 req_type="GET", url=url_rest_call, headers=headers
6086 )
6087
6088 if response.status_code == 403:
6089 response = self.retry_rest("GET", url_rest_call)
6090
6091 if response.status_code != 200:
6092 self.logger.error(
6093                     "REST call {} failed reason : {} "
6094 "status code : {}".format(
6095 url_rest_call, response.text, response.status_code
6096 )
6097 )
6098 raise vimconn.VimConnException(
6099 "remove_primary_network_adapter : Failed to get "
6100 "network connection section"
6101 )
6102
6103 data = response.text
6104 data = data.split('<Link rel="edit"')[0]
6105
6106 headers[
6107 "Content-Type"
6108 ] = "application/vnd.vmware.vcloud.networkConnectionSection+xml"
6109
6110 newdata = """<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
6111 <NetworkConnectionSection xmlns="http://www.vmware.com/vcloud/v1.5"
6112 xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1"
6113 xmlns:vssd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData"
6114 xmlns:common="http://schemas.dmtf.org/wbem/wscim/1/common"
6115 xmlns:rasd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData"
6116 xmlns:vmw="http://www.vmware.com/schema/ovf"
6117 xmlns:ovfenv="http://schemas.dmtf.org/ovf/environment/1"
6118 xmlns:vmext="http://www.vmware.com/vcloud/extension/v1.5"
6119 xmlns:ns9="http://www.vmware.com/vcloud/versions"
6120 href="{url}" type="application/vnd.vmware.vcloud.networkConnectionSection+xml"
6121 ovf:required="false">
6122 <ovf:Info>Specifies the available VM network connections</ovf:Info>
6123 <PrimaryNetworkConnectionIndex>0</PrimaryNetworkConnectionIndex>
6124 <Link rel="edit" href="{url}"
6125 type="application/vnd.vmware.vcloud.networkConnectionSection+xml"/>
6126 </NetworkConnectionSection>""".format(
6127 url=url_rest_call
6128 )
6129 response = self.perform_request(
6130 req_type="PUT", url=url_rest_call, headers=headers, data=newdata
6131 )
6132
6133 if response.status_code == 403:
6134 add_headers = {"Content-Type": headers["Content-Type"]}
6135 response = self.retry_rest("PUT", url_rest_call, add_headers, newdata)
6136
6137 if response.status_code != 202:
6138 self.logger.error(
6139                     "REST call {} failed reason : {} "
6140 "status code : {} ".format(
6141 url_rest_call, response.text, response.status_code
6142 )
6143 )
6144 raise vimconn.VimConnException(
6145 "remove_primary_network_adapter : Failed to update "
6146 "network connection section"
6147 )
6148 else:
6149 nic_task = self.get_task_from_response(response.text)
6150 result = self.client.get_task_monitor().wait_for_success(task=nic_task)
6151 if result.get("status") == "success":
6152 self.logger.info(
6153                         "remove_primary_network_adapter(): removed primary "
6154                         "network adapter from VM {}".format(vm_id)
6155 )
6156 else:
6157 self.logger.error(
6158                         "remove_primary_network_adapter(): failed to remove "
6159                         "primary network adapter from VM {}".format(vm_id)
6160 )
6161
6162 def add_network_adapter_to_vms(
6163 self, vapp, network_name, primary_nic_index, nicIndex, net, nic_type=None
6164 ):
6165 """
6166         Method to add a network adapter to each VM in the vApp
6167 Args :
6168 network_name - name of network
6169 primary_nic_index - int value for primary nic index
6170 nicIndex - int value for nic index
6171             nic_type - adapter model to attach to the VM
6172 Returns:
6173 None
6174 """
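        # IP allocation mode is derived from the requested net parameters below:
        # floating_ip -> "POOL", explicit ip_address -> "MANUAL", otherwise "DHCP".
        # A minimal resulting <NetworkConnection> (illustrative, values are examples):
        #   <NetworkConnection network="mgmt-net">
        #     <NetworkConnectionIndex>0</NetworkConnectionIndex>
        #     <IsConnected>true</IsConnected>
        #     <IpAddressAllocationMode>DHCP</IpAddressAllocationMode>
        #   </NetworkConnection>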
6175
6176 self.logger.info(
6177 "Add network adapter to VM: network_name {} nicIndex {} nic_type {}".format(
6178 network_name, nicIndex, nic_type
6179 )
6180 )
6181 try:
6182 ip_address = None
6183 floating_ip = False
6184 mac_address = None
6185 if "floating_ip" in net:
6186 floating_ip = net["floating_ip"]
6187
6188 # Stub for ip_address feature
6189 if "ip_address" in net:
6190 ip_address = net["ip_address"]
6191
6192 if "mac_address" in net:
6193 mac_address = net["mac_address"]
6194
6195 if floating_ip:
6196 allocation_mode = "POOL"
6197 elif ip_address:
6198 allocation_mode = "MANUAL"
6199 else:
6200 allocation_mode = "DHCP"
6201
6202 if not nic_type:
6203 for vms in vapp.get_all_vms():
6204 vm_id = vms.get("id").split(":")[-1]
6205
6206 url_rest_call = (
6207 "{}/api/vApp/vm-{}/networkConnectionSection/".format(
6208 self.url, vm_id
6209 )
6210 )
6211
6212 headers = {
6213 "Accept": "application/*+xml;version=" + API_VERSION,
6214 "x-vcloud-authorization": self.client._session.headers[
6215 "x-vcloud-authorization"
6216 ],
6217 }
6218 response = self.perform_request(
6219 req_type="GET", url=url_rest_call, headers=headers
6220 )
6221
6222 if response.status_code == 403:
6223 response = self.retry_rest("GET", url_rest_call)
6224
6225 if response.status_code != 200:
6226 self.logger.error(
6227                             "REST call {} failed reason : {} "
6228 "status code : {}".format(
6229 url_rest_call, response.text, response.status_code
6230 )
6231 )
6232 raise vimconn.VimConnException(
6233 "add_network_adapter_to_vms : Failed to get "
6234 "network connection section"
6235 )
6236
6237 data = response.text
6238 data = data.split('<Link rel="edit"')[0]
6239 if "<PrimaryNetworkConnectionIndex>" not in data:
6240 self.logger.debug("add_network_adapter PrimaryNIC not in data")
6241 item = """<PrimaryNetworkConnectionIndex>{}</PrimaryNetworkConnectionIndex>
6242 <NetworkConnection network="{}">
6243 <NetworkConnectionIndex>{}</NetworkConnectionIndex>
6244 <IsConnected>true</IsConnected>
6245 <IpAddressAllocationMode>{}</IpAddressAllocationMode>
6246 </NetworkConnection>""".format(
6247 primary_nic_index, network_name, nicIndex, allocation_mode
6248 )
6249
6250 # Stub for ip_address feature
6251 if ip_address:
6252 ip_tag = "<IpAddress>{}</IpAddress>".format(ip_address)
6253 item = item.replace(
6254 "</NetworkConnectionIndex>\n",
6255 "</NetworkConnectionIndex>\n{}\n".format(ip_tag),
6256 )
6257
6258 if mac_address:
6259 mac_tag = "<MACAddress>{}</MACAddress>".format(mac_address)
6260 item = item.replace(
6261 "</IsConnected>\n",
6262 "</IsConnected>\n{}\n".format(mac_tag),
6263 )
6264
6265 data = data.replace(
6266 "</ovf:Info>\n",
6267 "</ovf:Info>\n{}\n</NetworkConnectionSection>".format(item),
6268 )
6269 else:
6270 self.logger.debug("add_network_adapter PrimaryNIC in data")
6271 new_item = """<NetworkConnection network="{}">
6272 <NetworkConnectionIndex>{}</NetworkConnectionIndex>
6273 <IsConnected>true</IsConnected>
6274 <IpAddressAllocationMode>{}</IpAddressAllocationMode>
6275 </NetworkConnection>""".format(
6276 network_name, nicIndex, allocation_mode
6277 )
6278
6279 # Stub for ip_address feature
6280 if ip_address:
6281 ip_tag = "<IpAddress>{}</IpAddress>".format(ip_address)
6282 new_item = new_item.replace(
6283 "</NetworkConnectionIndex>\n",
6284 "</NetworkConnectionIndex>\n{}\n".format(ip_tag),
6285 )
6286
6287 if mac_address:
6288 mac_tag = "<MACAddress>{}</MACAddress>".format(mac_address)
6289 new_item = new_item.replace(
6290 "</IsConnected>\n",
6291 "</IsConnected>\n{}\n".format(mac_tag),
6292 )
6293
6294 data = data + new_item + "</NetworkConnectionSection>"
6295
6296 headers[
6297 "Content-Type"
6298 ] = "application/vnd.vmware.vcloud.networkConnectionSection+xml"
6299
6300 response = self.perform_request(
6301 req_type="PUT", url=url_rest_call, headers=headers, data=data
6302 )
6303
6304 if response.status_code == 403:
6305 add_headers = {"Content-Type": headers["Content-Type"]}
6306 response = self.retry_rest(
6307 "PUT", url_rest_call, add_headers, data
6308 )
6309
6310 if response.status_code != 202:
6311 self.logger.error(
6312                             "REST call {} failed reason : {} "
6313 "status code : {} ".format(
6314 url_rest_call, response.text, response.status_code
6315 )
6316 )
6317 raise vimconn.VimConnException(
6318 "add_network_adapter_to_vms : Failed to update "
6319 "network connection section"
6320 )
6321 else:
6322 nic_task = self.get_task_from_response(response.text)
6323 result = self.client.get_task_monitor().wait_for_success(
6324 task=nic_task
6325 )
6326
6327 if result.get("status") == "success":
6328 self.logger.info(
6329                                 "add_network_adapter_to_vms(): VM {} connected to "
6330 "default NIC type".format(vm_id)
6331 )
6332 else:
6333 self.logger.error(
6334 "add_network_adapter_to_vms(): VM {} failed to "
6335 "connect NIC type".format(vm_id)
6336 )
6337 else:
6338 for vms in vapp.get_all_vms():
6339 vm_id = vms.get("id").split(":")[-1]
6340
6341 url_rest_call = (
6342 "{}/api/vApp/vm-{}/networkConnectionSection/".format(
6343 self.url, vm_id
6344 )
6345 )
6346
6347 headers = {
6348 "Accept": "application/*+xml;version=" + API_VERSION,
6349 "x-vcloud-authorization": self.client._session.headers[
6350 "x-vcloud-authorization"
6351 ],
6352 }
6353 response = self.perform_request(
6354 req_type="GET", url=url_rest_call, headers=headers
6355 )
6356
6357 if response.status_code == 403:
6358 response = self.retry_rest("GET", url_rest_call)
6359
6360 if response.status_code != 200:
6361 self.logger.error(
6362                             "REST call {} failed reason : {} "
6363 "status code : {}".format(
6364 url_rest_call, response.text, response.status_code
6365 )
6366 )
6367 raise vimconn.VimConnException(
6368 "add_network_adapter_to_vms : Failed to get "
6369 "network connection section"
6370 )
6371 data = response.text
6372 data = data.split('<Link rel="edit"')[0]
6373 vcd_netadapter_type = nic_type
6374
6375 if nic_type in ["SR-IOV", "VF"]:
6376 vcd_netadapter_type = "SRIOVETHERNETCARD"
6377
6378 if "<PrimaryNetworkConnectionIndex>" not in data:
6379 self.logger.debug(
6380 "add_network_adapter PrimaryNIC not in data nic_type {}".format(
6381 nic_type
6382 )
6383 )
6384 item = """<PrimaryNetworkConnectionIndex>{}</PrimaryNetworkConnectionIndex>
6385 <NetworkConnection network="{}">
6386 <NetworkConnectionIndex>{}</NetworkConnectionIndex>
6387 <IsConnected>true</IsConnected>
6388 <IpAddressAllocationMode>{}</IpAddressAllocationMode>
6389 <NetworkAdapterType>{}</NetworkAdapterType>
6390 </NetworkConnection>""".format(
6391 primary_nic_index,
6392 network_name,
6393 nicIndex,
6394 allocation_mode,
6395 vcd_netadapter_type,
6396 )
6397
6398 # Stub for ip_address feature
6399 if ip_address:
6400 ip_tag = "<IpAddress>{}</IpAddress>".format(ip_address)
6401 item = item.replace(
6402 "</NetworkConnectionIndex>\n",
6403 "</NetworkConnectionIndex>\n{}\n".format(ip_tag),
6404 )
6405
6406 if mac_address:
6407 mac_tag = "<MACAddress>{}</MACAddress>".format(mac_address)
6408 item = item.replace(
6409 "</IsConnected>\n",
6410 "</IsConnected>\n{}\n".format(mac_tag),
6411 )
6412
6413 data = data.replace(
6414 "</ovf:Info>\n",
6415 "</ovf:Info>\n{}\n</NetworkConnectionSection>".format(item),
6416 )
6417 else:
6418 self.logger.debug(
6419 "add_network_adapter PrimaryNIC in data nic_type {}".format(
6420 nic_type
6421 )
6422 )
6423 new_item = """<NetworkConnection network="{}">
6424 <NetworkConnectionIndex>{}</NetworkConnectionIndex>
6425 <IsConnected>true</IsConnected>
6426 <IpAddressAllocationMode>{}</IpAddressAllocationMode>
6427 <NetworkAdapterType>{}</NetworkAdapterType>
6428 </NetworkConnection>""".format(
6429 network_name, nicIndex, allocation_mode, vcd_netadapter_type
6430 )
6431
6432 # Stub for ip_address feature
6433 if ip_address:
6434 ip_tag = "<IpAddress>{}</IpAddress>".format(ip_address)
6435 new_item = new_item.replace(
6436 "</NetworkConnectionIndex>\n",
6437 "</NetworkConnectionIndex>\n{}\n".format(ip_tag),
6438 )
6439
6440 if mac_address:
6441 mac_tag = "<MACAddress>{}</MACAddress>".format(mac_address)
6442 new_item = new_item.replace(
6443 "</IsConnected>\n",
6444 "</IsConnected>\n{}\n".format(mac_tag),
6445 )
6446
6447 data = data + new_item + "</NetworkConnectionSection>"
6448
6449 headers[
6450 "Content-Type"
6451 ] = "application/vnd.vmware.vcloud.networkConnectionSection+xml"
6452
6453 response = self.perform_request(
6454 req_type="PUT", url=url_rest_call, headers=headers, data=data
6455 )
6456
6457 if response.status_code == 403:
6458 add_headers = {"Content-Type": headers["Content-Type"]}
6459 response = self.retry_rest(
6460 "PUT", url_rest_call, add_headers, data
6461 )
6462
6463 if response.status_code != 202:
6464 self.logger.error(
6465                             "REST call {} failed reason : {} "
6466 "status code : {}".format(
6467 url_rest_call, response.text, response.status_code
6468 )
6469 )
6470 raise vimconn.VimConnException(
6471 "add_network_adapter_to_vms : Failed to update "
6472 "network connection section"
6473 )
6474 else:
6475 nic_task = self.get_task_from_response(response.text)
6476 result = self.client.get_task_monitor().wait_for_success(
6477 task=nic_task
6478 )
6479
6480 if result.get("status") == "success":
6481 self.logger.info(
6482 "add_network_adapter_to_vms(): VM {} "
6483                                 "connected to NIC type {}".format(vm_id, nic_type)
6484 )
6485 else:
6486 self.logger.error(
6487 "add_network_adapter_to_vms(): VM {} "
6488 "failed to connect NIC type {}".format(vm_id, nic_type)
6489 )
6490 except Exception as exp:
6491 self.logger.error(
6492 "add_network_adapter_to_vms() : exception occurred "
6493 "while adding Network adapter"
6494 )
6495
6496 raise vimconn.VimConnException(message=exp)
6497
6498 def set_numa_affinity(self, vmuuid, paired_threads_id):
6499 """
6500         Method to assign NUMA affinity in VM configuration parameters
6501 Args :
6502 vmuuid - vm uuid
6503 paired_threads_id - one or more virtual processor
6504 numbers
6505 Returns:
6506             None
6507 """
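        # The affinity is applied as a VM extraConfig option (numa.nodeAffinity);
        # str(paired_threads_id) becomes the option value, so e.g. a value of
        # "0,1" (illustrative) pins the VM to NUMA nodes 0 and 1.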
6508 try:
6509 vcenter_conect, content = self.get_vcenter_content()
6510 vm_moref_id = self.get_vm_moref_id(vmuuid)
6511 _, vm_obj = self.get_vm_obj(content, vm_moref_id)
6512
6513 if vm_obj:
6514 config_spec = vim.vm.ConfigSpec()
6515 config_spec.extraConfig = []
6516 opt = vim.option.OptionValue()
6517 opt.key = "numa.nodeAffinity"
6518 opt.value = str(paired_threads_id)
6519 config_spec.extraConfig.append(opt)
6520 task = vm_obj.ReconfigVM_Task(config_spec)
6521
6522 if task:
6523 self.wait_for_vcenter_task(task, vcenter_conect)
6524 extra_config = vm_obj.config.extraConfig
6525 flag = False
6526
6527 for opts in extra_config:
6528 if "numa.nodeAffinity" in opts.key:
6529 flag = True
6530 self.logger.info(
6531                                 "set_numa_affinity: Successfully assigned numa affinity "
6532 "value {} for vm {}".format(opt.value, vm_obj)
6533 )
6534
6535 if flag:
6536 return
6537 else:
6538 self.logger.error("set_numa_affinity: Failed to assign numa affinity")
6539 except Exception as exp:
6540 self.logger.error(
6541                 "set_numa_affinity : exception occurred while setting numa affinity "
6542                 "for VM {} : {}".format(vmuuid, exp)
6543 )
6544
6545 raise vimconn.VimConnException(
6546 "set_numa_affinity : Error {} failed to assign numa "
6547 "affinity".format(exp)
6548 )
6549
6550 def add_new_disk(self, vapp_uuid, disk_size):
6551 """
6552 Method to create an empty vm disk
6553
6554 Args:
6555 vapp_uuid - is vapp identifier.
6556 disk_size - size of disk to be created in GB
6557
6558 Returns:
6559 None
6560 """
6561 status = False
6562 vm_details = None
6563 try:
6564 # Disk size in GB, convert it into MB
6565 if disk_size is not None:
6566 disk_size_mb = int(disk_size) * 1024
6567 vm_details = self.get_vapp_details_rest(vapp_uuid)
6568
6569 if vm_details and "vm_virtual_hardware" in vm_details:
6570 self.logger.info(
6571 "Adding disk to VM: {} disk size:{}GB".format(
6572 vm_details["name"], disk_size
6573 )
6574 )
6575 disk_href = vm_details["vm_virtual_hardware"]["disk_edit_href"]
6576 status = self.add_new_disk_rest(disk_href, disk_size_mb)
6577 except Exception as exp:
6578 msg = "Error occurred while creating new disk {}.".format(exp)
6579 self.rollback_newvm(vapp_uuid, msg)
6580
6581 if status:
6582 self.logger.info(
6583 "Added new disk to VM: {} disk size:{}GB".format(
6584 vm_details["name"], disk_size
6585 )
6586 )
6587 else:
6588 # If failed to add disk, delete VM
6589 msg = "add_new_disk: Failed to add new disk to {}".format(
6590 vm_details["name"]
6591 )
6592 self.rollback_newvm(vapp_uuid, msg)
6593
6594 def add_new_disk_rest(self, disk_href, disk_size_mb):
6595 """
6596         Retrieves vApp Disks section & adds a new empty disk
6597
6598 Args:
6599             disk_href: Disk section href to add disk
6600 disk_size_mb: Disk size in MB
6601
6602 Returns: Status of add new disk task
6603 """
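        # Sketch of the edit performed below: GET the RasdItemsList, find the highest
        # InstanceID among existing "Hard disk" items (reusing their busType and
        # busSubType), append a new <Item> with ResourceType 17 (hard disk) and the
        # requested capacity, then PUT the list back and wait for the returned task.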
6604 status = False
6605 if self.client._session:
6606 headers = {
6607 "Accept": "application/*+xml;version=" + API_VERSION,
6608 "x-vcloud-authorization": self.client._session.headers[
6609 "x-vcloud-authorization"
6610 ],
6611 }
6612 response = self.perform_request(
6613 req_type="GET", url=disk_href, headers=headers
6614 )
6615
6616 if response.status_code == 403:
6617 response = self.retry_rest("GET", disk_href)
6618
6619 if response.status_code != requests.codes.ok:
6620 self.logger.error(
6621 "add_new_disk_rest: GET REST API call {} failed. Return status code {}".format(
6622 disk_href, response.status_code
6623 )
6624 )
6625
6626 return status
6627
6628 try:
6629             # Find bus type & max of instance IDs assigned to disks
6630 lxmlroot_respond = lxmlElementTree.fromstring(response.content)
6631 namespaces = {
6632 prefix: uri for prefix, uri in lxmlroot_respond.nsmap.items() if prefix
6633 }
6634 namespaces["xmlns"] = "http://www.vmware.com/vcloud/v1.5"
6635 instance_id = 0
6636
6637 for item in lxmlroot_respond.iterfind("xmlns:Item", namespaces):
6638 if item.find("rasd:Description", namespaces).text == "Hard disk":
6639 inst_id = int(item.find("rasd:InstanceID", namespaces).text)
6640
6641 if inst_id > instance_id:
6642 instance_id = inst_id
6643 disk_item = item.find("rasd:HostResource", namespaces)
6644 bus_subtype = disk_item.attrib[
6645 "{" + namespaces["xmlns"] + "}busSubType"
6646 ]
6647 bus_type = disk_item.attrib[
6648 "{" + namespaces["xmlns"] + "}busType"
6649 ]
6650
6651 instance_id = instance_id + 1
6652 new_item = """<Item>
6653 <rasd:Description>Hard disk</rasd:Description>
6654 <rasd:ElementName>New disk</rasd:ElementName>
6655 <rasd:HostResource
6656 xmlns:vcloud="http://www.vmware.com/vcloud/v1.5"
6657 vcloud:capacity="{}"
6658 vcloud:busSubType="{}"
6659 vcloud:busType="{}"></rasd:HostResource>
6660 <rasd:InstanceID>{}</rasd:InstanceID>
6661 <rasd:ResourceType>17</rasd:ResourceType>
6662 </Item>""".format(
6663 disk_size_mb, bus_subtype, bus_type, instance_id
6664 )
6665
6666 new_data = response.text
6667 # Add new item at the bottom
6668 new_data = new_data.replace(
6669 "</Item>\n</RasdItemsList>",
6670 "</Item>\n{}\n</RasdItemsList>".format(new_item),
6671 )
6672
6673 # Send PUT request to modify virtual hardware section with new disk
6674 headers[
6675 "Content-Type"
6676 ] = "application/vnd.vmware.vcloud.rasdItemsList+xml; charset=ISO-8859-1"
6677
6678 response = self.perform_request(
6679 req_type="PUT", url=disk_href, data=new_data, headers=headers
6680 )
6681
6682 if response.status_code == 403:
6683 add_headers = {"Content-Type": headers["Content-Type"]}
6684 response = self.retry_rest("PUT", disk_href, add_headers, new_data)
6685
6686 if response.status_code != 202:
6687 self.logger.error(
6688 "PUT REST API call {} failed. Return status code {}. response.text:{}".format(
6689 disk_href, response.status_code, response.text
6690 )
6691 )
6692 else:
6693 add_disk_task = self.get_task_from_response(response.text)
6694 result = self.client.get_task_monitor().wait_for_success(
6695 task=add_disk_task
6696 )
6697
6698 if result.get("status") == "success":
6699 status = True
6700 else:
6701 self.logger.error(
6702 "Add new disk REST task failed to add {} MB disk".format(
6703 disk_size_mb
6704 )
6705 )
6706 except Exception as exp:
6707 self.logger.error(
6708 "Error occurred calling rest api for creating new disk {}".format(exp)
6709 )
6710
6711 return status
6712
6713 def add_existing_disk(
6714 self,
6715 catalogs=None,
6716 image_id=None,
6717 size=None,
6718 template_name=None,
6719 vapp_uuid=None,
6720 ):
6721 """
6722 Method to add existing disk to vm
6723 Args :
6724 catalogs - List of VDC catalogs
6725 image_id - Catalog ID
6726 template_name - Name of template in catalog
6727 vapp_uuid - UUID of vApp
6728 Returns:
6729 None
6730 """
6731 disk_info = None
6732 vcenter_conect, content = self.get_vcenter_content()
6733 # find moref-id of vm in image
6734 catalog_vm_info = self.get_vapp_template_details(
6735 catalogs=catalogs,
6736 image_id=image_id,
6737 )
6738
6739 if catalog_vm_info and "vm_vcenter_info" in catalog_vm_info:
6740 if "vm_moref_id" in catalog_vm_info["vm_vcenter_info"]:
6741 catalog_vm_moref_id = catalog_vm_info["vm_vcenter_info"].get(
6742 "vm_moref_id", None
6743 )
6744
6745 if catalog_vm_moref_id:
6746 self.logger.info(
6747 "Moref_id of VM in catalog : {}".format(catalog_vm_moref_id)
6748 )
6749 _, catalog_vm_obj = self.get_vm_obj(content, catalog_vm_moref_id)
6750
6751 if catalog_vm_obj:
6752 # find existing disk
6753 disk_info = self.find_disk(catalog_vm_obj)
6754 else:
6755 exp_msg = "No VM with image id {} found".format(image_id)
6756 self.rollback_newvm(vapp_uuid, exp_msg, exp_type="NotFound")
6757 else:
6758 exp_msg = "No Image found with image ID {} ".format(image_id)
6759 self.rollback_newvm(vapp_uuid, exp_msg, exp_type="NotFound")
6760
6761 if disk_info:
6762 self.logger.info("Existing disk_info : {}".format(disk_info))
6763 # get VM
6764 vm_moref_id = self.get_vm_moref_id(vapp_uuid)
6765 _, vm_obj = self.get_vm_obj(content, vm_moref_id)
6766
6767 if vm_obj:
6768 status = self.add_disk(
6769 vcenter_conect=vcenter_conect,
6770 vm=vm_obj,
6771 disk_info=disk_info,
6772 size=size,
6773 vapp_uuid=vapp_uuid,
6774 )
6775
6776 if status:
6777 self.logger.info(
6778 "Disk from image id {} added to {}".format(
6779 image_id, vm_obj.config.name
6780 )
6781 )
6782 else:
6783 msg = "No disk found with image id {} to add in VM {}".format(
6784 image_id, vm_obj.config.name
6785 )
6786 self.rollback_newvm(vapp_uuid, msg, exp_type="NotFound")
6787
6788 def find_disk(self, vm_obj):
6789 """
6790 Method to find details of existing disk in VM
6791 Args:
6792 vm_obj - vCenter object of VM
6793 Returns:
6794 disk_info : dict of disk details
6795 """
6796 disk_info = {}
6797 if vm_obj:
6798 try:
6799 devices = vm_obj.config.hardware.device
6800
6801 for device in devices:
6802 if type(device) is vim.vm.device.VirtualDisk:
6803 if isinstance(
6804 device.backing,
6805 vim.vm.device.VirtualDisk.FlatVer2BackingInfo,
6806 ) and hasattr(device.backing, "fileName"):
6807 disk_info["full_path"] = device.backing.fileName
6808 disk_info["datastore"] = device.backing.datastore
6809 disk_info["capacityKB"] = device.capacityInKB
6810 break
6811 except Exception as exp:
6812 self.logger.error(
6813 "find_disk() : exception occurred while "
6814 "getting existing disk details :{}".format(exp)
6815 )
6816
6817 return disk_info
6818
6819 def add_disk(
6820 self, vcenter_conect=None, vm=None, size=None, vapp_uuid=None, disk_info={}
6821 ):
6822 """
6823 Method to add existing disk in VM
6824 Args :
6825 vcenter_conect - vCenter content object
6826 vm - vCenter vm object
6827 disk_info : dict of disk details
6828 Returns:
6829 status : status of add disk task
6830 """
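        # Notes on the reconfigure below: the requested size is given in GB and
        # converted to KB, the larger of the requested size and the source disk's
        # capacity wins, and SCSI unit number 7 is skipped because it is reserved
        # for the controller itself.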
6831 datastore = disk_info["datastore"] if "datastore" in disk_info else None
6832 fullpath = disk_info["full_path"] if "full_path" in disk_info else None
6833 capacityKB = disk_info["capacityKB"] if "capacityKB" in disk_info else None
6834 if size is not None:
6835 # Convert size from GB to KB
6836 sizeKB = int(size) * 1024 * 1024
6837             # compare size of existing disk and user given size. Assign whichever is greater
6838 self.logger.info(
6839 "Add Existing disk : sizeKB {} , capacityKB {}".format(
6840 sizeKB, capacityKB
6841 )
6842 )
6843
6844 if sizeKB > capacityKB:
6845 capacityKB = sizeKB
6846
6847 if datastore and fullpath and capacityKB:
6848 try:
6849 spec = vim.vm.ConfigSpec()
6850 # get all disks on a VM, set unit_number to the next available
6851 unit_number = 0
6852 for dev in vm.config.hardware.device:
6853 if hasattr(dev.backing, "fileName"):
6854 unit_number = int(dev.unitNumber) + 1
6855 # unit_number 7 reserved for scsi controller
6856
6857 if unit_number == 7:
6858 unit_number += 1
6859
6860 if isinstance(dev, vim.vm.device.VirtualDisk):
6861 # vim.vm.device.VirtualSCSIController
6862 controller_key = dev.controllerKey
6863
6864 self.logger.info(
6865 "Add Existing disk : unit number {} , controller key {}".format(
6866 unit_number, controller_key
6867 )
6868 )
6869 # add disk here
6870 dev_changes = []
6871 disk_spec = vim.vm.device.VirtualDeviceSpec()
6872 disk_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
6873 disk_spec.device = vim.vm.device.VirtualDisk()
6874 disk_spec.device.backing = (
6875 vim.vm.device.VirtualDisk.FlatVer2BackingInfo()
6876 )
6877 disk_spec.device.backing.thinProvisioned = True
6878 disk_spec.device.backing.diskMode = "persistent"
6879 disk_spec.device.backing.datastore = datastore
6880 disk_spec.device.backing.fileName = fullpath
6881
6882 disk_spec.device.unitNumber = unit_number
6883 disk_spec.device.capacityInKB = capacityKB
6884 disk_spec.device.controllerKey = controller_key
6885 dev_changes.append(disk_spec)
6886 spec.deviceChange = dev_changes
6887 task = vm.ReconfigVM_Task(spec=spec)
6888 status = self.wait_for_vcenter_task(task, vcenter_conect)
6889
6890 return status
6891 except Exception as exp:
6892 exp_msg = (
6893 "add_disk() : exception {} occurred while adding disk "
6894 "{} to vm {}".format(exp, fullpath, vm.config.name)
6895 )
6896 self.rollback_newvm(vapp_uuid, exp_msg)
6897 else:
6898 msg = "add_disk() : Can not add disk to VM with disk info {} ".format(
6899 disk_info
6900 )
6901 self.rollback_newvm(vapp_uuid, msg)
6902
6903 def get_vcenter_content(self):
6904 """
6905 Get the vsphere content object
6906 """
6907 try:
6908 vm_vcenter_info = self.get_vm_vcenter_info()
6909 except Exception as exp:
6910 self.logger.error(
6911                 "Error occurred while getting vCenter information"
6912 " for VM : {}".format(exp)
6913 )
6914
6915 raise vimconn.VimConnException(message=exp)
6916
6917 context = None
6918 if hasattr(ssl, "_create_unverified_context"):
6919 context = ssl._create_unverified_context()
6920
6921 vcenter_conect = SmartConnect(
6922 host=vm_vcenter_info["vm_vcenter_ip"],
6923 user=vm_vcenter_info["vm_vcenter_user"],
6924 pwd=vm_vcenter_info["vm_vcenter_password"],
6925 port=int(vm_vcenter_info["vm_vcenter_port"]),
6926 sslContext=context,
6927 )
6928 atexit.register(Disconnect, vcenter_conect)
6929 content = vcenter_conect.RetrieveContent()
6930
6931 return vcenter_conect, content
6932
6933 def get_vm_moref_id(self, vapp_uuid):
6934 """
6935 Get the moref_id of given VM
6936 """
6937 try:
6938 if vapp_uuid:
6939 vm_details = self.get_vapp_details_rest(
6940 vapp_uuid, need_admin_access=True
6941 )
6942
6943 if vm_details and "vm_vcenter_info" in vm_details:
6944 vm_moref_id = vm_details["vm_vcenter_info"].get("vm_moref_id", None)
6945
6946 return vm_moref_id
6947 except Exception as exp:
6948 self.logger.error(
6949                 "Error occurred while getting VM moref ID for VM: {}".format(exp)
6950 )
6951
6952 return None
6953
6954 def get_vapp_template_details(
6955 self, catalogs=None, image_id=None, template_name=None
6956 ):
6957 """
6958 Method to get vApp template details
6959 Args :
6960 catalogs - list of VDC catalogs
6961 image_id - Catalog ID to find
6962 template_name : template name in catalog
6963 Returns:
6964             parsed_response : dict of vApp template details
6965 """
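        # Lookup path implemented below: catalog item -> vAppTemplate entity href ->
        # GET the template XML -> vm:Children / VCloudExtension / vmext:VmVimInfo /
        # vmext:VmVimObjectRef / vmext:MoRef, which yields the vCenter moref of the
        # template's VM.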
6966 parsed_response = {}
6967
6968 vca = self.connect_as_admin()
6969 if not vca:
6970 raise vimconn.VimConnConnectionException("Failed to connect vCD")
6971
6972 try:
6973 org, _ = self.get_vdc_details()
6974 catalog = self.get_catalog_obj(image_id, catalogs)
6975 if catalog:
6976 items = org.get_catalog_item(catalog.get("name"), catalog.get("name"))
6977 catalog_items = [items.attrib]
6978
6979 if len(catalog_items) == 1:
6980 headers = {
6981 "Accept": "application/*+xml;version=" + API_VERSION,
6982 "x-vcloud-authorization": vca._session.headers[
6983 "x-vcloud-authorization"
6984 ],
6985 }
6986 response = self.perform_request(
6987 req_type="GET",
6988 url=catalog_items[0].get("href"),
6989 headers=headers,
6990 )
6991 catalogItem = XmlElementTree.fromstring(response.text)
6992 entity = [
6993 child
6994 for child in catalogItem
6995 if child.get("type")
6996 == "application/vnd.vmware.vcloud.vAppTemplate+xml"
6997 ][0]
6998 vapp_tempalte_href = entity.get("href")
6999 # get vapp details and parse moref id
7000
7001 namespaces = {
7002 "vssd": "http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData",
7003 "ovf": "http://schemas.dmtf.org/ovf/envelope/1",
7004 "vmw": "http://www.vmware.com/schema/ovf",
7005 "vm": "http://www.vmware.com/vcloud/v1.5",
7006 "rasd": "http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData",
7007 "vmext": "http://www.vmware.com/vcloud/extension/v1.5",
7008 "xmlns": "http://www.vmware.com/vcloud/v1.5",
7009 }
7010
7011 if vca._session:
7012 response = self.perform_request(
7013 req_type="GET", url=vapp_tempalte_href, headers=headers
7014 )
7015
7016 if response.status_code != requests.codes.ok:
7017 self.logger.debug(
7018 "REST API call {} failed. Return status code {}".format(
7019 vapp_tempalte_href, response.status_code
7020 )
7021 )
7022 else:
7023 xmlroot_respond = XmlElementTree.fromstring(response.text)
7024 children_section = xmlroot_respond.find(
7025 "vm:Children/", namespaces
7026 )
7027
7028 if children_section is not None:
7029 vCloud_extension_section = children_section.find(
7030 "xmlns:VCloudExtension", namespaces
7031 )
7032
7033 if vCloud_extension_section is not None:
7034 vm_vcenter_info = {}
7035 vim_info = vCloud_extension_section.find(
7036 "vmext:VmVimInfo", namespaces
7037 )
7038 vmext = vim_info.find(
7039 "vmext:VmVimObjectRef", namespaces
7040 )
7041
7042 if vmext is not None:
7043 vm_vcenter_info["vm_moref_id"] = vmext.find(
7044 "vmext:MoRef", namespaces
7045 ).text
7046
7047 parsed_response["vm_vcenter_info"] = vm_vcenter_info
7048 except Exception as exp:
7049 self.logger.info(
7050 "Error occurred calling rest api for getting vApp details {}".format(
7051 exp
7052 )
7053 )
7054
7055 return parsed_response
7056
7057 def rollback_newvm(self, vapp_uuid, msg, exp_type="Genric"):
7058 """
7059 Method to delete vApp
7060 Args :
7061 vapp_uuid - vApp UUID
7062 msg - Error message to be logged
7063 exp_type : Exception type
7064 Returns:
7065 None
7066 """
7067 if vapp_uuid:
7068 self.delete_vminstance(vapp_uuid)
7069 else:
7070 msg = "No vApp ID"
7071
7072 self.logger.error(msg)
7073
7074 if exp_type == "Genric":
7075 raise vimconn.VimConnException(msg)
7076 elif exp_type == "NotFound":
7077 raise vimconn.VimConnNotFoundException(message=msg)
7078
7079 def get_sriov_devices(self, host, no_of_vfs):
7080 """
7081         Method to get the details of SRIOV devices on a given host
7082 Args:
7083 host - vSphere host object
7084 no_of_vfs - number of VFs needed on host
7085
7086 Returns:
7087 array of SRIOV devices
7088 """
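        # Only entries of host.config.pciPassthruInfo that are vim.host.SriovInfo
        # with sriovActive set and at least no_of_vfs virtual functions qualify;
        # the first match is returned.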
7089 sriovInfo = []
7090
7091 if host:
7092 for device in host.config.pciPassthruInfo:
7093 if isinstance(device, vim.host.SriovInfo) and device.sriovActive:
7094 if device.numVirtualFunction >= no_of_vfs:
7095 sriovInfo.append(device)
7096 break
7097
7098 return sriovInfo
7099
7100 def reconfig_portgroup(self, content, dvPort_group_name, config_info={}):
7101 """
7102         Method to reconfigure distributed virtual portgroup
7103 
7104         Args:
7105             dvPort_group_name - name of distributed virtual portgroup
7106             content - vCenter content object
7107             config_info - distributed virtual portgroup configuration
7108
7109 Returns:
7110 task object
7111 """
7112 try:
7113 dvPort_group = self.get_dvport_group(dvPort_group_name)
7114
7115 if dvPort_group:
7116 dv_pg_spec = vim.dvs.DistributedVirtualPortgroup.ConfigSpec()
7117 dv_pg_spec.configVersion = dvPort_group.config.configVersion
7118 dv_pg_spec.defaultPortConfig = (
7119 vim.dvs.VmwareDistributedVirtualSwitch.VmwarePortConfigPolicy()
7120 )
7121
7122 if "vlanID" in config_info:
7123 dv_pg_spec.defaultPortConfig.vlan = (
7124 vim.dvs.VmwareDistributedVirtualSwitch.VlanIdSpec()
7125 )
7126 dv_pg_spec.defaultPortConfig.vlan.vlanId = config_info.get("vlanID")
7127
7128 task = dvPort_group.ReconfigureDVPortgroup_Task(spec=dv_pg_spec)
7129
7130 return task
7131 else:
7132 return None
7133 except Exception as exp:
7134 self.logger.error(
7135                 "Error occurred while reconfiguring distributed virtual port group {}"
7136 " : {}".format(dvPort_group_name, exp)
7137 )
7138
7139 return None
7140
7141 def get_dvport_group(self, dvPort_group_name):
7142 """
7143         Method to get distributed virtual portgroup
7144
7145 Args:
7146             dvPort_group_name - name of the distributed virtual portgroup
7147
7148 Returns:
7149 portgroup object
7150 """
7151 _, content = self.get_vcenter_content()
7152 dvPort_group = None
7153
7154 try:
7155 container = content.viewManager.CreateContainerView(
7156 content.rootFolder, [vim.dvs.DistributedVirtualPortgroup], True
7157 )
7158
7159 for item in container.view:
7160 if item.key == dvPort_group_name:
7161 dvPort_group = item
7162 break
7163
7164 return dvPort_group
7165 except vmodl.MethodFault as exp:
7166 self.logger.error(
7167                 "Caught vmodl fault {} for distributed virtual port group {}".format(
7168 exp, dvPort_group_name
7169 )
7170 )
7171
7172 return None
7173
7174 def get_vlanID_from_dvs_portgr(self, dvPort_group_name):
7175 """
7176         Method to get distributed virtual portgroup vlanID
7177
7178 Args:
7179             dvPort_group_name - name of the distributed virtual portgroup
7180
7181 Returns:
7182 vlan ID
7183 """
7184 vlanId = None
7185
7186 try:
7187 dvPort_group = self.get_dvport_group(dvPort_group_name)
7188
7189 if dvPort_group:
7190 vlanId = dvPort_group.config.defaultPortConfig.vlan.vlanId
7191 except vmodl.MethodFault as exp:
7192 self.logger.error(
7193                 "Caught vmodl fault {} for distributed virtual port group {}".format(
7194 exp, dvPort_group_name
7195 )
7196 )
7197
7198 return vlanId
7199
7200 def insert_media_to_vm(self, vapp, image_id):
7201 """
7202 Method to insert media CD-ROM (ISO image) from catalog to vm.
7203 vapp - vapp object to get vm id
7204         image_id - image id of the CD-ROM to be inserted into the vm
7205 """
7206 # create connection object
7207 vca = self.connect()
7208 try:
7209 # fetching catalog details
7210 rest_url = "{}/api/catalog/{}".format(self.url, image_id)
7211
7212 if vca._session:
7213 headers = {
7214 "Accept": "application/*+xml;version=" + API_VERSION,
7215 "x-vcloud-authorization": vca._session.headers[
7216 "x-vcloud-authorization"
7217 ],
7218 }
7219 response = self.perform_request(
7220 req_type="GET", url=rest_url, headers=headers
7221 )
7222
7223 if response.status_code != 200:
7224 self.logger.error(
7225                         "REST call {} failed reason : {} "
7226 "status code : {}".format(
7227 rest_url, response.text, response.status_code
7228 )
7229 )
7230
7231 raise vimconn.VimConnException(
7232 "insert_media_to_vm(): Failed to get " "catalog details"
7233 )
7234
7235 # searching iso name and id
7236 iso_name, media_id = self.get_media_details(vca, response.text)
7237
7238 if iso_name and media_id:
7239 data = """<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
7240 <ns6:MediaInsertOrEjectParams
7241 xmlns="http://www.vmware.com/vcloud/versions" xmlns:ns2="http://schemas.dmtf.org/ovf/envelope/1"
7242 xmlns:ns3="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData"
7243 xmlns:ns4="http://schemas.dmtf.org/wbem/wscim/1/common"
7244 xmlns:ns5="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData"
7245 xmlns:ns6="http://www.vmware.com/vcloud/v1.5"
7246 xmlns:ns7="http://www.vmware.com/schema/ovf"
7247 xmlns:ns8="http://schemas.dmtf.org/ovf/environment/1"
7248 xmlns:ns9="http://www.vmware.com/vcloud/extension/v1.5">
7249 <ns6:Media
7250 type="application/vnd.vmware.vcloud.media+xml"
7251 name="{}"
7252 id="urn:vcloud:media:{}"
7253 href="https://{}/api/media/{}"/>
7254 </ns6:MediaInsertOrEjectParams>""".format(
7255 iso_name, media_id, self.url, media_id
7256 )
7257
7258 for vms in vapp.get_all_vms():
7259 vm_id = vms.get("id").split(":")[-1]
7260
7261 headers[
7262 "Content-Type"
7263 ] = "application/vnd.vmware.vcloud.mediaInsertOrEjectParams+xml"
7264 rest_url = "{}/api/vApp/vm-{}/media/action/insertMedia".format(
7265 self.url, vm_id
7266 )
7267
7268 response = self.perform_request(
7269 req_type="POST", url=rest_url, data=data, headers=headers
7270 )
7271
7272 if response.status_code != 202:
7273 error_msg = (
7274 "insert_media_to_vm() : Failed to insert CD-ROM to vm. Reason {}. "
7275 "Status code {}".format(response.text, response.status_code)
7276 )
7277 self.logger.error(error_msg)
7278
7279 raise vimconn.VimConnException(error_msg)
7280 else:
7281 task = self.get_task_from_response(response.text)
7282 result = self.client.get_task_monitor().wait_for_success(
7283 task=task
7284 )
7285
7286 if result.get("status") == "success":
7287 self.logger.info(
7288                                 "insert_media_to_vm(): Successfully inserted media ISO"
7289 " image to vm {}".format(vm_id)
7290 )
7291 except Exception as exp:
7292 self.logger.error(
7293 "insert_media_to_vm() : exception occurred "
7294 "while inserting media CD-ROM"
7295 )
7296
7297 raise vimconn.VimConnException(message=exp)
7298
7299 def get_media_details(self, vca, content):
7300 """
7301 Method to get catalog item details
7302 vca - connection object
7303 content - Catalog details
7304 Return - Media name, media id
7305 """
7306 cataloghref_list = []
7307 try:
7308 if content:
7309 vm_list_xmlroot = XmlElementTree.fromstring(content)
7310
7311 for child in vm_list_xmlroot.iter():
7312 if "CatalogItem" in child.tag:
7313 cataloghref_list.append(child.attrib.get("href"))
7314
7315 if cataloghref_list is not None:
7316 for href in cataloghref_list:
7317 if href:
7318 headers = {
7319 "Accept": "application/*+xml;version=" + API_VERSION,
7320 "x-vcloud-authorization": vca._session.headers[
7321 "x-vcloud-authorization"
7322 ],
7323 }
7324 response = self.perform_request(
7325 req_type="GET", url=href, headers=headers
7326 )
7327
7328 if response.status_code != 200:
7329 self.logger.error(
7330                                     "REST call {} failed reason : {} "
7331 "status code : {}".format(
7332 href, response.text, response.status_code
7333 )
7334 )
7335
7336 raise vimconn.VimConnException(
7337 "get_media_details : Failed to get "
7338 "catalogitem details"
7339 )
7340
7341 list_xmlroot = XmlElementTree.fromstring(response.text)
7342
7343 for child in list_xmlroot.iter():
7344 if "Entity" in child.tag:
7345 if "media" in child.attrib.get("href"):
7346 name = child.attrib.get("name")
7347 media_id = (
7348 child.attrib.get("href").split("/").pop()
7349 )
7350
7351 return name, media_id
7352 else:
7353 self.logger.debug("Media name and id not found")
7354
7355 return False, False
7356 except Exception as exp:
7357 self.logger.error(
7358                 "get_media_details : exception occurred while getting media details"
7359 )
7360
7361 raise vimconn.VimConnException(message=exp)
7362
7363 def retry_rest(self, method, url, add_headers=None, data=None):
7364 """Method to get Token & retry respective REST request
7365 Args:
7366             method - HTTP method; one of 'GET', 'PUT', 'POST' or 'DELETE'
7367 url - request url to be used
7368 add_headers - Additional headers (optional)
7369 data - Request payload data to be passed in request
7370 Returns:
7371 response - Response of request
7372 """
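        # Typical usage elsewhere in this connector (illustrative): a 403 from
        # perform_request() means the vCD session expired, so the call is replayed
        # through retry_rest(), which refreshes the token first, e.g.:
        #     response = self.perform_request(req_type="GET", url=url, headers=headers)
        #     if response.status_code == 403:
        #         response = self.retry_rest("GET", url)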
7373 response = None
7374
7375 # Get token
7376 self.get_token()
7377
7378 if self.client._session:
7379 headers = {
7380 "Accept": "application/*+xml;version=" + API_VERSION,
7381 "x-vcloud-authorization": self.client._session.headers[
7382 "x-vcloud-authorization"
7383 ],
7384 }
7385
7386 if add_headers:
7387 headers.update(add_headers)
7388
7389 if method == "GET":
7390 response = self.perform_request(req_type="GET", url=url, headers=headers)
7391 elif method == "PUT":
7392 response = self.perform_request(
7393 req_type="PUT", url=url, headers=headers, data=data
7394 )
7395 elif method == "POST":
7396 response = self.perform_request(
7397 req_type="POST", url=url, headers=headers, data=data
7398 )
7399 elif method == "DELETE":
7400 response = self.perform_request(req_type="DELETE", url=url, headers=headers)
7401
7402 return response
7403
7404 def get_token(self):
7405 """Generate a new token if expired
7406
7407 Returns:
7408 The return client object that letter can be used to connect to vCloud director as admin for VDC
7409 """
7410 self.client = self.connect()
7411
7412 def get_vdc_details(self):
7413 """Get VDC details using pyVcloud Lib
7414
7415 Returns org and vdc objects
7416 """
7417 vdc = None
7418
7419 try:
7420 org = Org(self.client, resource=self.client.get_org())
7421 vdc = org.get_vdc(self.tenant_name)
7422 except Exception as e:
7423 # pyvcloud does not raise a specific exception; refresh the token and retry
7424 self.logger.debug("Received exception {}, refreshing token ".format(str(e)))
7425
7426 # Retry once, if failed by refreshing token
7427 if vdc is None:
7428 self.get_token()
7429 org = Org(self.client, resource=self.client.get_org())
7430 vdc = org.get_vdc(self.tenant_name)
7431
7432 return org, vdc
7433
7434 def perform_request(self, req_type, url, headers=None, data=None):
7435 """Perform the POST/PUT/GET/DELETE request."""
7436 # Log REST request details
7437 self.log_request(req_type, url=url, headers=headers, data=data)
7438 # perform request and return its result
7439
7440 if req_type == "GET":
7441 response = requests.get(url=url, headers=headers, verify=False)
7442 elif req_type == "PUT":
7443 response = requests.put(url=url, headers=headers, data=data, verify=False)
7444 elif req_type == "POST":
7445 response = requests.post(url=url, headers=headers, data=data, verify=False)
7446 elif req_type == "DELETE":
7447 response = requests.delete(url=url, headers=headers, verify=False)
7448
7449 # Log the REST response
7450 self.log_response(response)
7451
7452 return response
7453
7454 def log_request(self, req_type, url=None, headers=None, data=None):
7455 """Logs REST request details"""
7456
7457 if req_type is not None:
7458 self.logger.debug("Request type: {}".format(req_type))
7459
7460 if url is not None:
7461 self.logger.debug("Request url: {}".format(url))
7462
7463 if headers is not None:
7464 for header in headers:
7465 self.logger.debug(
7466 "Request header: {}: {}".format(header, headers[header])
7467 )
7468
7469 if data is not None:
7470 self.logger.debug("Request data: {}".format(data))
7471
7472 def log_response(self, response):
7473 """Logs REST response details"""
7474
7475 self.logger.debug("Response status code: {} ".format(response.status_code))
7476
7477 def get_task_from_response(self, content):
7478 """
7479 content - API response body (response.text)
7480 return task object
7481 """
7482 xmlroot = XmlElementTree.fromstring(content)
7483
7484 if xmlroot.tag.split("}")[1] == "Task":
7485 return xmlroot
7486 else:
7487 for ele in xmlroot:
7488 if ele.tag.split("}")[1] == "Tasks":
7489 task = ele[0]
7490 break
7491
7492 return task
7493
7494 def power_on_vapp(self, vapp_id, vapp_name):
7495 """
7496 vapp_id - vApp uuid
7497 vapp_name - vApp name
7498 return - Task object
7499 """
7500 headers = {
7501 "Accept": "application/*+xml;version=" + API_VERSION,
7502 "x-vcloud-authorization": self.client._session.headers[
7503 "x-vcloud-authorization"
7504 ],
7505 }
7506
7507 poweron_href = "{}/api/vApp/vapp-{}/power/action/powerOn".format(
7508 self.url, vapp_id
7509 )
7510 response = self.perform_request(
7511 req_type="POST", url=poweron_href, headers=headers
7512 )
7513
7514 if response.status_code != 202:
7515 self.logger.error(
7516 "REST call {} failed reason : {}"
7517 "status code : {} ".format(
7518 poweron_href, response.text, response.status_code
7519 )
7520 )
7521
7522 raise vimconn.VimConnException(
7523 "power_on_vapp() : Failed to power on " "vApp {}".format(vapp_name)
7524 )
7525 else:
7526 poweron_task = self.get_task_from_response(response.text)
7527
7528 return poweron_task
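# Callers may wait for completion with the task monitor, mirroring insert_media_to_vm(),
# e.g. (illustrative): self.client.get_task_monitor().wait_for_success(task=poweron_task)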
7529
7530 def migrate_instance(self, vm_id, compute_host=None):
7531 """
7532 Migrate a vdu
7533 param:
7534 vm_id: ID of an instance
7535 compute_host: Host to migrate the vdu to
7536 """
7537 # TODO: Add support for migration
7538 raise vimconn.VimConnNotImplemented("Should have implemented this")
7539
7540 def resize_instance(self, vm_id, flavor_id=None):
7541 """
7542 resize a vdu
7543 param:
7544 vm_id: ID of an instance
7545 flavor_id: flavor_id to resize the vdu to
7546 """
7547 # TODO: Add support for resize
7548 raise vimconn.VimConnNotImplemented("Should have implemented this")