b698f4cc1f3fba2241d65376caa81ad2a599c354
[osm/RO.git] / RO-VIM-vmware / osm_rovim_vmware / vimconn_vmware.py
1 # -*- coding: utf-8 -*-
2
3 # #
4 # Copyright 2016-2019 VMware Inc.
5 # This file is part of ETSI OSM
6 # All Rights Reserved.
7 #
8 # Licensed under the Apache License, Version 2.0 (the "License"); you may
9 # not use this file except in compliance with the License. You may obtain
10 # a copy of the License at
11 #
12 # http://www.apache.org/licenses/LICENSE-2.0
13 #
14 # Unless required by applicable law or agreed to in writing, software
15 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
16 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
17 # License for the specific language governing permissions and limitations
18 # under the License.
19 #
20 # For those usages not covered by the Apache License, Version 2.0 please
21 # contact: osslegalrouting@vmware.com
22 # #
23
24 """
25 vimconn_vmware implementation an Abstract class in order to interact with VMware vCloud Director.
26 """
27
28 from lxml import etree as lxmlElementTree
29 from osm_ro_plugin import vimconn
30 from progressbar import Percentage, Bar, ETA, FileTransferSpeed, ProgressBar
31 from pyVim.connect import SmartConnect, Disconnect
32 from pyVmomi import vim, vmodl # @UnresolvedImport
33 from pyvcloud.vcd.client import BasicLoginCredentials, Client
34 from pyvcloud.vcd.org import Org
35 from pyvcloud.vcd.vapp import VApp
36 from pyvcloud.vcd.vdc import VDC
37 from xml.etree import ElementTree as XmlElementTree
38 from xml.sax.saxutils import escape
39 import atexit
40 import hashlib
41 import json
42 import logging
43 import netaddr
44 import os
45 import random
46 import re
47 import requests
48 import shutil
49 import socket
50 import ssl
51 import struct
52 import subprocess
53 import tempfile
54 import time
55 import traceback
56 import uuid
57 import yaml
58
59 # global variable for vcd connector type
60 STANDALONE = 'standalone'
61
62 # key for flavor dicts
63 FLAVOR_RAM_KEY = 'ram'
64 FLAVOR_VCPUS_KEY = 'vcpus'
65 FLAVOR_DISK_KEY = 'disk'
66 DEFAULT_IP_PROFILE = {'dhcp_count': 50,
67 'dhcp_enabled': True,
68 'ip_version': "IPv4"
69 }
70 # global variable for wait time
71 INTERVAL_TIME = 5
72 MAX_WAIT_TIME = 1800
73
74 API_VERSION = '27.0'
75
76 # -1: "Could not be created",
77 # 0: "Unresolved",
78 # 1: "Resolved",
79 # 2: "Deployed",
80 # 3: "Suspended",
81 # 4: "Powered on",
82 # 5: "Waiting for user input",
83 # 6: "Unknown state",
84 # 7: "Unrecognized state",
85 # 8: "Powered off",
86 # 9: "Inconsistent state",
87 # 10: "Children do not all have the same status",
88 # 11: "Upload initiated, OVF descriptor pending",
89 # 12: "Upload initiated, copying contents",
90 # 13: "Upload initiated , disk contents pending",
91 # 14: "Upload has been quarantined",
92 # 15: "Upload quarantine period has expired"
93
94 # mapping vCD status to MANO
95 vcdStatusCode2manoFormat = {4: 'ACTIVE',
96 7: 'PAUSED',
97 3: 'SUSPENDED',
98 8: 'INACTIVE',
99 12: 'BUILD',
100 -1: 'ERROR',
101 14: 'DELETED'}
102
103 #
104 netStatus2manoFormat = {'ACTIVE': 'ACTIVE',
105 'PAUSED': 'PAUSED',
106 'INACTIVE': 'INACTIVE',
107 'BUILD': 'BUILD',
108 'ERROR': 'ERROR',
109 'DELETED': 'DELETED'
110 }
111
112
113 class vimconnector(vimconn.VimConnector):
114 # dict used to store flavor in memory
115 flavorlist = {}
116
117 def __init__(self, uuid=None, name=None, tenant_id=None, tenant_name=None,
118 url=None, url_admin=None, user=None, passwd=None, log_level=None, config={}, persistent_info={}):
119 """
120 Constructor create vmware connector to vCloud director.
121
122 By default construct doesn't validate connection state. So client can create object with None arguments.
123 If client specified username , password and host and VDC name. Connector initialize other missing attributes.
124
125 a) It initialize organization UUID
126 b) Initialize tenant_id/vdc ID. (This information derived from tenant name)
127
128 Args:
129 uuid - is organization uuid.
130 name - is organization name that must be presented in vCloud director.
131 tenant_id - is VDC uuid it must be presented in vCloud director
132 tenant_name - is VDC name.
133 url - is hostname or ip address of vCloud director
134 url_admin - same as above.
135 user - is user that administrator for organization. Caller must make sure that
136 username has right privileges.
137
138 password - is password for a user.
139
140 VMware connector also requires PVDC administrative privileges and separate account.
141 This variables must be passed via config argument dict contains keys
142
143 dict['admin_username']
144 dict['admin_password']
145 config - Provide NSX and vCenter information
146
147 Returns:
148 Nothing.
149 """
150
151 vimconn.VimConnector.__init__(self, uuid, name, tenant_id, tenant_name, url,
152 url_admin, user, passwd, log_level, config)
153
154 self.logger = logging.getLogger('openmano.vim.vmware')
155 self.logger.setLevel(10)
156 self.persistent_info = persistent_info
157
158 self.name = name
159 self.id = uuid
160 self.url = url
161 self.url_admin = url_admin
162 self.tenant_id = tenant_id
163 self.tenant_name = tenant_name
164 self.user = user
165 self.passwd = passwd
166 self.config = config
167 self.admin_password = None
168 self.admin_user = None
169 self.org_name = ""
170 self.nsx_manager = None
171 self.nsx_user = None
172 self.nsx_password = None
173 self.availability_zone = None
174
175 # Disable warnings from self-signed certificates.
176 requests.packages.urllib3.disable_warnings()
177
178 if tenant_name is not None:
179 orgnameandtenant = tenant_name.split(":")
180 if len(orgnameandtenant) == 2:
181 self.tenant_name = orgnameandtenant[1]
182 self.org_name = orgnameandtenant[0]
183 else:
184 self.tenant_name = tenant_name
185 if "orgname" in config:
186 self.org_name = config['orgname']
187
188 if log_level:
189 self.logger.setLevel(getattr(logging, log_level))
190
191 try:
192 self.admin_user = config['admin_username']
193 self.admin_password = config['admin_password']
194 except KeyError:
195 raise vimconn.VimConnException(message="Error admin username or admin password is empty.")
196
197 try:
198 self.nsx_manager = config['nsx_manager']
199 self.nsx_user = config['nsx_user']
200 self.nsx_password = config['nsx_password']
201 except KeyError:
202 raise vimconn.VimConnException(message="Error: nsx manager or nsx user or nsx password is empty in Config")
203
204 self.vcenter_ip = config.get("vcenter_ip", None)
205 self.vcenter_port = config.get("vcenter_port", None)
206 self.vcenter_user = config.get("vcenter_user", None)
207 self.vcenter_password = config.get("vcenter_password", None)
208
209 # Set availability zone for Affinity rules
210 self.availability_zone = self.set_availability_zones()
211
212 # ############# Stub code for SRIOV #################
213 # try:
214 # self.dvs_name = config['dv_switch_name']
215 # except KeyError:
216 # raise vimconn.VimConnException(message="Error: distributed virtaul switch name is empty in Config")
217 #
218 # self.vlanID_range = config.get("vlanID_range", None)
219
220 self.org_uuid = None
221 self.client = None
222
223 if not url:
224 raise vimconn.VimConnException('url param can not be NoneType')
225
226 if not self.url_admin: # try to use normal url
227 self.url_admin = self.url
228
229 logging.debug("UUID: {} name: {} tenant_id: {} tenant name {}".format(self.id, self.org_name,
230 self.tenant_id, self.tenant_name))
231 logging.debug("vcd url {} vcd username: {} vcd password: {}".format(self.url, self.user, self.passwd))
232 logging.debug("vcd admin username {} vcd admin passowrd {}".format(self.admin_user, self.admin_password))
233
234 # initialize organization
235 if self.user is not None and self.passwd is not None and self.url:
236 self.init_organization()
237
238 def __getitem__(self, index):
239 if index == 'name':
240 return self.name
241 if index == 'tenant_id':
242 return self.tenant_id
243 if index == 'tenant_name':
244 return self.tenant_name
245 elif index == 'id':
246 return self.id
247 elif index == 'org_name':
248 return self.org_name
249 elif index == 'org_uuid':
250 return self.org_uuid
251 elif index == 'user':
252 return self.user
253 elif index == 'passwd':
254 return self.passwd
255 elif index == 'url':
256 return self.url
257 elif index == 'url_admin':
258 return self.url_admin
259 elif index == "config":
260 return self.config
261 else:
262 raise KeyError("Invalid key '{}'".format(index))
263
264 def __setitem__(self, index, value):
265 if index == 'name':
266 self.name = value
267 if index == 'tenant_id':
268 self.tenant_id = value
269 if index == 'tenant_name':
270 self.tenant_name = value
271 elif index == 'id':
272 self.id = value
273 elif index == 'org_name':
274 self.org_name = value
275 elif index == 'org_uuid':
276 self.org_uuid = value
277 elif index == 'user':
278 self.user = value
279 elif index == 'passwd':
280 self.passwd = value
281 elif index == 'url':
282 self.url = value
283 elif index == 'url_admin':
284 self.url_admin = value
285 else:
286 raise KeyError("Invalid key '{}'".format(index))
287
288 def connect_as_admin(self):
289 """ Method connect as pvdc admin user to vCloud director.
290 There are certain action that can be done only by provider vdc admin user.
291 Organization creation / provider network creation etc.
292
293 Returns:
294 The return client object that latter can be used to connect to vcloud director as admin for provider vdc
295 """
296 self.logger.debug("Logging into vCD {} as admin.".format(self.org_name))
297
298 try:
299 host = self.url
300 org = 'System'
301 client_as_admin = Client(host, verify_ssl_certs=False, api_version=API_VERSION)
302 client_as_admin.set_credentials(BasicLoginCredentials(self.admin_user, org, self.admin_password))
303 except Exception as e:
304 raise vimconn.VimConnException(
305 "Can't connect to vCloud director as: {} with exception {}".format(self.admin_user, e))
306
307 return client_as_admin
308
309 def connect(self):
310 """ Method connect as normal user to vCloud director.
311
312 Returns:
313 The return client object that latter can be used to connect to vCloud director as admin for VDC
314 """
315 try:
316 self.logger.debug("Logging into vCD {} as {} to datacenter {}.".format(self.org_name,
317 self.user,
318 self.org_name))
319 host = self.url
320 client = Client(host, verify_ssl_certs=False, api_version=API_VERSION)
321 client.set_credentials(BasicLoginCredentials(self.user, self.org_name, self.passwd))
322 except Exception as e:
323 raise vimconn.VimConnConnectionException("Can't connect to vCloud director org: "
324 "{} as user {} with exception: {}".format(self.org_name,
325 self.user,
326 e))
327
328 return client
329
330 def init_organization(self):
331 """ Method initialize organization UUID and VDC parameters.
332
333 At bare minimum client must provide organization name that present in vCloud director and VDC.
334
335 The VDC - UUID ( tenant_id) will be initialized at the run time if client didn't call constructor.
336 The Org - UUID will be initialized at the run time if data center present in vCloud director.
337
338 Returns:
339 The return vca object that letter can be used to connect to vcloud direct as admin
340 """
341 client = self.connect()
342 if not client:
343 raise vimconn.VimConnConnectionException("Failed to connect vCD.")
344
345 self.client = client
346 try:
347 if self.org_uuid is None:
348 org_list = client.get_org_list()
349 for org in org_list.Org:
350 # we set org UUID at the init phase but we can do it only when we have valid credential.
351 if org.get('name') == self.org_name:
352 self.org_uuid = org.get('href').split('/')[-1]
353 self.logger.debug("Setting organization UUID {}".format(self.org_uuid))
354 break
355 else:
356 raise vimconn.VimConnException("Vcloud director organization {} not found".format(self.org_name))
357
358 # if well good we require for org details
359 org_details_dict = self.get_org(org_uuid=self.org_uuid)
360
361 # we have two case if we want to initialize VDC ID or VDC name at run time
362 # tenant_name provided but no tenant id
363 if self.tenant_id is None and self.tenant_name is not None and 'vdcs' in org_details_dict:
364 vdcs_dict = org_details_dict['vdcs']
365 for vdc in vdcs_dict:
366 if vdcs_dict[vdc] == self.tenant_name:
367 self.tenant_id = vdc
368 self.logger.debug("Setting vdc uuid {} for organization UUID {}".format(self.tenant_id,
369 self.org_name))
370 break
371 else:
372 raise vimconn.VimConnException("Tenant name indicated but not present in vcloud director.")
373 # case two we have tenant_id but we don't have tenant name so we find and set it.
374 if self.tenant_id is not None and self.tenant_name is None and 'vdcs' in org_details_dict:
375 vdcs_dict = org_details_dict['vdcs']
376 for vdc in vdcs_dict:
377 if vdc == self.tenant_id:
378 self.tenant_name = vdcs_dict[vdc]
379 self.logger.debug("Setting vdc uuid {} for organization UUID {}".format(self.tenant_id,
380 self.org_name))
381 break
382 else:
383 raise vimconn.VimConnException("Tenant id indicated but not present in vcloud director")
384 self.logger.debug("Setting organization uuid {}".format(self.org_uuid))
385 except Exception as e:
386 self.logger.debug("Failed initialize organization UUID for org {}: {}".format(self.org_name), e)
387 self.logger.debug(traceback.format_exc())
388 self.org_uuid = None
389
390 def new_tenant(self, tenant_name=None, tenant_description=None):
391 """ Method adds a new tenant to VIM with this name.
392 This action requires access to create VDC action in vCloud director.
393
394 Args:
395 tenant_name is tenant_name to be created.
396 tenant_description not used for this call
397
398 Return:
399 returns the tenant identifier in UUID format.
400 If action is failed method will throw vimconn.VimConnException method
401 """
402 vdc_task = self.create_vdc(vdc_name=tenant_name)
403 if vdc_task is not None:
404 vdc_uuid, _ = vdc_task.popitem()
405 self.logger.info("Created new vdc {} and uuid: {}".format(tenant_name, vdc_uuid))
406 return vdc_uuid
407 else:
408 raise vimconn.VimConnException("Failed create tenant {}".format(tenant_name))
409
410 def delete_tenant(self, tenant_id=None):
411 """ Delete a tenant from VIM
412 Args:
413 tenant_id is tenant_id to be deleted.
414
415 Return:
416 returns the tenant identifier in UUID format.
417 If action is failed method will throw exception
418 """
419 vca = self.connect_as_admin()
420 if not vca:
421 raise vimconn.VimConnConnectionException("Failed to connect vCD")
422
423 if tenant_id is not None:
424 if vca._session:
425 # Get OrgVDC
426 url_list = [self.url, '/api/vdc/', tenant_id]
427 orgvdc_herf = ''.join(url_list)
428
429 headers = {'Accept': 'application/*+xml;version=' + API_VERSION,
430 'x-vcloud-authorization': vca._session.headers['x-vcloud-authorization']}
431 response = self.perform_request(req_type='GET',
432 url=orgvdc_herf,
433 headers=headers)
434
435 if response.status_code != requests.codes.ok:
436 self.logger.debug("delete_tenant():GET REST API call {} failed. "
437 "Return status code {}".format(orgvdc_herf,
438 response.status_code))
439 raise vimconn.VimConnNotFoundException("Fail to get tenant {}".format(tenant_id))
440
441 lxmlroot_respond = lxmlElementTree.fromstring(response.content)
442 namespaces = {prefix: uri for prefix, uri in lxmlroot_respond.nsmap.items() if prefix}
443 namespaces["xmlns"] = "http://www.vmware.com/vcloud/v1.5"
444 vdc_remove_href = lxmlroot_respond.find("xmlns:Link[@rel='remove']", namespaces).attrib['href']
445 vdc_remove_href = vdc_remove_href + '?recursive=true&force=true'
446
447 response = self.perform_request(req_type='DELETE',
448 url=vdc_remove_href,
449 headers=headers)
450
451 if response.status_code == 202:
452 time.sleep(5)
453 return tenant_id
454 else:
455 self.logger.debug("delete_tenant(): DELETE REST API call {} failed. "
456 "Return status code {}".format(vdc_remove_href,
457 response.status_code))
458 raise vimconn.VimConnException("Fail to delete tenant with ID {}".format(tenant_id))
459 else:
460 self.logger.debug("delete_tenant():Incorrect tenant ID {}".format(tenant_id))
461 raise vimconn.VimConnNotFoundException("Fail to get tenant {}".format(tenant_id))
462
463 def get_tenant_list(self, filter_dict={}):
464 """Obtain tenants of VIM
465 filter_dict can contain the following keys:
466 name: filter by tenant name
467 id: filter by tenant uuid/id
468 <other VIM specific>
469 Returns the tenant list of dictionaries:
470 [{'name':'<name>, 'id':'<id>, ...}, ...]
471
472 """
473 org_dict = self.get_org(self.org_uuid)
474 vdcs_dict = org_dict['vdcs']
475
476 vdclist = []
477 try:
478 for k in vdcs_dict:
479 entry = {'name': vdcs_dict[k], 'id': k}
480 # if caller didn't specify dictionary we return all tenants.
481 if filter_dict is not None and filter_dict:
482 filtered_entry = entry.copy()
483 filtered_dict = set(entry.keys()) - set(filter_dict)
484 for unwanted_key in filtered_dict:
485 del entry[unwanted_key]
486 if filter_dict == entry:
487 vdclist.append(filtered_entry)
488 else:
489 vdclist.append(entry)
490 except Exception:
491 self.logger.debug("Error in get_tenant_list()")
492 self.logger.debug(traceback.format_exc())
493 raise vimconn.VimConnException("Incorrect state. {}")
494
495 return vdclist
496
497 def new_network(self, net_name, net_type, ip_profile=None, shared=False, provider_network_profile=None):
498 """Adds a tenant network to VIM
499 Params:
500 'net_name': name of the network
501 'net_type': one of:
502 'bridge': overlay isolated network
503 'data': underlay E-LAN network for Passthrough and SRIOV interfaces
504 'ptp': underlay E-LINE network for Passthrough and SRIOV interfaces.
505 'ip_profile': is a dict containing the IP parameters of the network
506 'ip_version': can be "IPv4" or "IPv6" (Currently only IPv4 is implemented)
507 'subnet_address': ip_prefix_schema, that is X.X.X.X/Y
508 'gateway_address': (Optional) ip_schema, that is X.X.X.X
509 'dns_address': (Optional) comma separated list of ip_schema, e.g. X.X.X.X[,X,X,X,X]
510 'dhcp_enabled': True or False
511 'dhcp_start_address': ip_schema, first IP to grant
512 'dhcp_count': number of IPs to grant.
513 'shared': if this network can be seen/use by other tenants/organization
514 'provider_network_profile': (optional) contains {segmentation-id: vlan, provider-network: vim_netowrk}
515 Returns a tuple with the network identifier and created_items, or raises an exception on error
516 created_items can be None or a dictionary where this method can include key-values that will be passed to
517 the method delete_network. Can be used to store created segments, created l2gw connections, etc.
518 Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
519 as not present.
520 """
521
522 self.logger.debug("new_network tenant {} net_type {} ip_profile {} shared {} provider_network_profile {}"
523 .format(net_name, net_type, ip_profile, shared, provider_network_profile))
524 # vlan = None
525 # if provider_network_profile:
526 # vlan = provider_network_profile.get("segmentation-id")
527
528 created_items = {}
529 isshared = 'false'
530 if shared:
531 isshared = 'true'
532
533 # ############# Stub code for SRIOV #################
534 # if net_type == "data" or net_type == "ptp":
535 # if self.config.get('dv_switch_name') == None:
536 # raise vimconn.VimConnConflictException("You must provide 'dv_switch_name' at config value")
537 # network_uuid = self.create_dvPort_group(net_name)
538 parent_network_uuid = None
539
540 if provider_network_profile is not None:
541 for k, v in provider_network_profile.items():
542 if k == 'physical_network':
543 parent_network_uuid = self.get_physical_network_by_name(v)
544
545 network_uuid = self.create_network(network_name=net_name, net_type=net_type,
546 ip_profile=ip_profile, isshared=isshared,
547 parent_network_uuid=parent_network_uuid)
548 if network_uuid is not None:
549 return network_uuid, created_items
550 else:
551 raise vimconn.VimConnUnexpectedResponse("Failed create a new network {}".format(net_name))
552
553 def get_vcd_network_list(self):
554 """ Method available organization for a logged in tenant
555
556 Returns:
557 The return vca object that letter can be used to connect to vcloud direct as admin
558 """
559
560 self.logger.debug("get_vcd_network_list(): retrieving network list for vcd {}".format(self.tenant_name))
561
562 if not self.tenant_name:
563 raise vimconn.VimConnConnectionException("Tenant name is empty.")
564
565 _, vdc = self.get_vdc_details()
566 if vdc is None:
567 raise vimconn.VimConnConnectionException("Can't retrieve information for a VDC {}".format(self.tenant_name))
568
569 vdc_uuid = vdc.get('id').split(":")[3]
570 if self.client._session:
571 headers = {'Accept': 'application/*+xml;version=' + API_VERSION,
572 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
573 response = self.perform_request(req_type='GET',
574 url=vdc.get('href'),
575 headers=headers)
576 if response.status_code != 200:
577 self.logger.error("Failed to get vdc content")
578 raise vimconn.VimConnNotFoundException("Failed to get vdc content")
579 else:
580 content = XmlElementTree.fromstring(response.text)
581
582 network_list = []
583 try:
584 for item in content:
585 if item.tag.split('}')[-1] == 'AvailableNetworks':
586 for net in item:
587 response = self.perform_request(req_type='GET',
588 url=net.get('href'),
589 headers=headers)
590
591 if response.status_code != 200:
592 self.logger.error("Failed to get network content")
593 raise vimconn.VimConnNotFoundException("Failed to get network content")
594 else:
595 net_details = XmlElementTree.fromstring(response.text)
596
597 filter_dict = {}
598 net_uuid = net_details.get('id').split(":")
599 if len(net_uuid) != 4:
600 continue
601 else:
602 net_uuid = net_uuid[3]
603 # create dict entry
604 self.logger.debug("get_vcd_network_list(): Adding network {} "
605 "to a list vcd id {} network {}".format(net_uuid,
606 vdc_uuid,
607 net_details.get('name')))
608 filter_dict["name"] = net_details.get('name')
609 filter_dict["id"] = net_uuid
610 if [i.text for i in net_details if i.tag.split('}')[-1] == 'IsShared'][0] == 'true':
611 shared = True
612 else:
613 shared = False
614 filter_dict["shared"] = shared
615 filter_dict["tenant_id"] = vdc_uuid
616 if int(net_details.get('status')) == 1:
617 filter_dict["admin_state_up"] = True
618 else:
619 filter_dict["admin_state_up"] = False
620 filter_dict["status"] = "ACTIVE"
621 filter_dict["type"] = "bridge"
622 network_list.append(filter_dict)
623 self.logger.debug("get_vcd_network_list adding entry {}".format(filter_dict))
624 except Exception:
625 self.logger.debug("Error in get_vcd_network_list", exc_info=True)
626 pass
627
628 self.logger.debug("get_vcd_network_list returning {}".format(network_list))
629 return network_list
630
631 def get_network_list(self, filter_dict={}):
632 """Obtain tenant networks of VIM
633 Filter_dict can be:
634 name: network name OR/AND
635 id: network uuid OR/AND
636 shared: boolean OR/AND
637 tenant_id: tenant OR/AND
638 admin_state_up: boolean
639 status: 'ACTIVE'
640
641 [{key : value , key : value}]
642
643 Returns the network list of dictionaries:
644 [{<the fields at Filter_dict plus some VIM specific>}, ...]
645 List can be empty
646 """
647
648 self.logger.debug("get_network_list(): retrieving network list for vcd {}".format(self.tenant_name))
649
650 if not self.tenant_name:
651 raise vimconn.VimConnConnectionException("Tenant name is empty.")
652
653 _, vdc = self.get_vdc_details()
654 if vdc is None:
655 raise vimconn.VimConnConnectionException(
656 "Can't retrieve information for a VDC {}.".format(self.tenant_name))
657
658 try:
659 vdcid = vdc.get('id').split(":")[3]
660
661 if self.client._session:
662 headers = {'Accept': 'application/*+xml;version=' + API_VERSION,
663 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
664 response = self.perform_request(req_type='GET',
665 url=vdc.get('href'),
666 headers=headers)
667 if response.status_code != 200:
668 self.logger.error("Failed to get vdc content")
669 raise vimconn.VimConnNotFoundException("Failed to get vdc content")
670 else:
671 content = XmlElementTree.fromstring(response.text)
672
673 network_list = []
674 for item in content:
675 if item.tag.split('}')[-1] == 'AvailableNetworks':
676 for net in item:
677 response = self.perform_request(req_type='GET',
678 url=net.get('href'),
679 headers=headers)
680
681 if response.status_code != 200:
682 self.logger.error("Failed to get network content")
683 raise vimconn.VimConnNotFoundException("Failed to get network content")
684 else:
685 net_details = XmlElementTree.fromstring(response.text)
686
687 filter_entry = {}
688 net_uuid = net_details.get('id').split(":")
689 if len(net_uuid) != 4:
690 continue
691 else:
692 net_uuid = net_uuid[3]
693 # create dict entry
694 self.logger.debug("get_network_list(): Adding net {}"
695 " to a list vcd id {} network {}".format(net_uuid,
696 vdcid,
697 net_details.get('name')))
698 filter_entry["name"] = net_details.get('name')
699 filter_entry["id"] = net_uuid
700 if [i.text for i in net_details if i.tag.split('}')[-1] == 'IsShared'][0] == 'true':
701 shared = True
702 else:
703 shared = False
704 filter_entry["shared"] = shared
705 filter_entry["tenant_id"] = vdcid
706 if int(net_details.get('status')) == 1:
707 filter_entry["admin_state_up"] = True
708 else:
709 filter_entry["admin_state_up"] = False
710 filter_entry["status"] = "ACTIVE"
711 filter_entry["type"] = "bridge"
712 filtered_entry = filter_entry.copy()
713
714 if filter_dict is not None and filter_dict:
715 # we remove all the key : value we don't care and match only
716 # respected field
717 filtered_dict = set(filter_entry.keys()) - set(filter_dict)
718 for unwanted_key in filtered_dict:
719 del filter_entry[unwanted_key]
720 if filter_dict == filter_entry:
721 network_list.append(filtered_entry)
722 else:
723 network_list.append(filtered_entry)
724 except Exception as e:
725 self.logger.debug("Error in get_network_list", exc_info=True)
726 if isinstance(e, vimconn.VimConnException):
727 raise
728 else:
729 raise vimconn.VimConnNotFoundException("Failed : Networks list not found {} ".format(e))
730
731 self.logger.debug("Returning {}".format(network_list))
732 return network_list
733
734 def get_network(self, net_id):
735 """Method obtains network details of net_id VIM network
736 Return a dict with the fields at filter_dict (see get_network_list) plus some VIM specific>}, ...]"""
737
738 try:
739 _, vdc = self.get_vdc_details()
740 vdc_id = vdc.get('id').split(":")[3]
741 if self.client._session:
742 headers = {'Accept': 'application/*+xml;version=' + API_VERSION,
743 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
744 response = self.perform_request(req_type='GET',
745 url=vdc.get('href'),
746 headers=headers)
747 if response.status_code != 200:
748 self.logger.error("Failed to get vdc content")
749 raise vimconn.VimConnNotFoundException("Failed to get vdc content")
750 else:
751 content = XmlElementTree.fromstring(response.text)
752
753 filter_dict = {}
754
755 for item in content:
756 if item.tag.split('}')[-1] == 'AvailableNetworks':
757 for net in item:
758 response = self.perform_request(req_type='GET',
759 url=net.get('href'),
760 headers=headers)
761
762 if response.status_code != 200:
763 self.logger.error("Failed to get network content")
764 raise vimconn.VimConnNotFoundException("Failed to get network content")
765 else:
766 net_details = XmlElementTree.fromstring(response.text)
767
768 vdc_network_id = net_details.get('id').split(":")
769 if len(vdc_network_id) == 4 and vdc_network_id[3] == net_id:
770 filter_dict["name"] = net_details.get('name')
771 filter_dict["id"] = vdc_network_id[3]
772 if [i.text for i in net_details if i.tag.split('}')[-1] == 'IsShared'][0] == 'true':
773 shared = True
774 else:
775 shared = False
776 filter_dict["shared"] = shared
777 filter_dict["tenant_id"] = vdc_id
778 if int(net_details.get('status')) == 1:
779 filter_dict["admin_state_up"] = True
780 else:
781 filter_dict["admin_state_up"] = False
782 filter_dict["status"] = "ACTIVE"
783 filter_dict["type"] = "bridge"
784 self.logger.debug("Returning {}".format(filter_dict))
785 return filter_dict
786 else:
787 raise vimconn.VimConnNotFoundException("Network {} not found".format(net_id))
788 except Exception as e:
789 self.logger.debug("Error in get_network")
790 self.logger.debug(traceback.format_exc())
791 if isinstance(e, vimconn.VimConnException):
792 raise
793 else:
794 raise vimconn.VimConnNotFoundException("Failed : Network not found {} ".format(e))
795
796 return filter_dict
797
798 def delete_network(self, net_id, created_items=None):
799 """
800 Removes a tenant network from VIM and its associated elements
801 :param net_id: VIM identifier of the network, provided by method new_network
802 :param created_items: dictionary with extra items to be deleted. provided by method new_network
803 Returns the network identifier or raises an exception upon error or when network is not found
804 """
805
806 # ############# Stub code for SRIOV #################
807 # dvport_group = self.get_dvport_group(net_id)
808 # if dvport_group:
809 # #delete portgroup
810 # status = self.destroy_dvport_group(net_id)
811 # if status:
812 # # Remove vlanID from persistent info
813 # if net_id in self.persistent_info["used_vlanIDs"]:
814 # del self.persistent_info["used_vlanIDs"][net_id]
815 #
816 # return net_id
817
818 vcd_network = self.get_vcd_network(network_uuid=net_id)
819 if vcd_network is not None and vcd_network:
820 if self.delete_network_action(network_uuid=net_id):
821 return net_id
822 else:
823 raise vimconn.VimConnNotFoundException("Network {} not found".format(net_id))
824
825 def refresh_nets_status(self, net_list):
826 """Get the status of the networks
827 Params: the list of network identifiers
828 Returns a dictionary with:
829 net_id: #VIM id of this network
830 status: #Mandatory. Text with one of:
831 # DELETED (not found at vim)
832 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
833 # OTHER (Vim reported other status not understood)
834 # ERROR (VIM indicates an ERROR status)
835 # ACTIVE, INACTIVE, DOWN (admin down),
836 # BUILD (on building process)
837 #
838 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
839 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
840
841 """
842
843 dict_entry = {}
844 try:
845 for net in net_list:
846 errormsg = ''
847 vcd_network = self.get_vcd_network(network_uuid=net)
848 if vcd_network is not None and vcd_network:
849 if vcd_network['status'] == '1':
850 status = 'ACTIVE'
851 else:
852 status = 'DOWN'
853 else:
854 status = 'DELETED'
855 errormsg = 'Network not found.'
856
857 dict_entry[net] = {'status': status, 'error_msg': errormsg,
858 'vim_info': yaml.safe_dump(vcd_network)}
859 except Exception:
860 self.logger.debug("Error in refresh_nets_status")
861 self.logger.debug(traceback.format_exc())
862
863 return dict_entry
864
865 def get_flavor(self, flavor_id):
866 """Obtain flavor details from the VIM
867 Returns the flavor dict details {'id':<>, 'name':<>, other vim specific } #TODO to concrete
868 """
869 if flavor_id not in vimconnector.flavorlist:
870 raise vimconn.VimConnNotFoundException("Flavor not found.")
871 return vimconnector.flavorlist[flavor_id]
872
873 def new_flavor(self, flavor_data):
874 """Adds a tenant flavor to VIM
875 flavor_data contains a dictionary with information, keys:
876 name: flavor name
877 ram: memory (cloud type) in MBytes
878 vpcus: cpus (cloud type)
879 extended: EPA parameters
880 - numas: #items requested in same NUMA
881 memory: number of 1G huge pages memory
882 paired-threads|cores|threads: number of paired hyperthreads, complete cores OR individual
883 threads
884 interfaces: # passthrough(PT) or SRIOV interfaces attached to this numa
885 - name: interface name
886 dedicated: yes|no|yes:sriov; for PT, SRIOV or only one SRIOV for the physical NIC
887 bandwidth: X Gbps; requested guarantee bandwidth
888 vpci: requested virtual PCI address
889 disk: disk size
890 is_public:
891 #TODO to concrete
892 Returns the flavor identifier"""
893
894 # generate a new uuid put to internal dict and return it.
895 self.logger.debug("Creating new flavor - flavor_data: {}".format(flavor_data))
896 new_flavor = flavor_data
897 ram = flavor_data.get(FLAVOR_RAM_KEY, 1024)
898 cpu = flavor_data.get(FLAVOR_VCPUS_KEY, 1)
899 disk = flavor_data.get(FLAVOR_DISK_KEY, 0)
900
901 if not isinstance(ram, int):
902 raise vimconn.VimConnException("Non-integer value for ram")
903 elif not isinstance(cpu, int):
904 raise vimconn.VimConnException("Non-integer value for cpu")
905 elif not isinstance(disk, int):
906 raise vimconn.VimConnException("Non-integer value for disk")
907
908 extended_flv = flavor_data.get("extended")
909 if extended_flv:
910 numas = extended_flv.get("numas")
911 if numas:
912 for numa in numas:
913 # overwrite ram and vcpus
914 if 'memory' in numa:
915 ram = numa['memory'] * 1024
916 if 'paired-threads' in numa:
917 cpu = numa['paired-threads'] * 2
918 elif 'cores' in numa:
919 cpu = numa['cores']
920 elif 'threads' in numa:
921 cpu = numa['threads']
922
923 new_flavor[FLAVOR_RAM_KEY] = ram
924 new_flavor[FLAVOR_VCPUS_KEY] = cpu
925 new_flavor[FLAVOR_DISK_KEY] = disk
926 # generate a new uuid put to internal dict and return it.
927 flavor_id = uuid.uuid4()
928 vimconnector.flavorlist[str(flavor_id)] = new_flavor
929 self.logger.debug("Created flavor - {} : {}".format(flavor_id, new_flavor))
930
931 return str(flavor_id)
932
933 def delete_flavor(self, flavor_id):
934 """Deletes a tenant flavor from VIM identify by its id
935
936 Returns the used id or raise an exception
937 """
938 if flavor_id not in vimconnector.flavorlist:
939 raise vimconn.VimConnNotFoundException("Flavor not found.")
940
941 vimconnector.flavorlist.pop(flavor_id, None)
942 return flavor_id
943
944 def new_image(self, image_dict):
945 """
946 Adds a tenant image to VIM
947 Returns:
948 200, image-id if the image is created
949 <0, message if there is an error
950 """
951
952 return self.get_image_id_from_path(image_dict['location'])
953
954 def delete_image(self, image_id):
955 """
956 Deletes a tenant image from VIM
957 Args:
958 image_id is ID of Image to be deleted
959 Return:
960 returns the image identifier in UUID format or raises an exception on error
961 """
962 conn = self.connect_as_admin()
963 if not conn:
964 raise vimconn.VimConnConnectionException("Failed to connect vCD")
965 # Get Catalog details
966 url_list = [self.url, '/api/catalog/', image_id]
967 catalog_herf = ''.join(url_list)
968
969 headers = {'Accept': 'application/*+xml;version=' + API_VERSION,
970 'x-vcloud-authorization': conn._session.headers['x-vcloud-authorization']}
971
972 response = self.perform_request(req_type='GET',
973 url=catalog_herf,
974 headers=headers)
975
976 if response.status_code != requests.codes.ok:
977 self.logger.debug("delete_image():GET REST API call {} failed. "
978 "Return status code {}".format(catalog_herf,
979 response.status_code))
980 raise vimconn.VimConnNotFoundException("Fail to get image {}".format(image_id))
981
982 lxmlroot_respond = lxmlElementTree.fromstring(response.content)
983 namespaces = {prefix: uri for prefix, uri in lxmlroot_respond.nsmap.items() if prefix}
984 namespaces["xmlns"] = "http://www.vmware.com/vcloud/v1.5"
985
986 catalogItems_section = lxmlroot_respond.find("xmlns:CatalogItems", namespaces)
987 catalogItems = catalogItems_section.iterfind("xmlns:CatalogItem", namespaces)
988 for catalogItem in catalogItems:
989 catalogItem_href = catalogItem.attrib['href']
990
991 response = self.perform_request(req_type='GET',
992 url=catalogItem_href,
993 headers=headers)
994
995 if response.status_code != requests.codes.ok:
996 self.logger.debug("delete_image():GET REST API call {} failed. "
997 "Return status code {}".format(catalog_herf,
998 response.status_code))
999 raise vimconn.VimConnNotFoundException("Fail to get catalogItem {} for catalog {}".format(
1000 catalogItem,
1001 image_id))
1002
1003 lxmlroot_respond = lxmlElementTree.fromstring(response.content)
1004 namespaces = {prefix: uri for prefix, uri in lxmlroot_respond.nsmap.items() if prefix}
1005 namespaces["xmlns"] = "http://www.vmware.com/vcloud/v1.5"
1006 catalogitem_remove_href = lxmlroot_respond.find("xmlns:Link[@rel='remove']", namespaces).attrib['href']
1007
1008 # Remove catalogItem
1009 response = self.perform_request(req_type='DELETE',
1010 url=catalogitem_remove_href,
1011 headers=headers)
1012 if response.status_code == requests.codes.no_content:
1013 self.logger.debug("Deleted Catalog item {}".format(catalogItem))
1014 else:
1015 raise vimconn.VimConnException("Fail to delete Catalog Item {}".format(catalogItem))
1016
1017 # Remove catalog
1018 url_list = [self.url, '/api/admin/catalog/', image_id]
1019 catalog_remove_herf = ''.join(url_list)
1020 response = self.perform_request(req_type='DELETE',
1021 url=catalog_remove_herf,
1022 headers=headers)
1023
1024 if response.status_code == requests.codes.no_content:
1025 self.logger.debug("Deleted Catalog {}".format(image_id))
1026 return image_id
1027 else:
1028 raise vimconn.VimConnException("Fail to delete Catalog {}".format(image_id))
1029
1030 def catalog_exists(self, catalog_name, catalogs):
1031 """
1032
1033 :param catalog_name:
1034 :param catalogs:
1035 :return:
1036 """
1037 for catalog in catalogs:
1038 if catalog['name'] == catalog_name:
1039 return catalog['id']
1040
1041 def create_vimcatalog(self, vca=None, catalog_name=None):
1042 """ Create new catalog entry in vCloud director.
1043
1044 Args
1045 vca: vCloud director.
1046 catalog_name catalog that client wish to create. Note no validation done for a name.
1047 Client must make sure that provide valid string representation.
1048
1049 Returns catalog id if catalog created else None.
1050
1051 """
1052 try:
1053 lxml_catalog_element = vca.create_catalog(catalog_name, catalog_name)
1054 if lxml_catalog_element:
1055 id_attr_value = lxml_catalog_element.get('id')
1056 return id_attr_value.split(':')[-1]
1057 catalogs = vca.list_catalogs()
1058 except Exception as ex:
1059 self.logger.error(
1060 'create_vimcatalog(): Creation of catalog "{}" failed with error: {}'.format(catalog_name, ex))
1061 raise
1062 return self.catalog_exists(catalog_name, catalogs)
1063
1064 # noinspection PyIncorrectDocstring
1065 def upload_ovf(self, vca=None, catalog_name=None, image_name=None, media_file_name=None,
1066 description='', progress=False, chunk_bytes=128 * 1024):
1067 """
1068 Uploads a OVF file to a vCloud catalog
1069
1070 :param chunk_bytes:
1071 :param progress:
1072 :param description:
1073 :param image_name:
1074 :param vca:
1075 :param catalog_name: (str): The name of the catalog to upload the media.
1076 :param media_file_name: (str): The name of the local media file to upload.
1077 :return: (bool) True if the media file was successfully uploaded, false otherwise.
1078 """
1079 os.path.isfile(media_file_name)
1080 statinfo = os.stat(media_file_name)
1081
1082 # find a catalog entry where we upload OVF.
1083 # create vApp Template and check the status if vCD able to read OVF it will respond with appropirate
1084 # status change.
1085 # if VCD can parse OVF we upload VMDK file
1086 try:
1087 for catalog in vca.list_catalogs():
1088 if catalog_name != catalog['name']:
1089 continue
1090 catalog_href = "{}/api/catalog/{}/action/upload".format(self.url, catalog['id'])
1091 data = """
1092 <UploadVAppTemplateParams name="{}"
1093 xmlns="http://www.vmware.com/vcloud/v1.5"
1094 xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1">
1095 <Description>{} vApp Template</Description>
1096 </UploadVAppTemplateParams>
1097 """.format(catalog_name, description)
1098
1099 if self.client:
1100 headers = {'Accept': 'application/*+xml;version=' + API_VERSION,
1101 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
1102 headers['Content-Type'] = 'application/vnd.vmware.vcloud.uploadVAppTemplateParams+xml'
1103
1104 response = self.perform_request(req_type='POST',
1105 url=catalog_href,
1106 headers=headers,
1107 data=data)
1108
1109 if response.status_code == requests.codes.created:
1110 catalogItem = XmlElementTree.fromstring(response.text)
1111 entity = [child for child in catalogItem if
1112 child.get("type") == "application/vnd.vmware.vcloud.vAppTemplate+xml"][0]
1113 href = entity.get('href')
1114 template = href
1115
1116 response = self.perform_request(req_type='GET',
1117 url=href,
1118 headers=headers)
1119
1120 if response.status_code == requests.codes.ok:
1121 headers['Content-Type'] = 'Content-Type text/xml'
1122 result = re.search('rel="upload:default"\shref="(.*?\/descriptor.ovf)"', response.text)
1123 if result:
1124 transfer_href = result.group(1)
1125
1126 response = self.perform_request(req_type='PUT',
1127 url=transfer_href,
1128 headers=headers,
1129 data=open(media_file_name, 'rb'))
1130 if response.status_code != requests.codes.ok:
1131 self.logger.debug(
1132 "Failed create vApp template for catalog name {} and image {}".format(catalog_name,
1133 media_file_name))
1134 return False
1135
1136 # TODO fix this with aync block
1137 time.sleep(5)
1138
1139 self.logger.debug("vApp template for catalog name {} and image {}".format(catalog_name,
1140 media_file_name))
1141
1142 # uploading VMDK file
1143 # check status of OVF upload and upload remaining files.
1144 response = self.perform_request(req_type='GET',
1145 url=template,
1146 headers=headers)
1147
1148 if response.status_code == requests.codes.ok:
1149 result = re.search('rel="upload:default"\s*href="(.*?vmdk)"', response.text)
1150 if result:
1151 link_href = result.group(1)
1152 # we skip ovf since it already uploaded.
1153 if 'ovf' in link_href:
1154 continue
1155 # The OVF file and VMDK must be in a same directory
1156 head, _ = os.path.split(media_file_name)
1157 file_vmdk = head + '/' + link_href.split("/")[-1]
1158 if not os.path.isfile(file_vmdk):
1159 return False
1160 statinfo = os.stat(file_vmdk)
1161 if statinfo.st_size == 0:
1162 return False
1163 hrefvmdk = link_href
1164
1165 if progress:
1166 widgets = ['Uploading file: ', Percentage(), ' ', Bar(), ' ', ETA(), ' ',
1167 FileTransferSpeed()]
1168 progress_bar = ProgressBar(widgets=widgets, maxval=statinfo.st_size).start()
1169
1170 bytes_transferred = 0
1171 f = open(file_vmdk, 'rb')
1172 while bytes_transferred < statinfo.st_size:
1173 my_bytes = f.read(chunk_bytes)
1174 if len(my_bytes) <= chunk_bytes:
1175 headers['Content-Range'] = 'bytes {}-{}/{}'.format(
1176 bytes_transferred, len(my_bytes) - 1, statinfo.st_size)
1177 headers['Content-Length'] = str(len(my_bytes))
1178 response = requests.put(url=hrefvmdk,
1179 headers=headers,
1180 data=my_bytes,
1181 verify=False)
1182 if response.status_code == requests.codes.ok:
1183 bytes_transferred += len(my_bytes)
1184 if progress:
1185 progress_bar.update(bytes_transferred)
1186 else:
1187 self.logger.debug(
1188 'file upload failed with error: [{}] {}'.format(response.status_code,
1189 response.text))
1190
1191 f.close()
1192 return False
1193 f.close()
1194 if progress:
1195 progress_bar.finish()
1196 time.sleep(10)
1197 return True
1198 else:
1199 self.logger.debug("Failed retrieve vApp template for catalog name {} for OVF {}".
1200 format(catalog_name, media_file_name))
1201 return False
1202 except Exception as exp:
1203 self.logger.debug("Failed while uploading OVF to catalog {} for OVF file {} with Exception {}"
1204 .format(catalog_name, media_file_name, exp))
1205 raise vimconn.VimConnException(
1206 "Failed while uploading OVF to catalog {} for OVF file {} with Exception {}"
1207 .format(catalog_name, media_file_name, exp))
1208
1209 self.logger.debug("Failed retrieve catalog name {} for OVF file {}".format(catalog_name, media_file_name))
1210 return False
1211
1212 def upload_vimimage(self, vca=None, catalog_name=None, media_name=None, medial_file_name=None, progress=False):
1213 """Upload media file"""
1214 # TODO add named parameters for readability
1215
1216 return self.upload_ovf(vca=vca, catalog_name=catalog_name, image_name=media_name.split(".")[0],
1217 media_file_name=medial_file_name, description='medial_file_name', progress=progress)
1218
1219 def validate_uuid4(self, uuid_string=None):
1220 """ Method validate correct format of UUID.
1221
1222 Return: true if string represent valid uuid
1223 """
1224 try:
1225 uuid.UUID(uuid_string, version=4)
1226 except ValueError:
1227 return False
1228 return True
1229
1230 def get_catalogid(self, catalog_name=None, catalogs=None):
1231 """ Method check catalog and return catalog ID in UUID format.
1232
1233 Args
1234 catalog_name: catalog name as string
1235 catalogs: list of catalogs.
1236
1237 Return: catalogs uuid
1238 """
1239
1240 for catalog in catalogs:
1241 if catalog['name'] == catalog_name:
1242 catalog_id = catalog['id']
1243 return catalog_id
1244 return None
1245
1246 def get_catalogbyid(self, catalog_uuid=None, catalogs=None):
1247 """ Method check catalog and return catalog name lookup done by catalog UUID.
1248
1249 Args
1250 catalog_name: catalog name as string
1251 catalogs: list of catalogs.
1252
1253 Return: catalogs name or None
1254 """
1255
1256 if not self.validate_uuid4(uuid_string=catalog_uuid):
1257 return None
1258
1259 for catalog in catalogs:
1260 catalog_id = catalog.get('id')
1261 if catalog_id == catalog_uuid:
1262 return catalog.get('name')
1263 return None
1264
1265 def get_catalog_obj(self, catalog_uuid=None, catalogs=None):
1266 """ Method check catalog and return catalog name lookup done by catalog UUID.
1267
1268 Args
1269 catalog_name: catalog name as string
1270 catalogs: list of catalogs.
1271
1272 Return: catalogs name or None
1273 """
1274
1275 if not self.validate_uuid4(uuid_string=catalog_uuid):
1276 return None
1277
1278 for catalog in catalogs:
1279 catalog_id = catalog.get('id')
1280 if catalog_id == catalog_uuid:
1281 return catalog
1282 return None
1283
1284 def get_image_id_from_path(self, path=None, progress=False):
1285 """ Method upload OVF image to vCloud director.
1286
1287 Each OVF image represented as single catalog entry in vcloud director.
1288 The method check for existing catalog entry. The check done by file name without file extension.
1289
1290 if given catalog name already present method will respond with existing catalog uuid otherwise
1291 it will create new catalog entry and upload OVF file to newly created catalog.
1292
1293 If method can't create catalog entry or upload a file it will throw exception.
1294
1295 Method accept boolean flag progress that will output progress bar. It useful method
1296 for standalone upload use case. In case to test large file upload.
1297
1298 Args
1299 path: - valid path to OVF file.
1300 progress - boolean progress bar show progress bar.
1301
1302 Return: if image uploaded correct method will provide image catalog UUID.
1303 """
1304
1305 if not path:
1306 raise vimconn.VimConnException("Image path can't be None.")
1307
1308 if not os.path.isfile(path):
1309 raise vimconn.VimConnException("Can't read file. File not found.")
1310
1311 if not os.access(path, os.R_OK):
1312 raise vimconn.VimConnException("Can't read file. Check file permission to read.")
1313
1314 self.logger.debug("get_image_id_from_path() client requesting {} ".format(path))
1315
1316 _, filename = os.path.split(path)
1317 _, file_extension = os.path.splitext(path)
1318 if file_extension != '.ovf':
1319 self.logger.debug("Wrong file extension {} connector support only OVF container.".format(file_extension))
1320 raise vimconn.VimConnException("Wrong container. vCloud director supports only OVF.")
1321
1322 catalog_name = os.path.splitext(filename)[0]
1323 catalog_md5_name = hashlib.md5(path.encode('utf-8')).hexdigest()
1324 self.logger.debug("File name {} Catalog Name {} file path {} "
1325 "vdc catalog name {}".format(filename, catalog_name, path, catalog_md5_name))
1326
1327 try:
1328 org, _ = self.get_vdc_details()
1329 catalogs = org.list_catalogs()
1330 except Exception as exp:
1331 self.logger.debug("Failed get catalogs() with Exception {} ".format(exp))
1332 raise vimconn.VimConnException("Failed get catalogs() with Exception {} ".format(exp))
1333
1334 if len(catalogs) == 0:
1335 self.logger.info("Creating a new catalog entry {} in vcloud director".format(catalog_name))
1336 if self.create_vimcatalog(org, catalog_md5_name) is None:
1337 raise vimconn.VimConnException("Failed create new catalog {} ".format(catalog_md5_name))
1338
1339 result = self.upload_vimimage(vca=org, catalog_name=catalog_md5_name,
1340 media_name=filename, medial_file_name=path, progress=progress)
1341 if not result:
1342 raise vimconn.VimConnException("Failed create vApp template for catalog {} ".format(catalog_name))
1343 return self.get_catalogid(catalog_name, catalogs)
1344 else:
1345 for catalog in catalogs:
1346 # search for existing catalog if we find same name we return ID
1347 # TODO optimize this
1348 if catalog['name'] == catalog_md5_name:
1349 self.logger.debug("Found existing catalog entry for {} "
1350 "catalog id {}".format(catalog_name,
1351 self.get_catalogid(catalog_md5_name, catalogs)))
1352 return self.get_catalogid(catalog_md5_name, catalogs)
1353
1354 # if we didn't find existing catalog we create a new one and upload image.
1355 self.logger.debug("Creating new catalog entry {} - {}".format(catalog_name, catalog_md5_name))
1356 if self.create_vimcatalog(org, catalog_md5_name) is None:
1357 raise vimconn.VimConnException("Failed create new catalog {} ".format(catalog_md5_name))
1358
1359 result = self.upload_vimimage(vca=org, catalog_name=catalog_md5_name,
1360 media_name=filename, medial_file_name=path, progress=progress)
1361 if not result:
1362 raise vimconn.VimConnException("Failed create vApp template for catalog {} ".format(catalog_md5_name))
1363
1364 return self.get_catalogid(catalog_md5_name, org.list_catalogs())
1365
1366 def get_image_list(self, filter_dict={}):
1367 '''Obtain tenant images from VIM
1368 Filter_dict can be:
1369 name: image name
1370 id: image uuid
1371 checksum: image checksum
1372 location: image path
1373 Returns the image list of dictionaries:
1374 [{<the fields at Filter_dict plus some VIM specific>}, ...]
1375 List can be empty
1376 '''
1377
1378 try:
1379 org, _ = self.get_vdc_details()
1380 image_list = []
1381 catalogs = org.list_catalogs()
1382 if len(catalogs) == 0:
1383 return image_list
1384 else:
1385 for catalog in catalogs:
1386 catalog_uuid = catalog.get('id')
1387 name = catalog.get('name')
1388 filtered_dict = {}
1389 if filter_dict.get("name") and filter_dict["name"] != name:
1390 continue
1391 if filter_dict.get("id") and filter_dict["id"] != catalog_uuid:
1392 continue
1393 filtered_dict["name"] = name
1394 filtered_dict["id"] = catalog_uuid
1395 image_list.append(filtered_dict)
1396
1397 self.logger.debug("List of already created catalog items: {}".format(image_list))
1398 return image_list
1399 except Exception as exp:
1400 raise vimconn.VimConnException("Exception occured while retriving catalog items {}".format(exp))
1401
1402 def get_vappid(self, vdc=None, vapp_name=None):
1403 """ Method takes vdc object and vApp name and returns vapp uuid or None
1404
1405 Args:
1406 vdc: The VDC object.
1407 vapp_name: is application vappp name identifier
1408
1409 Returns:
1410 The return vApp name otherwise None
1411 """
1412 if vdc is None or vapp_name is None:
1413 return None
1414 # UUID has following format https://host/api/vApp/vapp-30da58a3-e7c7-4d09-8f68-d4c8201169cf
1415 try:
1416 refs = [ref for ref in vdc.ResourceEntities.ResourceEntity
1417 if ref.name == vapp_name and ref.type_ == 'application/vnd.vmware.vcloud.vApp+xml']
1418 if len(refs) == 1:
1419 return refs[0].href.split("vapp")[1][1:]
1420 except Exception as e:
1421 self.logger.exception(e)
1422 return False
1423 return None
1424
1425 def check_vapp(self, vdc=None, vapp_uuid=None):
1426 """ Method Method returns True or False if vapp deployed in vCloud director
1427
1428 Args:
1429 vca: Connector to VCA
1430 vdc: The VDC object.
1431 vappid: vappid is application identifier
1432
1433 Returns:
1434 The return True if vApp deployed
1435 :param vdc:
1436 :param vapp_uuid:
1437 """
1438 try:
1439 refs = [ref for ref in vdc.ResourceEntities.ResourceEntity
1440 if ref.type_ == 'application/vnd.vmware.vcloud.vApp+xml']
1441 for ref in refs:
1442 vappid = ref.href.split("vapp")[1][1:]
1443 # find vapp with respected vapp uuid
1444 if vappid == vapp_uuid:
1445 return True
1446 except Exception as e:
1447 self.logger.exception(e)
1448 return False
1449 return False
1450
1451 def get_namebyvappid(self, vapp_uuid=None):
1452 """Method returns vApp name from vCD and lookup done by vapp_id.
1453
1454 Args:
1455 vapp_uuid: vappid is application identifier
1456
1457 Returns:
1458 The return vApp name otherwise None
1459 """
1460 try:
1461 if self.client and vapp_uuid:
1462 vapp_call = "{}/api/vApp/vapp-{}".format(self.url, vapp_uuid)
1463 headers = {'Accept': 'application/*+xml;version=' + API_VERSION,
1464 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
1465
1466 response = self.perform_request(req_type='GET',
1467 url=vapp_call,
1468 headers=headers)
1469 # Retry login if session expired & retry sending request
1470 if response.status_code == 403:
1471 response = self.retry_rest('GET', vapp_call)
1472
1473 tree = XmlElementTree.fromstring(response.text)
1474 return tree.attrib['name'] if 'name' in tree.attrib else None
1475 except Exception as e:
1476 self.logger.exception(e)
1477 return None
1478 return None
1479
1480 def new_vminstance(self, name=None, description="", start=False, image_id=None, flavor_id=None, net_list=[],
1481 cloud_config=None, disk_list=None, availability_zone_index=None, availability_zone_list=None):
1482 """Adds a VM instance to VIM
1483 Params:
1484 'start': (boolean) indicates if VM must start or created in pause mode.
1485 'image_id','flavor_id': image and flavor VIM id to use for the VM
1486 'net_list': list of interfaces, each one is a dictionary with:
1487 'name': (optional) name for the interface.
1488 'net_id': VIM network id where this interface must be connect to. Mandatory for type==virtual
1489 'vpci': (optional) virtual vPCI address to assign at the VM. Can be ignored depending on VIM
1490 capabilities
1491 'model': (optional and only have sense for type==virtual) interface model: virtio, e1000, ...
1492 'mac_address': (optional) mac address to assign to this interface
1493 #TODO: CHECK if an optional 'vlan' parameter is needed for VIMs when type if VF and net_id is not
1494 provided, the VLAN tag to be used. In case net_id is provided, the internal network vlan is used
1495 for tagging VF
1496 'type': (mandatory) can be one of:
1497 'virtual', in this case always connected to a network of type 'net_type=bridge'
1498 'PCI-PASSTHROUGH' or 'PF' (passthrough): depending on VIM capabilities it can be connected to a
1499 data/ptp network or it can created unconnected
1500 'SR-IOV' or 'VF' (SRIOV with VLAN tag): same as PF for network connectivity.
1501 'VFnotShared'(SRIOV without VLAN tag) same as PF for network connectivity. VF where no other VFs
1502 are allocated on the same physical NIC
1503 'bw': (optional) only for PF/VF/VFnotShared. Minimal Bandwidth required for the interface in GBPS
1504 'port_security': (optional) If False it must avoid any traffic filtering at this interface. If missing
1505 or True, it must apply the default VIM behaviour
1506 After execution the method will add the key:
1507 'vim_id': must be filled/added by this method with the VIM identifier generated by the VIM for this
1508 interface. 'net_list' is modified
1509 'cloud_config': (optional) dictionary with:
1510 'key-pairs': (optional) list of strings with the public key to be inserted to the default user
1511 'users': (optional) list of users to be inserted, each item is a dict with:
1512 'name': (mandatory) user name,
1513 'key-pairs': (optional) list of strings with the public key to be inserted to the user
1514 'user-data': (optional) can be a string with the text script to be passed directly to cloud-init,
1515 or a list of strings, each one contains a script to be passed, usually with a MIMEmultipart file
1516 'config-files': (optional). List of files to be transferred. Each item is a dict with:
1517 'dest': (mandatory) string with the destination absolute path
1518 'encoding': (optional, by default text). Can be one of:
1519 'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
1520 'content' (mandatory): string with the content of the file
1521 'permissions': (optional) string with file permissions, typically octal notation '0644'
1522 'owner': (optional) file owner, string with the format 'owner:group'
1523 'boot-data-drive': boolean to indicate if user-data must be passed using a boot drive (hard disk)
1524 'disk_list': (optional) list with additional disks to the VM. Each item is a dict with:
1525 'image_id': (optional). VIM id of an existing image. If not provided an empty disk must be mounted
1526 'size': (mandatory) string with the size of the disk in GB
1527 availability_zone_index: Index of availability_zone_list to use for this this VM. None if not AV required
1528 availability_zone_list: list of availability zones given by user in the VNFD descriptor. Ignore if
1529 availability_zone_index is None
1530 Returns a tuple with the instance identifier and created_items or raises an exception on error
1531 created_items can be None or a dictionary where this method can include key-values that will be passed to
1532 the method delete_vminstance and action_vminstance. Can be used to store created ports, volumes, etc.
1533 Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
1534 as not present.
1535 """
1536 self.logger.info("Creating new instance for entry {}".format(name))
1537 self.logger.debug("desc {} boot {} image_id: {} flavor_id: {} net_list: {} cloud_config {} disk_list {} "
1538 "availability_zone_index {} availability_zone_list {}"
1539 .format(description, start, image_id, flavor_id, net_list, cloud_config, disk_list,
1540 availability_zone_index, availability_zone_list))
1541
1542 # new vm name = vmname + tenant_id + uuid
1543 new_vm_name = [name, '-', str(uuid.uuid4())]
1544 vmname_andid = ''.join(new_vm_name)
1545
1546 for net in net_list:
1547 if net['type'] == "PCI-PASSTHROUGH":
1548 raise vimconn.VimConnNotSupportedException(
1549 "Current vCD version does not support type : {}".format(net['type']))
1550
1551 if len(net_list) > 10:
1552 raise vimconn.VimConnNotSupportedException(
1553 "The VM hardware versions 7 and above support upto 10 NICs only")
1554
1555 # if vm already deployed we return existing uuid
1556 # we check for presence of VDC, Catalog entry and Flavor.
1557 org, vdc = self.get_vdc_details()
1558 if vdc is None:
1559 raise vimconn.VimConnNotFoundException(
1560 "new_vminstance(): Failed create vApp {}: (Failed retrieve VDC information)".format(name))
1561 catalogs = org.list_catalogs()
1562 if catalogs is None:
1563 # Retry once, if failed by refreshing token
1564 self.get_token()
1565 org = Org(self.client, resource=self.client.get_org())
1566 catalogs = org.list_catalogs()
1567 if catalogs is None:
1568 raise vimconn.VimConnNotFoundException(
1569 "new_vminstance(): Failed create vApp {}: (Failed retrieve catalogs list)".format(name))
1570
1571 catalog_hash_name = self.get_catalogbyid(catalog_uuid=image_id, catalogs=catalogs)
1572 if catalog_hash_name:
1573 self.logger.info("Found catalog entry {} for image id {}".format(catalog_hash_name, image_id))
1574 else:
1575 raise vimconn.VimConnNotFoundException("new_vminstance(): Failed create vApp {}: "
1576 "(Failed retrieve catalog information {})".format(name, image_id))
1577
1578 # Set vCPU and Memory based on flavor.
1579 vm_cpus = None
1580 vm_memory = None
1581 vm_disk = None
1582 numas = None
1583
1584 if flavor_id is not None:
1585 if flavor_id not in vimconnector.flavorlist:
1586 raise vimconn.VimConnNotFoundException("new_vminstance(): Failed create vApp {}: "
1587 "Failed retrieve flavor information "
1588 "flavor id {}".format(name, flavor_id))
1589 else:
1590 try:
1591 flavor = vimconnector.flavorlist[flavor_id]
1592 vm_cpus = flavor[FLAVOR_VCPUS_KEY]
1593 vm_memory = flavor[FLAVOR_RAM_KEY]
1594 vm_disk = flavor[FLAVOR_DISK_KEY]
1595 extended = flavor.get("extended", None)
1596 if extended:
1597 numas = extended.get("numas", None)
1598
1599 except Exception as exp:
1600 raise vimconn.VimConnException("Corrupted flavor. {}.Exception: {}".format(flavor_id, exp))
1601
1602 # image upload creates template name as catalog name space Template.
1603 templateName = self.get_catalogbyid(catalog_uuid=image_id, catalogs=catalogs)
1604 # power_on = 'false'
1605 # if start:
1606 # power_on = 'true'
1607
1608 # client must provide at least one entry in net_list if not we report error
1609 # If net type is mgmt, then configure it as primary net & use its NIC index as primary NIC
1610 # If no mgmt, then the 1st NN in netlist is considered as primary net.
1611 primary_net = None
1612 primary_netname = None
1613 primary_net_href = None
1614 # network_mode = 'bridged'
1615 if net_list is not None and len(net_list) > 0:
1616 for net in net_list:
1617 if 'use' in net and net['use'] == 'mgmt' and not primary_net:
1618 primary_net = net
1619 if primary_net is None:
1620 primary_net = net_list[0]
1621
1622 try:
1623 primary_net_id = primary_net['net_id']
1624 url_list = [self.url, '/api/network/', primary_net_id]
1625 primary_net_href = ''.join(url_list)
1626 network_dict = self.get_vcd_network(network_uuid=primary_net_id)
1627 if 'name' in network_dict:
1628 primary_netname = network_dict['name']
1629
1630 except KeyError:
1631 raise vimconn.VimConnException("Corrupted flavor. {}".format(primary_net))
1632 else:
1633 raise vimconn.VimConnUnexpectedResponse("new_vminstance(): Failed network list is empty.".format(name))
1634
1635 # use: 'data', 'bridge', 'mgmt'
1636 # create vApp. Set vcpu and ram based on flavor id.
1637 try:
1638 vdc_obj = VDC(self.client, resource=org.get_vdc(self.tenant_name))
1639 if not vdc_obj:
1640 raise vimconn.VimConnNotFoundException("new_vminstance(): Failed to get VDC object")
1641
1642 for retry in (1, 2):
1643 items = org.get_catalog_item(catalog_hash_name, catalog_hash_name)
1644 catalog_items = [items.attrib]
1645
1646 if len(catalog_items) == 1:
1647 if self.client:
1648 headers = {'Accept': 'application/*+xml;version=' + API_VERSION,
1649 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
1650
1651 response = self.perform_request(req_type='GET',
1652 url=catalog_items[0].get('href'),
1653 headers=headers)
1654 catalogItem = XmlElementTree.fromstring(response.text)
1655 entity = [child for child in catalogItem if child.get("type") == "application/vnd.vmware.vcloud.vAppTemplate+xml"][0]
1656 vapp_tempalte_href = entity.get("href")
1657
1658 response = self.perform_request(req_type='GET',
1659 url=vapp_tempalte_href,
1660 headers=headers)
1661 if response.status_code != requests.codes.ok:
1662 self.logger.debug("REST API call {} failed. Return status code {}".format(vapp_tempalte_href,
1663 response.status_code))
1664 else:
1665 result = (response.text).replace("\n", " ")
1666
1667 vapp_template_tree = XmlElementTree.fromstring(response.text)
1668 children_element = [child for child in vapp_template_tree if 'Children' in child.tag][0]
1669 vm_element = [child for child in children_element if 'Vm' in child.tag][0]
1670 vm_name = vm_element.get('name')
1671 vm_id = vm_element.get('id')
1672 vm_href = vm_element.get('href')
1673
1674 # cpus = re.search('<rasd:Description>Number of Virtual CPUs</.*?>(\d+)</rasd:VirtualQuantity>',
1675 # result).group(1)
1676 memory_mb = re.search('<rasd:Description>Memory Size</.*?>(\d+)</rasd:VirtualQuantity>',
1677 result).group(1)
1678 # cores = re.search('<vmw:CoresPerSocket ovf:required.*?>(\d+)</vmw:CoresPerSocket>', result).group(1)
1679
1680 headers['Content-Type'] = 'application/vnd.vmware.vcloud.instantiateVAppTemplateParams+xml'
1681 vdc_id = vdc.get('id').split(':')[-1]
1682 instantiate_vapp_href = "{}/api/vdc/{}/action/instantiateVAppTemplate".format(self.url,
1683 vdc_id)
1684 with open(os.path.join(os.path.dirname(__file__), 'InstantiateVAppTemplateParams.xml'), 'r') as f:
1685 template = f.read()
1686
1687 data = template.format(vmname_andid,
1688 primary_netname,
1689 primary_net_href,
1690 vapp_tempalte_href,
1691 vm_href,
1692 vm_id,
1693 vm_name,
1694 primary_netname,
1695 cpu=vm_cpus,
1696 core=1,
1697 memory=vm_memory)
1698
1699 response = self.perform_request(req_type='POST',
1700 url=instantiate_vapp_href,
1701 headers=headers,
1702 data=data)
1703
1704 if response.status_code != 201:
1705 self.logger.error("REST call {} failed reason : {}"
1706 "status code : {}".format(instantiate_vapp_href,
1707 response.text,
1708 response.status_code))
1709 raise vimconn.VimConnException("new_vminstance(): Failed to create"
1710 "vAapp {}".format(vmname_andid))
1711 else:
1712 vapptask = self.get_task_from_response(response.text)
1713
1714 if vapptask is None and retry == 1:
1715 self.get_token() # Retry getting token
1716 continue
1717 else:
1718 break
1719
1720 if vapptask is None or vapptask is False:
1721 raise vimconn.VimConnUnexpectedResponse(
1722 "new_vminstance(): failed to create vApp {}".format(vmname_andid))
1723
1724 # wait for task to complete
1725 result = self.client.get_task_monitor().wait_for_success(task=vapptask)
1726
1727 if result.get('status') == 'success':
1728 self.logger.debug("new_vminstance(): Sucessfully created Vapp {}".format(vmname_andid))
1729 else:
1730 raise vimconn.VimConnUnexpectedResponse(
1731 "new_vminstance(): failed to create vApp {}".format(vmname_andid))
1732
1733 except Exception as exp:
1734 raise vimconn.VimConnUnexpectedResponse(
1735 "new_vminstance(): failed to create vApp {} with Exception:{}".format(vmname_andid, exp))
1736
1737 # we should have now vapp in undeployed state.
1738 try:
1739 vdc_obj = VDC(self.client, href=vdc.get('href'))
1740 vapp_resource = vdc_obj.get_vapp(vmname_andid)
1741 vapp_uuid = vapp_resource.get('id').split(':')[-1]
1742 vapp = VApp(self.client, resource=vapp_resource)
1743
1744 except Exception as exp:
1745 raise vimconn.VimConnUnexpectedResponse(
1746 "new_vminstance(): Failed to retrieve vApp {} after creation: Exception:{}"
1747 .format(vmname_andid, exp))
1748
1749 if vapp_uuid is None:
1750 raise vimconn.VimConnUnexpectedResponse(
1751 "new_vminstance(): Failed to retrieve vApp {} after creation".format(vmname_andid))
1752
1753 # Add PCI passthrough/SRIOV configrations
1754 pci_devices_info = []
1755 reserve_memory = False
1756
1757 for net in net_list:
1758 if net["type"] == "PF" or net["type"] == "PCI-PASSTHROUGH":
1759 pci_devices_info.append(net)
1760 elif (net["type"] == "VF" or net["type"] == "SR-IOV" or net["type"] == "VFnotShared") and 'net_id'in net:
1761 reserve_memory = True
1762
1763 # Add PCI
1764 if len(pci_devices_info) > 0:
1765 self.logger.info("Need to add PCI devices {} into VM {}".format(pci_devices_info,
1766 vmname_andid))
1767 PCI_devices_status, _, _ = self.add_pci_devices(vapp_uuid,
1768 pci_devices_info,
1769 vmname_andid)
1770 if PCI_devices_status:
1771 self.logger.info("Added PCI devives {} to VM {}".format(
1772 pci_devices_info,
1773 vmname_andid))
1774 reserve_memory = True
1775 else:
1776 self.logger.info("Fail to add PCI devives {} to VM {}".format(
1777 pci_devices_info,
1778 vmname_andid))
1779
1780 # Add serial console - this allows cloud images to boot as if we are running under OpenStack
1781 self.add_serial_device(vapp_uuid)
1782
1783 if vm_disk:
1784 # Assuming there is only one disk in ovf and fast provisioning in organization vDC is disabled
1785 result = self.modify_vm_disk(vapp_uuid, vm_disk)
1786 if result:
1787 self.logger.debug("Modified Disk size of VM {} ".format(vmname_andid))
1788
1789 # Add new or existing disks to vApp
1790 if disk_list:
1791 added_existing_disk = False
1792 for disk in disk_list:
1793 if 'device_type' in disk and disk['device_type'] == 'cdrom':
1794 image_id = disk['image_id']
1795 # Adding CD-ROM to VM
1796 # will revisit code once specification ready to support this feature
1797 self.insert_media_to_vm(vapp, image_id)
1798 elif "image_id" in disk and disk["image_id"] is not None:
1799 self.logger.debug("Adding existing disk from image {} to vm {} ".format(
1800 disk["image_id"], vapp_uuid))
1801 self.add_existing_disk(catalogs=catalogs,
1802 image_id=disk["image_id"],
1803 size=disk["size"],
1804 template_name=templateName,
1805 vapp_uuid=vapp_uuid
1806 )
1807 added_existing_disk = True
1808 else:
1809 # Wait till added existing disk gets reflected into vCD database/API
1810 if added_existing_disk:
1811 time.sleep(5)
1812 added_existing_disk = False
1813 self.add_new_disk(vapp_uuid, disk['size'])
1814
1815 if numas:
1816 # Assigning numa affinity setting
1817 for numa in numas:
1818 if 'paired-threads-id' in numa:
1819 paired_threads_id = numa['paired-threads-id']
1820 self.set_numa_affinity(vapp_uuid, paired_threads_id)
1821
1822 # add NICs & connect to networks in netlist
1823 try:
1824 vdc_obj = VDC(self.client, href=vdc.get('href'))
1825 vapp_resource = vdc_obj.get_vapp(vmname_andid)
1826 vapp = VApp(self.client, resource=vapp_resource)
1827 vapp_id = vapp_resource.get('id').split(':')[-1]
1828
1829 self.logger.info("Removing primary NIC: ")
1830 # First remove all NICs so that NIC properties can be adjusted as needed
1831 self.remove_primary_network_adapter_from_all_vms(vapp)
1832
1833 self.logger.info("Request to connect VM to a network: {}".format(net_list))
1834 primary_nic_index = 0
1835 nicIndex = 0
1836 for net in net_list:
1837 # openmano uses network id in UUID format.
1838 # vCloud Director need a name so we do reverse operation from provided UUID we lookup a name
1839 # [{'use': 'bridge', 'net_id': '527d4bf7-566a-41e7-a9e7-ca3cdd9cef4f', 'type': 'virtual',
1840 # 'vpci': '0000:00:11.0', 'name': 'eth0'}]
1841
1842 if 'net_id' not in net:
1843 continue
1844
1845 # Using net_id as a vim_id i.e. vim interface id, as do not have saperate vim interface id
1846 # Same will be returned in refresh_vms_status() as vim_interface_id
1847 net['vim_id'] = net['net_id'] # Provide the same VIM identifier as the VIM network
1848
1849 interface_net_id = net['net_id']
1850 interface_net_name = self.get_network_name_by_id(network_uuid=interface_net_id)
1851 interface_network_mode = net['use']
1852
1853 if interface_network_mode == 'mgmt':
1854 primary_nic_index = nicIndex
1855
1856 """- POOL (A static IP address is allocated automatically from a pool of addresses.)
1857 - DHCP (The IP address is obtained from a DHCP service.)
1858 - MANUAL (The IP address is assigned manually in the IpAddress element.)
1859 - NONE (No IP addressing mode specified.)"""
1860
1861 if primary_netname is not None:
1862 self.logger.debug("new_vminstance(): Filtering by net name {}".format(interface_net_name))
1863 nets = [n for n in self.get_network_list() if n.get('name') == interface_net_name]
1864 if len(nets) == 1:
1865 self.logger.info("new_vminstance(): Found requested network: {}".format(nets[0].get('name')))
1866
1867 if interface_net_name != primary_netname:
1868 # connect network to VM - with all DHCP by default
1869 self.logger.info("new_vminstance(): Attaching net {} to vapp".format(interface_net_name))
1870 self.connect_vapp_to_org_vdc_network(vapp_id, nets[0].get('name'))
1871
1872 type_list = ('PF', 'PCI-PASSTHROUGH', 'VFnotShared')
1873 nic_type = 'VMXNET3'
1874 if 'type' in net and net['type'] not in type_list:
1875 # fetching nic type from vnf
1876 if 'model' in net:
1877 if net['model'] is not None:
1878 if net['model'].lower() == 'paravirt' or net['model'].lower() == 'virtio':
1879 nic_type = 'VMXNET3'
1880 else:
1881 nic_type = net['model']
1882
1883 self.logger.info("new_vminstance(): adding network adapter "
1884 "to a network {}".format(nets[0].get('name')))
1885 self.add_network_adapter_to_vms(vapp, nets[0].get('name'),
1886 primary_nic_index,
1887 nicIndex,
1888 net,
1889 nic_type=nic_type)
1890 else:
1891 self.logger.info("new_vminstance(): adding network adapter "
1892 "to a network {}".format(nets[0].get('name')))
1893 if net['type'] in ['SR-IOV', 'VF']:
1894 nic_type = net['type']
1895 self.add_network_adapter_to_vms(vapp, nets[0].get('name'),
1896 primary_nic_index,
1897 nicIndex,
1898 net,
1899 nic_type=nic_type)
1900 nicIndex += 1
1901
1902 # cloud-init for ssh-key injection
1903 if cloud_config:
1904 # Create a catalog which will be carrying the config drive ISO
1905 # This catalog is deleted during vApp deletion. The catalog name carries
1906 # vApp UUID and thats how it gets identified during its deletion.
1907 config_drive_catalog_name = 'cfg_drv-' + vapp_uuid
1908 self.logger.info('new_vminstance(): Creating catalog "{}" to carry config drive ISO'.format(
1909 config_drive_catalog_name))
1910 config_drive_catalog_id = self.create_vimcatalog(org, config_drive_catalog_name)
1911 if config_drive_catalog_id is None:
1912 error_msg = "new_vminstance(): Failed to create new catalog '{}' to carry the config drive " \
1913 "ISO".format(config_drive_catalog_name)
1914 raise Exception(error_msg)
1915
1916 # Create config-drive ISO
1917 _, userdata = self._create_user_data(cloud_config)
1918 # self.logger.debug('new_vminstance(): The userdata for cloud-init: {}'.format(userdata))
1919 iso_path = self.create_config_drive_iso(userdata)
1920 self.logger.debug('new_vminstance(): The ISO is successfully created. Path: {}'.format(iso_path))
1921
1922 self.logger.info('new_vminstance(): uploading iso to catalog {}'.format(config_drive_catalog_name))
1923 self.upload_iso_to_catalog(config_drive_catalog_id, iso_path)
1924 # Attach the config-drive ISO to the VM
1925 self.logger.info('new_vminstance(): Attaching the config-drive ISO to the VM')
1926 self.insert_media_to_vm(vapp, config_drive_catalog_id)
1927 shutil.rmtree(os.path.dirname(iso_path), ignore_errors=True)
1928
1929 # If VM has PCI devices or SRIOV reserve memory for VM
1930 if reserve_memory:
1931 self.reserve_memory_for_all_vms(vapp, memory_mb)
1932
1933 self.logger.debug("new_vminstance(): starting power on vApp {} ".format(vmname_andid))
1934
1935 poweron_task = self.power_on_vapp(vapp_id, vmname_andid)
1936 result = self.client.get_task_monitor().wait_for_success(task=poweron_task)
1937 if result.get('status') == 'success':
1938 self.logger.info("new_vminstance(): Successfully power on "
1939 "vApp {}".format(vmname_andid))
1940 else:
1941 self.logger.error("new_vminstance(): failed to power on vApp "
1942 "{}".format(vmname_andid))
1943
1944 except Exception as exp:
1945 try:
1946 self.delete_vminstance(vapp_uuid)
1947 except Exception as exp2:
1948 self.logger.error("new_vminstance rollback fail {}".format(exp2))
1949 # it might be a case if specific mandatory entry in dict is empty or some other pyVcloud exception
1950 self.logger.error("new_vminstance(): Failed create new vm instance {} with exception {}"
1951 .format(name, exp))
1952 raise vimconn.VimConnException("new_vminstance(): Failed create new vm instance {} with exception {}"
1953 .format(name, exp))
1954 # check if vApp deployed and if that the case return vApp UUID otherwise -1
1955 wait_time = 0
1956 vapp_uuid = None
1957 while wait_time <= MAX_WAIT_TIME:
1958 try:
1959 vapp_resource = vdc_obj.get_vapp(vmname_andid)
1960 vapp = VApp(self.client, resource=vapp_resource)
1961 except Exception as exp:
1962 raise vimconn.VimConnUnexpectedResponse(
1963 "new_vminstance(): Failed to retrieve vApp {} after creation: Exception:{}"
1964 .format(vmname_andid, exp))
1965
1966 # if vapp and vapp.me.deployed:
1967 if vapp and vapp_resource.get('deployed') == 'true':
1968 vapp_uuid = vapp_resource.get('id').split(':')[-1]
1969 break
1970 else:
1971 self.logger.debug("new_vminstance(): Wait for vApp {} to deploy".format(name))
1972 time.sleep(INTERVAL_TIME)
1973
1974 wait_time += INTERVAL_TIME
1975
1976 # SET Affinity Rule for VM
1977 # Pre-requisites: User has created Hosh Groups in vCenter with respective Hosts to be used
1978 # While creating VIM account user has to pass the Host Group names in availability_zone list
1979 # "availability_zone" is a part of VIM "config" parameters
1980 # For example, in VIM config: "availability_zone":["HG_170","HG_174","HG_175"]
1981 # Host groups are referred as availability zones
1982 # With following procedure, deployed VM will be added into a VM group.
1983 # Then A VM to Host Affinity rule will be created using the VM group & Host group.
1984 if(availability_zone_list):
1985 self.logger.debug("Existing Host Groups in VIM {}".format(self.config.get('availability_zone')))
1986 # Admin access required for creating Affinity rules
1987 client = self.connect_as_admin()
1988 if not client:
1989 raise vimconn.VimConnConnectionException("Failed to connect vCD as admin")
1990 else:
1991 self.client = client
1992 if self.client:
1993 headers = {'Accept': 'application/*+xml;version=27.0',
1994 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
1995 # Step1: Get provider vdc details from organization
1996 pvdc_href = self.get_pvdc_for_org(self.tenant_name, headers)
1997 if pvdc_href is not None:
1998 # Step2: Found required pvdc, now get resource pool information
1999 respool_href = self.get_resource_pool_details(pvdc_href, headers)
2000 if respool_href is None:
2001 # Raise error if respool_href not found
2002 msg = "new_vminstance():Error in finding resource pool details in pvdc {}".format(pvdc_href)
2003 self.log_message(msg)
2004
2005 # Step3: Verify requested availability zone(hostGroup) is present in vCD
2006 # get availability Zone
2007 vm_az = self.get_vm_availability_zone(availability_zone_index, availability_zone_list)
2008 # check if provided av zone(hostGroup) is present in vCD VIM
2009 status = self.check_availibility_zone(vm_az, respool_href, headers)
2010 if status is False:
2011 msg = "new_vminstance(): Error in finding availability zone(Host Group): {} in "
2012 "resource pool {} status: {}".format(vm_az, respool_href, status)
2013 self.log_message(msg)
2014 else:
2015 self.logger.debug("new_vminstance(): Availability zone {} found in VIM".format(vm_az))
2016
2017 # Step4: Find VM group references to create vm group
2018 vmgrp_href = self.find_vmgroup_reference(respool_href, headers)
2019 if vmgrp_href is None:
2020 msg = "new_vminstance(): No reference to VmGroup found in resource pool"
2021 self.log_message(msg)
2022
2023 # Step5: Create a VmGroup with name az_VmGroup
2024 vmgrp_name = vm_az + "_" + name # Formed VM Group name = Host Group name + VM name
2025 status = self.create_vmgroup(vmgrp_name, vmgrp_href, headers)
2026 if status is not True:
2027 msg = "new_vminstance(): Error in creating VM group {}".format(vmgrp_name)
2028 self.log_message(msg)
2029
2030 # VM Group url to add vms to vm group
2031 vmgrpname_url = self.url + "/api/admin/extension/vmGroup/name/" + vmgrp_name
2032
2033 # Step6: Add VM to VM Group
2034 # Find VM uuid from vapp_uuid
2035 vm_details = self.get_vapp_details_rest(vapp_uuid)
2036 vm_uuid = vm_details['vmuuid']
2037
2038 status = self.add_vm_to_vmgroup(vm_uuid, vmgrpname_url, vmgrp_name, headers)
2039 if status is not True:
2040 msg = "new_vminstance(): Error in adding VM to VM group {}".format(vmgrp_name)
2041 self.log_message(msg)
2042
2043 # Step7: Create VM to Host affinity rule
2044 addrule_href = self.get_add_rule_reference(respool_href, headers)
2045 if addrule_href is None:
2046 msg = "new_vminstance(): Error in finding href to add rule in resource pool: {}"\
2047 .format(respool_href)
2048 self.log_message(msg)
2049
2050 status = self.create_vm_to_host_affinity_rule(addrule_href, vmgrp_name, vm_az, "Affinity", headers)
2051 if status is False:
2052 msg = "new_vminstance(): Error in creating affinity rule for VM {} in Host group {}"\
2053 .format(name, vm_az)
2054 self.log_message(msg)
2055 else:
2056 self.logger.debug("new_vminstance(): Affinity rule created successfully. Added {} in Host group {}"
2057 .format(name, vm_az))
2058 # Reset token to a normal user to perform other operations
2059 self.get_token()
2060
2061 if vapp_uuid is not None:
2062 return vapp_uuid, None
2063 else:
2064 raise vimconn.VimConnUnexpectedResponse("new_vminstance(): Failed create new vm instance {}".format(name))
2065
2066 def create_config_drive_iso(self, user_data):
2067 tmpdir = tempfile.mkdtemp()
2068 iso_path = os.path.join(tmpdir, 'ConfigDrive.iso')
2069 latest_dir = os.path.join(tmpdir, 'openstack', 'latest')
2070 os.makedirs(latest_dir)
2071 with open(os.path.join(latest_dir, 'meta_data.json'), 'w') as meta_file_obj, \
2072 open(os.path.join(latest_dir, 'user_data'), 'w') as userdata_file_obj:
2073 userdata_file_obj.write(user_data)
2074 meta_file_obj.write(json.dumps({"availability_zone": "nova",
2075 "launch_index": 0,
2076 "name": "ConfigDrive",
2077 "uuid": str(uuid.uuid4())}
2078 )
2079 )
2080 genisoimage_cmd = 'genisoimage -J -r -V config-2 -o {iso_path} {source_dir_path}'.format(
2081 iso_path=iso_path, source_dir_path=tmpdir)
2082 self.logger.info('create_config_drive_iso(): Creating ISO by running command "{}"'.format(genisoimage_cmd))
2083 try:
2084 FNULL = open(os.devnull, 'w')
2085 subprocess.check_call(genisoimage_cmd, shell=True, stdout=FNULL)
2086 except subprocess.CalledProcessError as e:
2087 shutil.rmtree(tmpdir, ignore_errors=True)
2088 error_msg = 'create_config_drive_iso(): Exception while running genisoimage command: {}'.format(e)
2089 self.logger.error(error_msg)
2090 raise Exception(error_msg)
2091 return iso_path
2092
2093 def upload_iso_to_catalog(self, catalog_id, iso_file_path):
2094 if not os.path.isfile(iso_file_path):
2095 error_msg = "upload_iso_to_catalog(): Given iso file is not present. Given path: {}".format(iso_file_path)
2096 self.logger.error(error_msg)
2097 raise Exception(error_msg)
2098 iso_file_stat = os.stat(iso_file_path)
2099 xml_media_elem = '''<?xml version="1.0" encoding="UTF-8"?>
2100 <Media
2101 xmlns="http://www.vmware.com/vcloud/v1.5"
2102 name="{iso_name}"
2103 size="{iso_size}"
2104 imageType="iso">
2105 <Description>ISO image for config-drive</Description>
2106 </Media>'''.format(iso_name=os.path.basename(iso_file_path), iso_size=iso_file_stat.st_size)
2107 headers = {'Accept': 'application/*+xml;version=' + API_VERSION,
2108 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
2109 headers['Content-Type'] = 'application/vnd.vmware.vcloud.media+xml'
2110 catalog_href = self.url + '/api/catalog/' + catalog_id + '/action/upload'
2111 response = self.perform_request(req_type='POST', url=catalog_href, headers=headers, data=xml_media_elem)
2112
2113 if response.status_code != 201:
2114 error_msg = "upload_iso_to_catalog(): Failed to POST an action/upload request to {}".format(catalog_href)
2115 self.logger.error(error_msg)
2116 raise Exception(error_msg)
2117
2118 catalogItem = XmlElementTree.fromstring(response.text)
2119 entity = [child for child in catalogItem if child.get("type") == "application/vnd.vmware.vcloud.media+xml"][0]
2120 entity_href = entity.get('href')
2121
2122 response = self.perform_request(req_type='GET', url=entity_href, headers=headers)
2123 if response.status_code != 200:
2124 raise Exception("upload_iso_to_catalog(): Failed to GET entity href {}".format(entity_href))
2125
2126 match = re.search(r'<Files>\s+?<File.+?href="(.+?)"/>\s+?</File>\s+?</Files>', response.text, re.DOTALL)
2127 if match:
2128 media_upload_href = match.group(1)
2129 else:
2130 raise Exception('Could not parse the upload URL for the media file from the last response')
2131 upload_iso_task = self.get_task_from_response(response.text)
2132 headers['Content-Type'] = 'application/octet-stream'
2133 response = self.perform_request(req_type='PUT',
2134 url=media_upload_href,
2135 headers=headers,
2136 data=open(iso_file_path, 'rb'))
2137
2138 if response.status_code != 200:
2139 raise Exception('PUT request to "{}" failed'.format(media_upload_href))
2140 result = self.client.get_task_monitor().wait_for_success(task=upload_iso_task)
2141 if result.get('status') != 'success':
2142 raise Exception('The upload iso task failed with status {}'.format(result.get('status')))
2143
2144 def get_vcd_availibility_zones(self, respool_href, headers):
2145 """ Method to find presence of av zone is VIM resource pool
2146
2147 Args:
2148 respool_href - resource pool href
2149 headers - header information
2150
2151 Returns:
2152 vcd_az - list of azone present in vCD
2153 """
2154 vcd_az = []
2155 url = respool_href
2156 resp = self.perform_request(req_type='GET', url=respool_href, headers=headers)
2157
2158 if resp.status_code != requests.codes.ok:
2159 self.logger.debug("REST API call {} failed. Return status code {}".format(url, resp.status_code))
2160 else:
2161 # Get the href to hostGroups and find provided hostGroup is present in it
2162 resp_xml = XmlElementTree.fromstring(resp.content)
2163 for child in resp_xml:
2164 if 'VMWProviderVdcResourcePool' in child.tag:
2165 for schild in child:
2166 if 'Link' in schild.tag:
2167 if schild.attrib.get('type') == "application/vnd.vmware.admin.vmwHostGroupsType+xml":
2168 hostGroup = schild.attrib.get('href')
2169 hg_resp = self.perform_request(req_type='GET', url=hostGroup, headers=headers)
2170 if hg_resp.status_code != requests.codes.ok:
2171 self.logger.debug("REST API call {} failed. Return status code {}".format(
2172 hostGroup, hg_resp.status_code))
2173 else:
2174 hg_resp_xml = XmlElementTree.fromstring(hg_resp.content)
2175 for hostGroup in hg_resp_xml:
2176 if 'HostGroup' in hostGroup.tag:
2177 # append host group name to the list
2178 vcd_az.append(hostGroup.attrib.get("name"))
2179 return vcd_az
2180
2181 def set_availability_zones(self):
2182 """
2183 Set vim availability zone
2184 """
2185
2186 vim_availability_zones = None
2187 availability_zone = None
2188 if 'availability_zone' in self.config:
2189 vim_availability_zones = self.config.get('availability_zone')
2190 if isinstance(vim_availability_zones, str):
2191 availability_zone = [vim_availability_zones]
2192 elif isinstance(vim_availability_zones, list):
2193 availability_zone = vim_availability_zones
2194 else:
2195 return availability_zone
2196
2197 return availability_zone
2198
2199 def get_vm_availability_zone(self, availability_zone_index, availability_zone_list):
2200 """
2201 Return the availability zone to be used by the created VM.
2202 returns: The VIM availability zone to be used or None
2203 """
2204 if availability_zone_index is None:
2205 if not self.config.get('availability_zone'):
2206 return None
2207 elif isinstance(self.config.get('availability_zone'), str):
2208 return self.config['availability_zone']
2209 else:
2210 return self.config['availability_zone'][0]
2211
2212 vim_availability_zones = self.availability_zone
2213
2214 # check if VIM offer enough availability zones describe in the VNFD
2215 if vim_availability_zones and len(availability_zone_list) <= len(vim_availability_zones):
2216 # check if all the names of NFV AV match VIM AV names
2217 match_by_index = False
2218 for av in availability_zone_list:
2219 if av not in vim_availability_zones:
2220 match_by_index = True
2221 break
2222 if match_by_index:
2223 self.logger.debug("Required Availability zone or Host Group not found in VIM config")
2224 self.logger.debug("Input Availability zone list: {}".format(availability_zone_list))
2225 self.logger.debug("VIM configured Availability zones: {}".format(vim_availability_zones))
2226 self.logger.debug("VIM Availability zones will be used by index")
2227 return vim_availability_zones[availability_zone_index]
2228 else:
2229 return availability_zone_list[availability_zone_index]
2230 else:
2231 raise vimconn.VimConnConflictException("No enough availability zones at VIM for this deployment")
2232
2233 def create_vm_to_host_affinity_rule(self, addrule_href, vmgrpname, hostgrpname, polarity, headers):
2234 """ Method to create VM to Host Affinity rule in vCD
2235
2236 Args:
2237 addrule_href - href to make a POST request
2238 vmgrpname - name of the VM group created
2239 hostgrpnmae - name of the host group created earlier
2240 polarity - Affinity or Anti-affinity (default: Affinity)
2241 headers - headers to make REST call
2242
2243 Returns:
2244 True- if rule is created
2245 False- Failed to create rule due to some error
2246
2247 """
2248 task_status = False
2249 rule_name = polarity + "_" + vmgrpname
2250 payload = """<?xml version="1.0" encoding="UTF-8"?>
2251 <vmext:VMWVmHostAffinityRule
2252 xmlns:vmext="http://www.vmware.com/vcloud/extension/v1.5"
2253 xmlns:vcloud="http://www.vmware.com/vcloud/v1.5"
2254 type="application/vnd.vmware.admin.vmwVmHostAffinityRule+xml">
2255 <vcloud:Name>{}</vcloud:Name>
2256 <vcloud:IsEnabled>true</vcloud:IsEnabled>
2257 <vcloud:IsMandatory>true</vcloud:IsMandatory>
2258 <vcloud:Polarity>{}</vcloud:Polarity>
2259 <vmext:HostGroupName>{}</vmext:HostGroupName>
2260 <vmext:VmGroupName>{}</vmext:VmGroupName>
2261 </vmext:VMWVmHostAffinityRule>""".format(rule_name, polarity, hostgrpname, vmgrpname)
2262
2263 resp = self.perform_request(req_type='POST', url=addrule_href, headers=headers, data=payload)
2264
2265 if resp.status_code != requests.codes.accepted:
2266 self.logger.debug("REST API call {} failed. Return status code {}".format(addrule_href, resp.status_code))
2267 task_status = False
2268 return task_status
2269 else:
2270 affinity_task = self.get_task_from_response(resp.content)
2271 self.logger.debug("affinity_task: {}".format(affinity_task))
2272 if affinity_task is None or affinity_task is False:
2273 raise vimconn.VimConnUnexpectedResponse("failed to find affinity task")
2274 # wait for task to complete
2275 result = self.client.get_task_monitor().wait_for_success(task=affinity_task)
2276 if result.get('status') == 'success':
2277 self.logger.debug("Successfully created affinity rule {}".format(rule_name))
2278 return True
2279 else:
2280 raise vimconn.VimConnUnexpectedResponse(
2281 "failed to create affinity rule {}".format(rule_name))
2282
2283 def get_add_rule_reference(self, respool_href, headers):
2284 """ This method finds href to add vm to host affinity rule to vCD
2285
2286 Args:
2287 respool_href- href to resource pool
2288 headers- header information to make REST call
2289
2290 Returns:
2291 None - if no valid href to add rule found or
2292 addrule_href - href to add vm to host affinity rule of resource pool
2293 """
2294 addrule_href = None
2295 resp = self.perform_request(req_type='GET', url=respool_href, headers=headers)
2296
2297 if resp.status_code != requests.codes.ok:
2298 self.logger.debug("REST API call {} failed. Return status code {}".format(respool_href, resp.status_code))
2299 else:
2300
2301 resp_xml = XmlElementTree.fromstring(resp.content)
2302 for child in resp_xml:
2303 if 'VMWProviderVdcResourcePool' in child.tag:
2304 for schild in child:
2305 if 'Link' in schild.tag:
2306 if (schild.attrib.get(
2307 'type') == "application/vnd.vmware.admin.vmwVmHostAffinityRule+xml" and
2308 schild.attrib.get('rel') == "add"):
2309 addrule_href = schild.attrib.get('href')
2310 break
2311
2312 return addrule_href
2313
2314 def add_vm_to_vmgroup(self, vm_uuid, vmGroupNameURL, vmGroup_name, headers):
2315 """ Method to add deployed VM to newly created VM Group.
2316 This is required to create VM to Host affinity in vCD
2317
2318 Args:
2319 vm_uuid- newly created vm uuid
2320 vmGroupNameURL- URL to VM Group name
2321 vmGroup_name- Name of VM group created
2322 headers- Headers for REST request
2323
2324 Returns:
2325 True- if VM added to VM group successfully
2326 False- if any error encounter
2327 """
2328
2329 addvm_resp = self.perform_request(req_type='GET', url=vmGroupNameURL, headers=headers) # , data=payload)
2330
2331 if addvm_resp.status_code != requests.codes.ok:
2332 self.logger.debug("REST API call to get VM Group Name url {} failed. Return status code {}"
2333 .format(vmGroupNameURL, addvm_resp.status_code))
2334 return False
2335 else:
2336 resp_xml = XmlElementTree.fromstring(addvm_resp.content)
2337 for child in resp_xml:
2338 if child.tag.split('}')[1] == 'Link':
2339 if child.attrib.get("rel") == "addVms":
2340 addvmtogrpURL = child.attrib.get("href")
2341
2342 # Get vm details
2343 url_list = [self.url, '/api/vApp/vm-', vm_uuid]
2344 vmdetailsURL = ''.join(url_list)
2345
2346 resp = self.perform_request(req_type='GET', url=vmdetailsURL, headers=headers)
2347
2348 if resp.status_code != requests.codes.ok:
2349 self.logger.debug("REST API call {} failed. Return status code {}".format(vmdetailsURL, resp.status_code))
2350 return False
2351
2352 # Parse VM details
2353 resp_xml = XmlElementTree.fromstring(resp.content)
2354 if resp_xml.tag.split('}')[1] == "Vm":
2355 vm_id = resp_xml.attrib.get("id")
2356 vm_name = resp_xml.attrib.get("name")
2357 vm_href = resp_xml.attrib.get("href")
2358 # print vm_id, vm_name, vm_href
2359 # Add VM into VMgroup
2360 payload = """<?xml version="1.0" encoding="UTF-8"?>\
2361 <ns2:Vms xmlns:ns2="http://www.vmware.com/vcloud/v1.5" \
2362 xmlns="http://www.vmware.com/vcloud/versions" \
2363 xmlns:ns3="http://schemas.dmtf.org/ovf/envelope/1" \
2364 xmlns:ns4="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData" \
2365 xmlns:ns5="http://schemas.dmtf.org/wbem/wscim/1/common" \
2366 xmlns:ns6="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData" \
2367 xmlns:ns7="http://www.vmware.com/schema/ovf" \
2368 xmlns:ns8="http://schemas.dmtf.org/ovf/environment/1" \
2369 xmlns:ns9="http://www.vmware.com/vcloud/extension/v1.5">\
2370 <ns2:VmReference href="{}" id="{}" name="{}" \
2371 type="application/vnd.vmware.vcloud.vm+xml" />\
2372 </ns2:Vms>""".format(vm_href, vm_id, vm_name)
2373
2374 addvmtogrp_resp = self.perform_request(req_type='POST', url=addvmtogrpURL, headers=headers, data=payload)
2375
2376 if addvmtogrp_resp.status_code != requests.codes.accepted:
2377 self.logger.debug("REST API call {} failed. Return status code {}".format(addvmtogrpURL,
2378 addvmtogrp_resp.
2379 status_code))
2380 return False
2381 else:
2382 self.logger.debug("Done adding VM {} to VMgroup {}".format(vm_name, vmGroup_name))
2383 return True
2384
2385 def create_vmgroup(self, vmgroup_name, vmgroup_href, headers):
2386 """Method to create a VM group in vCD
2387
2388 Args:
2389 vmgroup_name : Name of VM group to be created
2390 vmgroup_href : href for vmgroup
2391 headers- Headers for REST request
2392 """
2393 # POST to add URL with required data
2394 vmgroup_status = False
2395 payload = """<VMWVmGroup xmlns="http://www.vmware.com/vcloud/extension/v1.5" \
2396 xmlns:vcloud_v1.5="http://www.vmware.com/vcloud/v1.5" name="{}">\
2397 <vmCount>1</vmCount>\
2398 </VMWVmGroup>""".format(vmgroup_name)
2399 resp = self.perform_request(req_type='POST', url=vmgroup_href, headers=headers, data=payload)
2400
2401 if resp.status_code != requests.codes.accepted:
2402 self.logger.debug("REST API call {} failed. Return status code {}".format(vmgroup_href, resp.status_code))
2403 return vmgroup_status
2404 else:
2405 vmgroup_task = self.get_task_from_response(resp.content)
2406 if vmgroup_task is None or vmgroup_task is False:
2407 raise vimconn.VimConnUnexpectedResponse(
2408 "create_vmgroup(): failed to create VM group {}".format(vmgroup_name))
2409
2410 # wait for task to complete
2411 result = self.client.get_task_monitor().wait_for_success(task=vmgroup_task)
2412
2413 if result.get('status') == 'success':
2414 self.logger.debug("create_vmgroup(): Successfully created VM group {}".format(vmgroup_name))
2415 # time.sleep(10)
2416 vmgroup_status = True
2417 return vmgroup_status
2418 else:
2419 raise vimconn.VimConnUnexpectedResponse(
2420 "create_vmgroup(): failed to create VM group {}".format(vmgroup_name))
2421
2422 def find_vmgroup_reference(self, url, headers):
2423 """ Method to create a new VMGroup which is required to add created VM
2424 Args:
2425 url- resource pool href
2426 headers- header information
2427
2428 Returns:
2429 returns href to VM group to create VM group
2430 """
2431 # Perform GET on resource pool to find 'add' link to create VMGroup
2432 # https://vcd-ip/api/admin/extension/providervdc/<providervdc id>/resourcePools
2433 vmgrp_href = None
2434 resp = self.perform_request(req_type='GET', url=url, headers=headers)
2435
2436 if resp.status_code != requests.codes.ok:
2437 self.logger.debug("REST API call {} failed. Return status code {}".format(url, resp.status_code))
2438 else:
2439 # Get the href to add vmGroup to vCD
2440 resp_xml = XmlElementTree.fromstring(resp.content)
2441 for child in resp_xml:
2442 if 'VMWProviderVdcResourcePool' in child.tag:
2443 for schild in child:
2444 if 'Link' in schild.tag:
2445 # Find href with type VMGroup and rel with add
2446 if schild.attrib.get('type') == "application/vnd.vmware.admin.vmwVmGroupType+xml"\
2447 and schild.attrib.get('rel') == "add":
2448 vmgrp_href = schild.attrib.get('href')
2449 return vmgrp_href
2450
2451 def check_availibility_zone(self, az, respool_href, headers):
2452 """ Method to verify requested av zone is present or not in provided
2453 resource pool
2454
2455 Args:
2456 az - name of hostgroup (availibility_zone)
2457 respool_href - Resource Pool href
2458 headers - Headers to make REST call
2459 Returns:
2460 az_found - True if availibility_zone is found else False
2461 """
2462 az_found = False
2463 headers['Accept'] = 'application/*+xml;version=27.0'
2464 resp = self.perform_request(req_type='GET', url=respool_href, headers=headers)
2465
2466 if resp.status_code != requests.codes.ok:
2467 self.logger.debug("REST API call {} failed. Return status code {}".format(respool_href, resp.status_code))
2468 else:
2469 # Get the href to hostGroups and find provided hostGroup is present in it
2470 resp_xml = XmlElementTree.fromstring(resp.content)
2471
2472 for child in resp_xml:
2473 if 'VMWProviderVdcResourcePool' in child.tag:
2474 for schild in child:
2475 if 'Link' in schild.tag:
2476 if schild.attrib.get('type') == "application/vnd.vmware.admin.vmwHostGroupsType+xml":
2477 hostGroup_href = schild.attrib.get('href')
2478 hg_resp = self.perform_request(req_type='GET', url=hostGroup_href, headers=headers)
2479 if hg_resp.status_code != requests.codes.ok:
2480 self.logger.debug(
2481 "REST API call {} failed. Return status code {}".format(hostGroup_href,
2482 hg_resp.status_code))
2483 else:
2484 hg_resp_xml = XmlElementTree.fromstring(hg_resp.content)
2485 for hostGroup in hg_resp_xml:
2486 if 'HostGroup' in hostGroup.tag:
2487 if hostGroup.attrib.get("name") == az:
2488 az_found = True
2489 break
2490 return az_found
2491
2492 def get_pvdc_for_org(self, org_vdc, headers):
2493 """ This method gets provider vdc references from organisation
2494
2495 Args:
2496 org_vdc - name of the organisation VDC to find pvdc
2497 headers - headers to make REST call
2498
2499 Returns:
2500 None - if no pvdc href found else
2501 pvdc_href - href to pvdc
2502 """
2503
2504 # Get provider VDC references from vCD
2505 pvdc_href = None
2506 # url = '<vcd url>/api/admin/extension/providerVdcReferences'
2507 url_list = [self.url, '/api/admin/extension/providerVdcReferences']
2508 url = ''.join(url_list)
2509
2510 response = self.perform_request(req_type='GET', url=url, headers=headers)
2511 if response.status_code != requests.codes.ok:
2512 self.logger.debug("REST API call {} failed. Return status code {}"
2513 .format(url, response.status_code))
2514 else:
2515 xmlroot_response = XmlElementTree.fromstring(response.text)
2516 for child in xmlroot_response:
2517 if 'ProviderVdcReference' in child.tag:
2518 pvdc_href = child.attrib.get('href')
2519 # Get vdcReferences to find org
2520 pvdc_resp = self.perform_request(req_type='GET', url=pvdc_href, headers=headers)
2521 if pvdc_resp.status_code != requests.codes.ok:
2522 raise vimconn.VimConnException("REST API call {} failed. "
2523 "Return status code {}"
2524 .format(url, pvdc_resp.status_code))
2525
2526 pvdc_resp_xml = XmlElementTree.fromstring(pvdc_resp.content)
2527 for child in pvdc_resp_xml:
2528 if 'Link' in child.tag:
2529 if child.attrib.get('type') == "application/vnd.vmware.admin.vdcReferences+xml":
2530 vdc_href = child.attrib.get('href')
2531
2532 # Check if provided org is present in vdc
2533 vdc_resp = self.perform_request(req_type='GET',
2534 url=vdc_href,
2535 headers=headers)
2536 if vdc_resp.status_code != requests.codes.ok:
2537 raise vimconn.VimConnException("REST API call {} failed. "
2538 "Return status code {}"
2539 .format(url, vdc_resp.status_code))
2540 vdc_resp_xml = XmlElementTree.fromstring(vdc_resp.content)
2541 for child in vdc_resp_xml:
2542 if 'VdcReference' in child.tag:
2543 if child.attrib.get('name') == org_vdc:
2544 return pvdc_href
2545
2546 def get_resource_pool_details(self, pvdc_href, headers):
2547 """ Method to get resource pool information.
2548 Host groups are property of resource group.
2549 To get host groups, we need to GET details of resource pool.
2550
2551 Args:
2552 pvdc_href: href to pvdc details
2553 headers: headers
2554
2555 Returns:
2556 respool_href - Returns href link reference to resource pool
2557 """
2558 respool_href = None
2559 resp = self.perform_request(req_type='GET', url=pvdc_href, headers=headers)
2560
2561 if resp.status_code != requests.codes.ok:
2562 self.logger.debug("REST API call {} failed. Return status code {}"
2563 .format(pvdc_href, resp.status_code))
2564 else:
2565 respool_resp_xml = XmlElementTree.fromstring(resp.content)
2566 for child in respool_resp_xml:
2567 if 'Link' in child.tag:
2568 if child.attrib.get('type') == "application/vnd.vmware.admin.vmwProviderVdcResourcePoolSet+xml":
2569 respool_href = child.attrib.get("href")
2570 break
2571 return respool_href
2572
2573 def log_message(self, msg):
2574 """
2575 Method to log error messages related to Affinity rule creation
2576 in new_vminstance & raise Exception
2577 Args :
2578 msg - Error message to be logged
2579
2580 """
2581 # get token to connect vCD as a normal user
2582 self.get_token()
2583 self.logger.debug(msg)
2584 raise vimconn.VimConnException(msg)
2585
2586 # #
2587 # #
2588 # # based on current discussion
2589 # #
2590 # #
2591 # # server:
2592 # created: '2016-09-08T11:51:58'
2593 # description: simple-instance.linux1.1
2594 # flavor: ddc6776e-75a9-11e6-ad5f-0800273e724c
2595 # hostId: e836c036-74e7-11e6-b249-0800273e724c
2596 # image: dde30fe6-75a9-11e6-ad5f-0800273e724c
2597 # status: ACTIVE
2598 # error_msg:
2599 # interfaces: …
2600 #
2601 def get_vminstance(self, vim_vm_uuid=None):
2602 """Returns the VM instance information from VIM"""
2603
2604 self.logger.debug("Client requesting vm instance {} ".format(vim_vm_uuid))
2605
2606 _, vdc = self.get_vdc_details()
2607 if vdc is None:
2608 raise vimconn.VimConnConnectionException(
2609 "Failed to get a reference of VDC for a tenant {}".format(self.tenant_name))
2610
2611 vm_info_dict = self.get_vapp_details_rest(vapp_uuid=vim_vm_uuid)
2612 if not vm_info_dict:
2613 self.logger.debug("get_vminstance(): Failed to get vApp name by UUID {}".format(vim_vm_uuid))
2614 raise vimconn.VimConnNotFoundException("Failed to get vApp name by UUID {}".format(vim_vm_uuid))
2615
2616 status_key = vm_info_dict['status']
2617 error = ''
2618 try:
2619 vm_dict = {'created': vm_info_dict['created'],
2620 'description': vm_info_dict['name'],
2621 'status': vcdStatusCode2manoFormat[int(status_key)],
2622 'hostId': vm_info_dict['vmuuid'],
2623 'error_msg': error,
2624 'vim_info': yaml.safe_dump(vm_info_dict), 'interfaces': []}
2625
2626 if 'interfaces' in vm_info_dict:
2627 vm_dict['interfaces'] = vm_info_dict['interfaces']
2628 else:
2629 vm_dict['interfaces'] = []
2630 except KeyError:
2631 vm_dict = {'created': '',
2632 'description': '',
2633 'status': vcdStatusCode2manoFormat[int(-1)],
2634 'hostId': vm_info_dict['vmuuid'],
2635 'error_msg': "Inconsistency state",
2636 'vim_info': yaml.safe_dump(vm_info_dict), 'interfaces': []}
2637
2638 return vm_dict
2639
2640 def delete_vminstance(self, vm__vim_uuid, created_items=None):
2641 """Method poweroff and remove VM instance from vcloud director network.
2642
2643 Args:
2644 vm__vim_uuid: VM UUID
2645
2646 Returns:
2647 Returns the instance identifier
2648 """
2649
2650 self.logger.debug("Client requesting delete vm instance {} ".format(vm__vim_uuid))
2651
2652 _, vdc = self.get_vdc_details()
2653 vdc_obj = VDC(self.client, href=vdc.get('href'))
2654 if vdc_obj is None:
2655 self.logger.debug("delete_vminstance(): Failed to get a reference of VDC for a tenant {}".format(
2656 self.tenant_name))
2657 raise vimconn.VimConnException(
2658 "delete_vminstance(): Failed to get a reference of VDC for a tenant {}".format(self.tenant_name))
2659
2660 try:
2661 vapp_name = self.get_namebyvappid(vm__vim_uuid)
2662 if vapp_name is None:
2663 self.logger.debug("delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
2664 return -1, "delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid)
2665 self.logger.info("Deleting vApp {} and UUID {}".format(vapp_name, vm__vim_uuid))
2666 vapp_resource = vdc_obj.get_vapp(vapp_name)
2667 vapp = VApp(self.client, resource=vapp_resource)
2668
2669 # Delete vApp and wait for status change if task executed and vApp is None.
2670
2671 if vapp:
2672 if vapp_resource.get('deployed') == 'true':
2673 self.logger.info("Powering off vApp {}".format(vapp_name))
2674 # Power off vApp
2675 powered_off = False
2676 wait_time = 0
2677 while wait_time <= MAX_WAIT_TIME:
2678 power_off_task = vapp.power_off()
2679 result = self.client.get_task_monitor().wait_for_success(task=power_off_task)
2680
2681 if result.get('status') == 'success':
2682 powered_off = True
2683 break
2684 else:
2685 self.logger.info("Wait for vApp {} to power off".format(vapp_name))
2686 time.sleep(INTERVAL_TIME)
2687
2688 wait_time += INTERVAL_TIME
2689 if not powered_off:
2690 self.logger.debug(
2691 "delete_vminstance(): Failed to power off VM instance {} ".format(vm__vim_uuid))
2692 else:
2693 self.logger.info("delete_vminstance(): Powered off VM instance {} ".format(vm__vim_uuid))
2694
2695 # Undeploy vApp
2696 self.logger.info("Undeploy vApp {}".format(vapp_name))
2697 wait_time = 0
2698 undeployed = False
2699 while wait_time <= MAX_WAIT_TIME:
2700 vapp = VApp(self.client, resource=vapp_resource)
2701 if not vapp:
2702 self.logger.debug(
2703 "delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
2704 return -1, "delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid)
2705 undeploy_task = vapp.undeploy()
2706
2707 result = self.client.get_task_monitor().wait_for_success(task=undeploy_task)
2708 if result.get('status') == 'success':
2709 undeployed = True
2710 break
2711 else:
2712 self.logger.debug("Wait for vApp {} to undeploy".format(vapp_name))
2713 time.sleep(INTERVAL_TIME)
2714
2715 wait_time += INTERVAL_TIME
2716
2717 if not undeployed:
2718 self.logger.debug("delete_vminstance(): Failed to undeploy vApp {} ".format(vm__vim_uuid))
2719
2720 # delete vapp
2721 self.logger.info("Start deletion of vApp {} ".format(vapp_name))
2722
2723 if vapp is not None:
2724 wait_time = 0
2725 result = False
2726
2727 while wait_time <= MAX_WAIT_TIME:
2728 vapp = VApp(self.client, resource=vapp_resource)
2729 if not vapp:
2730 self.logger.debug(
2731 "delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
2732 return -1, "delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid)
2733
2734 delete_task = vdc_obj.delete_vapp(vapp.name, force=True)
2735
2736 result = self.client.get_task_monitor().wait_for_success(task=delete_task)
2737 if result.get('status') == 'success':
2738 break
2739 else:
2740 self.logger.debug("Wait for vApp {} to delete".format(vapp_name))
2741 time.sleep(INTERVAL_TIME)
2742
2743 wait_time += INTERVAL_TIME
2744
2745 if result is None:
2746 self.logger.debug("delete_vminstance(): Failed delete uuid {} ".format(vm__vim_uuid))
2747 else:
2748 self.logger.info("Deleted vm instance {} sccessfully".format(vm__vim_uuid))
2749 config_drive_catalog_name, config_drive_catalog_id = 'cfg_drv-' + vm__vim_uuid, None
2750 catalog_list = self.get_image_list()
2751 try:
2752 config_drive_catalog_id = [catalog_['id'] for catalog_ in catalog_list
2753 if catalog_['name'] == config_drive_catalog_name][0]
2754 except IndexError:
2755 pass
2756 if config_drive_catalog_id:
2757 self.logger.debug('delete_vminstance(): Found a config drive catalog {} matching '
2758 'vapp_name"{}". Deleting it.'.format(config_drive_catalog_id, vapp_name))
2759 self.delete_image(config_drive_catalog_id)
2760 return vm__vim_uuid
2761 except Exception:
2762 self.logger.debug(traceback.format_exc())
2763 raise vimconn.VimConnException("delete_vminstance(): Failed delete vm instance {}".format(vm__vim_uuid))
2764
2765 def refresh_vms_status(self, vm_list):
2766 """Get the status of the virtual machines and their interfaces/ports
2767 Params: the list of VM identifiers
2768 Returns a dictionary with:
2769 vm_id: #VIM id of this Virtual Machine
2770 status: #Mandatory. Text with one of:
2771 # DELETED (not found at vim)
2772 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
2773 # OTHER (Vim reported other status not understood)
2774 # ERROR (VIM indicates an ERROR status)
2775 # ACTIVE, PAUSED, SUSPENDED, INACTIVE (not running),
2776 # CREATING (on building process), ERROR
2777 # ACTIVE:NoMgmtIP (Active but any of its interface has an IP address
2778 #
2779 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
2780 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
2781 interfaces:
2782 - vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
2783 mac_address: #Text format XX:XX:XX:XX:XX:XX
2784 vim_net_id: #network id where this interface is connected
2785 vim_interface_id: #interface/port VIM id
2786 ip_address: #null, or text with IPv4, IPv6 address
2787 """
2788
2789 self.logger.debug("Client requesting refresh vm status for {} ".format(vm_list))
2790
2791 _, vdc = self.get_vdc_details()
2792 if vdc is None:
2793 raise vimconn.VimConnException("Failed to get a reference of VDC for a tenant {}".format(self.tenant_name))
2794
2795 vms_dict = {}
2796 nsx_edge_list = []
2797 for vmuuid in vm_list:
2798 vapp_name = self.get_namebyvappid(vmuuid)
2799 if vapp_name is not None:
2800
2801 try:
2802 vm_pci_details = self.get_vm_pci_details(vmuuid)
2803 vdc_obj = VDC(self.client, href=vdc.get('href'))
2804 vapp_resource = vdc_obj.get_vapp(vapp_name)
2805 the_vapp = VApp(self.client, resource=vapp_resource)
2806
2807 vm_details = {}
2808 for vm in the_vapp.get_all_vms():
2809 headers = {'Accept': 'application/*+xml;version=' + API_VERSION,
2810 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
2811 response = self.perform_request(req_type='GET',
2812 url=vm.get('href'),
2813 headers=headers)
2814
2815 if response.status_code != 200:
2816 self.logger.error("refresh_vms_status : REST call {} failed reason : {}"
2817 "status code : {}".format(vm.get('href'),
2818 response.text,
2819 response.status_code))
2820 raise vimconn.VimConnException("refresh_vms_status : Failed to get VM details")
2821 xmlroot = XmlElementTree.fromstring(response.text)
2822
2823 result = response.text.replace("\n", " ")
2824 hdd_match = re.search(
2825 'vcloud:capacity="(\d+)"\svcloud:storageProfileOverrideVmDefault=', result)
2826 if hdd_match:
2827 hdd_mb = hdd_match.group(1)
2828 vm_details['hdd_mb'] = int(hdd_mb) if hdd_mb else None
2829 cpus_match = re.search(
2830 '<rasd:Description>Number of Virtual CPUs</.*?>(\d+)</rasd:VirtualQuantity>', result)
2831 if cpus_match:
2832 cpus = cpus_match.group(1)
2833 vm_details['cpus'] = int(cpus) if cpus else None
2834 memory_mb = re.search(
2835 '<rasd:Description>Memory Size</.*?>(\d+)</rasd:VirtualQuantity>', result).group(1)
2836 vm_details['memory_mb'] = int(memory_mb) if memory_mb else None
2837 vm_details['status'] = vcdStatusCode2manoFormat[int(xmlroot.get('status'))]
2838 vm_details['id'] = xmlroot.get('id')
2839 vm_details['name'] = xmlroot.get('name')
2840 vm_info = [vm_details]
2841 if vm_pci_details:
2842 vm_info[0].update(vm_pci_details)
2843
2844 vm_dict = {'status': vcdStatusCode2manoFormat[int(vapp_resource.get('status'))],
2845 'error_msg': vcdStatusCode2manoFormat[int(vapp_resource.get('status'))],
2846 'vim_info': yaml.safe_dump(vm_info), 'interfaces': []}
2847
2848 # get networks
2849 vm_ip = None
2850 vm_mac = None
2851 networks = re.findall('<NetworkConnection needsCustomization=.*?</NetworkConnection>', result)
2852 for network in networks:
2853 mac_s = re.search('<MACAddress>(.*?)</MACAddress>', network)
2854 vm_mac = mac_s.group(1) if mac_s else None
2855 ip_s = re.search('<IpAddress>(.*?)</IpAddress>', network)
2856 vm_ip = ip_s.group(1) if ip_s else None
2857
2858 if vm_ip is None:
2859 if not nsx_edge_list:
2860 nsx_edge_list = self.get_edge_details()
2861 if nsx_edge_list is None:
2862 raise vimconn.VimConnException("refresh_vms_status:"
2863 "Failed to get edge details from NSX Manager")
2864 if vm_mac is not None:
2865 vm_ip = self.get_ipaddr_from_NSXedge(nsx_edge_list, vm_mac)
2866
2867 net_s = re.search('network="(.*?)"', network)
2868 network_name = net_s.group(1) if net_s else None
2869
2870 vm_net_id = self.get_network_id_by_name(network_name)
2871 interface = {"mac_address": vm_mac,
2872 "vim_net_id": vm_net_id,
2873 "vim_interface_id": vm_net_id,
2874 "ip_address": vm_ip}
2875
2876 vm_dict["interfaces"].append(interface)
2877
2878 # add a vm to vm dict
2879 vms_dict.setdefault(vmuuid, vm_dict)
2880 self.logger.debug("refresh_vms_status : vm info {}".format(vm_dict))
2881 except Exception as exp:
2882 self.logger.debug("Error in response {}".format(exp))
2883 self.logger.debug(traceback.format_exc())
2884
2885 return vms_dict
2886
2887 def get_edge_details(self):
2888 """Get the NSX edge list from NSX Manager
2889 Returns list of NSX edges
2890 """
2891 edge_list = []
2892 rheaders = {'Content-Type': 'application/xml'}
2893 nsx_api_url = '/api/4.0/edges'
2894
2895 self.logger.debug("Get edge details from NSX Manager {} {}".format(self.nsx_manager, nsx_api_url))
2896
2897 try:
2898 resp = requests.get(self.nsx_manager + nsx_api_url,
2899 auth=(self.nsx_user, self.nsx_password),
2900 verify=False, headers=rheaders)
2901 if resp.status_code == requests.codes.ok:
2902 paged_Edge_List = XmlElementTree.fromstring(resp.text)
2903 for edge_pages in paged_Edge_List:
2904 if edge_pages.tag == 'edgePage':
2905 for edge_summary in edge_pages:
2906 if edge_summary.tag == 'pagingInfo':
2907 for element in edge_summary:
2908 if element.tag == 'totalCount' and element.text == '0':
2909 raise vimconn.VimConnException(
2910 "get_edge_details: No NSX edges details found: {}"
2911 .format(self.nsx_manager))
2912
2913 if edge_summary.tag == 'edgeSummary':
2914 for element in edge_summary:
2915 if element.tag == 'id':
2916 edge_list.append(element.text)
2917 else:
2918 raise vimconn.VimConnException("get_edge_details: No NSX edge details found: {}"
2919 .format(self.nsx_manager))
2920
2921 if not edge_list:
2922 raise vimconn.VimConnException("get_edge_details: "
2923 "No NSX edge details found: {}"
2924 .format(self.nsx_manager))
2925 else:
2926 self.logger.debug("get_edge_details: Found NSX edges {}".format(edge_list))
2927 return edge_list
2928 else:
2929 self.logger.debug("get_edge_details: "
2930 "Failed to get NSX edge details from NSX Manager: {}"
2931 .format(resp.content))
2932 return None
2933
2934 except Exception as exp:
2935 self.logger.debug("get_edge_details: "
2936 "Failed to get NSX edge details from NSX Manager: {}"
2937 .format(exp))
2938 raise vimconn.VimConnException("get_edge_details: "
2939 "Failed to get NSX edge details from NSX Manager: {}"
2940 .format(exp))
2941
2942 def get_ipaddr_from_NSXedge(self, nsx_edges, mac_address):
2943 """Get IP address details from NSX edges, using the MAC address
2944 PARAMS: nsx_edges : List of NSX edges
2945 mac_address : Find IP address corresponding to this MAC address
2946 Returns: IP address corrresponding to the provided MAC address
2947 """
2948
2949 ip_addr = None
2950 rheaders = {'Content-Type': 'application/xml'}
2951
2952 self.logger.debug("get_ipaddr_from_NSXedge: Finding IP addr from NSX edge")
2953
2954 try:
2955 for edge in nsx_edges:
2956 nsx_api_url = '/api/4.0/edges/' + edge + '/dhcp/leaseInfo'
2957
2958 resp = requests.get(self.nsx_manager + nsx_api_url,
2959 auth=(self.nsx_user, self.nsx_password),
2960 verify=False, headers=rheaders)
2961
2962 if resp.status_code == requests.codes.ok:
2963 dhcp_leases = XmlElementTree.fromstring(resp.text)
2964 for child in dhcp_leases:
2965 if child.tag == 'dhcpLeaseInfo':
2966 dhcpLeaseInfo = child
2967 for leaseInfo in dhcpLeaseInfo:
2968 for elem in leaseInfo:
2969 if (elem.tag) == 'macAddress':
2970 edge_mac_addr = elem.text
2971 if (elem.tag) == 'ipAddress':
2972 ip_addr = elem.text
2973 if edge_mac_addr is not None:
2974 if edge_mac_addr == mac_address:
2975 self.logger.debug("Found ip addr {} for mac {} at NSX edge {}"
2976 .format(ip_addr, mac_address, edge))
2977 return ip_addr
2978 else:
2979 self.logger.debug("get_ipaddr_from_NSXedge: "
2980 "Error occurred while getting DHCP lease info from NSX Manager: {}"
2981 .format(resp.content))
2982
2983 self.logger.debug("get_ipaddr_from_NSXedge: No IP addr found in any NSX edge")
2984 return None
2985
2986 except XmlElementTree.ParseError as Err:
2987 self.logger.debug("ParseError in response from NSX Manager {}".format(Err.message), exc_info=True)
2988
2989 def action_vminstance(self, vm__vim_uuid=None, action_dict=None, created_items={}):
2990 """Send and action over a VM instance from VIM
2991 Returns the vm_id if the action was successfully sent to the VIM"""
2992
2993 self.logger.debug("Received action for vm {} and action dict {}".format(vm__vim_uuid, action_dict))
2994 if vm__vim_uuid is None or action_dict is None:
2995 raise vimconn.VimConnException("Invalid request. VM id or action is None.")
2996
2997 _, vdc = self.get_vdc_details()
2998 if vdc is None:
2999 raise vimconn.VimConnException("Failed to get a reference of VDC for a tenant {}".format(self.tenant_name))
3000
3001 vapp_name = self.get_namebyvappid(vm__vim_uuid)
3002 if vapp_name is None:
3003 self.logger.debug("action_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
3004 raise vimconn.VimConnException("Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
3005 else:
3006 self.logger.info("Action_vminstance vApp {} and UUID {}".format(vapp_name, vm__vim_uuid))
3007
3008 try:
3009 vdc_obj = VDC(self.client, href=vdc.get('href'))
3010 vapp_resource = vdc_obj.get_vapp(vapp_name)
3011 vapp = VApp(self.client, resource=vapp_resource)
3012 if "start" in action_dict:
3013 self.logger.info("action_vminstance: Power on vApp: {}".format(vapp_name))
3014 poweron_task = self.power_on_vapp(vm__vim_uuid, vapp_name)
3015 result = self.client.get_task_monitor().wait_for_success(task=poweron_task)
3016 self.instance_actions_result("start", result, vapp_name)
3017 elif "rebuild" in action_dict:
3018 self.logger.info("action_vminstance: Rebuild vApp: {}".format(vapp_name))
3019 rebuild_task = vapp.deploy(power_on=True)
3020 result = self.client.get_task_monitor().wait_for_success(task=rebuild_task)
3021 self.instance_actions_result("rebuild", result, vapp_name)
3022 elif "pause" in action_dict:
3023 self.logger.info("action_vminstance: pause vApp: {}".format(vapp_name))
3024 pause_task = vapp.undeploy(action='suspend')
3025 result = self.client.get_task_monitor().wait_for_success(task=pause_task)
3026 self.instance_actions_result("pause", result, vapp_name)
3027 elif "resume" in action_dict:
3028 self.logger.info("action_vminstance: resume vApp: {}".format(vapp_name))
3029 poweron_task = self.power_on_vapp(vm__vim_uuid, vapp_name)
3030 result = self.client.get_task_monitor().wait_for_success(task=poweron_task)
3031 self.instance_actions_result("resume", result, vapp_name)
3032 elif "shutoff" in action_dict or "shutdown" in action_dict:
3033 action_name, _ = list(action_dict.items())[0]
3034 self.logger.info("action_vminstance: {} vApp: {}".format(action_name, vapp_name))
3035 shutdown_task = vapp.shutdown()
3036 result = self.client.get_task_monitor().wait_for_success(task=shutdown_task)
3037 if action_name == "shutdown":
3038 self.instance_actions_result("shutdown", result, vapp_name)
3039 else:
3040 self.instance_actions_result("shutoff", result, vapp_name)
3041 elif "forceOff" in action_dict:
3042 result = vapp.undeploy(action='powerOff')
3043 self.instance_actions_result("forceOff", result, vapp_name)
3044 elif "reboot" in action_dict:
3045 self.logger.info("action_vminstance: reboot vApp: {}".format(vapp_name))
3046 reboot_task = vapp.reboot()
3047 self.client.get_task_monitor().wait_for_success(task=reboot_task)
3048 else:
3049 raise vimconn.VimConnException(
3050 "action_vminstance: Invalid action {} or action is None.".format(action_dict))
3051 return vm__vim_uuid
3052 except Exception as exp:
3053 self.logger.debug("action_vminstance: Failed with Exception {}".format(exp))
3054 raise vimconn.VimConnException("action_vminstance: Failed with Exception {}".format(exp))
3055
3056 def instance_actions_result(self, action, result, vapp_name):
3057 if result.get('status') == 'success':
3058 self.logger.info("action_vminstance: Sucessfully {} the vApp: {}".format(action, vapp_name))
3059 else:
3060 self.logger.error("action_vminstance: Failed to {} vApp: {}".format(action, vapp_name))
3061
3062 def get_vminstance_console(self, vm_id, console_type="novnc"):
3063 """
3064 Get a console for the virtual machine
3065 Params:
3066 vm_id: uuid of the VM
3067 console_type, can be:
3068 "novnc" (by default), "xvpvnc" for VNC types,
3069 "rdp-html5" for RDP types, "spice-html5" for SPICE types
3070 Returns dict with the console parameters:
3071 protocol: ssh, ftp, http, https, ...
3072 server: usually ip address
3073 port: the http, ssh, ... port
3074 suffix: extra text, e.g. the http path and query string
3075 """
3076 console_dict = {}
3077
3078 if console_type is None or console_type == 'novnc':
3079
3080 url_rest_call = "{}/api/vApp/vm-{}/screen/action/acquireMksTicket".format(self.url, vm_id)
3081
3082 headers = {'Accept': 'application/*+xml;version=' + API_VERSION,
3083 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
3084 response = self.perform_request(req_type='POST',
3085 url=url_rest_call,
3086 headers=headers)
3087
3088 if response.status_code == 403:
3089 response = self.retry_rest('GET', url_rest_call)
3090
3091 if response.status_code != 200:
3092 self.logger.error("REST call {} failed reason : {}"
3093 "status code : {}".format(url_rest_call,
3094 response.text,
3095 response.status_code))
3096 raise vimconn.VimConnException("get_vminstance_console : Failed to get "
3097 "VM Mks ticket details")
3098 s = re.search("<Host>(.*?)</Host>", response.text)
3099 console_dict['server'] = s.group(1) if s else None
3100 s1 = re.search("<Port>(\d+)</Port>", response.text)
3101 console_dict['port'] = s1.group(1) if s1 else None
3102
3103 url_rest_call = "{}/api/vApp/vm-{}/screen/action/acquireTicket".format(self.url, vm_id)
3104
3105 headers = {'Accept': 'application/*+xml;version=' + API_VERSION,
3106 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
3107 response = self.perform_request(req_type='POST',
3108 url=url_rest_call,
3109 headers=headers)
3110
3111 if response.status_code == 403:
3112 response = self.retry_rest('GET', url_rest_call)
3113
3114 if response.status_code != 200:
3115 self.logger.error("REST call {} failed reason : {}"
3116 "status code : {}".format(url_rest_call,
3117 response.text,
3118 response.status_code))
3119 raise vimconn.VimConnException("get_vminstance_console : Failed to get "
3120 "VM console details")
3121 s = re.search(">.*?/(vm-\d+.*)</", response.text)
3122 console_dict['suffix'] = s.group(1) if s else None
3123 console_dict['protocol'] = "https"
3124
3125 return console_dict
3126
3127 # NOT USED METHODS in current version
3128
3129 def host_vim2gui(self, host, server_dict):
3130 """Transform host dictionary from VIM format to GUI format,
3131 and append to the server_dict
3132 """
3133 raise vimconn.VimConnNotImplemented("Should have implemented this")
3134
3135 def get_hosts_info(self):
3136 """Get the information of deployed hosts
3137 Returns the hosts content"""
3138 raise vimconn.VimConnNotImplemented("Should have implemented this")
3139
3140 def get_hosts(self, vim_tenant):
3141 """Get the hosts and deployed instances
3142 Returns the hosts content"""
3143 raise vimconn.VimConnNotImplemented("Should have implemented this")
3144
3145 def get_processor_rankings(self):
3146 """Get the processor rankings in the VIM database"""
3147 raise vimconn.VimConnNotImplemented("Should have implemented this")
3148
3149 def new_host(self, host_data):
3150 """Adds a new host to VIM"""
3151 '''Returns status code of the VIM response'''
3152 raise vimconn.VimConnNotImplemented("Should have implemented this")
3153
3154 def new_external_port(self, port_data):
3155 """Adds a external port to VIM"""
3156 '''Returns the port identifier'''
3157 raise vimconn.VimConnNotImplemented("Should have implemented this")
3158
3159 def new_external_network(self, net_name, net_type):
3160 """Adds a external network to VIM (shared)"""
3161 '''Returns the network identifier'''
3162 raise vimconn.VimConnNotImplemented("Should have implemented this")
3163
3164 def connect_port_network(self, port_id, network_id, admin=False):
3165 """Connects a external port to a network"""
3166 '''Returns status code of the VIM response'''
3167 raise vimconn.VimConnNotImplemented("Should have implemented this")
3168
3169 def new_vminstancefromJSON(self, vm_data):
3170 """Adds a VM instance to VIM"""
3171 '''Returns the instance identifier'''
3172 raise vimconn.VimConnNotImplemented("Should have implemented this")
3173
3174 def get_network_name_by_id(self, network_uuid=None):
3175 """Method gets vcloud director network named based on supplied uuid.
3176
3177 Args:
3178 network_uuid: network_id
3179
3180 Returns:
3181 The return network name.
3182 """
3183
3184 if not network_uuid:
3185 return None
3186
3187 try:
3188 org_dict = self.get_org(self.org_uuid)
3189 if 'networks' in org_dict:
3190 org_network_dict = org_dict['networks']
3191 for net_uuid in org_network_dict:
3192 if net_uuid == network_uuid:
3193 return org_network_dict[net_uuid]
3194 except Exception:
3195 self.logger.debug("Exception in get_network_name_by_id")
3196 self.logger.debug(traceback.format_exc())
3197
3198 return None
3199
3200 def get_network_id_by_name(self, network_name=None):
3201 """Method gets vcloud director network uuid based on supplied name.
3202
3203 Args:
3204 network_name: network_name
3205 Returns:
3206 The return network uuid.
3207 network_uuid: network_id
3208 """
3209 if not network_name:
3210 self.logger.debug("get_network_id_by_name() : Network name is empty")
3211 return None
3212
3213 try:
3214 org_dict = self.get_org(self.org_uuid)
3215 if org_dict and 'networks' in org_dict:
3216 org_network_dict = org_dict['networks']
3217 for net_uuid, net_name in org_network_dict.items():
3218 if net_name == network_name:
3219 return net_uuid
3220
3221 except KeyError as exp:
3222 self.logger.debug("get_network_id_by_name() : KeyError- {} ".format(exp))
3223
3224 return None
3225
3226 def get_physical_network_by_name(self, physical_network_name):
3227 '''
3228 Methos returns uuid of physical network which passed
3229 Args:
3230 physical_network_name: physical network name
3231 Returns:
3232 UUID of physical_network_name
3233 '''
3234 try:
3235 client_as_admin = self.connect_as_admin()
3236 if not client_as_admin:
3237 raise vimconn.VimConnConnectionException("Failed to connect vCD.")
3238 url_list = [self.url, '/api/admin/vdc/', self.tenant_id]
3239 vm_list_rest_call = ''.join(url_list)
3240
3241 if client_as_admin._session:
3242 headers = {'Accept': 'application/*+xml;version=' + API_VERSION,
3243 'x-vcloud-authorization': client_as_admin._session.headers['x-vcloud-authorization']}
3244
3245 response = self.perform_request(req_type='GET',
3246 url=vm_list_rest_call,
3247 headers=headers)
3248
3249 provider_network = None
3250 available_network = None
3251 # add_vdc_rest_url = None
3252
3253 if response.status_code != requests.codes.ok:
3254 self.logger.debug("REST API call {} failed. Return status code {}".format(vm_list_rest_call,
3255 response.status_code))
3256 return None
3257 else:
3258 try:
3259 vm_list_xmlroot = XmlElementTree.fromstring(response.text)
3260 for child in vm_list_xmlroot:
3261
3262 if child.tag.split("}")[1] == 'ProviderVdcReference':
3263 provider_network = child.attrib.get('href')
3264 # application/vnd.vmware.admin.providervdc+xml
3265 if child.tag.split("}")[1] == 'Link':
3266 if child.attrib.get('type') == 'application/vnd.vmware.vcloud.orgVdcNetwork+xml' \
3267 and child.attrib.get('rel') == 'add':
3268 child.attrib.get('href')
3269 except Exception:
3270 self.logger.debug("Failed parse respond for rest api call {}".format(vm_list_rest_call))
3271 self.logger.debug("Respond body {}".format(response.text))
3272 return None
3273
3274 # find pvdc provided available network
3275 response = self.perform_request(req_type='GET',
3276 url=provider_network,
3277 headers=headers)
3278
3279 if response.status_code != requests.codes.ok:
3280 self.logger.debug("REST API call {} failed. Return status code {}".format(vm_list_rest_call,
3281 response.status_code))
3282 return None
3283
3284 try:
3285 vm_list_xmlroot = XmlElementTree.fromstring(response.text)
3286 for child in vm_list_xmlroot.iter():
3287 if child.tag.split("}")[1] == 'AvailableNetworks':
3288 for networks in child.iter():
3289 if networks.attrib.get('href') is not None and networks.attrib.get('name') is not None:
3290 if networks.attrib.get('name') == physical_network_name:
3291 network_url = networks.attrib.get('href')
3292 available_network = network_url[network_url.rindex('/') + 1:]
3293 break
3294 except Exception as e:
3295 return None
3296
3297 return available_network
3298 except Exception as e:
3299 self.logger.error("Error while getting physical network: {}".format(e))
3300
3301 def list_org_action(self):
3302 """
3303 Method leverages vCloud director and query for available organization for particular user
3304
3305 Args:
3306 vca - is active VCA connection.
3307 vdc_name - is a vdc name that will be used to query vms action
3308
3309 Returns:
3310 The return XML respond
3311 """
3312 url_list = [self.url, '/api/org']
3313 vm_list_rest_call = ''.join(url_list)
3314
3315 if self.client._session:
3316 headers = {'Accept': 'application/*+xml;version=' + API_VERSION,
3317 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
3318
3319 response = self.perform_request(req_type='GET',
3320 url=vm_list_rest_call,
3321 headers=headers)
3322
3323 if response.status_code == 403:
3324 response = self.retry_rest('GET', vm_list_rest_call)
3325
3326 if response.status_code == requests.codes.ok:
3327 return response.text
3328
3329 return None
3330
3331 def get_org_action(self, org_uuid=None):
3332 """
3333 Method leverages vCloud director and retrieve available object for organization.
3334
3335 Args:
3336 org_uuid - vCD organization uuid
3337 self.client - is active connection.
3338
3339 Returns:
3340 The return XML respond
3341 """
3342
3343 if org_uuid is None:
3344 return None
3345
3346 url_list = [self.url, '/api/org/', org_uuid]
3347 vm_list_rest_call = ''.join(url_list)
3348
3349 if self.client._session:
3350 headers = {'Accept': 'application/*+xml;version=' + API_VERSION,
3351 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
3352
3353 # response = requests.get(vm_list_rest_call, headers=headers, verify=False)
3354 response = self.perform_request(req_type='GET',
3355 url=vm_list_rest_call,
3356 headers=headers)
3357 if response.status_code == 403:
3358 response = self.retry_rest('GET', vm_list_rest_call)
3359
3360 if response.status_code == requests.codes.ok:
3361 return response.text
3362 return None
3363
3364 def get_org(self, org_uuid=None):
3365 """
3366 Method retrieves available organization in vCloud Director
3367
3368 Args:
3369 org_uuid - is a organization uuid.
3370
3371 Returns:
3372 The return dictionary with following key
3373 "network" - for network list under the org
3374 "catalogs" - for network list under the org
3375 "vdcs" - for vdc list under org
3376 """
3377
3378 org_dict = {}
3379
3380 if org_uuid is None:
3381 return org_dict
3382
3383 content = self.get_org_action(org_uuid=org_uuid)
3384 try:
3385 vdc_list = {}
3386 network_list = {}
3387 catalog_list = {}
3388 vm_list_xmlroot = XmlElementTree.fromstring(content)
3389 for child in vm_list_xmlroot:
3390 if child.attrib['type'] == 'application/vnd.vmware.vcloud.vdc+xml':
3391 vdc_list[child.attrib['href'].split("/")[-1:][0]] = child.attrib['name']
3392 org_dict['vdcs'] = vdc_list
3393 if child.attrib['type'] == 'application/vnd.vmware.vcloud.orgNetwork+xml':
3394 network_list[child.attrib['href'].split("/")[-1:][0]] = child.attrib['name']
3395 org_dict['networks'] = network_list
3396 if child.attrib['type'] == 'application/vnd.vmware.vcloud.catalog+xml':
3397 catalog_list[child.attrib['href'].split("/")[-1:][0]] = child.attrib['name']
3398 org_dict['catalogs'] = catalog_list
3399 except Exception:
3400 pass
3401
3402 return org_dict
3403
3404 def get_org_list(self):
3405 """
3406 Method retrieves available organization in vCloud Director
3407
3408 Args:
3409 vca - is active VCA connection.
3410
3411 Returns:
3412 The return dictionary and key for each entry VDC UUID
3413 """
3414
3415 org_dict = {}
3416
3417 content = self.list_org_action()
3418 try:
3419 vm_list_xmlroot = XmlElementTree.fromstring(content)
3420 for vm_xml in vm_list_xmlroot:
3421 if vm_xml.tag.split("}")[1] == 'Org':
3422 org_uuid = vm_xml.attrib['href'].split('/')[-1:]
3423 org_dict[org_uuid[0]] = vm_xml.attrib['name']
3424 except Exception:
3425 pass
3426
3427 return org_dict
3428
3429 def vms_view_action(self, vdc_name=None):
3430 """ Method leverages vCloud director vms query call
3431
3432 Args:
3433 vca - is active VCA connection.
3434 vdc_name - is a vdc name that will be used to query vms action
3435
3436 Returns:
3437 The return XML respond
3438 """
3439 vca = self.connect()
3440 if vdc_name is None:
3441 return None
3442
3443 url_list = [vca.host, '/api/vms/query']
3444 vm_list_rest_call = ''.join(url_list)
3445
3446 if not (not vca.vcloud_session or not vca.vcloud_session.organization):
3447 refs = [ref for ref in vca.vcloud_session.organization.Link if ref.name == vdc_name and
3448 ref.type_ == 'application/vnd.vmware.vcloud.vdc+xml']
3449 if len(refs) == 1:
3450 response = Http.get(url=vm_list_rest_call,
3451 headers=vca.vcloud_session.get_vcloud_headers(),
3452 verify=vca.verify,
3453 logger=vca.logger)
3454 if response.status_code == requests.codes.ok:
3455 return response.text
3456
3457 return None
3458
3459 def get_vapp_list(self, vdc_name=None):
3460 """
3461 Method retrieves vApp list deployed vCloud director and returns a dictionary
3462 contains a list of all vapp deployed for queried VDC.
3463 The key for a dictionary is vApp UUID
3464
3465
3466 Args:
3467 vca - is active VCA connection.
3468 vdc_name - is a vdc name that will be used to query vms action
3469
3470 Returns:
3471 The return dictionary and key for each entry vapp UUID
3472 """
3473
3474 vapp_dict = {}
3475 if vdc_name is None:
3476 return vapp_dict
3477
3478 content = self.vms_view_action(vdc_name=vdc_name)
3479 try:
3480 vm_list_xmlroot = XmlElementTree.fromstring(content)
3481 for vm_xml in vm_list_xmlroot:
3482 if vm_xml.tag.split("}")[1] == 'VMRecord':
3483 if vm_xml.attrib['isVAppTemplate'] == 'true':
3484 rawuuid = vm_xml.attrib['container'].split('/')[-1:]
3485 if 'vappTemplate-' in rawuuid[0]:
3486 # vm in format vappTemplate-e63d40e7-4ff5-4c6d-851f-96c1e4da86a5 we remove
3487 # vm and use raw UUID as key
3488 vapp_dict[rawuuid[0][13:]] = vm_xml.attrib
3489 except Exception:
3490 pass
3491
3492 return vapp_dict
3493
3494 def get_vm_list(self, vdc_name=None):
3495 """
3496 Method retrieves VM's list deployed vCloud director. It returns a dictionary
3497 contains a list of all VM's deployed for queried VDC.
3498 The key for a dictionary is VM UUID
3499
3500
3501 Args:
3502 vca - is active VCA connection.
3503 vdc_name - is a vdc name that will be used to query vms action
3504
3505 Returns:
3506 The return dictionary and key for each entry vapp UUID
3507 """
3508 vm_dict = {}
3509
3510 if vdc_name is None:
3511 return vm_dict
3512
3513 content = self.vms_view_action(vdc_name=vdc_name)
3514 try:
3515 vm_list_xmlroot = XmlElementTree.fromstring(content)
3516 for vm_xml in vm_list_xmlroot:
3517 if vm_xml.tag.split("}")[1] == 'VMRecord':
3518 if vm_xml.attrib['isVAppTemplate'] == 'false':
3519 rawuuid = vm_xml.attrib['href'].split('/')[-1:]
3520 if 'vm-' in rawuuid[0]:
3521 # vm in format vm-e63d40e7-4ff5-4c6d-851f-96c1e4da86a5 we remove
3522 # vm and use raw UUID as key
3523 vm_dict[rawuuid[0][3:]] = vm_xml.attrib
3524 except Exception:
3525 pass
3526
3527 return vm_dict
3528
3529 def get_vapp(self, vdc_name=None, vapp_name=None, isuuid=False):
3530 """
3531 Method retrieves VM deployed vCloud director. It returns VM attribute as dictionary
3532 contains a list of all VM's deployed for queried VDC.
3533 The key for a dictionary is VM UUID
3534
3535
3536 Args:
3537 vca - is active VCA connection.
3538 vdc_name - is a vdc name that will be used to query vms action
3539
3540 Returns:
3541 The return dictionary and key for each entry vapp UUID
3542 """
3543 vm_dict = {}
3544 vca = self.connect()
3545 if not vca:
3546 raise vimconn.VimConnConnectionException("self.connect() is failed")
3547
3548 if vdc_name is None:
3549 return vm_dict
3550
3551 content = self.vms_view_action(vdc_name=vdc_name)
3552 try:
3553 vm_list_xmlroot = XmlElementTree.fromstring(content)
3554 for vm_xml in vm_list_xmlroot:
3555 if vm_xml.tag.split("}")[1] == 'VMRecord' and vm_xml.attrib['isVAppTemplate'] == 'false':
3556 # lookup done by UUID
3557 if isuuid:
3558 if vapp_name in vm_xml.attrib['container']:
3559 rawuuid = vm_xml.attrib['href'].split('/')[-1:]
3560 if 'vm-' in rawuuid[0]:
3561 vm_dict[rawuuid[0][3:]] = vm_xml.attrib
3562 break
3563 # lookup done by Name
3564 else:
3565 if vapp_name in vm_xml.attrib['name']:
3566 rawuuid = vm_xml.attrib['href'].split('/')[-1:]
3567 if 'vm-' in rawuuid[0]:
3568 vm_dict[rawuuid[0][3:]] = vm_xml.attrib
3569 break
3570 except Exception:
3571 pass
3572
3573 return vm_dict
3574
3575 def get_network_action(self, network_uuid=None):
3576 """
3577 Method leverages vCloud director and query network based on network uuid
3578
3579 Args:
3580 vca - is active VCA connection.
3581 network_uuid - is a network uuid
3582
3583 Returns:
3584 The return XML respond
3585 """
3586
3587 if network_uuid is None:
3588 return None
3589
3590 url_list = [self.url, '/api/network/', network_uuid]
3591 vm_list_rest_call = ''.join(url_list)
3592
3593 if self.client._session:
3594 headers = {'Accept': 'application/*+xml;version=' + API_VERSION,
3595 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
3596
3597 response = self.perform_request(req_type='GET',
3598 url=vm_list_rest_call,
3599 headers=headers)
3600 # Retry login if session expired & retry sending request
3601 if response.status_code == 403:
3602 response = self.retry_rest('GET', vm_list_rest_call)
3603
3604 if response.status_code == requests.codes.ok:
3605 return response.text
3606
3607 return None
3608
3609 def get_vcd_network(self, network_uuid=None):
3610 """
3611 Method retrieves available network from vCloud Director
3612
3613 Args:
3614 network_uuid - is VCD network UUID
3615
3616 Each element serialized as key : value pair
3617
3618 Following keys available for access. network_configuration['Gateway'}
3619 <Configuration>
3620 <IpScopes>
3621 <IpScope>
3622 <IsInherited>true</IsInherited>
3623 <Gateway>172.16.252.100</Gateway>
3624 <Netmask>255.255.255.0</Netmask>
3625 <Dns1>172.16.254.201</Dns1>
3626 <Dns2>172.16.254.202</Dns2>
3627 <DnsSuffix>vmwarelab.edu</DnsSuffix>
3628 <IsEnabled>true</IsEnabled>
3629 <IpRanges>
3630 <IpRange>
3631 <StartAddress>172.16.252.1</StartAddress>
3632 <EndAddress>172.16.252.99</EndAddress>
3633 </IpRange>
3634 </IpRanges>
3635 </IpScope>
3636 </IpScopes>
3637 <FenceMode>bridged</FenceMode>
3638
3639 Returns:
3640 The return dictionary and key for each entry vapp UUID
3641 """
3642
3643 network_configuration = {}
3644 if network_uuid is None:
3645 return network_uuid
3646
3647 try:
3648 content = self.get_network_action(network_uuid=network_uuid)
3649 if content is not None:
3650 vm_list_xmlroot = XmlElementTree.fromstring(content)
3651
3652 network_configuration['status'] = vm_list_xmlroot.get("status")
3653 network_configuration['name'] = vm_list_xmlroot.get("name")
3654 network_configuration['uuid'] = vm_list_xmlroot.get("id").split(":")[3]
3655
3656 for child in vm_list_xmlroot:
3657 if child.tag.split("}")[1] == 'IsShared':
3658 network_configuration['isShared'] = child.text.strip()
3659 if child.tag.split("}")[1] == 'Configuration':
3660 for configuration in child.iter():
3661 tagKey = configuration.tag.split("}")[1].strip()
3662 if tagKey != "":
3663 network_configuration[tagKey] = configuration.text.strip()
3664 except Exception as exp:
3665 self.logger.debug("get_vcd_network: Failed with Exception {}".format(exp))
3666 raise vimconn.VimConnException("get_vcd_network: Failed with Exception {}".format(exp))
3667
3668 return network_configuration
3669
3670 def delete_network_action(self, network_uuid=None):
3671 """
3672 Method delete given network from vCloud director
3673
3674 Args:
3675 network_uuid - is a network uuid that client wish to delete
3676
3677 Returns:
3678 The return None or XML respond or false
3679 """
3680 client = self.connect_as_admin()
3681 if not client:
3682 raise vimconn.VimConnConnectionException("Failed to connect vCD as admin")
3683 if network_uuid is None:
3684 return False
3685
3686 url_list = [self.url, '/api/admin/network/', network_uuid]
3687 vm_list_rest_call = ''.join(url_list)
3688
3689 if client._session:
3690 headers = {'Accept': 'application/*+xml;version=' + API_VERSION,
3691 'x-vcloud-authorization': client._session.headers['x-vcloud-authorization']}
3692 response = self.perform_request(req_type='DELETE',
3693 url=vm_list_rest_call,
3694 headers=headers)
3695 if response.status_code == 202:
3696 return True
3697
3698 return False
3699
3700 def create_network(self, network_name=None, net_type='bridge', parent_network_uuid=None,
3701 ip_profile=None, isshared='true'):
3702 """
3703 Method create network in vCloud director
3704
3705 Args:
3706 network_name - is network name to be created.
3707 net_type - can be 'bridge','data','ptp','mgmt'.
3708 ip_profile is a dict containing the IP parameters of the network
3709 isshared - is a boolean
3710 parent_network_uuid - is parent provider vdc network that will be used for mapping.
3711 It optional attribute. by default if no parent network indicate the first available will be used.
3712
3713 Returns:
3714 The return network uuid or return None
3715 """
3716
3717 new_network_name = [network_name, '-', str(uuid.uuid4())]
3718 content = self.create_network_rest(network_name=''.join(new_network_name),
3719 ip_profile=ip_profile,
3720 net_type=net_type,
3721 parent_network_uuid=parent_network_uuid,
3722 isshared=isshared)
3723 if content is None:
3724 self.logger.debug("Failed create network {}.".format(network_name))
3725 return None
3726
3727 try:
3728 vm_list_xmlroot = XmlElementTree.fromstring(content)
3729 vcd_uuid = vm_list_xmlroot.get('id').split(":")
3730 if len(vcd_uuid) == 4:
3731 self.logger.info("Created new network name: {} uuid: {}".format(network_name, vcd_uuid[3]))
3732 return vcd_uuid[3]
3733 except Exception:
3734 self.logger.debug("Failed create network {}".format(network_name))
3735 return None
3736
3737 def create_network_rest(self, network_name=None, net_type='bridge', parent_network_uuid=None,
3738 ip_profile=None, isshared='true'):
3739 """
3740 Method create network in vCloud director
3741
3742 Args:
3743 network_name - is network name to be created.
3744 net_type - can be 'bridge','data','ptp','mgmt'.
3745 ip_profile is a dict containing the IP parameters of the network
3746 isshared - is a boolean
3747 parent_network_uuid - is parent provider vdc network that will be used for mapping.
3748 It optional attribute. by default if no parent network indicate the first available will be used.
3749
3750 Returns:
3751 The return network uuid or return None
3752 """
3753 client_as_admin = self.connect_as_admin()
3754 if not client_as_admin:
3755 raise vimconn.VimConnConnectionException("Failed to connect vCD.")
3756 if network_name is None:
3757 return None
3758
3759 url_list = [self.url, '/api/admin/vdc/', self.tenant_id]
3760 vm_list_rest_call = ''.join(url_list)
3761
3762 if client_as_admin._session:
3763 headers = {'Accept': 'application/*+xml;version=' + API_VERSION,
3764 'x-vcloud-authorization': client_as_admin._session.headers['x-vcloud-authorization']}
3765
3766 response = self.perform_request(req_type='GET',
3767 url=vm_list_rest_call,
3768 headers=headers)
3769
3770 provider_network = None
3771 available_networks = None
3772 add_vdc_rest_url = None
3773
3774 if response.status_code != requests.codes.ok:
3775 self.logger.debug("REST API call {} failed. Return status code {}".format(vm_list_rest_call,
3776 response.status_code))
3777 return None
3778 else:
3779 try:
3780 vm_list_xmlroot = XmlElementTree.fromstring(response.text)
3781 for child in vm_list_xmlroot:
3782
3783 if child.tag.split("}")[1] == 'ProviderVdcReference':
3784 provider_network = child.attrib.get('href')
3785 # application/vnd.vmware.admin.providervdc+xml
3786 if child.tag.split("}")[1] == 'Link':
3787 if child.attrib.get('type') == 'application/vnd.vmware.vcloud.orgVdcNetwork+xml' \
3788 and child.attrib.get('rel') == 'add':
3789 add_vdc_rest_url = child.attrib.get('href')
3790 except Exception:
3791 self.logger.debug("Failed parse respond for rest api call {}".format(vm_list_rest_call))
3792 self.logger.debug("Respond body {}".format(response.text))
3793 return None
3794
3795 # find pvdc provided available network
3796 response = self.perform_request(req_type='GET',
3797 url=provider_network,
3798 headers=headers)
3799
3800 if response.status_code != requests.codes.ok:
3801 self.logger.debug("REST API call {} failed. Return status code {}".format(vm_list_rest_call,
3802 response.status_code))
3803 return None
3804
3805 if parent_network_uuid is None:
3806 try:
3807 vm_list_xmlroot = XmlElementTree.fromstring(response.text)
3808 for child in vm_list_xmlroot.iter():
3809 if child.tag.split("}")[1] == 'AvailableNetworks':
3810 for networks in child.iter():
3811 # application/vnd.vmware.admin.network+xml
3812 if networks.attrib.get('href') is not None:
3813 available_networks = networks.attrib.get('href')
3814 break
3815 except Exception:
3816 return None
3817
3818 try:
3819 # Configure IP profile of the network
3820 ip_profile = ip_profile if ip_profile is not None else DEFAULT_IP_PROFILE
3821
3822 if 'subnet_address' not in ip_profile or ip_profile['subnet_address'] is None:
3823 subnet_rand = random.randint(0, 255)
3824 ip_base = "192.168.{}.".format(subnet_rand)
3825 ip_profile['subnet_address'] = ip_base + "0/24"
3826 else:
3827 ip_base = ip_profile['subnet_address'].rsplit('.', 1)[0] + '.'
3828
3829 if 'gateway_address' not in ip_profile or ip_profile['gateway_address'] is None:
3830 ip_profile['gateway_address'] = ip_base + "1"
3831 if 'dhcp_count' not in ip_profile or ip_profile['dhcp_count'] is None:
3832 ip_profile['dhcp_count'] = DEFAULT_IP_PROFILE['dhcp_count']
3833 if 'dhcp_enabled' not in ip_profile or ip_profile['dhcp_enabled'] is None:
3834 ip_profile['dhcp_enabled'] = DEFAULT_IP_PROFILE['dhcp_enabled']
3835 if 'dhcp_start_address' not in ip_profile or ip_profile['dhcp_start_address'] is None:
3836 ip_profile['dhcp_start_address'] = ip_base + "3"
3837 if 'ip_version' not in ip_profile or ip_profile['ip_version'] is None:
3838 ip_profile['ip_version'] = DEFAULT_IP_PROFILE['ip_version']
3839 if 'dns_address' not in ip_profile or ip_profile['dns_address'] is None:
3840 ip_profile['dns_address'] = ip_base + "2"
3841
3842 gateway_address = ip_profile['gateway_address']
3843 dhcp_count = int(ip_profile['dhcp_count'])
3844 subnet_address = self.convert_cidr_to_netmask(ip_profile['subnet_address'])
3845
3846 if ip_profile['dhcp_enabled'] is True:
3847 dhcp_enabled = 'true'
3848 else:
3849 dhcp_enabled = 'false'
3850 dhcp_start_address = ip_profile['dhcp_start_address']
3851
3852 # derive dhcp_end_address from dhcp_start_address & dhcp_count
3853 end_ip_int = int(netaddr.IPAddress(dhcp_start_address))
3854 end_ip_int += dhcp_count - 1
3855 dhcp_end_address = str(netaddr.IPAddress(end_ip_int))
3856
3857 # ip_version = ip_profile['ip_version']
3858 dns_address = ip_profile['dns_address']
3859 except KeyError as exp:
3860 self.logger.debug("Create Network REST: Key error {}".format(exp))
3861 raise vimconn.VimConnException("Create Network REST: Key error{}".format(exp))
3862
3863 # either use client provided UUID or search for a first available
3864 # if both are not defined we return none
3865 if parent_network_uuid is not None:
3866 provider_network = None
3867 available_networks = None
3868 add_vdc_rest_url = None
3869
3870 url_list = [self.url, '/api/admin/vdc/', self.tenant_id, '/networks']
3871 add_vdc_rest_url = ''.join(url_list)
3872
3873 url_list = [self.url, '/api/admin/network/', parent_network_uuid]
3874 available_networks = ''.join(url_list)
3875
3876 # Creating all networks as Direct Org VDC type networks.
3877 # Unused in case of Underlay (data/ptp) network interface.
3878 fence_mode = "isolated"
3879 is_inherited = 'false'
3880 dns_list = dns_address.split(";")
3881 dns1 = dns_list[0]
3882 dns2_text = ""
3883 if len(dns_list) >= 2:
3884 dns2_text = "\n <Dns2>{}</Dns2>\n".format(dns_list[1])
3885 if net_type == "isolated":
3886 fence_mode = "isolated"
3887 data = """ <OrgVdcNetwork name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5">
3888 <Description>Openmano created</Description>
3889 <Configuration>
3890 <IpScopes>
3891 <IpScope>
3892 <IsInherited>{1:s}</IsInherited>
3893 <Gateway>{2:s}</Gateway>
3894 <Netmask>{3:s}</Netmask>
3895 <Dns1>{4:s}</Dns1>{5:s}
3896 <IsEnabled>{6:s}</IsEnabled>
3897 <IpRanges>
3898 <IpRange>
3899 <StartAddress>{7:s}</StartAddress>
3900 <EndAddress>{8:s}</EndAddress>
3901 </IpRange>
3902 </IpRanges>
3903 </IpScope>
3904 </IpScopes>
3905 <FenceMode>{9:s}</FenceMode>
3906 </Configuration>
3907 <IsShared>{10:s}</IsShared>
3908 </OrgVdcNetwork> """.format(escape(network_name), is_inherited, gateway_address,
3909 subnet_address, dns1, dns2_text, dhcp_enabled,
3910 dhcp_start_address, dhcp_end_address,
3911 fence_mode, isshared)
3912 else:
3913 fence_mode = "bridged"
3914 data = """ <OrgVdcNetwork name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5">
3915 <Description>Openmano created</Description>
3916 <Configuration>
3917 <IpScopes>
3918 <IpScope>
3919 <IsInherited>{1:s}</IsInherited>
3920 <Gateway>{2:s}</Gateway>
3921 <Netmask>{3:s}</Netmask>
3922 <Dns1>{4:s}</Dns1>{5:s}
3923 <IsEnabled>{6:s}</IsEnabled>
3924 <IpRanges>
3925 <IpRange>
3926 <StartAddress>{7:s}</StartAddress>
3927 <EndAddress>{8:s}</EndAddress>
3928 </IpRange>
3929 </IpRanges>
3930 </IpScope>
3931 </IpScopes>
3932 <ParentNetwork href="{9:s}"/>
3933 <FenceMode>{10:s}</FenceMode>
3934 </Configuration>
3935 <IsShared>{11:s}</IsShared>
3936 </OrgVdcNetwork> """.format(escape(network_name), is_inherited, gateway_address,
3937 subnet_address, dns1, dns2_text, dhcp_enabled,
3938 dhcp_start_address, dhcp_end_address, available_networks,
3939 fence_mode, isshared)
3940
3941 headers['Content-Type'] = 'application/vnd.vmware.vcloud.orgVdcNetwork+xml'
3942 try:
3943 response = self.perform_request(req_type='POST',
3944 url=add_vdc_rest_url,
3945 headers=headers,
3946 data=data)
3947
3948 if response.status_code != 201:
3949 self.logger.debug("Create Network POST REST API call failed. "
3950 "Return status code {}, response.text: {}"
3951 .format(response.status_code, response.text))
3952 else:
3953 network_task = self.get_task_from_response(response.text)
3954 self.logger.debug("Create Network REST : Waiting for Network creation complete")
3955 time.sleep(5)
3956 result = self.client.get_task_monitor().wait_for_success(task=network_task)
3957 if result.get('status') == 'success':
3958 return response.text
3959 else:
3960 self.logger.debug("create_network_rest task failed. Network Create response : {}"
3961 .format(response.text))
3962 except Exception as exp:
3963 self.logger.debug("create_network_rest : Exception : {} ".format(exp))
3964
3965 return None
3966
3967 def convert_cidr_to_netmask(self, cidr_ip=None):
3968 """
3969 Method sets convert CIDR netmask address to normal IP format
3970 Args:
3971 cidr_ip : CIDR IP address
3972 Returns:
3973 netmask : Converted netmask
3974 """
3975 if cidr_ip is not None:
3976 if '/' in cidr_ip:
3977 _, net_bits = cidr_ip.split('/')
3978 netmask = socket.inet_ntoa(struct.pack(">I", (0xffffffff << (32 - int(net_bits))) & 0xffffffff))
3979 else:
3980 netmask = cidr_ip
3981 return netmask
3982 return None
3983
3984 def get_provider_rest(self, vca=None):
3985 """
3986 Method gets provider vdc view from vcloud director
3987
3988 Args:
3989 network_name - is network name to be created.
3990 parent_network_uuid - is parent provider vdc network that will be used for mapping.
3991 It optional attribute. by default if no parent network indicate the first available will be used.
3992
3993 Returns:
3994 The return xml content of respond or None
3995 """
3996
3997 url_list = [self.url, '/api/admin']
3998 if vca:
3999 headers = {'Accept': 'application/*+xml;version=' + API_VERSION,
4000 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
4001 response = self.perform_request(req_type='GET',
4002 url=''.join(url_list),
4003 headers=headers)
4004
4005 if response.status_code == requests.codes.ok:
4006 return response.text
4007 return None
4008
4009 def create_vdc(self, vdc_name=None):
4010
4011 vdc_dict = {}
4012
4013 xml_content = self.create_vdc_from_tmpl_rest(vdc_name=vdc_name)
4014 if xml_content is not None:
4015 try:
4016 task_resp_xmlroot = XmlElementTree.fromstring(xml_content)
4017 for child in task_resp_xmlroot:
4018 if child.tag.split("}")[1] == 'Owner':
4019 vdc_id = child.attrib.get('href').split("/")[-1]
4020 vdc_dict[vdc_id] = task_resp_xmlroot.get('href')
4021 return vdc_dict
4022 except Exception:
4023 self.logger.debug("Respond body {}".format(xml_content))
4024
4025 return None
4026
4027 def create_vdc_from_tmpl_rest(self, vdc_name=None):
4028 """
4029 Method create vdc in vCloud director based on VDC template.
4030 it uses pre-defined template.
4031
4032 Args:
4033 vdc_name - name of a new vdc.
4034
4035 Returns:
4036 The return xml content of respond or None
4037 """
4038 # pre-requesite atleast one vdc template should be available in vCD
4039 self.logger.info("Creating new vdc {}".format(vdc_name))
4040 vca = self.connect_as_admin()
4041 if not vca:
4042 raise vimconn.VimConnConnectionException("Failed to connect vCD")
4043 if vdc_name is None:
4044 return None
4045
4046 url_list = [self.url, '/api/vdcTemplates']
4047 vm_list_rest_call = ''.join(url_list)
4048
4049 headers = {'Accept': 'application/*+xml;version=' + API_VERSION,
4050 'x-vcloud-authorization': vca._session.headers['x-vcloud-authorization']}
4051 response = self.perform_request(req_type='GET',
4052 url=vm_list_rest_call,
4053 headers=headers)
4054
4055 # container url to a template
4056 vdc_template_ref = None
4057 try:
4058 vm_list_xmlroot = XmlElementTree.fromstring(response.text)
4059 for child in vm_list_xmlroot:
4060 # application/vnd.vmware.admin.providervdc+xml
4061 # we need find a template from witch we instantiate VDC
4062 if child.tag.split("}")[1] == 'VdcTemplate':
4063 if child.attrib.get('type') == 'application/vnd.vmware.admin.vdcTemplate+xml':
4064 vdc_template_ref = child.attrib.get('href')
4065 except Exception:
4066 self.logger.debug("Failed parse respond for rest api call {}".format(vm_list_rest_call))
4067 self.logger.debug("Respond body {}".format(response.text))
4068 return None
4069
4070 # if we didn't found required pre defined template we return None
4071 if vdc_template_ref is None:
4072 return None
4073
4074 try:
4075 # instantiate vdc
4076 url_list = [self.url, '/api/org/', self.org_uuid, '/action/instantiate']
4077 vm_list_rest_call = ''.join(url_list)
4078 data = """<InstantiateVdcTemplateParams name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5">
4079 <Source href="{1:s}"></Source>
4080 <Description>opnemano</Description>
4081 </InstantiateVdcTemplateParams>""".format(vdc_name, vdc_template_ref)
4082
4083 headers['Content-Type'] = 'application/vnd.vmware.vcloud.instantiateVdcTemplateParams+xml'
4084
4085 response = self.perform_request(req_type='POST',
4086 url=vm_list_rest_call,
4087 headers=headers,
4088 data=data)
4089
4090 vdc_task = self.get_task_from_response(response.text)
4091 self.client.get_task_monitor().wait_for_success(task=vdc_task)
4092
4093 # if we all ok we respond with content otherwise by default None
4094 if response.status_code >= 200 and response.status_code < 300:
4095 return response.text
4096 return None
4097 except Exception:
4098 self.logger.debug("Failed parse respond for rest api call {}".format(vm_list_rest_call))
4099 self.logger.debug("Respond body {}".format(response.text))
4100
4101 return None
4102
4103 def create_vdc_rest(self, vdc_name=None):
4104 """
4105 Method create network in vCloud director
4106
4107 Args:
4108 vdc_name - vdc name to be created
4109 Returns:
4110 The return response
4111 """
4112
4113 self.logger.info("Creating new vdc {}".format(vdc_name))
4114
4115 vca = self.connect_as_admin()
4116 if not vca:
4117 raise vimconn.VimConnConnectionException("Failed to connect vCD")
4118 if vdc_name is None:
4119 return None
4120
4121 url_list = [self.url, '/api/admin/org/', self.org_uuid]
4122 vm_list_rest_call = ''.join(url_list)
4123
4124 if vca._session:
4125 headers = {'Accept': 'application/*+xml;version=' + API_VERSION,
4126 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
4127 response = self.perform_request(req_type='GET',
4128 url=vm_list_rest_call,
4129 headers=headers)
4130
4131 provider_vdc_ref = None
4132 add_vdc_rest_url = None
4133 # available_networks = None
4134
4135 if response.status_code != requests.codes.ok:
4136 self.logger.debug("REST API call {} failed. Return status code {}".format(vm_list_rest_call,
4137 response.status_code))
4138 return None
4139 else:
4140 try:
4141 vm_list_xmlroot = XmlElementTree.fromstring(response.text)
4142 for child in vm_list_xmlroot:
4143 # application/vnd.vmware.admin.providervdc+xml
4144 if child.tag.split("}")[1] == 'Link':
4145 if child.attrib.get('type') == 'application/vnd.vmware.admin.createVdcParams+xml' \
4146 and child.attrib.get('rel') == 'add':
4147 add_vdc_rest_url = child.attrib.get('href')
4148 except Exception:
4149 self.logger.debug("Failed parse respond for rest api call {}".format(vm_list_rest_call))
4150 self.logger.debug("Respond body {}".format(response.text))
4151 return None
4152
4153 response = self.get_provider_rest(vca=vca)
4154 try:
4155 vm_list_xmlroot = XmlElementTree.fromstring(response)
4156 for child in vm_list_xmlroot:
4157 if child.tag.split("}")[1] == 'ProviderVdcReferences':
4158 for sub_child in child:
4159 provider_vdc_ref = sub_child.attrib.get('href')
4160 except Exception:
4161 self.logger.debug("Failed parse respond for rest api call {}".format(vm_list_rest_call))
4162 self.logger.debug("Respond body {}".format(response))
4163 return None
4164
4165 if add_vdc_rest_url is not None and provider_vdc_ref is not None:
4166 data = """ <CreateVdcParams name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5"><Description>{1:s}</Description>
4167 <AllocationModel>ReservationPool</AllocationModel>
4168 <ComputeCapacity><Cpu><Units>MHz</Units><Allocated>2048</Allocated><Limit>2048</Limit></Cpu>
4169 <Memory><Units>MB</Units><Allocated>2048</Allocated><Limit>2048</Limit></Memory>
4170 </ComputeCapacity><NicQuota>0</NicQuota><NetworkQuota>100</NetworkQuota>
4171 <VdcStorageProfile><Enabled>true</Enabled><Units>MB</Units><Limit>20480</Limit><Default>true</Default></VdcStorageProfile>
4172 <ProviderVdcReference
4173 name="Main Provider"
4174 href="{2:s}" />
4175 <UsesFastProvisioning>true</UsesFastProvisioning></CreateVdcParams>""".format(escape(vdc_name),
4176 escape(vdc_name),
4177 provider_vdc_ref)
4178
4179 headers['Content-Type'] = 'application/vnd.vmware.admin.createVdcParams+xml'
4180
4181 response = self.perform_request(req_type='POST',
4182 url=add_vdc_rest_url,
4183 headers=headers,
4184 data=data)
4185
4186 # if we all ok we respond with content otherwise by default None
4187 if response.status_code == 201:
4188 return response.text
4189 return None
4190
4191 def get_vapp_details_rest(self, vapp_uuid=None, need_admin_access=False):
4192 """
4193 Method retrieve vapp detail from vCloud director
4194
4195 Args:
4196 vapp_uuid - is vapp identifier.
4197
4198 Returns:
4199 The return network uuid or return None
4200 """
4201
4202 parsed_respond = {}
4203 vca = None
4204
4205 if need_admin_access:
4206 vca = self.connect_as_admin()
4207 else:
4208 vca = self.client
4209
4210 if not vca:
4211 raise vimconn.VimConnConnectionException("Failed to connect vCD")
4212 if vapp_uuid is None:
4213 return None
4214
4215 url_list = [self.url, '/api/vApp/vapp-', vapp_uuid]
4216 get_vapp_restcall = ''.join(url_list)
4217
4218 if vca._session:
4219 headers = {'Accept': 'application/*+xml;version=' + API_VERSION,
4220 'x-vcloud-authorization': vca._session.headers['x-vcloud-authorization']}
4221 response = self.perform_request(req_type='GET',
4222 url=get_vapp_restcall,
4223 headers=headers)
4224
4225 if response.status_code == 403:
4226 if need_admin_access is False:
4227 response = self.retry_rest('GET', get_vapp_restcall)
4228
4229 if response.status_code != requests.codes.ok:
4230 self.logger.debug("REST API call {} failed. Return status code {}".format(get_vapp_restcall,
4231 response.status_code))
4232 return parsed_respond
4233
4234 try:
4235 xmlroot_respond = XmlElementTree.fromstring(response.text)
4236 parsed_respond['ovfDescriptorUploaded'] = xmlroot_respond.attrib['ovfDescriptorUploaded']
4237
4238 namespaces = {
4239 "vssd": "http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData",
4240 'ovf': 'http://schemas.dmtf.org/ovf/envelope/1',
4241 'vmw': 'http://www.vmware.com/schema/ovf',
4242 'vm': 'http://www.vmware.com/vcloud/v1.5',
4243 'rasd': "http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData",
4244 "vmext": "http://www.vmware.com/vcloud/extension/v1.5",
4245 "xmlns": "http://www.vmware.com/vcloud/v1.5"
4246 }
4247
4248 created_section = xmlroot_respond.find('vm:DateCreated', namespaces)
4249 if created_section is not None:
4250 parsed_respond['created'] = created_section.text
4251
4252 network_section = xmlroot_respond.find('vm:NetworkConfigSection/vm:NetworkConfig', namespaces)
4253 if network_section is not None and 'networkName' in network_section.attrib:
4254 parsed_respond['networkname'] = network_section.attrib['networkName']
4255
4256 ipscopes_section = \
4257 xmlroot_respond.find('vm:NetworkConfigSection/vm:NetworkConfig/vm:Configuration/vm:IpScopes',
4258 namespaces)
4259 if ipscopes_section is not None:
4260 for ipscope in ipscopes_section:
4261 for scope in ipscope:
4262 tag_key = scope.tag.split("}")[1]
4263 if tag_key == 'IpRanges':
4264 ip_ranges = scope.getchildren()
4265 for ipblock in ip_ranges:
4266 for block in ipblock:
4267 parsed_respond[block.tag.split("}")[1]] = block.text
4268 else:
4269 parsed_respond[tag_key] = scope.text
4270
4271 # parse children section for other attrib
4272 children_section = xmlroot_respond.find('vm:Children/', namespaces)
4273 if children_section is not None:
4274 parsed_respond['name'] = children_section.attrib['name']
4275 parsed_respond['nestedHypervisorEnabled'] = children_section.attrib['nestedHypervisorEnabled'] \
4276 if "nestedHypervisorEnabled" in children_section.attrib else None
4277 parsed_respond['deployed'] = children_section.attrib['deployed']
4278 parsed_respond['status'] = children_section.attrib['status']
4279 parsed_respond['vmuuid'] = children_section.attrib['id'].split(":")[-1]
4280 network_adapter = children_section.find('vm:NetworkConnectionSection', namespaces)
4281 nic_list = []
4282 for adapters in network_adapter:
4283 adapter_key = adapters.tag.split("}")[1]
4284 if adapter_key == 'PrimaryNetworkConnectionIndex':
4285 parsed_respond['primarynetwork'] = adapters.text
4286 if adapter_key == 'NetworkConnection':
4287 vnic = {}
4288 if 'network' in adapters.attrib:
4289 vnic['network'] = adapters.attrib['network']
4290 for adapter in adapters:
4291 setting_key = adapter.tag.split("}")[1]
4292 vnic[setting_key] = adapter.text
4293 nic_list.append(vnic)
4294
4295 for link in children_section:
4296 if link.tag.split("}")[1] == 'Link' and 'rel' in link.attrib:
4297 if link.attrib['rel'] == 'screen:acquireTicket':
4298 parsed_respond['acquireTicket'] = link.attrib
4299 if link.attrib['rel'] == 'screen:acquireMksTicket':
4300 parsed_respond['acquireMksTicket'] = link.attrib
4301
4302 parsed_respond['interfaces'] = nic_list
4303 vCloud_extension_section = children_section.find('xmlns:VCloudExtension', namespaces)
4304 if vCloud_extension_section is not None:
4305 vm_vcenter_info = {}
4306 vim_info = vCloud_extension_section.find('vmext:VmVimInfo', namespaces)
4307 vmext = vim_info.find('vmext:VmVimObjectRef', namespaces)
4308 if vmext is not None:
4309 vm_vcenter_info["vm_moref_id"] = vmext.find('vmext:MoRef', namespaces).text
4310 parsed_respond["vm_vcenter_info"] = vm_vcenter_info
4311
4312 virtual_hardware_section = children_section.find('ovf:VirtualHardwareSection', namespaces)
4313 vm_virtual_hardware_info = {}
4314 if virtual_hardware_section is not None:
4315 for item in virtual_hardware_section.iterfind('ovf:Item', namespaces):
4316 if item.find("rasd:Description", namespaces).text == "Hard disk":
4317 disk_size = item.find(
4318 "rasd:HostResource", namespaces).attrib["{" + namespaces['vm'] + "}capacity"]
4319
4320 vm_virtual_hardware_info["disk_size"] = disk_size
4321 break
4322
4323 for link in virtual_hardware_section:
4324 if link.tag.split("}")[1] == 'Link' and 'rel' in link.attrib:
4325 if link.attrib['rel'] == 'edit' and link.attrib['href'].endswith("/disks"):
4326 vm_virtual_hardware_info["disk_edit_href"] = link.attrib['href']
4327 break
4328
4329 parsed_respond["vm_virtual_hardware"] = vm_virtual_hardware_info
4330 except Exception as exp:
4331 self.logger.info("Error occurred calling rest api for getting vApp details {}".format(exp))
4332 return parsed_respond
4333
4334 def acquire_console(self, vm_uuid=None):
4335
4336 if vm_uuid is None:
4337 return None
4338 if self.client._session:
4339 headers = {'Accept': 'application/*+xml;version=' + API_VERSION,
4340 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
4341 vm_dict = self.get_vapp_details_rest(vapp_uuid=vm_uuid)
4342 console_dict = vm_dict['acquireTicket']
4343 console_rest_call = console_dict['href']
4344
4345 response = self.perform_request(req_type='POST',
4346 url=console_rest_call,
4347 headers=headers)
4348
4349 if response.status_code == 403:
4350 response = self.retry_rest('POST', console_rest_call)
4351
4352 if response.status_code == requests.codes.ok:
4353 return response.text
4354
4355 return None
4356
4357 def modify_vm_disk(self, vapp_uuid, flavor_disk):
4358 """
4359 Method retrieve vm disk details
4360
4361 Args:
4362 vapp_uuid - is vapp identifier.
4363 flavor_disk - disk size as specified in VNFD (flavor)
4364
4365 Returns:
4366 The return network uuid or return None
4367 """
4368 status = None
4369 try:
4370 # Flavor disk is in GB convert it into MB
4371 flavor_disk = int(flavor_disk) * 1024
4372 vm_details = self.get_vapp_details_rest(vapp_uuid)
4373 if vm_details:
4374 vm_name = vm_details["name"]
4375 self.logger.info("VM: {} flavor_disk :{}".format(vm_name, flavor_disk))
4376
4377 if vm_details and "vm_virtual_hardware" in vm_details:
4378 vm_disk = int(vm_details["vm_virtual_hardware"]["disk_size"])
4379 disk_edit_href = vm_details["vm_virtual_hardware"]["disk_edit_href"]
4380
4381 self.logger.info("VM: {} VM_disk :{}".format(vm_name, vm_disk))
4382
4383 if flavor_disk > vm_disk:
4384 status = self.modify_vm_disk_rest(disk_edit_href, flavor_disk)
4385 self.logger.info("Modify disk of VM {} from {} to {} MB".format(
4386 vm_name,
4387 vm_disk, flavor_disk))
4388 else:
4389 status = True
4390 self.logger.info("No need to modify disk of VM {}".format(vm_name))
4391
4392 return status
4393 except Exception as exp:
4394 self.logger.info("Error occurred while modifing disk size {}".format(exp))
4395
4396 def modify_vm_disk_rest(self, disk_href, disk_size):
4397 """
4398 Method retrieve modify vm disk size
4399
4400 Args:
4401 disk_href - vCD API URL to GET and PUT disk data
4402 disk_size - disk size as specified in VNFD (flavor)
4403
4404 Returns:
4405 The return network uuid or return None
4406 """
4407 if disk_href is None or disk_size is None:
4408 return None
4409
4410 if self.client._session:
4411 headers = {'Accept': 'application/*+xml;version=' + API_VERSION,
4412 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
4413 response = self.perform_request(req_type='GET',
4414 url=disk_href,
4415 headers=headers)
4416
4417 if response.status_code == 403:
4418 response = self.retry_rest('GET', disk_href)
4419
4420 if response.status_code != requests.codes.ok:
4421 self.logger.debug("GET REST API call {} failed. Return status code {}".format(disk_href,
4422 response.status_code))
4423 return None
4424 try:
4425 lxmlroot_respond = lxmlElementTree.fromstring(response.content)
4426 namespaces = {prefix: uri for prefix, uri in lxmlroot_respond.nsmap.items() if prefix}
4427 namespaces["xmlns"] = "http://www.vmware.com/vcloud/v1.5"
4428
4429 for item in lxmlroot_respond.iterfind('xmlns:Item', namespaces):
4430 if item.find("rasd:Description", namespaces).text == "Hard disk":
4431 disk_item = item.find("rasd:HostResource", namespaces)
4432 if disk_item is not None:
4433 disk_item.attrib["{" + namespaces['xmlns'] + "}capacity"] = str(disk_size)
4434 break
4435
4436 data = lxmlElementTree.tostring(lxmlroot_respond, encoding='utf8', method='xml',
4437 xml_declaration=True)
4438
4439 # Send PUT request to modify disk size
4440 headers['Content-Type'] = 'application/vnd.vmware.vcloud.rasdItemsList+xml; charset=ISO-8859-1'
4441
4442 response = self.perform_request(req_type='PUT',
4443 url=disk_href,
4444 headers=headers,
4445 data=data)
4446 if response.status_code == 403:
4447 add_headers = {'Content-Type': headers['Content-Type']}
4448 response = self.retry_rest('PUT', disk_href, add_headers, data)
4449
4450 if response.status_code != 202:
4451 self.logger.debug("PUT REST API call {} failed. Return status code {}".format(disk_href,
4452 response.status_code))
4453 else:
4454 modify_disk_task = self.get_task_from_response(response.text)
4455 result = self.client.get_task_monitor().wait_for_success(task=modify_disk_task)
4456 if result.get('status') == 'success':
4457 return True
4458 else:
4459 return False
4460 return None
4461
4462 except Exception as exp:
4463 self.logger.info("Error occurred calling rest api for modifing disk size {}".format(exp))
4464 return None
4465
4466 def add_serial_device(self, vapp_uuid):
4467 """
4468 Method to attach a serial device to a VM
4469
4470 Args:
4471 vapp_uuid - uuid of vApp/VM
4472
4473 Returns:
4474 """
4475 self.logger.info("Add serial devices into vApp {}".format(vapp_uuid))
4476 _, content = self.get_vcenter_content()
4477 vm_moref_id = self.get_vm_moref_id(vapp_uuid)
4478 if vm_moref_id:
4479 try:
4480 host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
4481 self.logger.info("VM {} is currently on host {}".format(vm_obj, host_obj))
4482 if host_obj and vm_obj:
4483 spec = vim.vm.ConfigSpec()
4484 spec.deviceChange = []
4485 serial_spec = vim.vm.device.VirtualDeviceSpec()
4486 serial_spec.operation = 'add'
4487 serial_port = vim.vm.device.VirtualSerialPort()
4488 serial_port.yieldOnPoll = True
4489 backing = serial_port.URIBackingInfo()
4490 backing.serviceURI = 'tcp://:65500'
4491 backing.direction = 'server'
4492 serial_port.backing = backing
4493 serial_spec.device = serial_port
4494 spec.deviceChange.append(serial_spec)
4495 vm_obj.ReconfigVM_Task(spec=spec)
4496
4497 self.logger.info("Adding serial device to VM {}".format(vm_obj))
4498 except vmodl.MethodFault as error:
4499 self.logger.error("Error occurred while adding PCI devices {} ", error)
4500
4501 def add_pci_devices(self, vapp_uuid, pci_devices, vmname_andid):
4502 """
4503 Method to attach pci devices to VM
4504
4505 Args:
4506 vapp_uuid - uuid of vApp/VM
4507 pci_devices - pci devices infromation as specified in VNFD (flavor)
4508
4509 Returns:
4510 The status of add pci device task , vm object and
4511 vcenter_conect object
4512 """
4513 vm_obj = None
4514 self.logger.info("Add pci devices {} into vApp {}".format(pci_devices, vapp_uuid))
4515 vcenter_conect, content = self.get_vcenter_content()
4516 vm_moref_id = self.get_vm_moref_id(vapp_uuid)
4517
4518 if vm_moref_id:
4519 try:
4520 no_of_pci_devices = len(pci_devices)
4521 if no_of_pci_devices > 0:
4522 # Get VM and its host
4523 host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
4524 self.logger.info("VM {} is currently on host {}".format(vm_obj, host_obj))
4525 if host_obj and vm_obj:
4526 # get PCI devies from host on which vapp is currently installed
4527 avilable_pci_devices = self.get_pci_devices(host_obj, no_of_pci_devices)
4528
4529 if avilable_pci_devices is None:
4530 # find other hosts with active pci devices
4531 new_host_obj, avilable_pci_devices = self.get_host_and_PCIdevices(
4532 content,
4533 no_of_pci_devices
4534 )
4535
4536 if (new_host_obj is not None and
4537 avilable_pci_devices is not None and
4538 len(avilable_pci_devices) > 0):
4539 # Migrate vm to the host where PCI devices are availble
4540 self.logger.info("Relocate VM {} on new host {}".format(vm_obj, new_host_obj))
4541 task = self.relocate_vm(new_host_obj, vm_obj)
4542 if task is not None:
4543 result = self.wait_for_vcenter_task(task, vcenter_conect)
4544 self.logger.info("Migrate VM status: {}".format(result))
4545 host_obj = new_host_obj
4546 else:
4547 self.logger.info("Fail to migrate VM : {}".format(result))
4548 raise vimconn.VimConnNotFoundException(
4549 "Fail to migrate VM : {} to host {}".format(
4550 vmname_andid,
4551 new_host_obj)
4552 )
4553
4554 if host_obj is not None and avilable_pci_devices is not None and len(avilable_pci_devices) > 0:
4555 # Add PCI devices one by one
4556 for pci_device in avilable_pci_devices:
4557 task = self.add_pci_to_vm(host_obj, vm_obj, pci_device)
4558 if task:
4559 status = self.wait_for_vcenter_task(task, vcenter_conect)
4560 if status:
4561 self.logger.info("Added PCI device {} to VM {}".format(pci_device, str(vm_obj)))
4562 else:
4563 self.logger.error("Fail to add PCI device {} to VM {}".format(pci_device,
4564 str(vm_obj)))
4565 return True, vm_obj, vcenter_conect
4566 else:
4567 self.logger.error("Currently there is no host with"
4568 " {} number of avaialble PCI devices required for VM {}".format(
4569 no_of_pci_devices,
4570 vmname_andid)
4571 )
4572 raise vimconn.VimConnNotFoundException(
4573 "Currently there is no host with {} "
4574 "number of avaialble PCI devices required for VM {}".format(
4575 no_of_pci_devices,
4576 vmname_andid))
4577 else:
4578 self.logger.debug("No infromation about PCI devices {} ", pci_devices)
4579
4580 except vmodl.MethodFault as error:
4581 self.logger.error("Error occurred while adding PCI devices {} ", error)
4582 return None, vm_obj, vcenter_conect
4583
4584 def get_vm_obj(self, content, mob_id):
4585 """
4586 Method to get the vsphere VM object associated with a given morf ID
4587 Args:
4588 vapp_uuid - uuid of vApp/VM
4589 content - vCenter content object
4590 mob_id - mob_id of VM
4591
4592 Returns:
4593 VM and host object
4594 """
4595 vm_obj = None
4596 host_obj = None
4597 try:
4598 container = content.viewManager.CreateContainerView(content.rootFolder,
4599 [vim.VirtualMachine], True
4600 )
4601 for vm in container.view:
4602 mobID = vm._GetMoId()
4603 if mobID == mob_id:
4604 vm_obj = vm
4605 host_obj = vm_obj.runtime.host
4606 break
4607 except Exception as exp:
4608 self.logger.error("Error occurred while finding VM object : {}".format(exp))
4609 return host_obj, vm_obj
4610
4611 def get_pci_devices(self, host, need_devices):
4612 """
4613 Method to get the details of pci devices on given host
4614 Args:
4615 host - vSphere host object
4616 need_devices - number of pci devices needed on host
4617
4618 Returns:
4619 array of pci devices
4620 """
4621 all_devices = []
4622 all_device_ids = []
4623 used_devices_ids = []
4624
4625 try:
4626 if host:
4627 pciPassthruInfo = host.config.pciPassthruInfo
4628 pciDevies = host.hardware.pciDevice
4629
4630 for pci_status in pciPassthruInfo:
4631 if pci_status.passthruActive:
4632 for device in pciDevies:
4633 if device.id == pci_status.id:
4634 all_device_ids.append(device.id)
4635 all_devices.append(device)
4636
4637 # check if devices are in use
4638 avalible_devices = all_devices
4639 for vm in host.vm:
4640 if vm.runtime.powerState == vim.VirtualMachinePowerState.poweredOn:
4641 vm_devices = vm.config.hardware.device
4642 for device in vm_devices:
4643 if type(device) is vim.vm.device.VirtualPCIPassthrough:
4644 if device.backing.id in all_device_ids:
4645 for use_device in avalible_devices:
4646 if use_device.id == device.backing.id:
4647 avalible_devices.remove(use_device)
4648 used_devices_ids.append(device.backing.id)
4649 self.logger.debug("Device {} from devices {}"
4650 "is in use".format(device.backing.id,
4651 device)
4652 )
4653 if len(avalible_devices) < need_devices:
4654 self.logger.debug("Host {} don't have {} number of active devices".format(host,
4655 need_devices))
4656 self.logger.debug("found only {} devices {}".format(len(avalible_devices),
4657 avalible_devices))
4658 return None
4659 else:
4660 required_devices = avalible_devices[:need_devices]
4661 self.logger.info("Found {} PCI devices on host {} but required only {}".format(
4662 len(avalible_devices),
4663 host,
4664 need_devices))
4665 self.logger.info("Retruning {} devices as {}".format(need_devices,
4666 required_devices))
4667 return required_devices
4668
4669 except Exception as exp:
4670 self.logger.error("Error {} occurred while finding pci devices on host: {}".format(exp, host))
4671
4672 return None
4673
4674 def get_host_and_PCIdevices(self, content, need_devices):
4675 """
4676 Method to get the details of pci devices infromation on all hosts
4677
4678 Args:
4679 content - vSphere host object
4680 need_devices - number of pci devices needed on host
4681
4682 Returns:
4683 array of pci devices and host object
4684 """
4685 host_obj = None
4686 pci_device_objs = None
4687 try:
4688 if content:
4689 container = content.viewManager.CreateContainerView(content.rootFolder,
4690 [vim.HostSystem], True)
4691 for host in container.view:
4692 devices = self.get_pci_devices(host, need_devices)
4693 if devices:
4694 host_obj = host
4695 pci_device_objs = devices
4696 break
4697 except Exception as exp:
4698 self.logger.error("Error {} occurred while finding pci devices on host: {}".format(exp, host_obj))
4699
4700 return host_obj, pci_device_objs
4701
4702 def relocate_vm(self, dest_host, vm):
4703 """
4704 Method to get the relocate VM to new host
4705
4706 Args:
4707 dest_host - vSphere host object
4708 vm - vSphere VM object
4709
4710 Returns:
4711 task object
4712 """
4713 task = None
4714 try:
4715 relocate_spec = vim.vm.RelocateSpec(host=dest_host)
4716 task = vm.Relocate(relocate_spec)
4717 self.logger.info("Migrating {} to destination host {}".format(vm, dest_host))
4718 except Exception as exp:
4719 self.logger.error("Error occurred while relocate VM {} to new host {}: {}".format(
4720 dest_host, vm, exp))
4721 return task
4722
4723 def wait_for_vcenter_task(self, task, actionName='job', hideResult=False):
4724 """
4725 Waits and provides updates on a vSphere task
4726 """
4727 while task.info.state == vim.TaskInfo.State.running:
4728 time.sleep(2)
4729
4730 if task.info.state == vim.TaskInfo.State.success:
4731 if task.info.result is not None and not hideResult:
4732 self.logger.info('{} completed successfully, result: {}'.format(
4733 actionName,
4734 task.info.result))
4735 else:
4736 self.logger.info('Task {} completed successfully.'.format(actionName))
4737 else:
4738 self.logger.error('{} did not complete successfully: {} '.format(
4739 actionName,
4740 task.info.error)
4741 )
4742
4743 return task.info.result
4744
4745 def add_pci_to_vm(self, host_object, vm_object, host_pci_dev):
4746 """
4747 Method to add pci device in given VM
4748
4749 Args:
4750 host_object - vSphere host object
4751 vm_object - vSphere VM object
4752 host_pci_dev - host_pci_dev must be one of the devices from the
4753 host_object.hardware.pciDevice list
4754 which is configured as a PCI passthrough device
4755
4756 Returns:
4757 task object
4758 """
4759 task = None
4760 if vm_object and host_object and host_pci_dev:
4761 try:
4762 # Add PCI device to VM
4763 pci_passthroughs = vm_object.environmentBrowser.QueryConfigTarget(host=None).pciPassthrough
4764 systemid_by_pciid = {item.pciDevice.id: item.systemId for item in pci_passthroughs}
4765
4766 if host_pci_dev.id not in systemid_by_pciid:
4767 self.logger.error("Device {} is not a passthrough device ".format(host_pci_dev))
4768 return None
4769
4770 deviceId = hex(host_pci_dev.deviceId % 2 ** 16).lstrip('0x')
4771 backing = vim.VirtualPCIPassthroughDeviceBackingInfo(deviceId=deviceId,
4772 id=host_pci_dev.id,
4773 systemId=systemid_by_pciid[host_pci_dev.id],
4774 vendorId=host_pci_dev.vendorId,
4775 deviceName=host_pci_dev.deviceName)
4776
4777 hba_object = vim.VirtualPCIPassthrough(key=-100, backing=backing)
4778
4779 new_device_config = vim.VirtualDeviceConfigSpec(device=hba_object)
4780 new_device_config.operation = "add"
4781 vmConfigSpec = vim.vm.ConfigSpec()
4782 vmConfigSpec.deviceChange = [new_device_config]
4783
4784 task = vm_object.ReconfigVM_Task(spec=vmConfigSpec)
4785 self.logger.info("Adding PCI device {} into VM {} from host {} ".format(
4786 host_pci_dev, vm_object, host_object)
4787 )
4788 except Exception as exp:
4789 self.logger.error("Error occurred while adding pci devive {} to VM {}: {}".format(
4790 host_pci_dev,
4791 vm_object,
4792 exp))
4793 return task
4794
4795 def get_vm_vcenter_info(self):
4796 """
4797 Method to get details of vCenter and vm
4798
4799 Args:
4800 vapp_uuid - uuid of vApp or VM
4801
4802 Returns:
4803 Moref Id of VM and deails of vCenter
4804 """
4805 vm_vcenter_info = {}
4806
4807 if self.vcenter_ip is not None:
4808 vm_vcenter_info["vm_vcenter_ip"] = self.vcenter_ip
4809 else:
4810 raise vimconn.VimConnException(message="vCenter IP is not provided."
4811 " Please provide vCenter IP while attaching datacenter "
4812 "to tenant in --config")
4813 if self.vcenter_port is not None:
4814 vm_vcenter_info["vm_vcenter_port"] = self.vcenter_port
4815 else:
4816 raise vimconn.VimConnException(message="vCenter port is not provided."
4817 " Please provide vCenter port while attaching datacenter "
4818 "to tenant in --config")
4819 if self.vcenter_user is not None:
4820 vm_vcenter_info["vm_vcenter_user"] = self.vcenter_user
4821 else:
4822 raise vimconn.VimConnException(message="vCenter user is not provided."
4823 " Please provide vCenter user while attaching datacenter "
4824 "to tenant in --config")
4825
4826 if self.vcenter_password is not None:
4827 vm_vcenter_info["vm_vcenter_password"] = self.vcenter_password
4828 else:
4829 raise vimconn.VimConnException(message="vCenter user password is not provided."
4830 " Please provide vCenter user password while attaching datacenter "
4831 "to tenant in --config")
4832
4833 return vm_vcenter_info
4834
4835 def get_vm_pci_details(self, vmuuid):
4836 """
4837 Method to get VM PCI device details from vCenter
4838
4839 Args:
4840 vm_obj - vSphere VM object
4841
4842 Returns:
4843 dict of PCI devives attached to VM
4844
4845 """
4846 vm_pci_devices_info = {}
4847 try:
4848 _, content = self.get_vcenter_content()
4849 vm_moref_id = self.get_vm_moref_id(vmuuid)
4850 if vm_moref_id:
4851 # Get VM and its host
4852 if content:
4853 host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
4854 if host_obj and vm_obj:
4855 vm_pci_devices_info["host_name"] = host_obj.name
4856 vm_pci_devices_info["host_ip"] = host_obj.config.network.vnic[0].spec.ip.ipAddress
4857 for device in vm_obj.config.hardware.device:
4858 if type(device) == vim.vm.device.VirtualPCIPassthrough:
4859 device_details = {'devide_id': device.backing.id,
4860 'pciSlotNumber': device.slotInfo.pciSlotNumber,
4861 }
4862 vm_pci_devices_info[device.deviceInfo.label] = device_details
4863 else:
4864 self.logger.error("Can not connect to vCenter while getting "
4865 "PCI devices infromationn")
4866 return vm_pci_devices_info
4867 except Exception as exp:
4868 self.logger.error("Error occurred while getting VM information"
4869 " for VM : {}".format(exp))
4870 raise vimconn.VimConnException(message=exp)
4871
4872 def reserve_memory_for_all_vms(self, vapp, memory_mb):
4873 """
4874 Method to reserve memory for all VMs
4875 Args :
4876 vapp - VApp
4877 memory_mb - Memory in MB
4878 Returns:
4879 None
4880 """
4881
4882 self.logger.info("Reserve memory for all VMs")
4883 for vms in vapp.get_all_vms():
4884 vm_id = vms.get('id').split(':')[-1]
4885
4886 url_rest_call = "{}/api/vApp/vm-{}/virtualHardwareSection/memory".format(self.url, vm_id)
4887
4888 headers = {'Accept': 'application/*+xml;version=' + API_VERSION,
4889 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
4890 headers['Content-Type'] = 'application/vnd.vmware.vcloud.rasdItem+xml'
4891 response = self.perform_request(req_type='GET',
4892 url=url_rest_call,
4893 headers=headers)
4894
4895 if response.status_code == 403:
4896 response = self.retry_rest('GET', url_rest_call)
4897
4898 if response.status_code != 200:
4899 self.logger.error("REST call {} failed reason : {}"
4900 "status code : {}".format(url_rest_call,
4901 response.text,
4902 response.status_code))
4903 raise vimconn.VimConnException("reserve_memory_for_all_vms : Failed to get "
4904 "memory")
4905
4906 bytexml = bytes(bytearray(response.text, encoding='utf-8'))
4907 contentelem = lxmlElementTree.XML(bytexml)
4908 namespaces = {prefix: uri for prefix, uri in contentelem.nsmap.items() if prefix}
4909 namespaces["xmlns"] = "http://www.vmware.com/vcloud/v1.5"
4910
4911 # Find the reservation element in the response
4912 memelem_list = contentelem.findall(".//rasd:Reservation", namespaces)
4913 for memelem in memelem_list:
4914 memelem.text = str(memory_mb)
4915
4916 newdata = lxmlElementTree.tostring(contentelem, pretty_print=True)
4917
4918 response = self.perform_request(req_type='PUT',
4919 url=url_rest_call,
4920 headers=headers,
4921 data=newdata)
4922
4923 if response.status_code == 403:
4924 add_headers = {'Content-Type': headers['Content-Type']}
4925 response = self.retry_rest('PUT', url_rest_call, add_headers, newdata)
4926
4927 if response.status_code != 202:
4928 self.logger.error("REST call {} failed reason : {}"
4929 "status code : {} ".format(url_rest_call,
4930 response.text,
4931 response.status_code))
4932 raise vimconn.VimConnException("reserve_memory_for_all_vms : Failed to update "
4933 "virtual hardware memory section")
4934 else:
4935 mem_task = self.get_task_from_response(response.text)
4936 result = self.client.get_task_monitor().wait_for_success(task=mem_task)
4937 if result.get('status') == 'success':
4938 self.logger.info("reserve_memory_for_all_vms(): VM {} succeeded "
4939 .format(vm_id))
4940 else:
4941 self.logger.error("reserve_memory_for_all_vms(): VM {} failed "
4942 .format(vm_id))
4943
4944 def connect_vapp_to_org_vdc_network(self, vapp_id, net_name):
4945 """
4946 Configure VApp network config with org vdc network
4947 Args :
4948 vapp - VApp
4949 Returns:
4950 None
4951 """
4952
4953 self.logger.info("Connecting vapp {} to org vdc network {}".
4954 format(vapp_id, net_name))
4955
4956 url_rest_call = "{}/api/vApp/vapp-{}/networkConfigSection/".format(self.url, vapp_id)
4957
4958 headers = {'Accept': 'application/*+xml;version=' + API_VERSION,
4959 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
4960 response = self.perform_request(req_type='GET',
4961 url=url_rest_call,
4962 headers=headers)
4963
4964 if response.status_code == 403:
4965 response = self.retry_rest('GET', url_rest_call)
4966
4967 if response.status_code != 200:
4968 self.logger.error("REST call {} failed reason : {}"
4969 "status code : {}".format(url_rest_call,
4970 response.text,
4971 response.status_code))
4972 raise vimconn.VimConnException("connect_vapp_to_org_vdc_network : Failed to get "
4973 "network config section")
4974
4975 data = response.text
4976 headers['Content-Type'] = 'application/vnd.vmware.vcloud.networkConfigSection+xml'
4977 net_id = self.get_network_id_by_name(net_name)
4978 if not net_id:
4979 raise vimconn.VimConnException("connect_vapp_to_org_vdc_network : Failed to find "
4980 "existing network")
4981
4982 bytexml = bytes(bytearray(data, encoding='utf-8'))
4983 newelem = lxmlElementTree.XML(bytexml)
4984 namespaces = {prefix: uri for prefix, uri in newelem.nsmap.items() if prefix}
4985 namespaces["xmlns"] = "http://www.vmware.com/vcloud/v1.5"
4986 nwcfglist = newelem.findall(".//xmlns:NetworkConfig", namespaces)
4987
4988 # VCD 9.7 returns an incorrect parentnetwork element. Fix it before PUT operation
4989 parentnetworklist = newelem.findall(".//xmlns:ParentNetwork", namespaces)
4990 if parentnetworklist:
4991 for pn in parentnetworklist:
4992 if "href" not in pn.keys():
4993 id_val = pn.get("id")
4994 href_val = "{}/api/network/{}".format(self.url, id_val)
4995 pn.set("href", href_val)
4996
4997 newstr = """<NetworkConfig networkName="{}">
4998 <Configuration>
4999 <ParentNetwork href="{}/api/network/{}"/>
5000 <FenceMode>bridged</FenceMode>
5001 </Configuration>
5002 </NetworkConfig>
5003 """.format(net_name, self.url, net_id)
5004 newcfgelem = lxmlElementTree.fromstring(newstr)
5005 if nwcfglist:
5006 nwcfglist[0].addnext(newcfgelem)
5007
5008 newdata = lxmlElementTree.tostring(newelem, pretty_print=True)
5009
5010 response = self.perform_request(req_type='PUT',
5011 url=url_rest_call,
5012 headers=headers,
5013 data=newdata)
5014
5015 if response.status_code == 403:
5016 add_headers = {'Content-Type': headers['Content-Type']}
5017 response = self.retry_rest('PUT', url_rest_call, add_headers, newdata)
5018
5019 if response.status_code != 202:
5020 self.logger.error("REST call {} failed reason : {}"
5021 "status code : {} ".format(url_rest_call,
5022 response.text,
5023 response.status_code))
5024 raise vimconn.VimConnException("connect_vapp_to_org_vdc_network : Failed to update "
5025 "network config section")
5026 else:
5027 vapp_task = self.get_task_from_response(response.text)
5028 result = self.client.get_task_monitor().wait_for_success(task=vapp_task)
5029 if result.get('status') == 'success':
5030 self.logger.info("connect_vapp_to_org_vdc_network(): Vapp {} connected to "
5031 "network {}".format(vapp_id, net_name))
5032 else:
5033 self.logger.error("connect_vapp_to_org_vdc_network(): Vapp {} failed to "
5034 "connect to network {}".format(vapp_id, net_name))
5035
5036 def remove_primary_network_adapter_from_all_vms(self, vapp):
5037 """
5038 Method to remove network adapter type to vm
5039 Args :
5040 vapp - VApp
5041 Returns:
5042 None
5043 """
5044
5045 self.logger.info("Removing network adapter from all VMs")
5046 for vms in vapp.get_all_vms():
5047 vm_id = vms.get('id').split(':')[-1]
5048
5049 url_rest_call = "{}/api/vApp/vm-{}/networkConnectionSection/".format(self.url, vm_id)
5050
5051 headers = {'Accept': 'application/*+xml;version=' + API_VERSION,
5052 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
5053 response = self.perform_request(req_type='GET',
5054 url=url_rest_call,
5055 headers=headers)
5056
5057 if response.status_code == 403:
5058 response = self.retry_rest('GET', url_rest_call)
5059
5060 if response.status_code != 200:
5061 self.logger.error("REST call {} failed reason : {}"
5062 "status code : {}".format(url_rest_call,
5063 response.text,
5064 response.status_code))
5065 raise vimconn.VimConnException("remove_primary_network_adapter : Failed to get "
5066 "network connection section")
5067
5068 data = response.text
5069 data = data.split('<Link rel="edit"')[0]
5070
5071 headers['Content-Type'] = 'application/vnd.vmware.vcloud.networkConnectionSection+xml'
5072
5073 newdata = """<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
5074 <NetworkConnectionSection xmlns="http://www.vmware.com/vcloud/v1.5"
5075 xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1"
5076 xmlns:vssd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData"
5077 xmlns:common="http://schemas.dmtf.org/wbem/wscim/1/common"
5078 xmlns:rasd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData"
5079 xmlns:vmw="http://www.vmware.com/schema/ovf"
5080 xmlns:ovfenv="http://schemas.dmtf.org/ovf/environment/1"
5081 xmlns:vmext="http://www.vmware.com/vcloud/extension/v1.5"
5082 xmlns:ns9="http://www.vmware.com/vcloud/versions"
5083 href="{url}" type="application/vnd.vmware.vcloud.networkConnectionSection+xml"
5084 ovf:required="false">
5085 <ovf:Info>Specifies the available VM network connections</ovf:Info>
5086 <PrimaryNetworkConnectionIndex>0</PrimaryNetworkConnectionIndex>
5087 <Link rel="edit" href="{url}"
5088 type="application/vnd.vmware.vcloud.networkConnectionSection+xml"/>
5089 </NetworkConnectionSection>""".format(url=url_rest_call)
5090 response = self.perform_request(req_type='PUT',
5091 url=url_rest_call,
5092 headers=headers,
5093 data=newdata)
5094
5095 if response.status_code == 403:
5096 add_headers = {'Content-Type': headers['Content-Type']}
5097 response = self.retry_rest('PUT', url_rest_call, add_headers, newdata)
5098
5099 if response.status_code != 202:
5100 self.logger.error("REST call {} failed reason : {}"
5101 "status code : {} ".format(url_rest_call,
5102 response.text,
5103 response.status_code))
5104 raise vimconn.VimConnException("remove_primary_network_adapter : Failed to update "
5105 "network connection section")
5106 else:
5107 nic_task = self.get_task_from_response(response.text)
5108 result = self.client.get_task_monitor().wait_for_success(task=nic_task)
5109 if result.get('status') == 'success':
5110 self.logger.info("remove_primary_network_adapter(): VM {} conneced to "
5111 "default NIC type".format(vm_id))
5112 else:
5113 self.logger.error("remove_primary_network_adapter(): VM {} failed to "
5114 "connect NIC type".format(vm_id))
5115
5116 def add_network_adapter_to_vms(self, vapp, network_name, primary_nic_index, nicIndex, net, nic_type=None):
5117 """
5118 Method to add network adapter type to vm
5119 Args :
5120 network_name - name of network
5121 primary_nic_index - int value for primary nic index
5122 nicIndex - int value for nic index
5123 nic_type - specify model name to which add to vm
5124 Returns:
5125 None
5126 """
5127
5128 self.logger.info("Add network adapter to VM: network_name {} nicIndex {} nic_type {}".
5129 format(network_name, nicIndex, nic_type))
5130 try:
5131 ip_address = None
5132 floating_ip = False
5133 mac_address = None
5134 if 'floating_ip' in net:
5135 floating_ip = net['floating_ip']
5136
5137 # Stub for ip_address feature
5138 if 'ip_address' in net:
5139 ip_address = net['ip_address']
5140
5141 if 'mac_address' in net:
5142 mac_address = net['mac_address']
5143
5144 if floating_ip:
5145 allocation_mode = "POOL"
5146 elif ip_address:
5147 allocation_mode = "MANUAL"
5148 else:
5149 allocation_mode = "DHCP"
5150
5151 if not nic_type:
5152 for vms in vapp.get_all_vms():
5153 vm_id = vms.get('id').split(':')[-1]
5154
5155 url_rest_call = "{}/api/vApp/vm-{}/networkConnectionSection/".format(self.url, vm_id)
5156
5157 headers = {'Accept': 'application/*+xml;version=' + API_VERSION,
5158 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
5159 response = self.perform_request(req_type='GET',
5160 url=url_rest_call,
5161 headers=headers)
5162
5163 if response.status_code == 403:
5164 response = self.retry_rest('GET', url_rest_call)
5165
5166 if response.status_code != 200:
5167 self.logger.error("REST call {} failed reason : {}"
5168 "status code : {}".format(url_rest_call,
5169 response.text,
5170 response.status_code))
5171 raise vimconn.VimConnException("add_network_adapter_to_vms : Failed to get "
5172 "network connection section")
5173
5174 data = response.text
5175 data = data.split('<Link rel="edit"')[0]
5176 if '<PrimaryNetworkConnectionIndex>' not in data:
5177 self.logger.debug("add_network_adapter PrimaryNIC not in data")
5178 item = """<PrimaryNetworkConnectionIndex>{}</PrimaryNetworkConnectionIndex>
5179 <NetworkConnection network="{}">
5180 <NetworkConnectionIndex>{}</NetworkConnectionIndex>
5181 <IsConnected>true</IsConnected>
5182 <IpAddressAllocationMode>{}</IpAddressAllocationMode>
5183 </NetworkConnection>""".format(primary_nic_index, network_name, nicIndex,
5184 allocation_mode)
5185 # Stub for ip_address feature
5186 if ip_address:
5187 ip_tag = '<IpAddress>{}</IpAddress>'.format(ip_address)
5188 item = item.replace('</NetworkConnectionIndex>\n',
5189 '</NetworkConnectionIndex>\n{}\n'.format(ip_tag))
5190
5191 if mac_address:
5192 mac_tag = '<MACAddress>{}</MACAddress>'.format(mac_address)
5193 item = item.replace('</IsConnected>\n', '</IsConnected>\n{}\n'.format(mac_tag))
5194
5195 data = data.replace('</ovf:Info>\n',
5196 '</ovf:Info>\n{}\n</NetworkConnectionSection>'.format(item))
5197 else:
5198 self.logger.debug("add_network_adapter PrimaryNIC in data")
5199 new_item = """<NetworkConnection network="{}">
5200 <NetworkConnectionIndex>{}</NetworkConnectionIndex>
5201 <IsConnected>true</IsConnected>
5202 <IpAddressAllocationMode>{}</IpAddressAllocationMode>
5203 </NetworkConnection>""".format(network_name, nicIndex,
5204 allocation_mode)
5205 # Stub for ip_address feature
5206 if ip_address:
5207 ip_tag = '<IpAddress>{}</IpAddress>'.format(ip_address)
5208 new_item = new_item.replace('</NetworkConnectionIndex>\n',
5209 '</NetworkConnectionIndex>\n{}\n'.format(ip_tag))
5210
5211 if mac_address:
5212 mac_tag = '<MACAddress>{}</MACAddress>'.format(mac_address)
5213 new_item = new_item.replace('</IsConnected>\n', '</IsConnected>\n{}\n'.format(mac_tag))
5214
5215 data = data + new_item + '</NetworkConnectionSection>'
5216
5217 headers['Content-Type'] = 'application/vnd.vmware.vcloud.networkConnectionSection+xml'
5218
5219 response = self.perform_request(req_type='PUT',
5220 url=url_rest_call,
5221 headers=headers,
5222 data=data)
5223
5224 if response.status_code == 403:
5225 add_headers = {'Content-Type': headers['Content-Type']}
5226 response = self.retry_rest('PUT', url_rest_call, add_headers, data)
5227
5228 if response.status_code != 202:
5229 self.logger.error("REST call {} failed reason : {}"
5230 "status code : {} ".format(url_rest_call,
5231 response.text,
5232 response.status_code))
5233 raise vimconn.VimConnException("add_network_adapter_to_vms : Failed to update "
5234 "network connection section")
5235 else:
5236 nic_task = self.get_task_from_response(response.text)
5237 result = self.client.get_task_monitor().wait_for_success(task=nic_task)
5238 if result.get('status') == 'success':
5239 self.logger.info("add_network_adapter_to_vms(): VM {} conneced to "
5240 "default NIC type".format(vm_id))
5241 else:
5242 self.logger.error("add_network_adapter_to_vms(): VM {} failed to "
5243 "connect NIC type".format(vm_id))
5244 else:
5245 for vms in vapp.get_all_vms():
5246 vm_id = vms.get('id').split(':')[-1]
5247
5248 url_rest_call = "{}/api/vApp/vm-{}/networkConnectionSection/".format(self.url, vm_id)
5249
5250 headers = {'Accept': 'application/*+xml;version=' + API_VERSION,
5251 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
5252 response = self.perform_request(req_type='GET',
5253 url=url_rest_call,
5254 headers=headers)
5255
5256 if response.status_code == 403:
5257 response = self.retry_rest('GET', url_rest_call)
5258
5259 if response.status_code != 200:
5260 self.logger.error("REST call {} failed reason : {}"
5261 "status code : {}".format(url_rest_call,
5262 response.text,
5263 response.status_code))
5264 raise vimconn.VimConnException("add_network_adapter_to_vms : Failed to get "
5265 "network connection section")
5266 data = response.text
5267 data = data.split('<Link rel="edit"')[0]
5268 vcd_netadapter_type = nic_type
5269 if nic_type in ['SR-IOV', 'VF']:
5270 vcd_netadapter_type = "SRIOVETHERNETCARD"
5271
5272 if '<PrimaryNetworkConnectionIndex>' not in data:
5273 self.logger.debug("add_network_adapter PrimaryNIC not in data nic_type {}".format(nic_type))
5274 item = """<PrimaryNetworkConnectionIndex>{}</PrimaryNetworkConnectionIndex>
5275 <NetworkConnection network="{}">
5276 <NetworkConnectionIndex>{}</NetworkConnectionIndex>
5277 <IsConnected>true</IsConnected>
5278 <IpAddressAllocationMode>{}</IpAddressAllocationMode>
5279 <NetworkAdapterType>{}</NetworkAdapterType>
5280 </NetworkConnection>""".format(primary_nic_index, network_name, nicIndex,
5281 allocation_mode, vcd_netadapter_type)
5282 # Stub for ip_address feature
5283 if ip_address:
5284 ip_tag = '<IpAddress>{}</IpAddress>'.format(ip_address)
5285 item = item.replace('</NetworkConnectionIndex>\n',
5286 '</NetworkConnectionIndex>\n{}\n'.format(ip_tag))
5287
5288 if mac_address:
5289 mac_tag = '<MACAddress>{}</MACAddress>'.format(mac_address)
5290 item = item.replace('</IsConnected>\n', '</IsConnected>\n{}\n'.format(mac_tag))
5291
5292 data = data.replace('</ovf:Info>\n',
5293 '</ovf:Info>\n{}\n</NetworkConnectionSection>'.format(item))
5294 else:
5295 self.logger.debug("add_network_adapter PrimaryNIC in data nic_type {}".format(nic_type))
5296 new_item = """<NetworkConnection network="{}">
5297 <NetworkConnectionIndex>{}</NetworkConnectionIndex>
5298 <IsConnected>true</IsConnected>
5299 <IpAddressAllocationMode>{}</IpAddressAllocationMode>
5300 <NetworkAdapterType>{}</NetworkAdapterType>
5301 </NetworkConnection>""".format(network_name, nicIndex,
5302 allocation_mode, vcd_netadapter_type)
5303 # Stub for ip_address feature
5304 if ip_address:
5305 ip_tag = '<IpAddress>{}</IpAddress>'.format(ip_address)
5306 new_item = new_item.replace('</NetworkConnectionIndex>\n',
5307 '</NetworkConnectionIndex>\n{}\n'.format(ip_tag))
5308
5309 if mac_address:
5310 mac_tag = '<MACAddress>{}</MACAddress>'.format(mac_address)
5311 new_item = new_item.replace('</IsConnected>\n', '</IsConnected>\n{}\n'.format(mac_tag))
5312
5313 data = data + new_item + '</NetworkConnectionSection>'
5314
5315 headers['Content-Type'] = 'application/vnd.vmware.vcloud.networkConnectionSection+xml'
5316
5317 response = self.perform_request(req_type='PUT',
5318 url=url_rest_call,
5319 headers=headers,
5320 data=data)
5321
5322 if response.status_code == 403:
5323 add_headers = {'Content-Type': headers['Content-Type']}
5324 response = self.retry_rest('PUT', url_rest_call, add_headers, data)
5325
5326 if response.status_code != 202:
5327 self.logger.error("REST call {} failed reason : {}"
5328 "status code : {}".format(url_rest_call,
5329 response.text,
5330 response.status_code))
5331 raise vimconn.VimConnException("add_network_adapter_to_vms : Failed to update "
5332 "network connection section")
5333 else:
5334 nic_task = self.get_task_from_response(response.text)
5335 result = self.client.get_task_monitor().wait_for_success(task=nic_task)
5336 if result.get('status') == 'success':
5337 self.logger.info("add_network_adapter_to_vms(): VM {} "
5338 "conneced to NIC type {}".format(vm_id, nic_type))
5339 else:
5340 self.logger.error("add_network_adapter_to_vms(): VM {} "
5341 "failed to connect NIC type {}".format(vm_id, nic_type))
5342 except Exception as exp:
5343 self.logger.error("add_network_adapter_to_vms() : exception occurred "
5344 "while adding Network adapter")
5345 raise vimconn.VimConnException(message=exp)
5346
5347 def set_numa_affinity(self, vmuuid, paired_threads_id):
5348 """
5349 Method to assign numa affinity in vm configuration parammeters
5350 Args :
5351 vmuuid - vm uuid
5352 paired_threads_id - one or more virtual processor
5353 numbers
5354 Returns:
5355 return if True
5356 """
5357 try:
5358 vcenter_conect, content = self.get_vcenter_content()
5359 vm_moref_id = self.get_vm_moref_id(vmuuid)
5360
5361 _, vm_obj = self.get_vm_obj(content, vm_moref_id)
5362 if vm_obj:
5363 config_spec = vim.vm.ConfigSpec()
5364 config_spec.extraConfig = []
5365 opt = vim.option.OptionValue()
5366 opt.key = 'numa.nodeAffinity'
5367 opt.value = str(paired_threads_id)
5368 config_spec.extraConfig.append(opt)
5369 task = vm_obj.ReconfigVM_Task(config_spec)
5370 if task:
5371 self.wait_for_vcenter_task(task, vcenter_conect)
5372 extra_config = vm_obj.config.extraConfig
5373 flag = False
5374 for opts in extra_config:
5375 if 'numa.nodeAffinity' in opts.key:
5376 flag = True
5377 self.logger.info("set_numa_affinity: Sucessfully assign numa affinity "
5378 "value {} for vm {}".format(opt.value, vm_obj))
5379 if flag:
5380 return
5381 else:
5382 self.logger.error("set_numa_affinity: Failed to assign numa affinity")
5383 except Exception as exp:
5384 self.logger.error("set_numa_affinity : exception occurred while setting numa affinity "
5385 "for VM {} : {}".format(vm_obj, vm_moref_id))
5386 raise vimconn.VimConnException("set_numa_affinity : Error {} failed to assign numa "
5387 "affinity".format(exp))
5388
5389 def cloud_init(self, vapp, cloud_config):
5390 """
5391 Method to inject ssh-key
5392 vapp - vapp object
5393 cloud_config a dictionary with:
5394 'key-pairs': (optional) list of strings with the public key to be inserted to the default user
5395 'users': (optional) list of users to be inserted, each item is a dict with:
5396 'name': (mandatory) user name,
5397 'key-pairs': (optional) list of strings with the public key to be inserted to the user
5398 'user-data': (optional) can be a string with the text script to be passed directly to cloud-init,
5399 or a list of strings, each one contains a script to be passed, usually with a MIMEmultipart file
5400 'config-files': (optional). List of files to be transferred. Each item is a dict with:
5401 'dest': (mandatory) string with the destination absolute path
5402 'encoding': (optional, by default text). Can be one of:
5403 'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
5404 'content' (mandatory): string with the content of the file
5405 'permissions': (optional) string with file permissions, typically octal notation '0644'
5406 'owner': (optional) file owner, string with the format 'owner:group'
5407 'boot-data-drive': boolean to indicate if user-data must be passed using a boot drive (hard disk
5408 """
5409 try:
5410 if not isinstance(cloud_config, dict):
5411 raise Exception("cloud_init : parameter cloud_config is not a dictionary")
5412 else:
5413 key_pairs = []
5414 userdata = []
5415 if "key-pairs" in cloud_config:
5416 key_pairs = cloud_config["key-pairs"]
5417
5418 if "users" in cloud_config:
5419 userdata = cloud_config["users"]
5420
5421 self.logger.debug("cloud_init : Guest os customization started..")
5422 customize_script = self.format_script(key_pairs=key_pairs, users_list=userdata)
5423 customize_script = customize_script.replace("&", "&amp;")
5424 self.guest_customization(vapp, customize_script)
5425
5426 except Exception as exp:
5427 self.logger.error("cloud_init : exception occurred while injecting "
5428 "ssh-key")
5429 raise vimconn.VimConnException("cloud_init : Error {} failed to inject "
5430 "ssh-key".format(exp))
5431
5432 def format_script(self, key_pairs=[], users_list=[]):
5433 bash_script = """#!/bin/sh
5434 echo performing customization tasks with param $1 at `date "+DATE: %Y-%m-%d - TIME: %H:%M:%S"`>> /root/customization.log
5435 if [ "$1" = "precustomization" ];then
5436 echo performing precustomization tasks on `date "+DATE: %Y-%m-%d - TIME: %H:%M:%S"` >> /root/customization.log
5437 """
5438
5439 keys = "\n".join(key_pairs)
5440 if keys:
5441 keys_data = """
5442 if [ ! -d /root/.ssh ];then
5443 mkdir /root/.ssh
5444 chown root:root /root/.ssh
5445 chmod 700 /root/.ssh
5446 touch /root/.ssh/authorized_keys
5447 chown root:root /root/.ssh/authorized_keys
5448 chmod 600 /root/.ssh/authorized_keys
5449 # make centos with selinux happy
5450 which restorecon && restorecon -Rv /root/.ssh
5451 else
5452 touch /root/.ssh/authorized_keys
5453 chown root:root /root/.ssh/authorized_keys
5454 chmod 600 /root/.ssh/authorized_keys
5455 fi
5456 echo '{key}' >> /root/.ssh/authorized_keys
5457 """.format(key=keys)
5458
5459 bash_script += keys_data
5460
5461 for user in users_list:
5462 if 'name' in user:
5463 user_name = user['name']
5464 if 'key-pairs' in user:
5465 user_keys = "\n".join(user['key-pairs'])
5466 else:
5467 user_keys = None
5468
5469 add_user_name = """
5470 useradd -d /home/{user_name} -m -g users -s /bin/bash {user_name}
5471 """.format(user_name=user_name)
5472
5473 bash_script += add_user_name
5474
5475 if user_keys:
5476 user_keys_data = """
5477 mkdir /home/{user_name}/.ssh
5478 chown {user_name}:{user_name} /home/{user_name}/.ssh
5479 chmod 700 /home/{user_name}/.ssh
5480 touch /home/{user_name}/.ssh/authorized_keys
5481 chown {user_name}:{user_name} /home/{user_name}/.ssh/authorized_keys
5482 chmod 600 /home/{user_name}/.ssh/authorized_keys
5483 # make centos with selinux happy
5484 which restorecon && restorecon -Rv /home/{user_name}/.ssh
5485 echo '{user_key}' >> /home/{user_name}/.ssh/authorized_keys
5486 """.format(user_name=user_name, user_key=user_keys)
5487
5488 bash_script += user_keys_data
5489
5490 return bash_script + "\n\tfi"
5491
5492 def guest_customization(self, vapp, customize_script):
5493 """
5494 Method to customize guest os
5495 vapp - Vapp object
5496 customize_script - Customize script to be run at first boot of VM.
5497 """
5498 for vm in vapp.get_all_vms():
5499 vm_id = vm.get('id').split(':')[-1]
5500 vm_name = vm.get('name')
5501 vm_name = vm_name.replace('_', '-')
5502
5503 vm_customization_url = "{}/api/vApp/vm-{}/guestCustomizationSection/".format(self.url, vm_id)
5504 headers = {'Accept': 'application/*+xml;version=' + API_VERSION,
5505 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
5506
5507 headers['Content-Type'] = "application/vnd.vmware.vcloud.guestCustomizationSection+xml"
5508
5509 data = """<GuestCustomizationSection
5510 xmlns="http://www.vmware.com/vcloud/v1.5"
5511 xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1"
5512 ovf:required="false" href="{}"
5513 type="application/vnd.vmware.vcloud.guestCustomizationSection+xml">
5514 <ovf:Info>Specifies Guest OS Customization Settings</ovf:Info>
5515 <Enabled>true</Enabled>
5516 <ChangeSid>false</ChangeSid>
5517 <VirtualMachineId>{}</VirtualMachineId>
5518 <JoinDomainEnabled>false</JoinDomainEnabled>
5519 <UseOrgSettings>false</UseOrgSettings>
5520 <AdminPasswordEnabled>false</AdminPasswordEnabled>
5521 <AdminPasswordAuto>true</AdminPasswordAuto>
5522 <AdminAutoLogonEnabled>false</AdminAutoLogonEnabled>
5523 <AdminAutoLogonCount>0</AdminAutoLogonCount>
5524 <ResetPasswordRequired>false</ResetPasswordRequired>
5525 <CustomizationScript>{}</CustomizationScript>
5526 <ComputerName>{}</ComputerName>
5527 <Link href="{}"
5528 type="application/vnd.vmware.vcloud.guestCustomizationSection+xml" rel="edit"/>
5529 </GuestCustomizationSection>
5530 """.format(vm_customization_url,
5531 vm_id,
5532 customize_script,
5533 vm_name,
5534 vm_customization_url)
5535
5536 response = self.perform_request(req_type='PUT',
5537 url=vm_customization_url,
5538 headers=headers,
5539 data=data)
5540 if response.status_code == 202:
5541 guest_task = self.get_task_from_response(response.text)
5542 self.client.get_task_monitor().wait_for_success(task=guest_task)
5543 self.logger.info("guest_customization : customized guest os task "
5544 "completed for VM {}".format(vm_name))
5545 else:
5546 self.logger.error("guest_customization : task for customized guest os"
5547 "failed for VM {}".format(vm_name))
5548 raise vimconn.VimConnException("guest_customization : failed to perform"
5549 "guest os customization on VM {}".format(vm_name))
5550
5551 def add_new_disk(self, vapp_uuid, disk_size):
5552 """
5553 Method to create an empty vm disk
5554
5555 Args:
5556 vapp_uuid - is vapp identifier.
5557 disk_size - size of disk to be created in GB
5558
5559 Returns:
5560 None
5561 """
5562 status = False
5563 vm_details = None
5564 try:
5565 # Disk size in GB, convert it into MB
5566 if disk_size is not None:
5567 disk_size_mb = int(disk_size) * 1024
5568 vm_details = self.get_vapp_details_rest(vapp_uuid)
5569
5570 if vm_details and "vm_virtual_hardware" in vm_details:
5571 self.logger.info("Adding disk to VM: {} disk size:{}GB".format(vm_details["name"], disk_size))
5572 disk_href = vm_details["vm_virtual_hardware"]["disk_edit_href"]
5573 status = self.add_new_disk_rest(disk_href, disk_size_mb)
5574
5575 except Exception as exp:
5576 msg = "Error occurred while creating new disk {}.".format(exp)
5577 self.rollback_newvm(vapp_uuid, msg)
5578
5579 if status:
5580 self.logger.info("Added new disk to VM: {} disk size:{}GB".format(vm_details["name"], disk_size))
5581 else:
5582 # If failed to add disk, delete VM
5583 msg = "add_new_disk: Failed to add new disk to {}".format(vm_details["name"])
5584 self.rollback_newvm(vapp_uuid, msg)
5585
5586 def add_new_disk_rest(self, disk_href, disk_size_mb):
5587 """
5588 Retrives vApp Disks section & add new empty disk
5589
5590 Args:
5591 disk_href: Disk section href to addd disk
5592 disk_size_mb: Disk size in MB
5593
5594 Returns: Status of add new disk task
5595 """
5596 status = False
5597 if self.client._session:
5598 headers = {'Accept': 'application/*+xml;version=' + API_VERSION,
5599 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
5600 response = self.perform_request(req_type='GET',
5601 url=disk_href,
5602 headers=headers)
5603
5604 if response.status_code == 403:
5605 response = self.retry_rest('GET', disk_href)
5606
5607 if response.status_code != requests.codes.ok:
5608 self.logger.error("add_new_disk_rest: GET REST API call {} failed. Return status code {}"
5609 .format(disk_href, response.status_code))
5610 return status
5611 try:
5612 # Find but type & max of instance IDs assigned to disks
5613 lxmlroot_respond = lxmlElementTree.fromstring(response.content)
5614 namespaces = {prefix: uri for prefix, uri in lxmlroot_respond.nsmap.items() if prefix}
5615 namespaces["xmlns"] = "http://www.vmware.com/vcloud/v1.5"
5616 instance_id = 0
5617 for item in lxmlroot_respond.iterfind('xmlns:Item', namespaces):
5618 if item.find("rasd:Description", namespaces).text == "Hard disk":
5619 inst_id = int(item.find("rasd:InstanceID", namespaces).text)
5620 if inst_id > instance_id:
5621 instance_id = inst_id
5622 disk_item = item.find("rasd:HostResource", namespaces)
5623 bus_subtype = disk_item.attrib["{" + namespaces['xmlns'] + "}busSubType"]
5624 bus_type = disk_item.attrib["{" + namespaces['xmlns'] + "}busType"]
5625
5626 instance_id = instance_id + 1
5627 new_item = """<Item>
5628 <rasd:Description>Hard disk</rasd:Description>
5629 <rasd:ElementName>New disk</rasd:ElementName>
5630 <rasd:HostResource
5631 xmlns:vcloud="http://www.vmware.com/vcloud/v1.5"
5632 vcloud:capacity="{}"
5633 vcloud:busSubType="{}"
5634 vcloud:busType="{}"></rasd:HostResource>
5635 <rasd:InstanceID>{}</rasd:InstanceID>
5636 <rasd:ResourceType>17</rasd:ResourceType>
5637 </Item>""".format(disk_size_mb, bus_subtype, bus_type, instance_id)
5638
5639 new_data = response.text
5640 # Add new item at the bottom
5641 new_data = new_data.replace('</Item>\n</RasdItemsList>', '</Item>\n{}\n</RasdItemsList>'.format(new_item))
5642
5643 # Send PUT request to modify virtual hardware section with new disk
5644 headers['Content-Type'] = 'application/vnd.vmware.vcloud.rasdItemsList+xml; charset=ISO-8859-1'
5645
5646 response = self.perform_request(req_type='PUT',
5647 url=disk_href,
5648 data=new_data,
5649 headers=headers)
5650
5651 if response.status_code == 403:
5652 add_headers = {'Content-Type': headers['Content-Type']}
5653 response = self.retry_rest('PUT', disk_href, add_headers, new_data)
5654
5655 if response.status_code != 202:
5656 self.logger.error("PUT REST API call {} failed. Return status code {}. response.text:{}"
5657 .format(disk_href, response.status_code, response.text))
5658 else:
5659 add_disk_task = self.get_task_from_response(response.text)
5660 result = self.client.get_task_monitor().wait_for_success(task=add_disk_task)
5661 if result.get('status') == 'success':
5662 status = True
5663 else:
5664 self.logger.error("Add new disk REST task failed to add {} MB disk".format(disk_size_mb))
5665
5666 except Exception as exp:
5667 self.logger.error("Error occurred calling rest api for creating new disk {}".format(exp))
5668
5669 return status
5670
5671 def add_existing_disk(self, catalogs=None, image_id=None, size=None, template_name=None, vapp_uuid=None):
5672 """
5673 Method to add existing disk to vm
5674 Args :
5675 catalogs - List of VDC catalogs
5676 image_id - Catalog ID
5677 template_name - Name of template in catalog
5678 vapp_uuid - UUID of vApp
5679 Returns:
5680 None
5681 """
5682 disk_info = None
5683 vcenter_conect, content = self.get_vcenter_content()
5684 # find moref-id of vm in image
5685 catalog_vm_info = self.get_vapp_template_details(catalogs=catalogs,
5686 image_id=image_id,
5687 )
5688
5689 if catalog_vm_info and "vm_vcenter_info" in catalog_vm_info:
5690 if "vm_moref_id" in catalog_vm_info["vm_vcenter_info"]:
5691 catalog_vm_moref_id = catalog_vm_info["vm_vcenter_info"].get("vm_moref_id", None)
5692 if catalog_vm_moref_id:
5693 self.logger.info("Moref_id of VM in catalog : {}" .format(catalog_vm_moref_id))
5694 _, catalog_vm_obj = self.get_vm_obj(content, catalog_vm_moref_id)
5695 if catalog_vm_obj:
5696 # find existing disk
5697 disk_info = self.find_disk(catalog_vm_obj)
5698 else:
5699 exp_msg = "No VM with image id {} found".format(image_id)
5700 self.rollback_newvm(vapp_uuid, exp_msg, exp_type="NotFound")
5701 else:
5702 exp_msg = "No Image found with image ID {} ".format(image_id)
5703 self.rollback_newvm(vapp_uuid, exp_msg, exp_type="NotFound")
5704
5705 if disk_info:
5706 self.logger.info("Existing disk_info : {}".format(disk_info))
5707 # get VM
5708 vm_moref_id = self.get_vm_moref_id(vapp_uuid)
5709 _, vm_obj = self.get_vm_obj(content, vm_moref_id)
5710 if vm_obj:
5711 status = self.add_disk(vcenter_conect=vcenter_conect,
5712 vm=vm_obj,
5713 disk_info=disk_info,
5714 size=size,
5715 vapp_uuid=vapp_uuid
5716 )
5717 if status:
5718 self.logger.info("Disk from image id {} added to {}".format(image_id,
5719 vm_obj.config.name)
5720 )
5721 else:
5722 msg = "No disk found with image id {} to add in VM {}".format(
5723 image_id,
5724 vm_obj.config.name)
5725 self.rollback_newvm(vapp_uuid, msg, exp_type="NotFound")
5726
5727 def find_disk(self, vm_obj):
5728 """
5729 Method to find details of existing disk in VM
5730 Args :
5731 vm_obj - vCenter object of VM
5732 image_id - Catalog ID
5733 Returns:
5734 disk_info : dict of disk details
5735 """
5736 disk_info = {}
5737 if vm_obj:
5738 try:
5739 devices = vm_obj.config.hardware.device
5740 for device in devices:
5741 if type(device) is vim.vm.device.VirtualDisk:
5742 if (isinstance(device.backing, vim.vm.device.VirtualDisk.FlatVer2BackingInfo) and
5743 hasattr(device.backing, 'fileName')):
5744 disk_info["full_path"] = device.backing.fileName
5745 disk_info["datastore"] = device.backing.datastore
5746 disk_info["capacityKB"] = device.capacityInKB
5747 break
5748 except Exception as exp:
5749 self.logger.error("find_disk() : exception occurred while "
5750 "getting existing disk details :{}".format(exp))
5751 return disk_info
5752
5753 def add_disk(self, vcenter_conect=None, vm=None, size=None, vapp_uuid=None, disk_info={}):
5754 """
5755 Method to add existing disk in VM
5756 Args :
5757 vcenter_conect - vCenter content object
5758 vm - vCenter vm object
5759 disk_info : dict of disk details
5760 Returns:
5761 status : status of add disk task
5762 """
5763 datastore = disk_info["datastore"] if "datastore" in disk_info else None
5764 fullpath = disk_info["full_path"] if "full_path" in disk_info else None
5765 capacityKB = disk_info["capacityKB"] if "capacityKB" in disk_info else None
5766 if size is not None:
5767 # Convert size from GB to KB
5768 sizeKB = int(size) * 1024 * 1024
5769 # compare size of existing disk and user given size.Assign whicherver is greater
5770 self.logger.info("Add Existing disk : sizeKB {} , capacityKB {}".format(
5771 sizeKB, capacityKB))
5772 if sizeKB > capacityKB:
5773 capacityKB = sizeKB
5774
5775 if datastore and fullpath and capacityKB:
5776 try:
5777 spec = vim.vm.ConfigSpec()
5778 # get all disks on a VM, set unit_number to the next available
5779 unit_number = 0
5780 for dev in vm.config.hardware.device:
5781 if hasattr(dev.backing, 'fileName'):
5782 unit_number = int(dev.unitNumber) + 1
5783 # unit_number 7 reserved for scsi controller
5784 if unit_number == 7:
5785 unit_number += 1
5786 if isinstance(dev, vim.vm.device.VirtualDisk):
5787 # vim.vm.device.VirtualSCSIController
5788 controller_key = dev.controllerKey
5789
5790 self.logger.info("Add Existing disk : unit number {} , controller key {}".format(
5791 unit_number, controller_key))
5792 # add disk here
5793 dev_changes = []
5794 disk_spec = vim.vm.device.VirtualDeviceSpec()
5795 disk_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
5796 disk_spec.device = vim.vm.device.VirtualDisk()
5797 disk_spec.device.backing = \
5798 vim.vm.device.VirtualDisk.FlatVer2BackingInfo()
5799 disk_spec.device.backing.thinProvisioned = True
5800 disk_spec.device.backing.diskMode = 'persistent'
5801 disk_spec.device.backing.datastore = datastore
5802 disk_spec.device.backing.fileName = fullpath
5803
5804 disk_spec.device.unitNumber = unit_number
5805 disk_spec.device.capacityInKB = capacityKB
5806 disk_spec.device.controllerKey = controller_key
5807 dev_changes.append(disk_spec)
5808 spec.deviceChange = dev_changes
5809 task = vm.ReconfigVM_Task(spec=spec)
5810 status = self.wait_for_vcenter_task(task, vcenter_conect)
5811 return status
5812 except Exception as exp:
5813 exp_msg = "add_disk() : exception {} occurred while adding disk "\
5814 "{} to vm {}".format(exp,
5815 fullpath,
5816 vm.config.name)
5817 self.rollback_newvm(vapp_uuid, exp_msg)
5818 else:
5819 msg = "add_disk() : Can not add disk to VM with disk info {} ".format(disk_info)
5820 self.rollback_newvm(vapp_uuid, msg)
5821
5822 def get_vcenter_content(self):
5823 """
5824 Get the vsphere content object
5825 """
5826 try:
5827 vm_vcenter_info = self.get_vm_vcenter_info()
5828 except Exception as exp:
5829 self.logger.error("Error occurred while getting vCenter infromationn"
5830 " for VM : {}".format(exp))
5831 raise vimconn.VimConnException(message=exp)
5832
5833 context = None
5834 if hasattr(ssl, '_create_unverified_context'):
5835 context = ssl._create_unverified_context()
5836
5837 vcenter_conect = SmartConnect(
5838 host=vm_vcenter_info["vm_vcenter_ip"],
5839 user=vm_vcenter_info["vm_vcenter_user"],
5840 pwd=vm_vcenter_info["vm_vcenter_password"],
5841 port=int(vm_vcenter_info["vm_vcenter_port"]),
5842 sslContext=context
5843 )
5844 atexit.register(Disconnect, vcenter_conect)
5845 content = vcenter_conect.RetrieveContent()
5846 return vcenter_conect, content
5847
5848 def get_vm_moref_id(self, vapp_uuid):
5849 """
5850 Get the moref_id of given VM
5851 """
5852 try:
5853 if vapp_uuid:
5854 vm_details = self.get_vapp_details_rest(vapp_uuid, need_admin_access=True)
5855 if vm_details and "vm_vcenter_info" in vm_details:
5856 vm_moref_id = vm_details["vm_vcenter_info"].get("vm_moref_id", None)
5857 return vm_moref_id
5858
5859 except Exception as exp:
5860 self.logger.error("Error occurred while getting VM moref ID "
5861 " for VM : {}".format(exp))
5862 return None
5863
5864 def get_vapp_template_details(self, catalogs=None, image_id=None, template_name=None):
5865 """
5866 Method to get vApp template details
5867 Args :
5868 catalogs - list of VDC catalogs
5869 image_id - Catalog ID to find
5870 template_name : template name in catalog
5871 Returns:
5872 parsed_respond : dict of vApp tempalte details
5873 """
5874 parsed_response = {}
5875
5876 vca = self.connect_as_admin()
5877 if not vca:
5878 raise vimconn.VimConnConnectionException("Failed to connect vCD")
5879
5880 try:
5881 org, _ = self.get_vdc_details()
5882 catalog = self.get_catalog_obj(image_id, catalogs)
5883 if catalog:
5884 items = org.get_catalog_item(catalog.get('name'), catalog.get('name'))
5885 catalog_items = [items.attrib]
5886
5887 if len(catalog_items) == 1:
5888 headers = {'Accept': 'application/*+xml;version=' + API_VERSION,
5889 'x-vcloud-authorization': vca._session.headers['x-vcloud-authorization']}
5890
5891 response = self.perform_request(req_type='GET',
5892 url=catalog_items[0].get('href'),
5893 headers=headers)
5894 catalogItem = XmlElementTree.fromstring(response.text)
5895 entity = [child for child in catalogItem if child.get("type") ==
5896 "application/vnd.vmware.vcloud.vAppTemplate+xml"][0]
5897 vapp_tempalte_href = entity.get("href")
5898 # get vapp details and parse moref id
5899
5900 namespaces = {
5901 "vssd":"http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData" ,
5902 'ovf': 'http://schemas.dmtf.org/ovf/envelope/1',
5903 'vmw': 'http://www.vmware.com/schema/ovf',
5904 'vm': 'http://www.vmware.com/vcloud/v1.5',
5905 'rasd':"http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData",
5906 'vmext':"http://www.vmware.com/vcloud/extension/v1.5",
5907 'xmlns':"http://www.vmware.com/vcloud/v1.5"
5908 }
5909
5910 if vca._session:
5911 response = self.perform_request(req_type='GET',
5912 url=vapp_tempalte_href,
5913 headers=headers)
5914
5915 if response.status_code != requests.codes.ok:
5916 self.logger.debug("REST API call {} failed. Return status code {}".format(
5917 vapp_tempalte_href, response.status_code))
5918
5919 else:
5920 xmlroot_respond = XmlElementTree.fromstring(response.text)
5921 children_section = xmlroot_respond.find('vm:Children/', namespaces)
5922 if children_section is not None:
5923 vCloud_extension_section = children_section.find('xmlns:VCloudExtension', namespaces)
5924 if vCloud_extension_section is not None:
5925 vm_vcenter_info = {}
5926 vim_info = vCloud_extension_section.find('vmext:VmVimInfo', namespaces)
5927 vmext = vim_info.find('vmext:VmVimObjectRef', namespaces)
5928 if vmext is not None:
5929 vm_vcenter_info["vm_moref_id"] = vmext.find('vmext:MoRef', namespaces).text
5930 parsed_response["vm_vcenter_info"] = vm_vcenter_info
5931
5932 except Exception as exp:
5933 self.logger.info("Error occurred calling rest api for getting vApp details {}".format(exp))
5934
5935 return parsed_response
5936
5937 def rollback_newvm(self, vapp_uuid, msg, exp_type="Genric"):
5938 """
5939 Method to delete vApp
5940 Args :
5941 vapp_uuid - vApp UUID
5942 msg - Error message to be logged
5943 exp_type : Exception type
5944 Returns:
5945 None
5946 """
5947 if vapp_uuid:
5948 self.delete_vminstance(vapp_uuid)
5949 else:
5950 msg = "No vApp ID"
5951 self.logger.error(msg)
5952 if exp_type == "Genric":
5953 raise vimconn.VimConnException(msg)
5954 elif exp_type == "NotFound":
5955 raise vimconn.VimConnNotFoundException(message=msg)
5956
5957 def add_sriov(self, vapp_uuid, sriov_nets, vmname_andid):
5958 """
5959 Method to attach SRIOV adapters to VM
5960
5961 Args:
5962 vapp_uuid - uuid of vApp/VM
5963 sriov_nets - SRIOV devices infromation as specified in VNFD (flavor)
5964 vmname_andid - vmname
5965
5966 Returns:
5967 The status of add SRIOV adapter task , vm object and
5968 vcenter_conect object
5969 """
5970 vm_obj = None
5971 vcenter_conect, content = self.get_vcenter_content()
5972 vm_moref_id = self.get_vm_moref_id(vapp_uuid)
5973
5974 if vm_moref_id:
5975 try:
5976 no_of_sriov_devices = len(sriov_nets)
5977 if no_of_sriov_devices > 0:
5978 # Get VM and its host
5979 host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
5980 self.logger.info("VM {} is currently on host {}".format(vm_obj, host_obj))
5981 if host_obj and vm_obj:
5982 # get SRIOV devies from host on which vapp is currently installed
5983 avilable_sriov_devices = self.get_sriov_devices(host_obj,
5984 no_of_sriov_devices,
5985 )
5986
5987 if len(avilable_sriov_devices) == 0:
5988 # find other hosts with active pci devices
5989 new_host_obj, avilable_sriov_devices = self.get_host_and_sriov_devices(
5990 content,
5991 no_of_sriov_devices,
5992 )
5993
5994 if new_host_obj is not None and len(avilable_sriov_devices) > 0:
5995 # Migrate vm to the host where SRIOV devices are available
5996 self.logger.info("Relocate VM {} on new host {}".format(vm_obj,
5997 new_host_obj))
5998 task = self.relocate_vm(new_host_obj, vm_obj)
5999 if task is not None:
6000 result = self.wait_for_vcenter_task(task, vcenter_conect)
6001 self.logger.info("Migrate VM status: {}".format(result))
6002 host_obj = new_host_obj
6003 else:
6004 self.logger.info("Fail to migrate VM : {}".format(result))
6005 raise vimconn.VimConnNotFoundException(
6006 "Fail to migrate VM : {} to host {}".format(
6007 vmname_andid,
6008 new_host_obj)
6009 )
6010
6011 if (host_obj is not None and
6012 avilable_sriov_devices is not None and
6013 len(avilable_sriov_devices) > 0):
6014 # Add SRIOV devices one by one
6015 for sriov_net in sriov_nets:
6016 network_name = sriov_net.get('net_id')
6017 self.create_dvPort_group(network_name)
6018 if sriov_net.get('type') == "VF" or sriov_net.get('type') == "SR-IOV":
6019 # add vlan ID ,Modify portgroup for vlan ID
6020 self.configure_vlanID(content, vcenter_conect, network_name)
6021
6022 task = self.add_sriov_to_vm(content,
6023 vm_obj,
6024 host_obj,
6025 network_name,
6026 avilable_sriov_devices[0]
6027 )
6028 if task:
6029 status = self.wait_for_vcenter_task(task, vcenter_conect)
6030 if status:
6031 self.logger.info("Added SRIOV {} to VM {}".format(
6032 no_of_sriov_devices,
6033 str(vm_obj)))
6034 else:
6035 self.logger.error("Fail to add SRIOV {} to VM {}".format(
6036 no_of_sriov_devices,
6037 str(vm_obj)))
6038 raise vimconn.VimConnUnexpectedResponse(
6039 "Fail to add SRIOV adapter in VM ".format(str(vm_obj))
6040 )
6041 return True, vm_obj, vcenter_conect
6042 else:
6043 self.logger.error("Currently there is no host with"
6044 " {} number of avaialble SRIOV "
6045 "VFs required for VM {}".format(
6046 no_of_sriov_devices,
6047 vmname_andid)
6048 )
6049 raise vimconn.VimConnNotFoundException(
6050 "Currently there is no host with {} "
6051 "number of avaialble SRIOV devices required for VM {}".format(
6052 no_of_sriov_devices,
6053 vmname_andid))
6054 else:
6055 self.logger.debug("No infromation about SRIOV devices {} ", sriov_nets)
6056
6057 except vmodl.MethodFault as error:
6058 self.logger.error("Error occurred while adding SRIOV {} ", error)
6059 return None, vm_obj, vcenter_conect
6060
6061 def get_sriov_devices(self, host, no_of_vfs):
6062 """
6063 Method to get the details of SRIOV devices on given host
6064 Args:
6065 host - vSphere host object
6066 no_of_vfs - number of VFs needed on host
6067
6068 Returns:
6069 array of SRIOV devices
6070 """
6071 sriovInfo = []
6072 if host:
6073 for device in host.config.pciPassthruInfo:
6074 if isinstance(device, vim.host.SriovInfo) and device.sriovActive:
6075 if device.numVirtualFunction >= no_of_vfs:
6076 sriovInfo.append(device)
6077 break
6078 return sriovInfo
6079
6080 def get_host_and_sriov_devices(self, content, no_of_vfs):
6081 """
6082 Method to get the details of SRIOV devices infromation on all hosts
6083
6084 Args:
6085 content - vSphere host object
6086 no_of_vfs - number of pci VFs needed on host
6087
6088 Returns:
6089 array of SRIOV devices and host object
6090 """
6091 host_obj = None
6092 sriov_device_objs = None
6093 try:
6094 if content:
6095 container = content.viewManager.CreateContainerView(content.rootFolder,
6096 [vim.HostSystem], True)
6097 for host in container.view:
6098 devices = self.get_sriov_devices(host, no_of_vfs)
6099 if devices:
6100 host_obj = host
6101 sriov_device_objs = devices
6102 break
6103 except Exception as exp:
6104 self.logger.error("Error {} occurred while finding SRIOV devices on host: {}".format(exp, host_obj))
6105
6106 return host_obj, sriov_device_objs
6107
6108 def add_sriov_to_vm(self, content, vm_obj, host_obj, network_name, sriov_device):
6109 """
6110 Method to add SRIOV adapter to vm
6111
6112 Args:
6113 host_obj - vSphere host object
6114 vm_obj - vSphere vm object
6115 content - vCenter content object
6116 network_name - name of distributed virtaul portgroup
6117 sriov_device - SRIOV device info
6118
6119 Returns:
6120 task object
6121 """
6122 devices = []
6123 vnic_label = "sriov nic"
6124 try:
6125 dvs_portgr = self.get_dvport_group(network_name)
6126 network_name = dvs_portgr.name
6127 nic = vim.vm.device.VirtualDeviceSpec()
6128 # VM device
6129 nic.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
6130 nic.device = vim.vm.device.VirtualSriovEthernetCard()
6131 nic.device.addressType = 'assigned'
6132 # nic.device.key = 13016
6133 nic.device.deviceInfo = vim.Description()
6134 nic.device.deviceInfo.label = vnic_label
6135 nic.device.deviceInfo.summary = network_name
6136 nic.device.backing = vim.vm.device.VirtualEthernetCard.NetworkBackingInfo()
6137
6138 nic.device.backing.network = self.get_obj(content, [vim.Network], network_name)
6139 nic.device.backing.deviceName = network_name
6140 nic.device.backing.useAutoDetect = False
6141 nic.device.connectable = vim.vm.device.VirtualDevice.ConnectInfo()
6142 nic.device.connectable.startConnected = True
6143 nic.device.connectable.allowGuestControl = True
6144
6145 nic.device.sriovBacking = vim.vm.device.VirtualSriovEthernetCard.SriovBackingInfo()
6146 nic.device.sriovBacking.physicalFunctionBacking = vim.vm.device.VirtualPCIPassthrough.DeviceBackingInfo()
6147 nic.device.sriovBacking.physicalFunctionBacking.id = sriov_device.id
6148
6149 devices.append(nic)
6150 vmconf = vim.vm.ConfigSpec(deviceChange=devices)
6151 task = vm_obj.ReconfigVM_Task(vmconf)
6152 return task
6153 except Exception as exp:
6154 self.logger.error("Error {} occurred while adding SRIOV adapter in VM: {}".format(exp, vm_obj))
6155 return None
6156
6157 def create_dvPort_group(self, network_name):
6158 """
6159 Method to create disributed virtual portgroup
6160
6161 Args:
6162 network_name - name of network/portgroup
6163
6164 Returns:
6165 portgroup key
6166 """
6167 try:
6168 new_network_name = [network_name, '-', str(uuid.uuid4())]
6169 network_name = ''.join(new_network_name)
6170 vcenter_conect, content = self.get_vcenter_content()
6171
6172 dv_switch = self.get_obj(content, [vim.DistributedVirtualSwitch], self.dvs_name)
6173 if dv_switch:
6174 dv_pg_spec = vim.dvs.DistributedVirtualPortgroup.ConfigSpec()
6175 dv_pg_spec.name = network_name
6176
6177 dv_pg_spec.type = vim.dvs.DistributedVirtualPortgroup.PortgroupType.earlyBinding
6178 dv_pg_spec.defaultPortConfig = vim.dvs.VmwareDistributedVirtualSwitch.VmwarePortConfigPolicy()
6179 dv_pg_spec.defaultPortConfig.securityPolicy = vim.dvs.VmwareDistributedVirtualSwitch.SecurityPolicy()
6180 dv_pg_spec.defaultPortConfig.securityPolicy.allowPromiscuous = vim.BoolPolicy(value=False)
6181 dv_pg_spec.defaultPortConfig.securityPolicy.forgedTransmits = vim.BoolPolicy(value=False)
6182 dv_pg_spec.defaultPortConfig.securityPolicy.macChanges = vim.BoolPolicy(value=False)
6183
6184 task = dv_switch.AddDVPortgroup_Task([dv_pg_spec])
6185 self.wait_for_vcenter_task(task, vcenter_conect)
6186
6187 dvPort_group = self.get_obj(content, [vim.dvs.DistributedVirtualPortgroup], network_name)
6188 if dvPort_group:
6189 self.logger.info("Created disributed virtaul port group: {}".format(dvPort_group))
6190 return dvPort_group.key
6191 else:
6192 self.logger.debug("No disributed virtual switch found with name {}".format(network_name))
6193
6194 except Exception as exp:
6195 self.logger.error("Error occurred while creating disributed virtaul port group {}"
6196 " : {}".format(network_name, exp))
6197 return None
6198
6199 def reconfig_portgroup(self, content, dvPort_group_name, config_info={}):
6200 """
6201 Method to reconfigure disributed virtual portgroup
6202
6203 Args:
6204 dvPort_group_name - name of disributed virtual portgroup
6205 content - vCenter content object
6206 config_info - disributed virtual portgroup configuration
6207
6208 Returns:
6209 task object
6210 """
6211 try:
6212 dvPort_group = self.get_dvport_group(dvPort_group_name)
6213 if dvPort_group:
6214 dv_pg_spec = vim.dvs.DistributedVirtualPortgroup.ConfigSpec()
6215 dv_pg_spec.configVersion = dvPort_group.config.configVersion
6216 dv_pg_spec.defaultPortConfig = vim.dvs.VmwareDistributedVirtualSwitch.VmwarePortConfigPolicy()
6217 if "vlanID" in config_info:
6218 dv_pg_spec.defaultPortConfig.vlan = vim.dvs.VmwareDistributedVirtualSwitch.VlanIdSpec()
6219 dv_pg_spec.defaultPortConfig.vlan.vlanId = config_info.get('vlanID')
6220
6221 task = dvPort_group.ReconfigureDVPortgroup_Task(spec=dv_pg_spec)
6222 return task
6223 else:
6224 return None
6225 except Exception as exp:
6226 self.logger.error("Error occurred while reconfiguraing disributed virtaul port group {}"
6227 " : {}".format(dvPort_group_name, exp))
6228 return None
6229
6230 def destroy_dvport_group(self, dvPort_group_name):
6231 """
6232 Method to destroy disributed virtual portgroup
6233
6234 Args:
6235 network_name - name of network/portgroup
6236
6237 Returns:
6238 True if portgroup successfully got deleted else false
6239 """
6240 vcenter_conect, _ = self.get_vcenter_content()
6241 try:
6242 status = None
6243 dvPort_group = self.get_dvport_group(dvPort_group_name)
6244 if dvPort_group:
6245 task = dvPort_group.Destroy_Task()
6246 status = self.wait_for_vcenter_task(task, vcenter_conect)
6247 return status
6248 except vmodl.MethodFault as exp:
6249 self.logger.error("Caught vmodl fault {} while deleting disributed virtaul port group {}".format(
6250 exp, dvPort_group_name))
6251 return None
6252
6253 def get_dvport_group(self, dvPort_group_name):
6254 """
6255 Method to get disributed virtual portgroup
6256
6257 Args:
6258 network_name - name of network/portgroup
6259
6260 Returns:
6261 portgroup object
6262 """
6263 _, content = self.get_vcenter_content()
6264 dvPort_group = None
6265 try:
6266 container = content.viewManager.CreateContainerView(content.rootFolder,
6267 [vim.dvs.DistributedVirtualPortgroup], True)
6268 for item in container.view:
6269 if item.key == dvPort_group_name:
6270 dvPort_group = item
6271 break
6272 return dvPort_group
6273 except vmodl.MethodFault as exp:
6274 self.logger.error("Caught vmodl fault {} for disributed virtual port group {}".format(
6275 exp, dvPort_group_name))
6276 return None
6277
6278 def get_vlanID_from_dvs_portgr(self, dvPort_group_name):
6279 """
6280 Method to get disributed virtual portgroup vlanID
6281
6282 Args:
6283 network_name - name of network/portgroup
6284
6285 Returns:
6286 vlan ID
6287 """
6288 vlanId = None
6289 try:
6290 dvPort_group = self.get_dvport_group(dvPort_group_name)
6291 if dvPort_group:
6292 vlanId = dvPort_group.config.defaultPortConfig.vlan.vlanId
6293 except vmodl.MethodFault as exp:
6294 self.logger.error("Caught vmodl fault {} for disributed virtaul port group {}".format(
6295 exp, dvPort_group_name))
6296 return vlanId
6297
6298 def configure_vlanID(self, content, vcenter_conect, dvPort_group_name):
6299 """
6300 Method to configure vlanID in disributed virtual portgroup vlanID
6301
6302 Args:
6303 network_name - name of network/portgroup
6304
6305 Returns:
6306 None
6307 """
6308 vlanID = self.get_vlanID_from_dvs_portgr(dvPort_group_name)
6309 if vlanID == 0:
6310 # configure vlanID
6311 vlanID = self.genrate_vlanID(dvPort_group_name)
6312 config = {"vlanID": vlanID}
6313 task = self.reconfig_portgroup(content, dvPort_group_name,
6314 config_info=config)
6315 if task:
6316 status = self.wait_for_vcenter_task(task, vcenter_conect)
6317 if status:
6318 self.logger.info("Reconfigured Port group {} for vlan ID {}".format(
6319 dvPort_group_name, vlanID))
6320 else:
6321 self.logger.error("Fail reconfigure portgroup {} for vlanID{}".format(
6322 dvPort_group_name, vlanID))
6323
6324 def genrate_vlanID(self, network_name):
6325 """
6326 Method to get unused vlanID
6327 Args:
6328 network_name - name of network/portgroup
6329 Returns:
6330 vlanID
6331 """
6332 vlan_id = None
6333 used_ids = []
6334 if self.config.get('vlanID_range') is None:
6335 raise vimconn.VimConnConflictException("You must provide a 'vlanID_range' "
6336 "at config value before creating sriov network with vlan tag")
6337 if "used_vlanIDs" not in self.persistent_info:
6338 self.persistent_info["used_vlanIDs"] = {}
6339 else:
6340 used_ids = list(self.persistent_info["used_vlanIDs"].values())
6341
6342 for vlanID_range in self.config.get('vlanID_range'):
6343 start_vlanid, end_vlanid = vlanID_range.split("-")
6344 if start_vlanid > end_vlanid:
6345 raise vimconn.VimConnConflictException("Invalid vlan ID range {}".format(
6346 vlanID_range))
6347
6348 for vid in range(int(start_vlanid), int(end_vlanid) + 1):
6349 if vid not in used_ids:
6350 vlan_id = vid
6351 self.persistent_info["used_vlanIDs"][network_name] = vlan_id
6352 return vlan_id
6353 if vlan_id is None:
6354 raise vimconn.VimConnConflictException("All Vlan IDs are in use")
6355
6356 def get_obj(self, content, vimtype, name):
6357 """
6358 Get the vsphere object associated with a given text name
6359 """
6360 obj = None
6361 container = content.viewManager.CreateContainerView(content.rootFolder, vimtype, True)
6362 for item in container.view:
6363 if item.name == name:
6364 obj = item
6365 break
6366 return obj
6367
6368 def insert_media_to_vm(self, vapp, image_id):
6369 """
6370 Method to insert media CD-ROM (ISO image) from catalog to vm.
6371 vapp - vapp object to get vm id
6372 Image_id - image id for cdrom to be inerted to vm
6373 """
6374 # create connection object
6375 vca = self.connect()
6376 try:
6377 # fetching catalog details
6378 rest_url = "{}/api/catalog/{}".format(self.url, image_id)
6379 if vca._session:
6380 headers = {'Accept': 'application/*+xml;version=' + API_VERSION,
6381 'x-vcloud-authorization': vca._session.headers['x-vcloud-authorization']}
6382 response = self.perform_request(req_type='GET',
6383 url=rest_url,
6384 headers=headers)
6385
6386 if response.status_code != 200:
6387 self.logger.error("REST call {} failed reason : {}"
6388 "status code : {}".format(url_rest_call,
6389 response.text,
6390 response.status_code))
6391 raise vimconn.VimConnException("insert_media_to_vm(): Failed to get "
6392 "catalog details")
6393 # searching iso name and id
6394 iso_name, media_id = self.get_media_details(vca, response.text)
6395
6396 if iso_name and media_id:
6397 data = """<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
6398 <ns6:MediaInsertOrEjectParams
6399 xmlns="http://www.vmware.com/vcloud/versions" xmlns:ns2="http://schemas.dmtf.org/ovf/envelope/1"
6400 xmlns:ns3="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData"
6401 xmlns:ns4="http://schemas.dmtf.org/wbem/wscim/1/common"
6402 xmlns:ns5="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData"
6403 xmlns:ns6="http://www.vmware.com/vcloud/v1.5"
6404 xmlns:ns7="http://www.vmware.com/schema/ovf"
6405 xmlns:ns8="http://schemas.dmtf.org/ovf/environment/1"
6406 xmlns:ns9="http://www.vmware.com/vcloud/extension/v1.5">
6407 <ns6:Media
6408 type="application/vnd.vmware.vcloud.media+xml"
6409 name="{}"
6410 id="urn:vcloud:media:{}"
6411 href="https://{}/api/media/{}"/>
6412 </ns6:MediaInsertOrEjectParams>""".format(iso_name, media_id,
6413 self.url, media_id)
6414
6415 for vms in vapp.get_all_vms():
6416 vm_id = vms.get('id').split(':')[-1]
6417
6418 headers['Content-Type'] = 'application/vnd.vmware.vcloud.mediaInsertOrEjectParams+xml'
6419 rest_url = "{}/api/vApp/vm-{}/media/action/insertMedia".format(self.url, vm_id)
6420
6421 response = self.perform_request(req_type='POST',
6422 url=rest_url,
6423 data=data,
6424 headers=headers)
6425
6426 if response.status_code != 202:
6427 error_msg = ("insert_media_to_vm() : Failed to insert CD-ROM to vm. Reason {}. "
6428 "Status code {}".format(response.text, response.status_code))
6429 self.logger.error(error_msg)
6430 raise vimconn.VimConnException(error_msg)
6431 else:
6432 task = self.get_task_from_response(response.text)
6433 result = self.client.get_task_monitor().wait_for_success(task=task)
6434 if result.get('status') == 'success':
6435 self.logger.info("insert_media_to_vm(): Sucessfully inserted media ISO"
6436 " image to vm {}".format(vm_id))
6437
6438 except Exception as exp:
6439 self.logger.error("insert_media_to_vm() : exception occurred "
6440 "while inserting media CD-ROM")
6441 raise vimconn.VimConnException(message=exp)
6442
6443 def get_media_details(self, vca, content):
6444 """
6445 Method to get catalog item details
6446 vca - connection object
6447 content - Catalog details
6448 Return - Media name, media id
6449 """
6450 cataloghref_list = []
6451 try:
6452 if content:
6453 vm_list_xmlroot = XmlElementTree.fromstring(content)
6454 for child in vm_list_xmlroot.iter():
6455 if 'CatalogItem' in child.tag:
6456 cataloghref_list.append(child.attrib.get('href'))
6457 if cataloghref_list is not None:
6458 for href in cataloghref_list:
6459 if href:
6460 headers = {'Accept': 'application/*+xml;version=' + API_VERSION,
6461 'x-vcloud-authorization': vca._session.headers['x-vcloud-authorization']}
6462 response = self.perform_request(req_type='GET',
6463 url=href,
6464 headers=headers)
6465 if response.status_code != 200:
6466 self.logger.error("REST call {} failed reason : {}"
6467 "status code : {}".format(href,
6468 response.text,
6469 response.status_code))
6470 raise vimconn.VimConnException("get_media_details : Failed to get "
6471 "catalogitem details")
6472 list_xmlroot = XmlElementTree.fromstring(response.text)
6473 for child in list_xmlroot.iter():
6474 if 'Entity' in child.tag:
6475 if 'media' in child.attrib.get('href'):
6476 name = child.attrib.get('name')
6477 media_id = child.attrib.get('href').split('/').pop()
6478 return name, media_id
6479 else:
6480 self.logger.debug("Media name and id not found")
6481 return False, False
6482 except Exception as exp:
6483 self.logger.error("get_media_details : exception occurred "
6484 "getting media details")
6485 raise vimconn.VimConnException(message=exp)
6486
6487 def retry_rest(self, method, url, add_headers=None, data=None):
6488 """ Method to get Token & retry respective REST request
6489 Args:
6490 api - REST API - Can be one of 'GET' or 'PUT' or 'POST'
6491 url - request url to be used
6492 add_headers - Additional headers (optional)
6493 data - Request payload data to be passed in request
6494 Returns:
6495 response - Response of request
6496 """
6497 response = None
6498
6499 # Get token
6500 self.get_token()
6501
6502 if self.client._session:
6503 headers = {'Accept': 'application/*+xml;version=' + API_VERSION,
6504 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
6505
6506 if add_headers:
6507 headers.update(add_headers)
6508
6509 if method == 'GET':
6510 response = self.perform_request(req_type='GET',
6511 url=url,
6512 headers=headers)
6513 elif method == 'PUT':
6514 response = self.perform_request(req_type='PUT',
6515 url=url,
6516 headers=headers,
6517 data=data)
6518 elif method == 'POST':
6519 response = self.perform_request(req_type='POST',
6520 url=url,
6521 headers=headers,
6522 data=data)
6523 elif method == 'DELETE':
6524 response = self.perform_request(req_type='DELETE',
6525 url=url,
6526 headers=headers)
6527 return response
6528
6529 def get_token(self):
6530 """ Generate a new token if expired
6531
6532 Returns:
6533 The return client object that letter can be used to connect to vCloud director as admin for VDC
6534 """
6535 self.client = self.connect()
6536
6537 def get_vdc_details(self):
6538 """ Get VDC details using pyVcloud Lib
6539
6540 Returns org and vdc object
6541 """
6542 vdc = None
6543 try:
6544 org = Org(self.client, resource=self.client.get_org())
6545 vdc = org.get_vdc(self.tenant_name)
6546 except Exception as e:
6547 # pyvcloud not giving a specific exception, Refresh nevertheless
6548 self.logger.debug("Received exception {}, refreshing token ".format(str(e)))
6549
6550 # Retry once, if failed by refreshing token
6551 if vdc is None:
6552 self.get_token()
6553 org = Org(self.client, resource=self.client.get_org())
6554 vdc = org.get_vdc(self.tenant_name)
6555
6556 return org, vdc
6557
6558 def perform_request(self, req_type, url, headers=None, data=None):
6559 """Perform the POST/PUT/GET/DELETE request."""
6560
6561 # Log REST request details
6562 self.log_request(req_type, url=url, headers=headers, data=data)
6563 # perform request and return its result
6564 if req_type == 'GET':
6565 response = requests.get(url=url,
6566 headers=headers,
6567 verify=False)
6568 elif req_type == 'PUT':
6569 response = requests.put(url=url,
6570 headers=headers,
6571 data=data,
6572 verify=False)
6573 elif req_type == 'POST':
6574 response = requests.post(url=url,
6575 headers=headers,
6576 data=data,
6577 verify=False)
6578 elif req_type == 'DELETE':
6579 response = requests.delete(url=url,
6580 headers=headers,
6581 verify=False)
6582 # Log the REST response
6583 self.log_response(response)
6584
6585 return response
6586
6587 def log_request(self, req_type, url=None, headers=None, data=None):
6588 """Logs REST request details"""
6589
6590 if req_type is not None:
6591 self.logger.debug("Request type: {}".format(req_type))
6592
6593 if url is not None:
6594 self.logger.debug("Request url: {}".format(url))
6595
6596 if headers is not None:
6597 for header in headers:
6598 self.logger.debug("Request header: {}: {}".format(header, headers[header]))
6599
6600 if data is not None:
6601 self.logger.debug("Request data: {}".format(data))
6602
6603 def log_response(self, response):
6604 """Logs REST response details"""
6605
6606 self.logger.debug("Response status code: {} ".format(response.status_code))
6607
6608 def get_task_from_response(self, content):
6609 """
6610 content - API response.text(response.text)
6611 return task object
6612 """
6613 xmlroot = XmlElementTree.fromstring(content)
6614 if xmlroot.tag.split('}')[1] == "Task":
6615 return xmlroot
6616 else:
6617 for ele in xmlroot:
6618 if ele.tag.split("}")[1] == "Tasks":
6619 task = ele[0]
6620 break
6621 return task
6622
6623 def power_on_vapp(self, vapp_id, vapp_name):
6624 """
6625 vapp_id - vApp uuid
6626 vapp_name - vAapp name
6627 return - Task object
6628 """
6629 headers = {'Accept': 'application/*+xml;version=' + API_VERSION,
6630 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
6631
6632 poweron_href = "{}/api/vApp/vapp-{}/power/action/powerOn".format(self.url,
6633 vapp_id)
6634 response = self.perform_request(req_type='POST',
6635 url=poweron_href,
6636 headers=headers)
6637
6638 if response.status_code != 202:
6639 self.logger.error("REST call {} failed reason : {}"
6640 "status code : {} ".format(poweron_href,
6641 response.text,
6642 response.status_code))
6643 raise vimconn.VimConnException("power_on_vapp() : Failed to power on "
6644 "vApp {}".format(vapp_name))
6645 else:
6646 poweron_task = self.get_task_from_response(response.text)
6647 return poweron_task