Fixes content conversion after python3
[osm/RO.git] / RO-VIM-vmware / osm_rovim_vmware / vimconn_vmware.py
1 # -*- coding: utf-8 -*-
2
3 ##
4 # Copyright 2016-2019 VMware Inc.
5 # This file is part of ETSI OSM
6 # All Rights Reserved.
7 #
8 # Licensed under the Apache License, Version 2.0 (the "License"); you may
9 # not use this file except in compliance with the License. You may obtain
10 # a copy of the License at
11 #
12 # http://www.apache.org/licenses/LICENSE-2.0
13 #
14 # Unless required by applicable law or agreed to in writing, software
15 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
16 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
17 # License for the specific language governing permissions and limitations
18 # under the License.
19 #
20 # For those usages not covered by the Apache License, Version 2.0 please
21 # contact: osslegalrouting@vmware.com
22 ##
23
24 """
25 vimconn_vmware implementation an Abstract class in order to interact with VMware vCloud Director.
26 mbayramov@vmware.com
27 """
28 from progressbar import Percentage, Bar, ETA, FileTransferSpeed, ProgressBar
29
30 from osm_ro import vimconn
31 import os
32 import shutil
33 import subprocess
34 import tempfile
35 import traceback
36 import requests
37 import ssl
38 import atexit
39
40 from pyVmomi import vim, vmodl
41 from pyVim.connect import SmartConnect, Disconnect
42
43 from xml.etree import ElementTree as XmlElementTree
44 from lxml import etree as lxmlElementTree
45
46 import yaml
47 from pyvcloud.vcd.client import BasicLoginCredentials,Client,VcdTaskException
48 from pyvcloud.vcd.vdc import VDC
49 from pyvcloud.vcd.org import Org
50 import re
51 from pyvcloud.vcd.vapp import VApp
52 from xml.sax.saxutils import escape
53 import logging
54 import json
55 import time
56 import uuid
57 # import httplib
58 #For python3
59 #import http.client # TODO py3 check
60 import hashlib
61 import socket
62 import struct
63 import netaddr
64 import random
65
66 # global variable for vcd connector type
67 STANDALONE = 'standalone'
68
69 # key for flavor dicts
70 FLAVOR_RAM_KEY = 'ram'
71 FLAVOR_VCPUS_KEY = 'vcpus'
72 FLAVOR_DISK_KEY = 'disk'
73 DEFAULT_IP_PROFILE = {'dhcp_count':50,
74 'dhcp_enabled':True,
75 'ip_version':"IPv4"
76 }
77 # global variable for wait time
78 INTERVAL_TIME = 5
79 MAX_WAIT_TIME = 1800
80
81 API_VERSION = '27.0'
82
83 __author__ = "Mustafa Bayramov, Arpita Kate, Sachin Bhangare, Prakash Kasar"
84 __date__ = "$09-Mar-2018 11:09:29$"
85 __version__ = '0.2'
86
87 # -1: "Could not be created",
88 # 0: "Unresolved",
89 # 1: "Resolved",
90 # 2: "Deployed",
91 # 3: "Suspended",
92 # 4: "Powered on",
93 # 5: "Waiting for user input",
94 # 6: "Unknown state",
95 # 7: "Unrecognized state",
96 # 8: "Powered off",
97 # 9: "Inconsistent state",
98 # 10: "Children do not all have the same status",
99 # 11: "Upload initiated, OVF descriptor pending",
100 # 12: "Upload initiated, copying contents",
101 # 13: "Upload initiated , disk contents pending",
102 # 14: "Upload has been quarantined",
103 # 15: "Upload quarantine period has expired"
104
105 # mapping vCD status to MANO
106 vcdStatusCode2manoFormat = {4: 'ACTIVE',
107 7: 'PAUSED',
108 3: 'SUSPENDED',
109 8: 'INACTIVE',
110 12: 'BUILD',
111 -1: 'ERROR',
112 14: 'DELETED'}
113
114 #
115 netStatus2manoFormat = {'ACTIVE': 'ACTIVE', 'PAUSED': 'PAUSED', 'INACTIVE': 'INACTIVE', 'BUILD': 'BUILD',
116 'ERROR': 'ERROR', 'DELETED': 'DELETED'
117 }
118
119
120 class vimconnector(vimconn.vimconnector):
121 # dict used to store flavor in memory
122 flavorlist = {}
123
124 def __init__(self, uuid=None, name=None, tenant_id=None, tenant_name=None,
125 url=None, url_admin=None, user=None, passwd=None, log_level=None, config={}, persistent_info={}):
126 """
127 Constructor create vmware connector to vCloud director.
128
129 By default construct doesn't validate connection state. So client can create object with None arguments.
130 If client specified username , password and host and VDC name. Connector initialize other missing attributes.
131
132 a) It initialize organization UUID
133 b) Initialize tenant_id/vdc ID. (This information derived from tenant name)
134
135 Args:
136 uuid - is organization uuid.
137 name - is organization name that must be presented in vCloud director.
138 tenant_id - is VDC uuid it must be presented in vCloud director
139 tenant_name - is VDC name.
140 url - is hostname or ip address of vCloud director
141 url_admin - same as above.
142 user - is user that administrator for organization. Caller must make sure that
143 username has right privileges.
144
145 password - is password for a user.
146
147 VMware connector also requires PVDC administrative privileges and separate account.
148 This variables must be passed via config argument dict contains keys
149
150 dict['admin_username']
151 dict['admin_password']
152 config - Provide NSX and vCenter information
153
154 Returns:
155 Nothing.
156 """
157
158 vimconn.vimconnector.__init__(self, uuid, name, tenant_id, tenant_name, url,
159 url_admin, user, passwd, log_level, config)
160
161 self.logger = logging.getLogger('openmano.vim.vmware')
162 self.logger.setLevel(10)
163 self.persistent_info = persistent_info
164
165 self.name = name
166 self.id = uuid
167 self.url = url
168 self.url_admin = url_admin
169 self.tenant_id = tenant_id
170 self.tenant_name = tenant_name
171 self.user = user
172 self.passwd = passwd
173 self.config = config
174 self.admin_password = None
175 self.admin_user = None
176 self.org_name = ""
177 self.nsx_manager = None
178 self.nsx_user = None
179 self.nsx_password = None
180 self.availability_zone = None
181
182 # Disable warnings from self-signed certificates.
183 requests.packages.urllib3.disable_warnings()
184
185 if tenant_name is not None:
186 orgnameandtenant = tenant_name.split(":")
187 if len(orgnameandtenant) == 2:
188 self.tenant_name = orgnameandtenant[1]
189 self.org_name = orgnameandtenant[0]
190 else:
191 self.tenant_name = tenant_name
192 if "orgname" in config:
193 self.org_name = config['orgname']
194
195 if log_level:
196 self.logger.setLevel(getattr(logging, log_level))
197
198 try:
199 self.admin_user = config['admin_username']
200 self.admin_password = config['admin_password']
201 except KeyError:
202 raise vimconn.vimconnException(message="Error admin username or admin password is empty.")
203
204 try:
205 self.nsx_manager = config['nsx_manager']
206 self.nsx_user = config['nsx_user']
207 self.nsx_password = config['nsx_password']
208 except KeyError:
209 raise vimconn.vimconnException(message="Error: nsx manager or nsx user or nsx password is empty in Config")
210
211 self.vcenter_ip = config.get("vcenter_ip", None)
212 self.vcenter_port = config.get("vcenter_port", None)
213 self.vcenter_user = config.get("vcenter_user", None)
214 self.vcenter_password = config.get("vcenter_password", None)
215
216 #Set availability zone for Affinity rules
217 self.availability_zone = self.set_availability_zones()
218
219 # ############# Stub code for SRIOV #################
220 # try:
221 # self.dvs_name = config['dv_switch_name']
222 # except KeyError:
223 # raise vimconn.vimconnException(message="Error: distributed virtaul switch name is empty in Config")
224 #
225 # self.vlanID_range = config.get("vlanID_range", None)
226
227 self.org_uuid = None
228 self.client = None
229
230 if not url:
231 raise vimconn.vimconnException('url param can not be NoneType')
232
233 if not self.url_admin: # try to use normal url
234 self.url_admin = self.url
235
236 logging.debug("UUID: {} name: {} tenant_id: {} tenant name {}".format(self.id, self.org_name,
237 self.tenant_id, self.tenant_name))
238 logging.debug("vcd url {} vcd username: {} vcd password: {}".format(self.url, self.user, self.passwd))
239 logging.debug("vcd admin username {} vcd admin passowrd {}".format(self.admin_user, self.admin_password))
240
241 # initialize organization
242 if self.user is not None and self.passwd is not None and self.url:
243 self.init_organization()
244
245 def __getitem__(self, index):
246 if index == 'name':
247 return self.name
248 if index == 'tenant_id':
249 return self.tenant_id
250 if index == 'tenant_name':
251 return self.tenant_name
252 elif index == 'id':
253 return self.id
254 elif index == 'org_name':
255 return self.org_name
256 elif index == 'org_uuid':
257 return self.org_uuid
258 elif index == 'user':
259 return self.user
260 elif index == 'passwd':
261 return self.passwd
262 elif index == 'url':
263 return self.url
264 elif index == 'url_admin':
265 return self.url_admin
266 elif index == "config":
267 return self.config
268 else:
269 raise KeyError("Invalid key '{}'".format(index))
270
271 def __setitem__(self, index, value):
272 if index == 'name':
273 self.name = value
274 if index == 'tenant_id':
275 self.tenant_id = value
276 if index == 'tenant_name':
277 self.tenant_name = value
278 elif index == 'id':
279 self.id = value
280 elif index == 'org_name':
281 self.org_name = value
282 elif index == 'org_uuid':
283 self.org_uuid = value
284 elif index == 'user':
285 self.user = value
286 elif index == 'passwd':
287 self.passwd = value
288 elif index == 'url':
289 self.url = value
290 elif index == 'url_admin':
291 self.url_admin = value
292 else:
293 raise KeyError("Invalid key '{}'".format(index))
294
295 def connect_as_admin(self):
296 """ Method connect as pvdc admin user to vCloud director.
297 There are certain action that can be done only by provider vdc admin user.
298 Organization creation / provider network creation etc.
299
300 Returns:
301 The return client object that latter can be used to connect to vcloud director as admin for provider vdc
302 """
303 self.logger.debug("Logging into vCD {} as admin.".format(self.org_name))
304
305 try:
306 host = self.url
307 org = 'System'
308 client_as_admin = Client(host, verify_ssl_certs=False, api_version=API_VERSION)
309 client_as_admin.set_credentials(BasicLoginCredentials(self.admin_user, org, self.admin_password))
310 except Exception as e:
311 raise vimconn.vimconnException(
312 "Can't connect to vCloud director as: {} with exception {}".format(self.admin_user, e))
313
314 return client_as_admin
315
316 def connect(self):
317 """ Method connect as normal user to vCloud director.
318
319 Returns:
320 The return client object that latter can be used to connect to vCloud director as admin for VDC
321 """
322 try:
323 self.logger.debug("Logging into vCD {} as {} to datacenter {}.".format(self.org_name,
324 self.user,
325 self.org_name))
326 host = self.url
327 client = Client(host, verify_ssl_certs=False, api_version=API_VERSION)
328 client.set_credentials(BasicLoginCredentials(self.user, self.org_name, self.passwd))
329 except Exception as e:
330 raise vimconn.vimconnConnectionException("Can't connect to vCloud director org: "
331 "{} as user {} with exception: {}".format(self.org_name,
332 self.user,
333 e))
334
335 return client
336
337 def init_organization(self):
338 """ Method initialize organization UUID and VDC parameters.
339
340 At bare minimum client must provide organization name that present in vCloud director and VDC.
341
342 The VDC - UUID ( tenant_id) will be initialized at the run time if client didn't call constructor.
343 The Org - UUID will be initialized at the run time if data center present in vCloud director.
344
345 Returns:
346 The return vca object that letter can be used to connect to vcloud direct as admin
347 """
348 client = self.connect()
349 if not client:
350 raise vimconn.vimconnConnectionException("Failed to connect vCD.")
351
352 self.client = client
353 try:
354 if self.org_uuid is None:
355 org_list = client.get_org_list()
356 for org in org_list.Org:
357 # we set org UUID at the init phase but we can do it only when we have valid credential.
358 if org.get('name') == self.org_name:
359 self.org_uuid = org.get('href').split('/')[-1]
360 self.logger.debug("Setting organization UUID {}".format(self.org_uuid))
361 break
362 else:
363 raise vimconn.vimconnException("Vcloud director organization {} not found".format(self.org_name))
364
365 # if well good we require for org details
366 org_details_dict = self.get_org(org_uuid=self.org_uuid)
367
368 # we have two case if we want to initialize VDC ID or VDC name at run time
369 # tenant_name provided but no tenant id
370 if self.tenant_id is None and self.tenant_name is not None and 'vdcs' in org_details_dict:
371 vdcs_dict = org_details_dict['vdcs']
372 for vdc in vdcs_dict:
373 if vdcs_dict[vdc] == self.tenant_name:
374 self.tenant_id = vdc
375 self.logger.debug("Setting vdc uuid {} for organization UUID {}".format(self.tenant_id,
376 self.org_name))
377 break
378 else:
379 raise vimconn.vimconnException("Tenant name indicated but not present in vcloud director.")
380 # case two we have tenant_id but we don't have tenant name so we find and set it.
381 if self.tenant_id is not None and self.tenant_name is None and 'vdcs' in org_details_dict:
382 vdcs_dict = org_details_dict['vdcs']
383 for vdc in vdcs_dict:
384 if vdc == self.tenant_id:
385 self.tenant_name = vdcs_dict[vdc]
386 self.logger.debug("Setting vdc uuid {} for organization UUID {}".format(self.tenant_id,
387 self.org_name))
388 break
389 else:
390 raise vimconn.vimconnException("Tenant id indicated but not present in vcloud director")
391 self.logger.debug("Setting organization uuid {}".format(self.org_uuid))
392 except Exception as e:
393 self.logger.debug("Failed initialize organization UUID for org {}: {}".format(self.org_name), e)
394 self.logger.debug(traceback.format_exc())
395 self.org_uuid = None
396
397 def new_tenant(self, tenant_name=None, tenant_description=None):
398 """ Method adds a new tenant to VIM with this name.
399 This action requires access to create VDC action in vCloud director.
400
401 Args:
402 tenant_name is tenant_name to be created.
403 tenant_description not used for this call
404
405 Return:
406 returns the tenant identifier in UUID format.
407 If action is failed method will throw vimconn.vimconnException method
408 """
409 vdc_task = self.create_vdc(vdc_name=tenant_name)
410 if vdc_task is not None:
411 vdc_uuid, value = vdc_task.popitem()
412 self.logger.info("Created new vdc {} and uuid: {}".format(tenant_name, vdc_uuid))
413 return vdc_uuid
414 else:
415 raise vimconn.vimconnException("Failed create tenant {}".format(tenant_name))
416
417 def delete_tenant(self, tenant_id=None):
418 """ Delete a tenant from VIM
419 Args:
420 tenant_id is tenant_id to be deleted.
421
422 Return:
423 returns the tenant identifier in UUID format.
424 If action is failed method will throw exception
425 """
426 vca = self.connect_as_admin()
427 if not vca:
428 raise vimconn.vimconnConnectionException("Failed to connect vCD")
429
430 if tenant_id is not None:
431 if vca._session:
432 #Get OrgVDC
433 url_list = [self.url, '/api/vdc/', tenant_id]
434 orgvdc_herf = ''.join(url_list)
435
436 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
437 'x-vcloud-authorization': vca._session.headers['x-vcloud-authorization']}
438 response = self.perform_request(req_type='GET',
439 url=orgvdc_herf,
440 headers=headers)
441
442 if response.status_code != requests.codes.ok:
443 self.logger.debug("delete_tenant():GET REST API call {} failed. "\
444 "Return status code {}".format(orgvdc_herf,
445 response.status_code))
446 raise vimconn.vimconnNotFoundException("Fail to get tenant {}".format(tenant_id))
447
448 lxmlroot_respond = lxmlElementTree.fromstring(response.content)
449 namespaces = {prefix: uri for prefix, uri in lxmlroot_respond.nsmap.items() if prefix}
450 namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"
451 vdc_remove_href = lxmlroot_respond.find("xmlns:Link[@rel='remove']",namespaces).attrib['href']
452 vdc_remove_href = vdc_remove_href + '?recursive=true&force=true'
453
454 response = self.perform_request(req_type='DELETE',
455 url=vdc_remove_href,
456 headers=headers)
457
458 if response.status_code == 202:
459 time.sleep(5)
460 return tenant_id
461 else:
462 self.logger.debug("delete_tenant(): DELETE REST API call {} failed. "\
463 "Return status code {}".format(vdc_remove_href,
464 response.status_code))
465 raise vimconn.vimconnException("Fail to delete tenant with ID {}".format(tenant_id))
466 else:
467 self.logger.debug("delete_tenant():Incorrect tenant ID {}".format(tenant_id))
468 raise vimconn.vimconnNotFoundException("Fail to get tenant {}".format(tenant_id))
469
470
471 def get_tenant_list(self, filter_dict={}):
472 """Obtain tenants of VIM
473 filter_dict can contain the following keys:
474 name: filter by tenant name
475 id: filter by tenant uuid/id
476 <other VIM specific>
477 Returns the tenant list of dictionaries:
478 [{'name':'<name>, 'id':'<id>, ...}, ...]
479
480 """
481 org_dict = self.get_org(self.org_uuid)
482 vdcs_dict = org_dict['vdcs']
483
484 vdclist = []
485 try:
486 for k in vdcs_dict:
487 entry = {'name': vdcs_dict[k], 'id': k}
488 # if caller didn't specify dictionary we return all tenants.
489 if filter_dict is not None and filter_dict:
490 filtered_entry = entry.copy()
491 filtered_dict = set(entry.keys()) - set(filter_dict)
492 for unwanted_key in filtered_dict: del entry[unwanted_key]
493 if filter_dict == entry:
494 vdclist.append(filtered_entry)
495 else:
496 vdclist.append(entry)
497 except:
498 self.logger.debug("Error in get_tenant_list()")
499 self.logger.debug(traceback.format_exc())
500 raise vimconn.vimconnException("Incorrect state. {}")
501
502 return vdclist
503
504 def new_network(self, net_name, net_type, ip_profile=None, shared=False, provider_network_profile=None):
505 """Adds a tenant network to VIM
506 Params:
507 'net_name': name of the network
508 'net_type': one of:
509 'bridge': overlay isolated network
510 'data': underlay E-LAN network for Passthrough and SRIOV interfaces
511 'ptp': underlay E-LINE network for Passthrough and SRIOV interfaces.
512 'ip_profile': is a dict containing the IP parameters of the network
513 'ip_version': can be "IPv4" or "IPv6" (Currently only IPv4 is implemented)
514 'subnet_address': ip_prefix_schema, that is X.X.X.X/Y
515 'gateway_address': (Optional) ip_schema, that is X.X.X.X
516 'dns_address': (Optional) comma separated list of ip_schema, e.g. X.X.X.X[,X,X,X,X]
517 'dhcp_enabled': True or False
518 'dhcp_start_address': ip_schema, first IP to grant
519 'dhcp_count': number of IPs to grant.
520 'shared': if this network can be seen/use by other tenants/organization
521 'provider_network_profile': (optional) contains {segmentation-id: vlan, provider-network: vim_netowrk}
522 Returns a tuple with the network identifier and created_items, or raises an exception on error
523 created_items can be None or a dictionary where this method can include key-values that will be passed to
524 the method delete_network. Can be used to store created segments, created l2gw connections, etc.
525 Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
526 as not present.
527 """
528
529 self.logger.debug("new_network tenant {} net_type {} ip_profile {} shared {} provider_network_profile {}"
530 .format(net_name, net_type, ip_profile, shared, provider_network_profile))
531 vlan = None
532 if provider_network_profile:
533 vlan = provider_network_profile.get("segmentation-id")
534
535 created_items = {}
536 isshared = 'false'
537 if shared:
538 isshared = 'true'
539
540 # ############# Stub code for SRIOV #################
541 # if net_type == "data" or net_type == "ptp":
542 # if self.config.get('dv_switch_name') == None:
543 # raise vimconn.vimconnConflictException("You must provide 'dv_switch_name' at config value")
544 # network_uuid = self.create_dvPort_group(net_name)
545 parent_network_uuid = None
546
547 if provider_network_profile is not None:
548 for k, v in provider_network_profile.items():
549 if k == 'physical_network':
550 parent_network_uuid = self.get_physical_network_by_name(v)
551
552 network_uuid = self.create_network(network_name=net_name, net_type=net_type,
553 ip_profile=ip_profile, isshared=isshared,
554 parent_network_uuid=parent_network_uuid)
555 if network_uuid is not None:
556 return network_uuid, created_items
557 else:
558 raise vimconn.vimconnUnexpectedResponse("Failed create a new network {}".format(net_name))
559
560 def get_vcd_network_list(self):
561 """ Method available organization for a logged in tenant
562
563 Returns:
564 The return vca object that letter can be used to connect to vcloud direct as admin
565 """
566
567 self.logger.debug("get_vcd_network_list(): retrieving network list for vcd {}".format(self.tenant_name))
568
569 if not self.tenant_name:
570 raise vimconn.vimconnConnectionException("Tenant name is empty.")
571
572 org, vdc = self.get_vdc_details()
573 if vdc is None:
574 raise vimconn.vimconnConnectionException("Can't retrieve information for a VDC {}".format(self.tenant_name))
575
576 vdc_uuid = vdc.get('id').split(":")[3]
577 if self.client._session:
578 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
579 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
580 response = self.perform_request(req_type='GET',
581 url=vdc.get('href'),
582 headers=headers)
583 if response.status_code != 200:
584 self.logger.error("Failed to get vdc content")
585 raise vimconn.vimconnNotFoundException("Failed to get vdc content")
586 else:
587 content = XmlElementTree.fromstring(response.text)
588
589 network_list = []
590 try:
591 for item in content:
592 if item.tag.split('}')[-1] == 'AvailableNetworks':
593 for net in item:
594 response = self.perform_request(req_type='GET',
595 url=net.get('href'),
596 headers=headers)
597
598 if response.status_code != 200:
599 self.logger.error("Failed to get network content")
600 raise vimconn.vimconnNotFoundException("Failed to get network content")
601 else:
602 net_details = XmlElementTree.fromstring(response.text)
603
604 filter_dict = {}
605 net_uuid = net_details.get('id').split(":")
606 if len(net_uuid) != 4:
607 continue
608 else:
609 net_uuid = net_uuid[3]
610 # create dict entry
611 self.logger.debug("get_vcd_network_list(): Adding network {} "
612 "to a list vcd id {} network {}".format(net_uuid,
613 vdc_uuid,
614 net_details.get('name')))
615 filter_dict["name"] = net_details.get('name')
616 filter_dict["id"] = net_uuid
617 if [i.text for i in net_details if i.tag.split('}')[-1] == 'IsShared'][0] == 'true':
618 shared = True
619 else:
620 shared = False
621 filter_dict["shared"] = shared
622 filter_dict["tenant_id"] = vdc_uuid
623 if int(net_details.get('status')) == 1:
624 filter_dict["admin_state_up"] = True
625 else:
626 filter_dict["admin_state_up"] = False
627 filter_dict["status"] = "ACTIVE"
628 filter_dict["type"] = "bridge"
629 network_list.append(filter_dict)
630 self.logger.debug("get_vcd_network_list adding entry {}".format(filter_dict))
631 except:
632 self.logger.debug("Error in get_vcd_network_list", exc_info=True)
633 pass
634
635 self.logger.debug("get_vcd_network_list returning {}".format(network_list))
636 return network_list
637
638 def get_network_list(self, filter_dict={}):
639 """Obtain tenant networks of VIM
640 Filter_dict can be:
641 name: network name OR/AND
642 id: network uuid OR/AND
643 shared: boolean OR/AND
644 tenant_id: tenant OR/AND
645 admin_state_up: boolean
646 status: 'ACTIVE'
647
648 [{key : value , key : value}]
649
650 Returns the network list of dictionaries:
651 [{<the fields at Filter_dict plus some VIM specific>}, ...]
652 List can be empty
653 """
654
655 self.logger.debug("get_network_list(): retrieving network list for vcd {}".format(self.tenant_name))
656
657 if not self.tenant_name:
658 raise vimconn.vimconnConnectionException("Tenant name is empty.")
659
660 org, vdc = self.get_vdc_details()
661 if vdc is None:
662 raise vimconn.vimconnConnectionException("Can't retrieve information for a VDC {}.".format(self.tenant_name))
663
664 try:
665 vdcid = vdc.get('id').split(":")[3]
666
667 if self.client._session:
668 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
669 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
670 response = self.perform_request(req_type='GET',
671 url=vdc.get('href'),
672 headers=headers)
673 if response.status_code != 200:
674 self.logger.error("Failed to get vdc content")
675 raise vimconn.vimconnNotFoundException("Failed to get vdc content")
676 else:
677 content = XmlElementTree.fromstring(response.text)
678
679 network_list = []
680 for item in content:
681 if item.tag.split('}')[-1] == 'AvailableNetworks':
682 for net in item:
683 response = self.perform_request(req_type='GET',
684 url=net.get('href'),
685 headers=headers)
686
687 if response.status_code != 200:
688 self.logger.error("Failed to get network content")
689 raise vimconn.vimconnNotFoundException("Failed to get network content")
690 else:
691 net_details = XmlElementTree.fromstring(response.text)
692
693 filter_entry = {}
694 net_uuid = net_details.get('id').split(":")
695 if len(net_uuid) != 4:
696 continue
697 else:
698 net_uuid = net_uuid[3]
699 # create dict entry
700 self.logger.debug("get_network_list(): Adding net {}"
701 " to a list vcd id {} network {}".format(net_uuid,
702 vdcid,
703 net_details.get('name')))
704 filter_entry["name"] = net_details.get('name')
705 filter_entry["id"] = net_uuid
706 if [i.text for i in net_details if i.tag.split('}')[-1] == 'IsShared'][0] == 'true':
707 shared = True
708 else:
709 shared = False
710 filter_entry["shared"] = shared
711 filter_entry["tenant_id"] = vdcid
712 if int(net_details.get('status')) == 1:
713 filter_entry["admin_state_up"] = True
714 else:
715 filter_entry["admin_state_up"] = False
716 filter_entry["status"] = "ACTIVE"
717 filter_entry["type"] = "bridge"
718 filtered_entry = filter_entry.copy()
719
720 if filter_dict is not None and filter_dict:
721 # we remove all the key : value we don't care and match only
722 # respected field
723 filtered_dict = set(filter_entry.keys()) - set(filter_dict)
724 for unwanted_key in filtered_dict: del filter_entry[unwanted_key]
725 if filter_dict == filter_entry:
726 network_list.append(filtered_entry)
727 else:
728 network_list.append(filtered_entry)
729 except Exception as e:
730 self.logger.debug("Error in get_network_list",exc_info=True)
731 if isinstance(e, vimconn.vimconnException):
732 raise
733 else:
734 raise vimconn.vimconnNotFoundException("Failed : Networks list not found {} ".format(e))
735
736 self.logger.debug("Returning {}".format(network_list))
737 return network_list
738
739 def get_network(self, net_id):
740 """Method obtains network details of net_id VIM network
741 Return a dict with the fields at filter_dict (see get_network_list) plus some VIM specific>}, ...]"""
742
743 try:
744 org, vdc = self.get_vdc_details()
745 vdc_id = vdc.get('id').split(":")[3]
746 if self.client._session:
747 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
748 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
749 response = self.perform_request(req_type='GET',
750 url=vdc.get('href'),
751 headers=headers)
752 if response.status_code != 200:
753 self.logger.error("Failed to get vdc content")
754 raise vimconn.vimconnNotFoundException("Failed to get vdc content")
755 else:
756 content = XmlElementTree.fromstring(response.text)
757
758 filter_dict = {}
759
760 for item in content:
761 if item.tag.split('}')[-1] == 'AvailableNetworks':
762 for net in item:
763 response = self.perform_request(req_type='GET',
764 url=net.get('href'),
765 headers=headers)
766
767 if response.status_code != 200:
768 self.logger.error("Failed to get network content")
769 raise vimconn.vimconnNotFoundException("Failed to get network content")
770 else:
771 net_details = XmlElementTree.fromstring(response.text)
772
773 vdc_network_id = net_details.get('id').split(":")
774 if len(vdc_network_id) == 4 and vdc_network_id[3] == net_id:
775 filter_dict["name"] = net_details.get('name')
776 filter_dict["id"] = vdc_network_id[3]
777 if [i.text for i in net_details if i.tag.split('}')[-1] == 'IsShared'][0] == 'true':
778 shared = True
779 else:
780 shared = False
781 filter_dict["shared"] = shared
782 filter_dict["tenant_id"] = vdc_id
783 if int(net_details.get('status')) == 1:
784 filter_dict["admin_state_up"] = True
785 else:
786 filter_dict["admin_state_up"] = False
787 filter_dict["status"] = "ACTIVE"
788 filter_dict["type"] = "bridge"
789 self.logger.debug("Returning {}".format(filter_dict))
790 return filter_dict
791 else:
792 raise vimconn.vimconnNotFoundException("Network {} not found".format(net_id))
793 except Exception as e:
794 self.logger.debug("Error in get_network")
795 self.logger.debug(traceback.format_exc())
796 if isinstance(e, vimconn.vimconnException):
797 raise
798 else:
799 raise vimconn.vimconnNotFoundException("Failed : Network not found {} ".format(e))
800
801 return filter_dict
802
803 def delete_network(self, net_id, created_items=None):
804 """
805 Removes a tenant network from VIM and its associated elements
806 :param net_id: VIM identifier of the network, provided by method new_network
807 :param created_items: dictionary with extra items to be deleted. provided by method new_network
808 Returns the network identifier or raises an exception upon error or when network is not found
809 """
810
811 # ############# Stub code for SRIOV #################
812 # dvport_group = self.get_dvport_group(net_id)
813 # if dvport_group:
814 # #delete portgroup
815 # status = self.destroy_dvport_group(net_id)
816 # if status:
817 # # Remove vlanID from persistent info
818 # if net_id in self.persistent_info["used_vlanIDs"]:
819 # del self.persistent_info["used_vlanIDs"][net_id]
820 #
821 # return net_id
822
823 vcd_network = self.get_vcd_network(network_uuid=net_id)
824 if vcd_network is not None and vcd_network:
825 if self.delete_network_action(network_uuid=net_id):
826 return net_id
827 else:
828 raise vimconn.vimconnNotFoundException("Network {} not found".format(net_id))
829
830 def refresh_nets_status(self, net_list):
831 """Get the status of the networks
832 Params: the list of network identifiers
833 Returns a dictionary with:
834 net_id: #VIM id of this network
835 status: #Mandatory. Text with one of:
836 # DELETED (not found at vim)
837 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
838 # OTHER (Vim reported other status not understood)
839 # ERROR (VIM indicates an ERROR status)
840 # ACTIVE, INACTIVE, DOWN (admin down),
841 # BUILD (on building process)
842 #
843 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
844 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
845
846 """
847
848 dict_entry = {}
849 try:
850 for net in net_list:
851 errormsg = ''
852 vcd_network = self.get_vcd_network(network_uuid=net)
853 if vcd_network is not None and vcd_network:
854 if vcd_network['status'] == '1':
855 status = 'ACTIVE'
856 else:
857 status = 'DOWN'
858 else:
859 status = 'DELETED'
860 errormsg = 'Network not found.'
861
862 dict_entry[net] = {'status': status, 'error_msg': errormsg,
863 'vim_info': yaml.safe_dump(vcd_network)}
864 except:
865 self.logger.debug("Error in refresh_nets_status")
866 self.logger.debug(traceback.format_exc())
867
868 return dict_entry
869
870 def get_flavor(self, flavor_id):
871 """Obtain flavor details from the VIM
872 Returns the flavor dict details {'id':<>, 'name':<>, other vim specific } #TODO to concrete
873 """
874 if flavor_id not in vimconnector.flavorlist:
875 raise vimconn.vimconnNotFoundException("Flavor not found.")
876 return vimconnector.flavorlist[flavor_id]
877
878 def new_flavor(self, flavor_data):
879 """Adds a tenant flavor to VIM
880 flavor_data contains a dictionary with information, keys:
881 name: flavor name
882 ram: memory (cloud type) in MBytes
883 vpcus: cpus (cloud type)
884 extended: EPA parameters
885 - numas: #items requested in same NUMA
886 memory: number of 1G huge pages memory
887 paired-threads|cores|threads: number of paired hyperthreads, complete cores OR individual threads
888 interfaces: # passthrough(PT) or SRIOV interfaces attached to this numa
889 - name: interface name
890 dedicated: yes|no|yes:sriov; for PT, SRIOV or only one SRIOV for the physical NIC
891 bandwidth: X Gbps; requested guarantee bandwidth
892 vpci: requested virtual PCI address
893 disk: disk size
894 is_public:
895 #TODO to concrete
896 Returns the flavor identifier"""
897
898 # generate a new uuid put to internal dict and return it.
899 self.logger.debug("Creating new flavor - flavor_data: {}".format(flavor_data))
900 new_flavor=flavor_data
901 ram = flavor_data.get(FLAVOR_RAM_KEY, 1024)
902 cpu = flavor_data.get(FLAVOR_VCPUS_KEY, 1)
903 disk = flavor_data.get(FLAVOR_DISK_KEY, 0)
904
905 if not isinstance(ram, int):
906 raise vimconn.vimconnException("Non-integer value for ram")
907 elif not isinstance(cpu, int):
908 raise vimconn.vimconnException("Non-integer value for cpu")
909 elif not isinstance(disk, int):
910 raise vimconn.vimconnException("Non-integer value for disk")
911
912 extended_flv = flavor_data.get("extended")
913 if extended_flv:
914 numas=extended_flv.get("numas")
915 if numas:
916 for numa in numas:
917 #overwrite ram and vcpus
918 if 'memory' in numa:
919 ram = numa['memory']*1024
920 if 'paired-threads' in numa:
921 cpu = numa['paired-threads']*2
922 elif 'cores' in numa:
923 cpu = numa['cores']
924 elif 'threads' in numa:
925 cpu = numa['threads']
926
927 new_flavor[FLAVOR_RAM_KEY] = ram
928 new_flavor[FLAVOR_VCPUS_KEY] = cpu
929 new_flavor[FLAVOR_DISK_KEY] = disk
930 # generate a new uuid put to internal dict and return it.
931 flavor_id = uuid.uuid4()
932 vimconnector.flavorlist[str(flavor_id)] = new_flavor
933 self.logger.debug("Created flavor - {} : {}".format(flavor_id, new_flavor))
934
935 return str(flavor_id)
936
937 def delete_flavor(self, flavor_id):
938 """Deletes a tenant flavor from VIM identify by its id
939
940 Returns the used id or raise an exception
941 """
942 if flavor_id not in vimconnector.flavorlist:
943 raise vimconn.vimconnNotFoundException("Flavor not found.")
944
945 vimconnector.flavorlist.pop(flavor_id, None)
946 return flavor_id
947
948 def new_image(self, image_dict):
949 """
950 Adds a tenant image to VIM
951 Returns:
952 200, image-id if the image is created
953 <0, message if there is an error
954 """
955
956 return self.get_image_id_from_path(image_dict['location'])
957
958 def delete_image(self, image_id):
959 """
960 Deletes a tenant image from VIM
961 Args:
962 image_id is ID of Image to be deleted
963 Return:
964 returns the image identifier in UUID format or raises an exception on error
965 """
966 conn = self.connect_as_admin()
967 if not conn:
968 raise vimconn.vimconnConnectionException("Failed to connect vCD")
969 # Get Catalog details
970 url_list = [self.url, '/api/catalog/', image_id]
971 catalog_herf = ''.join(url_list)
972
973 headers = {'Accept': 'application/*+xml;version=' + API_VERSION,
974 'x-vcloud-authorization': conn._session.headers['x-vcloud-authorization']}
975
976 response = self.perform_request(req_type='GET',
977 url=catalog_herf,
978 headers=headers)
979
980 if response.status_code != requests.codes.ok:
981 self.logger.debug("delete_image():GET REST API call {} failed. "
982 "Return status code {}".format(catalog_herf,
983 response.status_code))
984 raise vimconn.vimconnNotFoundException("Fail to get image {}".format(image_id))
985
986 lxmlroot_respond = lxmlElementTree.fromstring(response.content)
987 namespaces = {prefix: uri for prefix, uri in lxmlroot_respond.nsmap.items() if prefix}
988 namespaces["xmlns"] = "http://www.vmware.com/vcloud/v1.5"
989
990 catalogItems_section = lxmlroot_respond.find("xmlns:CatalogItems", namespaces)
991 catalogItems = catalogItems_section.iterfind("xmlns:CatalogItem", namespaces)
992 for catalogItem in catalogItems:
993 catalogItem_href = catalogItem.attrib['href']
994
995 response = self.perform_request(req_type='GET',
996 url=catalogItem_href,
997 headers=headers)
998
999 if response.status_code != requests.codes.ok:
1000 self.logger.debug("delete_image():GET REST API call {} failed. "
1001 "Return status code {}".format(catalog_herf,
1002 response.status_code))
1003 raise vimconn.vimconnNotFoundException("Fail to get catalogItem {} for catalog {}".format(
1004 catalogItem,
1005 image_id))
1006
1007 lxmlroot_respond = lxmlElementTree.fromstring(response.content)
1008 namespaces = {prefix: uri for prefix, uri in lxmlroot_respond.nsmap.items() if prefix}
1009 namespaces["xmlns"] = "http://www.vmware.com/vcloud/v1.5"
1010 catalogitem_remove_href = lxmlroot_respond.find("xmlns:Link[@rel='remove']", namespaces).attrib['href']
1011
1012 # Remove catalogItem
1013 response = self.perform_request(req_type='DELETE',
1014 url=catalogitem_remove_href,
1015 headers=headers)
1016 if response.status_code == requests.codes.no_content:
1017 self.logger.debug("Deleted Catalog item {}".format(catalogItem))
1018 else:
1019 raise vimconn.vimconnException("Fail to delete Catalog Item {}".format(catalogItem))
1020
1021 # Remove catalog
1022 url_list = [self.url, '/api/admin/catalog/', image_id]
1023 catalog_remove_herf = ''.join(url_list)
1024 response = self.perform_request(req_type='DELETE',
1025 url=catalog_remove_herf,
1026 headers=headers)
1027
1028 if response.status_code == requests.codes.no_content:
1029 self.logger.debug("Deleted Catalog {}".format(image_id))
1030 return image_id
1031 else:
1032 raise vimconn.vimconnException("Fail to delete Catalog {}".format(image_id))
1033
1034 def catalog_exists(self, catalog_name, catalogs):
1035 """
1036
1037 :param catalog_name:
1038 :param catalogs:
1039 :return:
1040 """
1041 for catalog in catalogs:
1042 if catalog['name'] == catalog_name:
1043 return catalog['id']
1044
1045 def create_vimcatalog(self, vca=None, catalog_name=None):
1046 """ Create new catalog entry in vCloud director.
1047
1048 Args
1049 vca: vCloud director.
1050 catalog_name catalog that client wish to create. Note no validation done for a name.
1051 Client must make sure that provide valid string representation.
1052
1053 Returns catalog id if catalog created else None.
1054
1055 """
1056 try:
1057 lxml_catalog_element = vca.create_catalog(catalog_name, catalog_name)
1058 if lxml_catalog_element:
1059 id_attr_value = lxml_catalog_element.get('id') # 'urn:vcloud:catalog:7490d561-d384-4dac-8229-3575fd1fc7b4'
1060 return id_attr_value.split(':')[-1]
1061 catalogs = vca.list_catalogs()
1062 except Exception as ex:
1063 self.logger.error(
1064 'create_vimcatalog(): Creation of catalog "{}" failed with error: {}'.format(catalog_name, ex))
1065 raise
1066 return self.catalog_exists(catalog_name, catalogs)
1067
1068 # noinspection PyIncorrectDocstring
1069 def upload_ovf(self, vca=None, catalog_name=None, image_name=None, media_file_name=None,
1070 description='', progress=False, chunk_bytes=128 * 1024):
1071 """
1072 Uploads a OVF file to a vCloud catalog
1073
1074 :param chunk_bytes:
1075 :param progress:
1076 :param description:
1077 :param image_name:
1078 :param vca:
1079 :param catalog_name: (str): The name of the catalog to upload the media.
1080 :param media_file_name: (str): The name of the local media file to upload.
1081 :return: (bool) True if the media file was successfully uploaded, false otherwise.
1082 """
1083 os.path.isfile(media_file_name)
1084 statinfo = os.stat(media_file_name)
1085
1086 # find a catalog entry where we upload OVF.
1087 # create vApp Template and check the status if vCD able to read OVF it will respond with appropirate
1088 # status change.
1089 # if VCD can parse OVF we upload VMDK file
1090 try:
1091 for catalog in vca.list_catalogs():
1092 if catalog_name != catalog['name']:
1093 continue
1094 catalog_href = "{}/api/catalog/{}/action/upload".format(self.url, catalog['id'])
1095 data = """
1096 <UploadVAppTemplateParams name="{}" xmlns="http://www.vmware.com/vcloud/v1.5" xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1"><Description>{} vApp Template</Description></UploadVAppTemplateParams>
1097 """.format(catalog_name, description)
1098
1099 if self.client:
1100 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
1101 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
1102 headers['Content-Type'] = 'application/vnd.vmware.vcloud.uploadVAppTemplateParams+xml'
1103
1104 response = self.perform_request(req_type='POST',
1105 url=catalog_href,
1106 headers=headers,
1107 data=data)
1108
1109 if response.status_code == requests.codes.created:
1110 catalogItem = XmlElementTree.fromstring(response.text)
1111 entity = [child for child in catalogItem if
1112 child.get("type") == "application/vnd.vmware.vcloud.vAppTemplate+xml"][0]
1113 href = entity.get('href')
1114 template = href
1115
1116 response = self.perform_request(req_type='GET',
1117 url=href,
1118 headers=headers)
1119
1120 if response.status_code == requests.codes.ok:
1121 headers['Content-Type'] = 'Content-Type text/xml'
1122 result = re.search('rel="upload:default"\shref="(.*?\/descriptor.ovf)"', response.text)
1123 if result:
1124 transfer_href = result.group(1)
1125
1126 response = self.perform_request(req_type='PUT',
1127 url=transfer_href,
1128 headers=headers,
1129 data=open(media_file_name, 'rb'))
1130 if response.status_code != requests.codes.ok:
1131 self.logger.debug(
1132 "Failed create vApp template for catalog name {} and image {}".format(catalog_name,
1133 media_file_name))
1134 return False
1135
1136 # TODO fix this with aync block
1137 time.sleep(5)
1138
1139 self.logger.debug("vApp template for catalog name {} and image {}".format(catalog_name, media_file_name))
1140
1141 # uploading VMDK file
1142 # check status of OVF upload and upload remaining files.
1143 response = self.perform_request(req_type='GET',
1144 url=template,
1145 headers=headers)
1146
1147 if response.status_code == requests.codes.ok:
1148 result = re.search('rel="upload:default"\s*href="(.*?vmdk)"', response.text)
1149 if result:
1150 link_href = result.group(1)
1151 # we skip ovf since it already uploaded.
1152 if 'ovf' in link_href:
1153 continue
1154 # The OVF file and VMDK must be in a same directory
1155 head, tail = os.path.split(media_file_name)
1156 file_vmdk = head + '/' + link_href.split("/")[-1]
1157 if not os.path.isfile(file_vmdk):
1158 return False
1159 statinfo = os.stat(file_vmdk)
1160 if statinfo.st_size == 0:
1161 return False
1162 hrefvmdk = link_href
1163
1164 if progress:
1165 widgets = ['Uploading file: ', Percentage(), ' ', Bar(), ' ', ETA(), ' ',
1166 FileTransferSpeed()]
1167 progress_bar = ProgressBar(widgets=widgets, maxval=statinfo.st_size).start()
1168
1169 bytes_transferred = 0
1170 f = open(file_vmdk, 'rb')
1171 while bytes_transferred < statinfo.st_size:
1172 my_bytes = f.read(chunk_bytes)
1173 if len(my_bytes) <= chunk_bytes:
1174 headers['Content-Range'] = 'bytes {}-{}/{}'.format(
1175 bytes_transferred, len(my_bytes) - 1, statinfo.st_size)
1176 headers['Content-Length'] = str(len(my_bytes))
1177 response = requests.put(url=hrefvmdk,
1178 headers=headers,
1179 data=my_bytes,
1180 verify=False)
1181 if response.status_code == requests.codes.ok:
1182 bytes_transferred += len(my_bytes)
1183 if progress:
1184 progress_bar.update(bytes_transferred)
1185 else:
1186 self.logger.debug(
1187 'file upload failed with error: [{}] {}'.format(response.status_code,
1188 response.text))
1189
1190 f.close()
1191 return False
1192 f.close()
1193 if progress:
1194 progress_bar.finish()
1195 time.sleep(10)
1196 return True
1197 else:
1198 self.logger.debug("Failed retrieve vApp template for catalog name {} for OVF {}".
1199 format(catalog_name, media_file_name))
1200 return False
1201 except Exception as exp:
1202 self.logger.debug("Failed while uploading OVF to catalog {} for OVF file {} with Exception {}"
1203 .format(catalog_name,media_file_name, exp))
1204 raise vimconn.vimconnException(
1205 "Failed while uploading OVF to catalog {} for OVF file {} with Exception {}"
1206 .format(catalog_name,media_file_name, exp))
1207
1208 self.logger.debug("Failed retrieve catalog name {} for OVF file {}".format(catalog_name, media_file_name))
1209 return False
1210
1211 def upload_vimimage(self, vca=None, catalog_name=None, media_name=None, medial_file_name=None, progress=False):
1212 """Upload media file"""
1213 # TODO add named parameters for readability
1214
1215 return self.upload_ovf(vca=vca, catalog_name=catalog_name, image_name=media_name.split(".")[0],
1216 media_file_name=medial_file_name, description='medial_file_name', progress=progress)
1217
1218 def validate_uuid4(self, uuid_string=None):
1219 """ Method validate correct format of UUID.
1220
1221 Return: true if string represent valid uuid
1222 """
1223 try:
1224 val = uuid.UUID(uuid_string, version=4)
1225 except ValueError:
1226 return False
1227 return True
1228
1229 def get_catalogid(self, catalog_name=None, catalogs=None):
1230 """ Method check catalog and return catalog ID in UUID format.
1231
1232 Args
1233 catalog_name: catalog name as string
1234 catalogs: list of catalogs.
1235
1236 Return: catalogs uuid
1237 """
1238
1239 for catalog in catalogs:
1240 if catalog['name'] == catalog_name:
1241 catalog_id = catalog['id']
1242 return catalog_id
1243 return None
1244
1245 def get_catalogbyid(self, catalog_uuid=None, catalogs=None):
1246 """ Method check catalog and return catalog name lookup done by catalog UUID.
1247
1248 Args
1249 catalog_name: catalog name as string
1250 catalogs: list of catalogs.
1251
1252 Return: catalogs name or None
1253 """
1254
1255 if not self.validate_uuid4(uuid_string=catalog_uuid):
1256 return None
1257
1258 for catalog in catalogs:
1259 catalog_id = catalog.get('id')
1260 if catalog_id == catalog_uuid:
1261 return catalog.get('name')
1262 return None
1263
1264 def get_catalog_obj(self, catalog_uuid=None, catalogs=None):
1265 """ Method check catalog and return catalog name lookup done by catalog UUID.
1266
1267 Args
1268 catalog_name: catalog name as string
1269 catalogs: list of catalogs.
1270
1271 Return: catalogs name or None
1272 """
1273
1274 if not self.validate_uuid4(uuid_string=catalog_uuid):
1275 return None
1276
1277 for catalog in catalogs:
1278 catalog_id = catalog.get('id')
1279 if catalog_id == catalog_uuid:
1280 return catalog
1281 return None
1282
1283 def get_image_id_from_path(self, path=None, progress=False):
1284 """ Method upload OVF image to vCloud director.
1285
1286 Each OVF image represented as single catalog entry in vcloud director.
1287 The method check for existing catalog entry. The check done by file name without file extension.
1288
1289 if given catalog name already present method will respond with existing catalog uuid otherwise
1290 it will create new catalog entry and upload OVF file to newly created catalog.
1291
1292 If method can't create catalog entry or upload a file it will throw exception.
1293
1294 Method accept boolean flag progress that will output progress bar. It useful method
1295 for standalone upload use case. In case to test large file upload.
1296
1297 Args
1298 path: - valid path to OVF file.
1299 progress - boolean progress bar show progress bar.
1300
1301 Return: if image uploaded correct method will provide image catalog UUID.
1302 """
1303
1304 if not path:
1305 raise vimconn.vimconnException("Image path can't be None.")
1306
1307 if not os.path.isfile(path):
1308 raise vimconn.vimconnException("Can't read file. File not found.")
1309
1310 if not os.access(path, os.R_OK):
1311 raise vimconn.vimconnException("Can't read file. Check file permission to read.")
1312
1313 self.logger.debug("get_image_id_from_path() client requesting {} ".format(path))
1314
1315 dirpath, filename = os.path.split(path)
1316 flname, file_extension = os.path.splitext(path)
1317 if file_extension != '.ovf':
1318 self.logger.debug("Wrong file extension {} connector support only OVF container.".format(file_extension))
1319 raise vimconn.vimconnException("Wrong container. vCloud director supports only OVF.")
1320
1321 catalog_name = os.path.splitext(filename)[0]
1322 catalog_md5_name = hashlib.md5(path.encode('utf-8')).hexdigest()
1323 self.logger.debug("File name {} Catalog Name {} file path {} "
1324 "vdc catalog name {}".format(filename, catalog_name, path, catalog_md5_name))
1325
1326 try:
1327 org,vdc = self.get_vdc_details()
1328 catalogs = org.list_catalogs()
1329 except Exception as exp:
1330 self.logger.debug("Failed get catalogs() with Exception {} ".format(exp))
1331 raise vimconn.vimconnException("Failed get catalogs() with Exception {} ".format(exp))
1332
1333 if len(catalogs) == 0:
1334 self.logger.info("Creating a new catalog entry {} in vcloud director".format(catalog_name))
1335 if self.create_vimcatalog(org, catalog_md5_name) is None:
1336 raise vimconn.vimconnException("Failed create new catalog {} ".format(catalog_md5_name))
1337
1338 result = self.upload_vimimage(vca=org, catalog_name=catalog_md5_name,
1339 media_name=filename, medial_file_name=path, progress=progress)
1340 if not result:
1341 raise vimconn.vimconnException("Failed create vApp template for catalog {} ".format(catalog_name))
1342 return self.get_catalogid(catalog_name, catalogs)
1343 else:
1344 for catalog in catalogs:
1345 # search for existing catalog if we find same name we return ID
1346 # TODO optimize this
1347 if catalog['name'] == catalog_md5_name:
1348 self.logger.debug("Found existing catalog entry for {} "
1349 "catalog id {}".format(catalog_name,
1350 self.get_catalogid(catalog_md5_name, catalogs)))
1351 return self.get_catalogid(catalog_md5_name, catalogs)
1352
1353 # if we didn't find existing catalog we create a new one and upload image.
1354 self.logger.debug("Creating new catalog entry {} - {}".format(catalog_name, catalog_md5_name))
1355 if self.create_vimcatalog(org, catalog_md5_name) is None:
1356 raise vimconn.vimconnException("Failed create new catalog {} ".format(catalog_md5_name))
1357
1358 result = self.upload_vimimage(vca=org, catalog_name=catalog_md5_name,
1359 media_name=filename, medial_file_name=path, progress=progress)
1360 if not result:
1361 raise vimconn.vimconnException("Failed create vApp template for catalog {} ".format(catalog_md5_name))
1362
1363 return self.get_catalogid(catalog_md5_name, org.list_catalogs())
1364
1365 def get_image_list(self, filter_dict={}):
1366 '''Obtain tenant images from VIM
1367 Filter_dict can be:
1368 name: image name
1369 id: image uuid
1370 checksum: image checksum
1371 location: image path
1372 Returns the image list of dictionaries:
1373 [{<the fields at Filter_dict plus some VIM specific>}, ...]
1374 List can be empty
1375 '''
1376
1377 try:
1378 org, vdc = self.get_vdc_details()
1379 image_list = []
1380 catalogs = org.list_catalogs()
1381 if len(catalogs) == 0:
1382 return image_list
1383 else:
1384 for catalog in catalogs:
1385 catalog_uuid = catalog.get('id')
1386 name = catalog.get('name')
1387 filtered_dict = {}
1388 if filter_dict.get("name") and filter_dict["name"] != name:
1389 continue
1390 if filter_dict.get("id") and filter_dict["id"] != catalog_uuid:
1391 continue
1392 filtered_dict ["name"] = name
1393 filtered_dict ["id"] = catalog_uuid
1394 image_list.append(filtered_dict)
1395
1396 self.logger.debug("List of already created catalog items: {}".format(image_list))
1397 return image_list
1398 except Exception as exp:
1399 raise vimconn.vimconnException("Exception occured while retriving catalog items {}".format(exp))
1400
1401 def get_vappid(self, vdc=None, vapp_name=None):
1402 """ Method takes vdc object and vApp name and returns vapp uuid or None
1403
1404 Args:
1405 vdc: The VDC object.
1406 vapp_name: is application vappp name identifier
1407
1408 Returns:
1409 The return vApp name otherwise None
1410 """
1411 if vdc is None or vapp_name is None:
1412 return None
1413 # UUID has following format https://host/api/vApp/vapp-30da58a3-e7c7-4d09-8f68-d4c8201169cf
1414 try:
1415 refs = [ref for ref in vdc.ResourceEntities.ResourceEntity \
1416 if ref.name == vapp_name and ref.type_ == 'application/vnd.vmware.vcloud.vApp+xml']
1417 if len(refs) == 1:
1418 return refs[0].href.split("vapp")[1][1:]
1419 except Exception as e:
1420 self.logger.exception(e)
1421 return False
1422 return None
1423
1424 def check_vapp(self, vdc=None, vapp_uuid=None):
1425 """ Method Method returns True or False if vapp deployed in vCloud director
1426
1427 Args:
1428 vca: Connector to VCA
1429 vdc: The VDC object.
1430 vappid: vappid is application identifier
1431
1432 Returns:
1433 The return True if vApp deployed
1434 :param vdc:
1435 :param vapp_uuid:
1436 """
1437 try:
1438 refs = [ref for ref in vdc.ResourceEntities.ResourceEntity\
1439 if ref.type_ == 'application/vnd.vmware.vcloud.vApp+xml']
1440 for ref in refs:
1441 vappid = ref.href.split("vapp")[1][1:]
1442 # find vapp with respected vapp uuid
1443 if vappid == vapp_uuid:
1444 return True
1445 except Exception as e:
1446 self.logger.exception(e)
1447 return False
1448 return False
1449
1450 def get_namebyvappid(self, vapp_uuid=None):
1451 """Method returns vApp name from vCD and lookup done by vapp_id.
1452
1453 Args:
1454 vapp_uuid: vappid is application identifier
1455
1456 Returns:
1457 The return vApp name otherwise None
1458 """
1459 try:
1460 if self.client and vapp_uuid:
1461 vapp_call = "{}/api/vApp/vapp-{}".format(self.url, vapp_uuid)
1462 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
1463 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
1464
1465 response = self.perform_request(req_type='GET',
1466 url=vapp_call,
1467 headers=headers)
1468 #Retry login if session expired & retry sending request
1469 if response.status_code == 403:
1470 response = self.retry_rest('GET', vapp_call)
1471
1472 tree = XmlElementTree.fromstring(response.text)
1473 return tree.attrib['name'] if 'name' in tree.attrib else None
1474 except Exception as e:
1475 self.logger.exception(e)
1476 return None
1477 return None
1478
1479 def new_vminstance(self, name=None, description="", start=False, image_id=None, flavor_id=None, net_list=[],
1480 cloud_config=None, disk_list=None, availability_zone_index=None, availability_zone_list=None):
1481 """Adds a VM instance to VIM
1482 Params:
1483 'start': (boolean) indicates if VM must start or created in pause mode.
1484 'image_id','flavor_id': image and flavor VIM id to use for the VM
1485 'net_list': list of interfaces, each one is a dictionary with:
1486 'name': (optional) name for the interface.
1487 'net_id': VIM network id where this interface must be connect to. Mandatory for type==virtual
1488 'vpci': (optional) virtual vPCI address to assign at the VM. Can be ignored depending on VIM capabilities
1489 'model': (optional and only have sense for type==virtual) interface model: virtio, e1000, ...
1490 'mac_address': (optional) mac address to assign to this interface
1491 #TODO: CHECK if an optional 'vlan' parameter is needed for VIMs when type if VF and net_id is not provided,
1492 the VLAN tag to be used. In case net_id is provided, the internal network vlan is used for tagging VF
1493 'type': (mandatory) can be one of:
1494 'virtual', in this case always connected to a network of type 'net_type=bridge'
1495 'PCI-PASSTHROUGH' or 'PF' (passthrough): depending on VIM capabilities it can be connected to a data/ptp network ot it
1496 can created unconnected
1497 'SR-IOV' or 'VF' (SRIOV with VLAN tag): same as PF for network connectivity.
1498 'VFnotShared'(SRIOV without VLAN tag) same as PF for network connectivity. VF where no other VFs
1499 are allocated on the same physical NIC
1500 'bw': (optional) only for PF/VF/VFnotShared. Minimal Bandwidth required for the interface in GBPS
1501 'port_security': (optional) If False it must avoid any traffic filtering at this interface. If missing
1502 or True, it must apply the default VIM behaviour
1503 After execution the method will add the key:
1504 'vim_id': must be filled/added by this method with the VIM identifier generated by the VIM for this
1505 interface. 'net_list' is modified
1506 'cloud_config': (optional) dictionary with:
1507 'key-pairs': (optional) list of strings with the public key to be inserted to the default user
1508 'users': (optional) list of users to be inserted, each item is a dict with:
1509 'name': (mandatory) user name,
1510 'key-pairs': (optional) list of strings with the public key to be inserted to the user
1511 'user-data': (optional) can be a string with the text script to be passed directly to cloud-init,
1512 or a list of strings, each one contains a script to be passed, usually with a MIMEmultipart file
1513 'config-files': (optional). List of files to be transferred. Each item is a dict with:
1514 'dest': (mandatory) string with the destination absolute path
1515 'encoding': (optional, by default text). Can be one of:
1516 'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
1517 'content' (mandatory): string with the content of the file
1518 'permissions': (optional) string with file permissions, typically octal notation '0644'
1519 'owner': (optional) file owner, string with the format 'owner:group'
1520 'boot-data-drive': boolean to indicate if user-data must be passed using a boot drive (hard disk)
1521 'disk_list': (optional) list with additional disks to the VM. Each item is a dict with:
1522 'image_id': (optional). VIM id of an existing image. If not provided an empty disk must be mounted
1523 'size': (mandatory) string with the size of the disk in GB
1524 availability_zone_index: Index of availability_zone_list to use for this this VM. None if not AV required
1525 availability_zone_list: list of availability zones given by user in the VNFD descriptor. Ignore if
1526 availability_zone_index is None
1527 Returns a tuple with the instance identifier and created_items or raises an exception on error
1528 created_items can be None or a dictionary where this method can include key-values that will be passed to
1529 the method delete_vminstance and action_vminstance. Can be used to store created ports, volumes, etc.
1530 Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
1531 as not present.
1532 """
1533 self.logger.info("Creating new instance for entry {}".format(name))
1534 self.logger.debug("desc {} boot {} image_id: {} flavor_id: {} net_list: {} cloud_config {} disk_list {} "\
1535 "availability_zone_index {} availability_zone_list {}"\
1536 .format(description, start, image_id, flavor_id, net_list, cloud_config, disk_list,\
1537 availability_zone_index, availability_zone_list))
1538
1539 #new vm name = vmname + tenant_id + uuid
1540 new_vm_name = [name, '-', str(uuid.uuid4())]
1541 vmname_andid = ''.join(new_vm_name)
1542
1543 for net in net_list:
1544 if net['type'] == "PCI-PASSTHROUGH":
1545 raise vimconn.vimconnNotSupportedException(
1546 "Current vCD version does not support type : {}".format(net['type']))
1547
1548 if len(net_list) > 10:
1549 raise vimconn.vimconnNotSupportedException(
1550 "The VM hardware versions 7 and above support upto 10 NICs only")
1551
1552 # if vm already deployed we return existing uuid
1553 # we check for presence of VDC, Catalog entry and Flavor.
1554 org, vdc = self.get_vdc_details()
1555 if vdc is None:
1556 raise vimconn.vimconnNotFoundException(
1557 "new_vminstance(): Failed create vApp {}: (Failed retrieve VDC information)".format(name))
1558 catalogs = org.list_catalogs()
1559 if catalogs is None:
1560 #Retry once, if failed by refreshing token
1561 self.get_token()
1562 org = Org(self.client, resource=self.client.get_org())
1563 catalogs = org.list_catalogs()
1564 if catalogs is None:
1565 raise vimconn.vimconnNotFoundException(
1566 "new_vminstance(): Failed create vApp {}: (Failed retrieve catalogs list)".format(name))
1567
1568 catalog_hash_name = self.get_catalogbyid(catalog_uuid=image_id, catalogs=catalogs)
1569 if catalog_hash_name:
1570 self.logger.info("Found catalog entry {} for image id {}".format(catalog_hash_name, image_id))
1571 else:
1572 raise vimconn.vimconnNotFoundException("new_vminstance(): Failed create vApp {}: "
1573 "(Failed retrieve catalog information {})".format(name, image_id))
1574
1575 # Set vCPU and Memory based on flavor.
1576 vm_cpus = None
1577 vm_memory = None
1578 vm_disk = None
1579 numas = None
1580
1581 if flavor_id is not None:
1582 if flavor_id not in vimconnector.flavorlist:
1583 raise vimconn.vimconnNotFoundException("new_vminstance(): Failed create vApp {}: "
1584 "Failed retrieve flavor information "
1585 "flavor id {}".format(name, flavor_id))
1586 else:
1587 try:
1588 flavor = vimconnector.flavorlist[flavor_id]
1589 vm_cpus = flavor[FLAVOR_VCPUS_KEY]
1590 vm_memory = flavor[FLAVOR_RAM_KEY]
1591 vm_disk = flavor[FLAVOR_DISK_KEY]
1592 extended = flavor.get("extended", None)
1593 if extended:
1594 numas=extended.get("numas", None)
1595
1596 except Exception as exp:
1597 raise vimconn.vimconnException("Corrupted flavor. {}.Exception: {}".format(flavor_id, exp))
1598
1599 # image upload creates template name as catalog name space Template.
1600 templateName = self.get_catalogbyid(catalog_uuid=image_id, catalogs=catalogs)
1601 power_on = 'false'
1602 if start:
1603 power_on = 'true'
1604
1605 # client must provide at least one entry in net_list if not we report error
1606 #If net type is mgmt, then configure it as primary net & use its NIC index as primary NIC
1607 # If no mgmt, then the 1st NN in netlist is considered as primary net.
1608 primary_net = None
1609 primary_netname = None
1610 primary_net_href = None
1611 network_mode = 'bridged'
1612 if net_list is not None and len(net_list) > 0:
1613 for net in net_list:
1614 if 'use' in net and net['use'] == 'mgmt' and not primary_net:
1615 primary_net = net
1616 if primary_net is None:
1617 primary_net = net_list[0]
1618
1619 try:
1620 primary_net_id = primary_net['net_id']
1621 url_list = [self.url, '/api/network/', primary_net_id]
1622 primary_net_href = ''.join(url_list)
1623 network_dict = self.get_vcd_network(network_uuid=primary_net_id)
1624 if 'name' in network_dict:
1625 primary_netname = network_dict['name']
1626
1627 except KeyError:
1628 raise vimconn.vimconnException("Corrupted flavor. {}".format(primary_net))
1629 else:
1630 raise vimconn.vimconnUnexpectedResponse("new_vminstance(): Failed network list is empty.".format(name))
1631
1632 # use: 'data', 'bridge', 'mgmt'
1633 # create vApp. Set vcpu and ram based on flavor id.
1634 try:
1635 vdc_obj = VDC(self.client, resource=org.get_vdc(self.tenant_name))
1636 if not vdc_obj:
1637 raise vimconn.vimconnNotFoundException("new_vminstance(): Failed to get VDC object")
1638
1639 for retry in (1, 2):
1640 items = org.get_catalog_item(catalog_hash_name, catalog_hash_name)
1641 catalog_items = [items.attrib]
1642
1643 if len(catalog_items) == 1:
1644 if self.client:
1645 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
1646 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
1647
1648 response = self.perform_request(req_type='GET',
1649 url=catalog_items[0].get('href'),
1650 headers=headers)
1651 catalogItem = XmlElementTree.fromstring(response.text)
1652 entity = [child for child in catalogItem if child.get("type") == "application/vnd.vmware.vcloud.vAppTemplate+xml"][0]
1653 vapp_tempalte_href = entity.get("href")
1654
1655 response = self.perform_request(req_type='GET',
1656 url=vapp_tempalte_href,
1657 headers=headers)
1658 if response.status_code != requests.codes.ok:
1659 self.logger.debug("REST API call {} failed. Return status code {}".format(vapp_tempalte_href,
1660 response.status_code))
1661 else:
1662 result = (response.text).replace("\n", " ")
1663
1664 vapp_template_tree = XmlElementTree.fromstring(response.text)
1665 children_element = [child for child in vapp_template_tree if 'Children' in child.tag][0]
1666 vm_element = [child for child in children_element if 'Vm' in child.tag][0]
1667 vm_name = vm_element.get('name')
1668 vm_id = vm_element.get('id')
1669 vm_href = vm_element.get('href')
1670
1671 cpus = re.search('<rasd:Description>Number of Virtual CPUs</.*?>(\d+)</rasd:VirtualQuantity>',result).group(1)
1672 memory_mb = re.search('<rasd:Description>Memory Size</.*?>(\d+)</rasd:VirtualQuantity>',result).group(1)
1673 cores = re.search('<vmw:CoresPerSocket ovf:required.*?>(\d+)</vmw:CoresPerSocket>',result).group(1)
1674
1675 headers['Content-Type'] = 'application/vnd.vmware.vcloud.instantiateVAppTemplateParams+xml'
1676 vdc_id = vdc.get('id').split(':')[-1]
1677 instantiate_vapp_href = "{}/api/vdc/{}/action/instantiateVAppTemplate".format(self.url,
1678 vdc_id)
1679 with open(os.path.join(os.path.dirname(__file__), 'InstantiateVAppTemplateParams.xml'), 'r') as f:
1680 template = f.read()
1681
1682 data = template.format(vmname_andid,
1683 primary_netname,
1684 primary_net_href,
1685 vapp_tempalte_href,
1686 vm_href,
1687 vm_id,
1688 vm_name,
1689 primary_netname,
1690 cpu=vm_cpus,
1691 core=1,
1692 memory=vm_memory)
1693
1694 response = self.perform_request(req_type='POST',
1695 url=instantiate_vapp_href,
1696 headers=headers,
1697 data=data)
1698
1699 if response.status_code != 201:
1700 self.logger.error("REST call {} failed reason : {}"\
1701 "status code : {}".format(instantiate_vapp_href,
1702 response.text,
1703 response.status_code))
1704 raise vimconn.vimconnException("new_vminstance(): Failed to create"\
1705 "vAapp {}".format(vmname_andid))
1706 else:
1707 vapptask = self.get_task_from_response(response.text)
1708
1709 if vapptask is None and retry==1:
1710 self.get_token() # Retry getting token
1711 continue
1712 else:
1713 break
1714
1715 if vapptask is None or vapptask is False:
1716 raise vimconn.vimconnUnexpectedResponse(
1717 "new_vminstance(): failed to create vApp {}".format(vmname_andid))
1718
1719 # wait for task to complete
1720 result = self.client.get_task_monitor().wait_for_success(task=vapptask)
1721
1722 if result.get('status') == 'success':
1723 self.logger.debug("new_vminstance(): Sucessfully created Vapp {}".format(vmname_andid))
1724 else:
1725 raise vimconn.vimconnUnexpectedResponse(
1726 "new_vminstance(): failed to create vApp {}".format(vmname_andid))
1727
1728 except Exception as exp:
1729 raise vimconn.vimconnUnexpectedResponse(
1730 "new_vminstance(): failed to create vApp {} with Exception:{}".format(vmname_andid, exp))
1731
1732 # we should have now vapp in undeployed state.
1733 try:
1734 vdc_obj = VDC(self.client, href=vdc.get('href'))
1735 vapp_resource = vdc_obj.get_vapp(vmname_andid)
1736 vapp_uuid = vapp_resource.get('id').split(':')[-1]
1737 vapp = VApp(self.client, resource=vapp_resource)
1738
1739 except Exception as exp:
1740 raise vimconn.vimconnUnexpectedResponse(
1741 "new_vminstance(): Failed to retrieve vApp {} after creation: Exception:{}"
1742 .format(vmname_andid, exp))
1743
1744 if vapp_uuid is None:
1745 raise vimconn.vimconnUnexpectedResponse(
1746 "new_vminstance(): Failed to retrieve vApp {} after creation".format(vmname_andid))
1747
1748 # Add PCI passthrough/SRIOV configrations
1749 pci_devices_info = []
1750 reserve_memory = False
1751
1752 for net in net_list:
1753 if net["type"] == "PF" or net["type"] == "PCI-PASSTHROUGH":
1754 pci_devices_info.append(net)
1755 elif (net["type"] == "VF" or net["type"] == "SR-IOV" or net["type"] == "VFnotShared") and 'net_id'in net:
1756 reserve_memory = True
1757
1758 # Add PCI
1759 if len(pci_devices_info) > 0:
1760 self.logger.info("Need to add PCI devices {} into VM {}".format(pci_devices_info,
1761 vmname_andid))
1762 PCI_devices_status, _, _ = self.add_pci_devices(vapp_uuid,
1763 pci_devices_info,
1764 vmname_andid)
1765 if PCI_devices_status:
1766 self.logger.info("Added PCI devives {} to VM {}".format(
1767 pci_devices_info,
1768 vmname_andid))
1769 reserve_memory = True
1770 else:
1771 self.logger.info("Fail to add PCI devives {} to VM {}".format(
1772 pci_devices_info,
1773 vmname_andid))
1774
1775 # Add serial console - this allows cloud images to boot as if we are running under OpenStack
1776 self.add_serial_device(vapp_uuid)
1777
1778 if vm_disk:
1779 # Assuming there is only one disk in ovf and fast provisioning in organization vDC is disabled
1780 result = self.modify_vm_disk(vapp_uuid, vm_disk)
1781 if result:
1782 self.logger.debug("Modified Disk size of VM {} ".format(vmname_andid))
1783
1784 #Add new or existing disks to vApp
1785 if disk_list:
1786 added_existing_disk = False
1787 for disk in disk_list:
1788 if 'device_type' in disk and disk['device_type'] == 'cdrom':
1789 image_id = disk['image_id']
1790 # Adding CD-ROM to VM
1791 # will revisit code once specification ready to support this feature
1792 self.insert_media_to_vm(vapp, image_id)
1793 elif "image_id" in disk and disk["image_id"] is not None:
1794 self.logger.debug("Adding existing disk from image {} to vm {} ".format(
1795 disk["image_id"] , vapp_uuid))
1796 self.add_existing_disk(catalogs=catalogs,
1797 image_id=disk["image_id"],
1798 size = disk["size"],
1799 template_name=templateName,
1800 vapp_uuid=vapp_uuid
1801 )
1802 added_existing_disk = True
1803 else:
1804 #Wait till added existing disk gets reflected into vCD database/API
1805 if added_existing_disk:
1806 time.sleep(5)
1807 added_existing_disk = False
1808 self.add_new_disk(vapp_uuid, disk['size'])
1809
1810 if numas:
1811 # Assigning numa affinity setting
1812 for numa in numas:
1813 if 'paired-threads-id' in numa:
1814 paired_threads_id = numa['paired-threads-id']
1815 self.set_numa_affinity(vapp_uuid, paired_threads_id)
1816
1817 # add NICs & connect to networks in netlist
1818 try:
1819 vdc_obj = VDC(self.client, href=vdc.get('href'))
1820 vapp_resource = vdc_obj.get_vapp(vmname_andid)
1821 vapp = VApp(self.client, resource=vapp_resource)
1822 vapp_id = vapp_resource.get('id').split(':')[-1]
1823
1824 self.logger.info("Removing primary NIC: ")
1825 # First remove all NICs so that NIC properties can be adjusted as needed
1826 self.remove_primary_network_adapter_from_all_vms(vapp)
1827
1828 self.logger.info("Request to connect VM to a network: {}".format(net_list))
1829 primary_nic_index = 0
1830 nicIndex = 0
1831 for net in net_list:
1832 # openmano uses network id in UUID format.
1833 # vCloud Director need a name so we do reverse operation from provided UUID we lookup a name
1834 # [{'use': 'bridge', 'net_id': '527d4bf7-566a-41e7-a9e7-ca3cdd9cef4f', 'type': 'virtual',
1835 # 'vpci': '0000:00:11.0', 'name': 'eth0'}]
1836
1837 if 'net_id' not in net:
1838 continue
1839
1840 #Using net_id as a vim_id i.e. vim interface id, as do not have saperate vim interface id
1841 #Same will be returned in refresh_vms_status() as vim_interface_id
1842 net['vim_id'] = net['net_id'] # Provide the same VIM identifier as the VIM network
1843
1844 interface_net_id = net['net_id']
1845 interface_net_name = self.get_network_name_by_id(network_uuid=interface_net_id)
1846 interface_network_mode = net['use']
1847
1848 if interface_network_mode == 'mgmt':
1849 primary_nic_index = nicIndex
1850
1851 """- POOL (A static IP address is allocated automatically from a pool of addresses.)
1852 - DHCP (The IP address is obtained from a DHCP service.)
1853 - MANUAL (The IP address is assigned manually in the IpAddress element.)
1854 - NONE (No IP addressing mode specified.)"""
1855
1856 if primary_netname is not None:
1857 self.logger.debug("new_vminstance(): Filtering by net name {}".format(interface_net_name))
1858 nets = [n for n in self.get_network_list() if n.get('name') == interface_net_name]
1859 if len(nets) == 1:
1860 self.logger.info("new_vminstance(): Found requested network: {}".format(nets[0].get('name')))
1861
1862 if interface_net_name != primary_netname:
1863 # connect network to VM - with all DHCP by default
1864 self.logger.info("new_vminstance(): Attaching net {} to vapp".format(interface_net_name))
1865 self.connect_vapp_to_org_vdc_network(vapp_id, nets[0].get('name'))
1866
1867 type_list = ('PF', 'PCI-PASSTHROUGH', 'VFnotShared')
1868 nic_type = 'VMXNET3'
1869 if 'type' in net and net['type'] not in type_list:
1870 # fetching nic type from vnf
1871 if 'model' in net:
1872 if net['model'] is not None:
1873 if net['model'].lower() == 'paravirt' or net['model'].lower() == 'virtio':
1874 nic_type = 'VMXNET3'
1875 else:
1876 nic_type = net['model']
1877
1878 self.logger.info("new_vminstance(): adding network adapter "\
1879 "to a network {}".format(nets[0].get('name')))
1880 self.add_network_adapter_to_vms(vapp, nets[0].get('name'),
1881 primary_nic_index,
1882 nicIndex,
1883 net,
1884 nic_type=nic_type)
1885 else:
1886 self.logger.info("new_vminstance(): adding network adapter "\
1887 "to a network {}".format(nets[0].get('name')))
1888 if net['type'] in ['SR-IOV', 'VF']:
1889 nic_type = net['type']
1890 self.add_network_adapter_to_vms(vapp, nets[0].get('name'),
1891 primary_nic_index,
1892 nicIndex,
1893 net,
1894 nic_type=nic_type)
1895 nicIndex += 1
1896
1897 # cloud-init for ssh-key injection
1898 if cloud_config:
1899 # Create a catalog which will be carrying the config drive ISO
1900 # This catalog is deleted during vApp deletion. The catalog name carries
1901 # vApp UUID and thats how it gets identified during its deletion.
1902 config_drive_catalog_name = 'cfg_drv-' + vapp_uuid
1903 self.logger.info('new_vminstance(): Creating catalog "{}" to carry config drive ISO'.format(
1904 config_drive_catalog_name))
1905 config_drive_catalog_id = self.create_vimcatalog(org, config_drive_catalog_name)
1906 if config_drive_catalog_id is None:
1907 error_msg = "new_vminstance(): Failed to create new catalog '{}' to carry the config drive " \
1908 "ISO".format(config_drive_catalog_name)
1909 raise Exception(error_msg)
1910
1911 # Create config-drive ISO
1912 _, userdata = self._create_user_data(cloud_config)
1913 # self.logger.debug('new_vminstance(): The userdata for cloud-init: {}'.format(userdata))
1914 iso_path = self.create_config_drive_iso(userdata)
1915 self.logger.debug('new_vminstance(): The ISO is successfully created. Path: {}'.format(iso_path))
1916
1917 self.logger.info('new_vminstance(): uploading iso to catalog {}'.format(config_drive_catalog_name))
1918 self.upload_iso_to_catalog(config_drive_catalog_id, iso_path)
1919 # Attach the config-drive ISO to the VM
1920 self.logger.info('new_vminstance(): Attaching the config-drive ISO to the VM')
1921 self.insert_media_to_vm(vapp, config_drive_catalog_id)
1922 shutil.rmtree(os.path.dirname(iso_path), ignore_errors=True)
1923
1924 # If VM has PCI devices or SRIOV reserve memory for VM
1925 if reserve_memory:
1926 self.reserve_memory_for_all_vms(vapp, memory_mb)
1927
1928 self.logger.debug("new_vminstance(): starting power on vApp {} ".format(vmname_andid))
1929
1930 poweron_task = self.power_on_vapp(vapp_id, vmname_andid)
1931 result = self.client.get_task_monitor().wait_for_success(task=poweron_task)
1932 if result.get('status') == 'success':
1933 self.logger.info("new_vminstance(): Successfully power on "\
1934 "vApp {}".format(vmname_andid))
1935 else:
1936 self.logger.error("new_vminstance(): failed to power on vApp "\
1937 "{}".format(vmname_andid))
1938
1939 except Exception as exp:
1940 try:
1941 self.delete_vminstance(vapp_uuid)
1942 except Exception as exp2:
1943 self.logger.error("new_vminstance rollback fail {}".format(exp2))
1944 # it might be a case if specific mandatory entry in dict is empty or some other pyVcloud exception
1945 self.logger.error("new_vminstance(): Failed create new vm instance {} with exception {}"
1946 .format(name, exp))
1947 raise vimconn.vimconnException("new_vminstance(): Failed create new vm instance {} with exception {}"
1948 .format(name, exp))
1949 # check if vApp deployed and if that the case return vApp UUID otherwise -1
1950 wait_time = 0
1951 vapp_uuid = None
1952 while wait_time <= MAX_WAIT_TIME:
1953 try:
1954 vapp_resource = vdc_obj.get_vapp(vmname_andid)
1955 vapp = VApp(self.client, resource=vapp_resource)
1956 except Exception as exp:
1957 raise vimconn.vimconnUnexpectedResponse(
1958 "new_vminstance(): Failed to retrieve vApp {} after creation: Exception:{}"
1959 .format(vmname_andid, exp))
1960
1961 #if vapp and vapp.me.deployed:
1962 if vapp and vapp_resource.get('deployed') == 'true':
1963 vapp_uuid = vapp_resource.get('id').split(':')[-1]
1964 break
1965 else:
1966 self.logger.debug("new_vminstance(): Wait for vApp {} to deploy".format(name))
1967 time.sleep(INTERVAL_TIME)
1968
1969 wait_time +=INTERVAL_TIME
1970
1971 #SET Affinity Rule for VM
1972 #Pre-requisites: User has created Hosh Groups in vCenter with respective Hosts to be used
1973 #While creating VIM account user has to pass the Host Group names in availability_zone list
1974 #"availability_zone" is a part of VIM "config" parameters
1975 #For example, in VIM config: "availability_zone":["HG_170","HG_174","HG_175"]
1976 #Host groups are referred as availability zones
1977 #With following procedure, deployed VM will be added into a VM group.
1978 #Then A VM to Host Affinity rule will be created using the VM group & Host group.
1979 if(availability_zone_list):
1980 self.logger.debug("Existing Host Groups in VIM {}".format(self.config.get('availability_zone')))
1981 #Admin access required for creating Affinity rules
1982 client = self.connect_as_admin()
1983 if not client:
1984 raise vimconn.vimconnConnectionException("Failed to connect vCD as admin")
1985 else:
1986 self.client = client
1987 if self.client:
1988 headers = {'Accept':'application/*+xml;version=27.0',
1989 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
1990 #Step1: Get provider vdc details from organization
1991 pvdc_href = self.get_pvdc_for_org(self.tenant_name, headers)
1992 if pvdc_href is not None:
1993 #Step2: Found required pvdc, now get resource pool information
1994 respool_href = self.get_resource_pool_details(pvdc_href, headers)
1995 if respool_href is None:
1996 #Raise error if respool_href not found
1997 msg = "new_vminstance():Error in finding resource pool details in pvdc {}"\
1998 .format(pvdc_href)
1999 self.log_message(msg)
2000
2001 #Step3: Verify requested availability zone(hostGroup) is present in vCD
2002 # get availability Zone
2003 vm_az = self.get_vm_availability_zone(availability_zone_index, availability_zone_list)
2004 # check if provided av zone(hostGroup) is present in vCD VIM
2005 status = self.check_availibility_zone(vm_az, respool_href, headers)
2006 if status is False:
2007 msg = "new_vminstance(): Error in finding availability zone(Host Group): {} in "\
2008 "resource pool {} status: {}".format(vm_az,respool_href,status)
2009 self.log_message(msg)
2010 else:
2011 self.logger.debug ("new_vminstance(): Availability zone {} found in VIM".format(vm_az))
2012
2013 #Step4: Find VM group references to create vm group
2014 vmgrp_href = self.find_vmgroup_reference(respool_href, headers)
2015 if vmgrp_href == None:
2016 msg = "new_vminstance(): No reference to VmGroup found in resource pool"
2017 self.log_message(msg)
2018
2019 #Step5: Create a VmGroup with name az_VmGroup
2020 vmgrp_name = vm_az + "_" + name #Formed VM Group name = Host Group name + VM name
2021 status = self.create_vmgroup(vmgrp_name, vmgrp_href, headers)
2022 if status is not True:
2023 msg = "new_vminstance(): Error in creating VM group {}".format(vmgrp_name)
2024 self.log_message(msg)
2025
2026 #VM Group url to add vms to vm group
2027 vmgrpname_url = self.url + "/api/admin/extension/vmGroup/name/"+ vmgrp_name
2028
2029 #Step6: Add VM to VM Group
2030 #Find VM uuid from vapp_uuid
2031 vm_details = self.get_vapp_details_rest(vapp_uuid)
2032 vm_uuid = vm_details['vmuuid']
2033
2034 status = self.add_vm_to_vmgroup(vm_uuid, vmgrpname_url, vmgrp_name, headers)
2035 if status is not True:
2036 msg = "new_vminstance(): Error in adding VM to VM group {}".format(vmgrp_name)
2037 self.log_message(msg)
2038
2039 #Step7: Create VM to Host affinity rule
2040 addrule_href = self.get_add_rule_reference (respool_href, headers)
2041 if addrule_href is None:
2042 msg = "new_vminstance(): Error in finding href to add rule in resource pool: {}"\
2043 .format(respool_href)
2044 self.log_message(msg)
2045
2046 status = self.create_vm_to_host_affinity_rule(addrule_href, vmgrp_name, vm_az, "Affinity", headers)
2047 if status is False:
2048 msg = "new_vminstance(): Error in creating affinity rule for VM {} in Host group {}"\
2049 .format(name, vm_az)
2050 self.log_message(msg)
2051 else:
2052 self.logger.debug("new_vminstance(): Affinity rule created successfully. Added {} in Host group {}"\
2053 .format(name, vm_az))
2054 #Reset token to a normal user to perform other operations
2055 self.get_token()
2056
2057 if vapp_uuid is not None:
2058 return vapp_uuid, None
2059 else:
2060 raise vimconn.vimconnUnexpectedResponse("new_vminstance(): Failed create new vm instance {}".format(name))
2061
2062 def create_config_drive_iso(self, user_data):
2063 tmpdir = tempfile.mkdtemp()
2064 iso_path = os.path.join(tmpdir, 'ConfigDrive.iso')
2065 latest_dir = os.path.join(tmpdir, 'openstack', 'latest')
2066 os.makedirs(latest_dir)
2067 with open(os.path.join(latest_dir, 'meta_data.json'), 'w') as meta_file_obj, \
2068 open(os.path.join(latest_dir, 'user_data'), 'w') as userdata_file_obj:
2069 userdata_file_obj.write(user_data)
2070 meta_file_obj.write(json.dumps({"availability_zone": "nova",
2071 "launch_index": 0,
2072 "name": "ConfigDrive",
2073 "uuid": str(uuid.uuid4())}
2074 )
2075 )
2076 genisoimage_cmd = 'genisoimage -J -r -V config-2 -o {iso_path} {source_dir_path}'.format(
2077 iso_path=iso_path, source_dir_path=tmpdir)
2078 self.logger.info('create_config_drive_iso(): Creating ISO by running command "{}"'.format(genisoimage_cmd))
2079 try:
2080 FNULL = open(os.devnull, 'w')
2081 subprocess.check_call(genisoimage_cmd, shell=True, stdout=FNULL)
2082 except subprocess.CalledProcessError as e:
2083 shutil.rmtree(tmpdir, ignore_errors=True)
2084 error_msg = 'create_config_drive_iso(): Exception while running genisoimage command: {}'.format(e)
2085 self.logger.error(error_msg)
2086 raise Exception(error_msg)
2087 return iso_path
2088
2089 def upload_iso_to_catalog(self, catalog_id, iso_file_path):
2090 if not os.path.isfile(iso_file_path):
2091 error_msg = "upload_iso_to_catalog(): Given iso file is not present. Given path: {}".format(iso_file_path)
2092 self.logger.error(error_msg)
2093 raise Exception(error_msg)
2094 iso_file_stat = os.stat(iso_file_path)
2095 xml_media_elem = '''<?xml version="1.0" encoding="UTF-8"?>
2096 <Media
2097 xmlns="http://www.vmware.com/vcloud/v1.5"
2098 name="{iso_name}"
2099 size="{iso_size}"
2100 imageType="iso">
2101 <Description>ISO image for config-drive</Description>
2102 </Media>'''.format(iso_name=os.path.basename(iso_file_path), iso_size=iso_file_stat.st_size)
2103 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
2104 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
2105 headers['Content-Type'] = 'application/vnd.vmware.vcloud.media+xml'
2106 catalog_href = self.url + '/api/catalog/' + catalog_id + '/action/upload'
2107 response = self.perform_request(req_type='POST', url=catalog_href, headers=headers, data=xml_media_elem)
2108
2109 if response.status_code != 201:
2110 error_msg = "upload_iso_to_catalog(): Failed to POST an action/upload request to {}".format(catalog_href)
2111 self.logger.error(error_msg)
2112 raise Exception(error_msg)
2113
2114 catalogItem = XmlElementTree.fromstring(response.text)
2115 entity = [child for child in catalogItem if child.get("type") == "application/vnd.vmware.vcloud.media+xml"][0]
2116 entity_href = entity.get('href')
2117
2118 response = self.perform_request(req_type='GET', url=entity_href, headers=headers)
2119 if response.status_code != 200:
2120 raise Exception("upload_iso_to_catalog(): Failed to GET entity href {}".format(entity_href))
2121
2122 match = re.search(r'<Files>\s+?<File.+?href="(.+?)"/>\s+?</File>\s+?</Files>', response.text, re.DOTALL)
2123 if match:
2124 media_upload_href = match.group(1)
2125 else:
2126 raise Exception('Could not parse the upload URL for the media file from the last response')
2127 upload_iso_task = self.get_task_from_response(response.text)
2128 headers['Content-Type'] = 'application/octet-stream'
2129 response = self.perform_request(req_type='PUT',
2130 url=media_upload_href,
2131 headers=headers,
2132 data=open(iso_file_path, 'rb'))
2133
2134 if response.status_code != 200:
2135 raise Exception('PUT request to "{}" failed'.format(media_upload_href))
2136 result = self.client.get_task_monitor().wait_for_success(task=upload_iso_task)
2137 if result.get('status') != 'success':
2138 raise Exception('The upload iso task failed with status {}'.format(result.get('status')))
2139
2140 def get_vcd_availibility_zones(self,respool_href, headers):
2141 """ Method to find presence of av zone is VIM resource pool
2142
2143 Args:
2144 respool_href - resource pool href
2145 headers - header information
2146
2147 Returns:
2148 vcd_az - list of azone present in vCD
2149 """
2150 vcd_az = []
2151 url=respool_href
2152 resp = self.perform_request(req_type='GET',url=respool_href, headers=headers)
2153
2154 if resp.status_code != requests.codes.ok:
2155 self.logger.debug ("REST API call {} failed. Return status code {}".format(url, resp.status_code))
2156 else:
2157 #Get the href to hostGroups and find provided hostGroup is present in it
2158 resp_xml = XmlElementTree.fromstring(resp.content)
2159 for child in resp_xml:
2160 if 'VMWProviderVdcResourcePool' in child.tag:
2161 for schild in child:
2162 if 'Link' in schild.tag:
2163 if schild.attrib.get('type') == "application/vnd.vmware.admin.vmwHostGroupsType+xml":
2164 hostGroup = schild.attrib.get('href')
2165 hg_resp = self.perform_request(req_type='GET',url=hostGroup, headers=headers)
2166 if hg_resp.status_code != requests.codes.ok:
2167 self.logger.debug ("REST API call {} failed. Return status code {}".format(hostGroup, hg_resp.status_code))
2168 else:
2169 hg_resp_xml = XmlElementTree.fromstring(hg_resp.content)
2170 for hostGroup in hg_resp_xml:
2171 if 'HostGroup' in hostGroup.tag:
2172 #append host group name to the list
2173 vcd_az.append(hostGroup.attrib.get("name"))
2174 return vcd_az
2175
2176
2177 def set_availability_zones(self):
2178 """
2179 Set vim availability zone
2180 """
2181
2182 vim_availability_zones = None
2183 availability_zone = None
2184 if 'availability_zone' in self.config:
2185 vim_availability_zones = self.config.get('availability_zone')
2186 if isinstance(vim_availability_zones, str):
2187 availability_zone = [vim_availability_zones]
2188 elif isinstance(vim_availability_zones, list):
2189 availability_zone = vim_availability_zones
2190 else:
2191 return availability_zone
2192
2193 return availability_zone
2194
2195
2196 def get_vm_availability_zone(self, availability_zone_index, availability_zone_list):
2197 """
2198 Return the availability zone to be used by the created VM.
2199 returns: The VIM availability zone to be used or None
2200 """
2201 if availability_zone_index is None:
2202 if not self.config.get('availability_zone'):
2203 return None
2204 elif isinstance(self.config.get('availability_zone'), str):
2205 return self.config['availability_zone']
2206 else:
2207 return self.config['availability_zone'][0]
2208
2209 vim_availability_zones = self.availability_zone
2210
2211 # check if VIM offer enough availability zones describe in the VNFD
2212 if vim_availability_zones and len(availability_zone_list) <= len(vim_availability_zones):
2213 # check if all the names of NFV AV match VIM AV names
2214 match_by_index = False
2215 for av in availability_zone_list:
2216 if av not in vim_availability_zones:
2217 match_by_index = True
2218 break
2219 if match_by_index:
2220 self.logger.debug("Required Availability zone or Host Group not found in VIM config")
2221 self.logger.debug("Input Availability zone list: {}".format(availability_zone_list))
2222 self.logger.debug("VIM configured Availability zones: {}".format(vim_availability_zones))
2223 self.logger.debug("VIM Availability zones will be used by index")
2224 return vim_availability_zones[availability_zone_index]
2225 else:
2226 return availability_zone_list[availability_zone_index]
2227 else:
2228 raise vimconn.vimconnConflictException("No enough availability zones at VIM for this deployment")
2229
2230
2231 def create_vm_to_host_affinity_rule(self, addrule_href, vmgrpname, hostgrpname, polarity, headers):
2232 """ Method to create VM to Host Affinity rule in vCD
2233
2234 Args:
2235 addrule_href - href to make a POST request
2236 vmgrpname - name of the VM group created
2237 hostgrpnmae - name of the host group created earlier
2238 polarity - Affinity or Anti-affinity (default: Affinity)
2239 headers - headers to make REST call
2240
2241 Returns:
2242 True- if rule is created
2243 False- Failed to create rule due to some error
2244
2245 """
2246 task_status = False
2247 rule_name = polarity + "_" + vmgrpname
2248 payload = """<?xml version="1.0" encoding="UTF-8"?>
2249 <vmext:VMWVmHostAffinityRule
2250 xmlns:vmext="http://www.vmware.com/vcloud/extension/v1.5"
2251 xmlns:vcloud="http://www.vmware.com/vcloud/v1.5"
2252 type="application/vnd.vmware.admin.vmwVmHostAffinityRule+xml">
2253 <vcloud:Name>{}</vcloud:Name>
2254 <vcloud:IsEnabled>true</vcloud:IsEnabled>
2255 <vcloud:IsMandatory>true</vcloud:IsMandatory>
2256 <vcloud:Polarity>{}</vcloud:Polarity>
2257 <vmext:HostGroupName>{}</vmext:HostGroupName>
2258 <vmext:VmGroupName>{}</vmext:VmGroupName>
2259 </vmext:VMWVmHostAffinityRule>""".format(rule_name, polarity, hostgrpname, vmgrpname)
2260
2261 resp = self.perform_request(req_type='POST',url=addrule_href, headers=headers, data=payload)
2262
2263 if resp.status_code != requests.codes.accepted:
2264 self.logger.debug ("REST API call {} failed. Return status code {}".format(addrule_href, resp.status_code))
2265 task_status = False
2266 return task_status
2267 else:
2268 affinity_task = self.get_task_from_response(resp.content)
2269 self.logger.debug ("affinity_task: {}".format(affinity_task))
2270 if affinity_task is None or affinity_task is False:
2271 raise vimconn.vimconnUnexpectedResponse("failed to find affinity task")
2272 # wait for task to complete
2273 result = self.client.get_task_monitor().wait_for_success(task=affinity_task)
2274 if result.get('status') == 'success':
2275 self.logger.debug("Successfully created affinity rule {}".format(rule_name))
2276 return True
2277 else:
2278 raise vimconn.vimconnUnexpectedResponse(
2279 "failed to create affinity rule {}".format(rule_name))
2280
2281
2282 def get_add_rule_reference (self, respool_href, headers):
2283 """ This method finds href to add vm to host affinity rule to vCD
2284
2285 Args:
2286 respool_href- href to resource pool
2287 headers- header information to make REST call
2288
2289 Returns:
2290 None - if no valid href to add rule found or
2291 addrule_href - href to add vm to host affinity rule of resource pool
2292 """
2293 addrule_href = None
2294 resp = self.perform_request(req_type='GET',url=respool_href, headers=headers)
2295
2296 if resp.status_code != requests.codes.ok:
2297 self.logger.debug ("REST API call {} failed. Return status code {}".format(respool_href, resp.status_code))
2298 else:
2299
2300 resp_xml = XmlElementTree.fromstring(resp.content)
2301 for child in resp_xml:
2302 if 'VMWProviderVdcResourcePool' in child.tag:
2303 for schild in child:
2304 if 'Link' in schild.tag:
2305 if schild.attrib.get('type') == "application/vnd.vmware.admin.vmwVmHostAffinityRule+xml" and \
2306 schild.attrib.get('rel') == "add":
2307 addrule_href = schild.attrib.get('href')
2308 break
2309
2310 return addrule_href
2311
2312
2313 def add_vm_to_vmgroup(self, vm_uuid, vmGroupNameURL, vmGroup_name, headers):
2314 """ Method to add deployed VM to newly created VM Group.
2315 This is required to create VM to Host affinity in vCD
2316
2317 Args:
2318 vm_uuid- newly created vm uuid
2319 vmGroupNameURL- URL to VM Group name
2320 vmGroup_name- Name of VM group created
2321 headers- Headers for REST request
2322
2323 Returns:
2324 True- if VM added to VM group successfully
2325 False- if any error encounter
2326 """
2327
2328 addvm_resp = self.perform_request(req_type='GET',url=vmGroupNameURL, headers=headers)#, data=payload)
2329
2330 if addvm_resp.status_code != requests.codes.ok:
2331 self.logger.debug ("REST API call to get VM Group Name url {} failed. Return status code {}"\
2332 .format(vmGroupNameURL, addvm_resp.status_code))
2333 return False
2334 else:
2335 resp_xml = XmlElementTree.fromstring(addvm_resp.content)
2336 for child in resp_xml:
2337 if child.tag.split('}')[1] == 'Link':
2338 if child.attrib.get("rel") == "addVms":
2339 addvmtogrpURL = child.attrib.get("href")
2340
2341 #Get vm details
2342 url_list = [self.url, '/api/vApp/vm-',vm_uuid]
2343 vmdetailsURL = ''.join(url_list)
2344
2345 resp = self.perform_request(req_type='GET',url=vmdetailsURL, headers=headers)
2346
2347 if resp.status_code != requests.codes.ok:
2348 self.logger.debug ("REST API call {} failed. Return status code {}".format(vmdetailsURL, resp.status_code))
2349 return False
2350
2351 #Parse VM details
2352 resp_xml = XmlElementTree.fromstring(resp.content)
2353 if resp_xml.tag.split('}')[1] == "Vm":
2354 vm_id = resp_xml.attrib.get("id")
2355 vm_name = resp_xml.attrib.get("name")
2356 vm_href = resp_xml.attrib.get("href")
2357 #print vm_id, vm_name, vm_href
2358 #Add VM into VMgroup
2359 payload = """<?xml version="1.0" encoding="UTF-8"?>\
2360 <ns2:Vms xmlns:ns2="http://www.vmware.com/vcloud/v1.5" \
2361 xmlns="http://www.vmware.com/vcloud/versions" \
2362 xmlns:ns3="http://schemas.dmtf.org/ovf/envelope/1" \
2363 xmlns:ns4="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData" \
2364 xmlns:ns5="http://schemas.dmtf.org/wbem/wscim/1/common" \
2365 xmlns:ns6="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData" \
2366 xmlns:ns7="http://www.vmware.com/schema/ovf" \
2367 xmlns:ns8="http://schemas.dmtf.org/ovf/environment/1" \
2368 xmlns:ns9="http://www.vmware.com/vcloud/extension/v1.5">\
2369 <ns2:VmReference href="{}" id="{}" name="{}" \
2370 type="application/vnd.vmware.vcloud.vm+xml" />\
2371 </ns2:Vms>""".format(vm_href, vm_id, vm_name)
2372
2373 addvmtogrp_resp = self.perform_request(req_type='POST',url=addvmtogrpURL, headers=headers, data=payload)
2374
2375 if addvmtogrp_resp.status_code != requests.codes.accepted:
2376 self.logger.debug ("REST API call {} failed. Return status code {}".format(addvmtogrpURL, addvmtogrp_resp.status_code))
2377 return False
2378 else:
2379 self.logger.debug ("Done adding VM {} to VMgroup {}".format(vm_name, vmGroup_name))
2380 return True
2381
2382
2383 def create_vmgroup(self, vmgroup_name, vmgroup_href, headers):
2384 """Method to create a VM group in vCD
2385
2386 Args:
2387 vmgroup_name : Name of VM group to be created
2388 vmgroup_href : href for vmgroup
2389 headers- Headers for REST request
2390 """
2391 #POST to add URL with required data
2392 vmgroup_status = False
2393 payload = """<VMWVmGroup xmlns="http://www.vmware.com/vcloud/extension/v1.5" \
2394 xmlns:vcloud_v1.5="http://www.vmware.com/vcloud/v1.5" name="{}">\
2395 <vmCount>1</vmCount>\
2396 </VMWVmGroup>""".format(vmgroup_name)
2397 resp = self.perform_request(req_type='POST',url=vmgroup_href, headers=headers, data=payload)
2398
2399 if resp.status_code != requests.codes.accepted:
2400 self.logger.debug ("REST API call {} failed. Return status code {}".format(vmgroup_href, resp.status_code))
2401 return vmgroup_status
2402 else:
2403 vmgroup_task = self.get_task_from_response(resp.content)
2404 if vmgroup_task is None or vmgroup_task is False:
2405 raise vimconn.vimconnUnexpectedResponse(
2406 "create_vmgroup(): failed to create VM group {}".format(vmgroup_name))
2407
2408 # wait for task to complete
2409 result = self.client.get_task_monitor().wait_for_success(task=vmgroup_task)
2410
2411 if result.get('status') == 'success':
2412 self.logger.debug("create_vmgroup(): Successfully created VM group {}".format(vmgroup_name))
2413 #time.sleep(10)
2414 vmgroup_status = True
2415 return vmgroup_status
2416 else:
2417 raise vimconn.vimconnUnexpectedResponse(\
2418 "create_vmgroup(): failed to create VM group {}".format(vmgroup_name))
2419
2420
2421 def find_vmgroup_reference(self, url, headers):
2422 """ Method to create a new VMGroup which is required to add created VM
2423 Args:
2424 url- resource pool href
2425 headers- header information
2426
2427 Returns:
2428 returns href to VM group to create VM group
2429 """
2430 #Perform GET on resource pool to find 'add' link to create VMGroup
2431 #https://vcd-ip/api/admin/extension/providervdc/<providervdc id>/resourcePools
2432 vmgrp_href = None
2433 resp = self.perform_request(req_type='GET',url=url, headers=headers)
2434
2435 if resp.status_code != requests.codes.ok:
2436 self.logger.debug ("REST API call {} failed. Return status code {}".format(url, resp.status_code))
2437 else:
2438 #Get the href to add vmGroup to vCD
2439 resp_xml = XmlElementTree.fromstring(resp.content)
2440 for child in resp_xml:
2441 if 'VMWProviderVdcResourcePool' in child.tag:
2442 for schild in child:
2443 if 'Link' in schild.tag:
2444 #Find href with type VMGroup and rel with add
2445 if schild.attrib.get('type') == "application/vnd.vmware.admin.vmwVmGroupType+xml"\
2446 and schild.attrib.get('rel') == "add":
2447 vmgrp_href = schild.attrib.get('href')
2448 return vmgrp_href
2449
2450
2451 def check_availibility_zone(self, az, respool_href, headers):
2452 """ Method to verify requested av zone is present or not in provided
2453 resource pool
2454
2455 Args:
2456 az - name of hostgroup (availibility_zone)
2457 respool_href - Resource Pool href
2458 headers - Headers to make REST call
2459 Returns:
2460 az_found - True if availibility_zone is found else False
2461 """
2462 az_found = False
2463 headers['Accept']='application/*+xml;version=27.0'
2464 resp = self.perform_request(req_type='GET',url=respool_href, headers=headers)
2465
2466 if resp.status_code != requests.codes.ok:
2467 self.logger.debug ("REST API call {} failed. Return status code {}".format(respool_href, resp.status_code))
2468 else:
2469 #Get the href to hostGroups and find provided hostGroup is present in it
2470 resp_xml = XmlElementTree.fromstring(resp.content)
2471
2472 for child in resp_xml:
2473 if 'VMWProviderVdcResourcePool' in child.tag:
2474 for schild in child:
2475 if 'Link' in schild.tag:
2476 if schild.attrib.get('type') == "application/vnd.vmware.admin.vmwHostGroupsType+xml":
2477 hostGroup_href = schild.attrib.get('href')
2478 hg_resp = self.perform_request(req_type='GET',url=hostGroup_href, headers=headers)
2479 if hg_resp.status_code != requests.codes.ok:
2480 self.logger.debug ("REST API call {} failed. Return status code {}".format(hostGroup_href, hg_resp.status_code))
2481 else:
2482 hg_resp_xml = XmlElementTree.fromstring(hg_resp.content)
2483 for hostGroup in hg_resp_xml:
2484 if 'HostGroup' in hostGroup.tag:
2485 if hostGroup.attrib.get("name") == az:
2486 az_found = True
2487 break
2488 return az_found
2489
2490
2491 def get_pvdc_for_org(self, org_vdc, headers):
2492 """ This method gets provider vdc references from organisation
2493
2494 Args:
2495 org_vdc - name of the organisation VDC to find pvdc
2496 headers - headers to make REST call
2497
2498 Returns:
2499 None - if no pvdc href found else
2500 pvdc_href - href to pvdc
2501 """
2502
2503 #Get provider VDC references from vCD
2504 pvdc_href = None
2505 #url = '<vcd url>/api/admin/extension/providerVdcReferences'
2506 url_list = [self.url, '/api/admin/extension/providerVdcReferences']
2507 url = ''.join(url_list)
2508
2509 response = self.perform_request(req_type='GET',url=url, headers=headers)
2510 if response.status_code != requests.codes.ok:
2511 self.logger.debug ("REST API call {} failed. Return status code {}"\
2512 .format(url, response.status_code))
2513 else:
2514 xmlroot_response = XmlElementTree.fromstring(response.text)
2515 for child in xmlroot_response:
2516 if 'ProviderVdcReference' in child.tag:
2517 pvdc_href = child.attrib.get('href')
2518 #Get vdcReferences to find org
2519 pvdc_resp = self.perform_request(req_type='GET',url=pvdc_href, headers=headers)
2520 if pvdc_resp.status_code != requests.codes.ok:
2521 raise vimconn.vimconnException("REST API call {} failed. "\
2522 "Return status code {}"\
2523 .format(url, pvdc_resp.status_code))
2524
2525 pvdc_resp_xml = XmlElementTree.fromstring(pvdc_resp.content)
2526 for child in pvdc_resp_xml:
2527 if 'Link' in child.tag:
2528 if child.attrib.get('type') == "application/vnd.vmware.admin.vdcReferences+xml":
2529 vdc_href = child.attrib.get('href')
2530
2531 #Check if provided org is present in vdc
2532 vdc_resp = self.perform_request(req_type='GET',
2533 url=vdc_href,
2534 headers=headers)
2535 if vdc_resp.status_code != requests.codes.ok:
2536 raise vimconn.vimconnException("REST API call {} failed. "\
2537 "Return status code {}"\
2538 .format(url, vdc_resp.status_code))
2539 vdc_resp_xml = XmlElementTree.fromstring(vdc_resp.content)
2540 for child in vdc_resp_xml:
2541 if 'VdcReference' in child.tag:
2542 if child.attrib.get('name') == org_vdc:
2543 return pvdc_href
2544
2545
2546 def get_resource_pool_details(self, pvdc_href, headers):
2547 """ Method to get resource pool information.
2548 Host groups are property of resource group.
2549 To get host groups, we need to GET details of resource pool.
2550
2551 Args:
2552 pvdc_href: href to pvdc details
2553 headers: headers
2554
2555 Returns:
2556 respool_href - Returns href link reference to resource pool
2557 """
2558 respool_href = None
2559 resp = self.perform_request(req_type='GET',url=pvdc_href, headers=headers)
2560
2561 if resp.status_code != requests.codes.ok:
2562 self.logger.debug ("REST API call {} failed. Return status code {}"\
2563 .format(pvdc_href, resp.status_code))
2564 else:
2565 respool_resp_xml = XmlElementTree.fromstring(resp.content)
2566 for child in respool_resp_xml:
2567 if 'Link' in child.tag:
2568 if child.attrib.get('type') == "application/vnd.vmware.admin.vmwProviderVdcResourcePoolSet+xml":
2569 respool_href = child.attrib.get("href")
2570 break
2571 return respool_href
2572
2573
2574 def log_message(self, msg):
2575 """
2576 Method to log error messages related to Affinity rule creation
2577 in new_vminstance & raise Exception
2578 Args :
2579 msg - Error message to be logged
2580
2581 """
2582 #get token to connect vCD as a normal user
2583 self.get_token()
2584 self.logger.debug(msg)
2585 raise vimconn.vimconnException(msg)
2586
2587
2588 ##
2589 ##
2590 ## based on current discussion
2591 ##
2592 ##
2593 ## server:
2594 # created: '2016-09-08T11:51:58'
2595 # description: simple-instance.linux1.1
2596 # flavor: ddc6776e-75a9-11e6-ad5f-0800273e724c
2597 # hostId: e836c036-74e7-11e6-b249-0800273e724c
2598 # image: dde30fe6-75a9-11e6-ad5f-0800273e724c
2599 # status: ACTIVE
2600 # error_msg:
2601 # interfaces: …
2602 #
2603 def get_vminstance(self, vim_vm_uuid=None):
2604 """Returns the VM instance information from VIM"""
2605
2606 self.logger.debug("Client requesting vm instance {} ".format(vim_vm_uuid))
2607
2608 org, vdc = self.get_vdc_details()
2609 if vdc is None:
2610 raise vimconn.vimconnConnectionException(
2611 "Failed to get a reference of VDC for a tenant {}".format(self.tenant_name))
2612
2613 vm_info_dict = self.get_vapp_details_rest(vapp_uuid=vim_vm_uuid)
2614 if not vm_info_dict:
2615 self.logger.debug("get_vminstance(): Failed to get vApp name by UUID {}".format(vim_vm_uuid))
2616 raise vimconn.vimconnNotFoundException("Failed to get vApp name by UUID {}".format(vim_vm_uuid))
2617
2618 status_key = vm_info_dict['status']
2619 error = ''
2620 try:
2621 vm_dict = {'created': vm_info_dict['created'],
2622 'description': vm_info_dict['name'],
2623 'status': vcdStatusCode2manoFormat[int(status_key)],
2624 'hostId': vm_info_dict['vmuuid'],
2625 'error_msg': error,
2626 'vim_info': yaml.safe_dump(vm_info_dict), 'interfaces': []}
2627
2628 if 'interfaces' in vm_info_dict:
2629 vm_dict['interfaces'] = vm_info_dict['interfaces']
2630 else:
2631 vm_dict['interfaces'] = []
2632 except KeyError:
2633 vm_dict = {'created': '',
2634 'description': '',
2635 'status': vcdStatusCode2manoFormat[int(-1)],
2636 'hostId': vm_info_dict['vmuuid'],
2637 'error_msg': "Inconsistency state",
2638 'vim_info': yaml.safe_dump(vm_info_dict), 'interfaces': []}
2639
2640 return vm_dict
2641
2642 def delete_vminstance(self, vm__vim_uuid, created_items=None):
2643 """Method poweroff and remove VM instance from vcloud director network.
2644
2645 Args:
2646 vm__vim_uuid: VM UUID
2647
2648 Returns:
2649 Returns the instance identifier
2650 """
2651
2652 self.logger.debug("Client requesting delete vm instance {} ".format(vm__vim_uuid))
2653
2654 _, vdc = self.get_vdc_details()
2655 vdc_obj = VDC(self.client, href=vdc.get('href'))
2656 if vdc_obj is None:
2657 self.logger.debug("delete_vminstance(): Failed to get a reference of VDC for a tenant {}".format(
2658 self.tenant_name))
2659 raise vimconn.vimconnException(
2660 "delete_vminstance(): Failed to get a reference of VDC for a tenant {}".format(self.tenant_name))
2661
2662 try:
2663 vapp_name = self.get_namebyvappid(vm__vim_uuid)
2664 if vapp_name is None:
2665 self.logger.debug("delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
2666 return -1, "delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid)
2667 self.logger.info("Deleting vApp {} and UUID {}".format(vapp_name, vm__vim_uuid))
2668 vapp_resource = vdc_obj.get_vapp(vapp_name)
2669 vapp = VApp(self.client, resource=vapp_resource)
2670
2671 # Delete vApp and wait for status change if task executed and vApp is None.
2672
2673 if vapp:
2674 if vapp_resource.get('deployed') == 'true':
2675 self.logger.info("Powering off vApp {}".format(vapp_name))
2676 # Power off vApp
2677 powered_off = False
2678 wait_time = 0
2679 while wait_time <= MAX_WAIT_TIME:
2680 power_off_task = vapp.power_off()
2681 result = self.client.get_task_monitor().wait_for_success(task=power_off_task)
2682
2683 if result.get('status') == 'success':
2684 powered_off = True
2685 break
2686 else:
2687 self.logger.info("Wait for vApp {} to power off".format(vapp_name))
2688 time.sleep(INTERVAL_TIME)
2689
2690 wait_time += INTERVAL_TIME
2691 if not powered_off:
2692 self.logger.debug(
2693 "delete_vminstance(): Failed to power off VM instance {} ".format(vm__vim_uuid))
2694 else:
2695 self.logger.info("delete_vminstance(): Powered off VM instance {} ".format(vm__vim_uuid))
2696
2697 # Undeploy vApp
2698 self.logger.info("Undeploy vApp {}".format(vapp_name))
2699 wait_time = 0
2700 undeployed = False
2701 while wait_time <= MAX_WAIT_TIME:
2702 vapp = VApp(self.client, resource=vapp_resource)
2703 if not vapp:
2704 self.logger.debug(
2705 "delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
2706 return -1, "delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid)
2707 undeploy_task = vapp.undeploy()
2708
2709 result = self.client.get_task_monitor().wait_for_success(task=undeploy_task)
2710 if result.get('status') == 'success':
2711 undeployed = True
2712 break
2713 else:
2714 self.logger.debug("Wait for vApp {} to undeploy".format(vapp_name))
2715 time.sleep(INTERVAL_TIME)
2716
2717 wait_time += INTERVAL_TIME
2718
2719 if not undeployed:
2720 self.logger.debug("delete_vminstance(): Failed to undeploy vApp {} ".format(vm__vim_uuid))
2721
2722 # delete vapp
2723 self.logger.info("Start deletion of vApp {} ".format(vapp_name))
2724
2725 if vapp is not None:
2726 wait_time = 0
2727 result = False
2728
2729 while wait_time <= MAX_WAIT_TIME:
2730 vapp = VApp(self.client, resource=vapp_resource)
2731 if not vapp:
2732 self.logger.debug(
2733 "delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
2734 return -1, "delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid)
2735
2736 delete_task = vdc_obj.delete_vapp(vapp.name, force=True)
2737
2738 result = self.client.get_task_monitor().wait_for_success(task=delete_task)
2739 if result.get('status') == 'success':
2740 break
2741 else:
2742 self.logger.debug("Wait for vApp {} to delete".format(vapp_name))
2743 time.sleep(INTERVAL_TIME)
2744
2745 wait_time += INTERVAL_TIME
2746
2747 if result is None:
2748 self.logger.debug("delete_vminstance(): Failed delete uuid {} ".format(vm__vim_uuid))
2749 else:
2750 self.logger.info("Deleted vm instance {} sccessfully".format(vm__vim_uuid))
2751 config_drive_catalog_name, config_drive_catalog_id = 'cfg_drv-' + vm__vim_uuid, None
2752 catalog_list = self.get_image_list()
2753 try:
2754 config_drive_catalog_id = [catalog_['id'] for catalog_ in catalog_list
2755 if catalog_['name'] == config_drive_catalog_name][0]
2756 except IndexError:
2757 pass
2758 if config_drive_catalog_id:
2759 self.logger.debug('delete_vminstance(): Found a config drive catalog {} matching '
2760 'vapp_name"{}". Deleting it.'.format(config_drive_catalog_id, vapp_name))
2761 self.delete_image(config_drive_catalog_id)
2762 return vm__vim_uuid
2763 except Exception:
2764 self.logger.debug(traceback.format_exc())
2765 raise vimconn.vimconnException("delete_vminstance(): Failed delete vm instance {}".format(vm__vim_uuid))
2766
2767 def refresh_vms_status(self, vm_list):
2768 """Get the status of the virtual machines and their interfaces/ports
2769 Params: the list of VM identifiers
2770 Returns a dictionary with:
2771 vm_id: #VIM id of this Virtual Machine
2772 status: #Mandatory. Text with one of:
2773 # DELETED (not found at vim)
2774 # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
2775 # OTHER (Vim reported other status not understood)
2776 # ERROR (VIM indicates an ERROR status)
2777 # ACTIVE, PAUSED, SUSPENDED, INACTIVE (not running),
2778 # CREATING (on building process), ERROR
2779 # ACTIVE:NoMgmtIP (Active but any of its interface has an IP address
2780 #
2781 error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
2782 vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
2783 interfaces:
2784 - vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
2785 mac_address: #Text format XX:XX:XX:XX:XX:XX
2786 vim_net_id: #network id where this interface is connected
2787 vim_interface_id: #interface/port VIM id
2788 ip_address: #null, or text with IPv4, IPv6 address
2789 """
2790
2791 self.logger.debug("Client requesting refresh vm status for {} ".format(vm_list))
2792
2793 org,vdc = self.get_vdc_details()
2794 if vdc is None:
2795 raise vimconn.vimconnException("Failed to get a reference of VDC for a tenant {}".format(self.tenant_name))
2796
2797 vms_dict = {}
2798 nsx_edge_list = []
2799 for vmuuid in vm_list:
2800 vapp_name = self.get_namebyvappid(vmuuid)
2801 if vapp_name is not None:
2802
2803 try:
2804 vm_pci_details = self.get_vm_pci_details(vmuuid)
2805 vdc_obj = VDC(self.client, href=vdc.get('href'))
2806 vapp_resource = vdc_obj.get_vapp(vapp_name)
2807 the_vapp = VApp(self.client, resource=vapp_resource)
2808
2809 vm_details = {}
2810 for vm in the_vapp.get_all_vms():
2811 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
2812 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
2813 response = self.perform_request(req_type='GET',
2814 url=vm.get('href'),
2815 headers=headers)
2816
2817 if response.status_code != 200:
2818 self.logger.error("refresh_vms_status : REST call {} failed reason : {}"\
2819 "status code : {}".format(vm.get('href'),
2820 response.text,
2821 response.status_code))
2822 raise vimconn.vimconnException("refresh_vms_status : Failed to get "\
2823 "VM details")
2824 xmlroot = XmlElementTree.fromstring(response.text)
2825
2826 result = response.text.replace("\n", " ")
2827 hdd_match = re.search('vcloud:capacity="(\d+)"\svcloud:storageProfileOverrideVmDefault=',result)
2828 if hdd_match:
2829 hdd_mb = hdd_match.group(1)
2830 vm_details['hdd_mb'] = int(hdd_mb) if hdd_mb else None
2831 cpus_match = re.search('<rasd:Description>Number of Virtual CPUs</.*?>(\d+)</rasd:VirtualQuantity>',result)
2832 if cpus_match:
2833 cpus = cpus_match.group(1)
2834 vm_details['cpus'] = int(cpus) if cpus else None
2835 memory_mb = re.search('<rasd:Description>Memory Size</.*?>(\d+)</rasd:VirtualQuantity>',result).group(1)
2836 vm_details['memory_mb'] = int(memory_mb) if memory_mb else None
2837 vm_details['status'] = vcdStatusCode2manoFormat[int(xmlroot.get('status'))]
2838 vm_details['id'] = xmlroot.get('id')
2839 vm_details['name'] = xmlroot.get('name')
2840 vm_info = [vm_details]
2841 if vm_pci_details:
2842 vm_info[0].update(vm_pci_details)
2843
2844 vm_dict = {'status': vcdStatusCode2manoFormat[int(vapp_resource.get('status'))],
2845 'error_msg': vcdStatusCode2manoFormat[int(vapp_resource.get('status'))],
2846 'vim_info': yaml.safe_dump(vm_info), 'interfaces': []}
2847
2848 # get networks
2849 vm_ip = None
2850 vm_mac = None
2851 networks = re.findall('<NetworkConnection needsCustomization=.*?</NetworkConnection>',result)
2852 for network in networks:
2853 mac_s = re.search('<MACAddress>(.*?)</MACAddress>',network)
2854 vm_mac = mac_s.group(1) if mac_s else None
2855 ip_s = re.search('<IpAddress>(.*?)</IpAddress>',network)
2856 vm_ip = ip_s.group(1) if ip_s else None
2857
2858 if vm_ip is None:
2859 if not nsx_edge_list:
2860 nsx_edge_list = self.get_edge_details()
2861 if nsx_edge_list is None:
2862 raise vimconn.vimconnException("refresh_vms_status:"\
2863 "Failed to get edge details from NSX Manager")
2864 if vm_mac is not None:
2865 vm_ip = self.get_ipaddr_from_NSXedge(nsx_edge_list, vm_mac)
2866
2867 net_s = re.search('network="(.*?)"',network)
2868 network_name = net_s.group(1) if net_s else None
2869
2870 vm_net_id = self.get_network_id_by_name(network_name)
2871 interface = {"mac_address": vm_mac,
2872 "vim_net_id": vm_net_id,
2873 "vim_interface_id": vm_net_id,
2874 "ip_address": vm_ip}
2875
2876 vm_dict["interfaces"].append(interface)
2877
2878 # add a vm to vm dict
2879 vms_dict.setdefault(vmuuid, vm_dict)
2880 self.logger.debug("refresh_vms_status : vm info {}".format(vm_dict))
2881 except Exception as exp:
2882 self.logger.debug("Error in response {}".format(exp))
2883 self.logger.debug(traceback.format_exc())
2884
2885 return vms_dict
2886
2887
2888 def get_edge_details(self):
2889 """Get the NSX edge list from NSX Manager
2890 Returns list of NSX edges
2891 """
2892 edge_list = []
2893 rheaders = {'Content-Type': 'application/xml'}
2894 nsx_api_url = '/api/4.0/edges'
2895
2896 self.logger.debug("Get edge details from NSX Manager {} {}".format(self.nsx_manager, nsx_api_url))
2897
2898 try:
2899 resp = requests.get(self.nsx_manager + nsx_api_url,
2900 auth = (self.nsx_user, self.nsx_password),
2901 verify = False, headers = rheaders)
2902 if resp.status_code == requests.codes.ok:
2903 paged_Edge_List = XmlElementTree.fromstring(resp.text)
2904 for edge_pages in paged_Edge_List:
2905 if edge_pages.tag == 'edgePage':
2906 for edge_summary in edge_pages:
2907 if edge_summary.tag == 'pagingInfo':
2908 for element in edge_summary:
2909 if element.tag == 'totalCount' and element.text == '0':
2910 raise vimconn.vimconnException("get_edge_details: No NSX edges details found: {}"
2911 .format(self.nsx_manager))
2912
2913 if edge_summary.tag == 'edgeSummary':
2914 for element in edge_summary:
2915 if element.tag == 'id':
2916 edge_list.append(element.text)
2917 else:
2918 raise vimconn.vimconnException("get_edge_details: No NSX edge details found: {}"
2919 .format(self.nsx_manager))
2920
2921 if not edge_list:
2922 raise vimconn.vimconnException("get_edge_details: "\
2923 "No NSX edge details found: {}"
2924 .format(self.nsx_manager))
2925 else:
2926 self.logger.debug("get_edge_details: Found NSX edges {}".format(edge_list))
2927 return edge_list
2928 else:
2929 self.logger.debug("get_edge_details: "
2930 "Failed to get NSX edge details from NSX Manager: {}"
2931 .format(resp.content))
2932 return None
2933
2934 except Exception as exp:
2935 self.logger.debug("get_edge_details: "\
2936 "Failed to get NSX edge details from NSX Manager: {}"
2937 .format(exp))
2938 raise vimconn.vimconnException("get_edge_details: "\
2939 "Failed to get NSX edge details from NSX Manager: {}"
2940 .format(exp))
2941
2942
2943 def get_ipaddr_from_NSXedge(self, nsx_edges, mac_address):
2944 """Get IP address details from NSX edges, using the MAC address
2945 PARAMS: nsx_edges : List of NSX edges
2946 mac_address : Find IP address corresponding to this MAC address
2947 Returns: IP address corrresponding to the provided MAC address
2948 """
2949
2950 ip_addr = None
2951 rheaders = {'Content-Type': 'application/xml'}
2952
2953 self.logger.debug("get_ipaddr_from_NSXedge: Finding IP addr from NSX edge")
2954
2955 try:
2956 for edge in nsx_edges:
2957 nsx_api_url = '/api/4.0/edges/'+ edge +'/dhcp/leaseInfo'
2958
2959 resp = requests.get(self.nsx_manager + nsx_api_url,
2960 auth = (self.nsx_user, self.nsx_password),
2961 verify = False, headers = rheaders)
2962
2963 if resp.status_code == requests.codes.ok:
2964 dhcp_leases = XmlElementTree.fromstring(resp.text)
2965 for child in dhcp_leases:
2966 if child.tag == 'dhcpLeaseInfo':
2967 dhcpLeaseInfo = child
2968 for leaseInfo in dhcpLeaseInfo:
2969 for elem in leaseInfo:
2970 if (elem.tag)=='macAddress':
2971 edge_mac_addr = elem.text
2972 if (elem.tag)=='ipAddress':
2973 ip_addr = elem.text
2974 if edge_mac_addr is not None:
2975 if edge_mac_addr == mac_address:
2976 self.logger.debug("Found ip addr {} for mac {} at NSX edge {}"
2977 .format(ip_addr, mac_address,edge))
2978 return ip_addr
2979 else:
2980 self.logger.debug("get_ipaddr_from_NSXedge: "\
2981 "Error occurred while getting DHCP lease info from NSX Manager: {}"
2982 .format(resp.content))
2983
2984 self.logger.debug("get_ipaddr_from_NSXedge: No IP addr found in any NSX edge")
2985 return None
2986
2987 except XmlElementTree.ParseError as Err:
2988 self.logger.debug("ParseError in response from NSX Manager {}".format(Err.message), exc_info=True)
2989
2990 def action_vminstance(self, vm__vim_uuid=None, action_dict=None, created_items={}):
2991 """Send and action over a VM instance from VIM
2992 Returns the vm_id if the action was successfully sent to the VIM"""
2993
2994 self.logger.debug("Received action for vm {} and action dict {}".format(vm__vim_uuid, action_dict))
2995 if vm__vim_uuid is None or action_dict is None:
2996 raise vimconn.vimconnException("Invalid request. VM id or action is None.")
2997
2998 org, vdc = self.get_vdc_details()
2999 if vdc is None:
3000 raise vimconn.vimconnException("Failed to get a reference of VDC for a tenant {}".format(self.tenant_name))
3001
3002 vapp_name = self.get_namebyvappid(vm__vim_uuid)
3003 if vapp_name is None:
3004 self.logger.debug("action_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
3005 raise vimconn.vimconnException("Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
3006 else:
3007 self.logger.info("Action_vminstance vApp {} and UUID {}".format(vapp_name, vm__vim_uuid))
3008
3009 try:
3010 vdc_obj = VDC(self.client, href=vdc.get('href'))
3011 vapp_resource = vdc_obj.get_vapp(vapp_name)
3012 vapp = VApp(self.client, resource=vapp_resource)
3013 if "start" in action_dict:
3014 self.logger.info("action_vminstance: Power on vApp: {}".format(vapp_name))
3015 poweron_task = self.power_on_vapp(vm__vim_uuid, vapp_name)
3016 result = self.client.get_task_monitor().wait_for_success(task=poweron_task)
3017 self.instance_actions_result("start", result, vapp_name)
3018 elif "rebuild" in action_dict:
3019 self.logger.info("action_vminstance: Rebuild vApp: {}".format(vapp_name))
3020 rebuild_task = vapp.deploy(power_on=True)
3021 result = self.client.get_task_monitor().wait_for_success(task=rebuild_task)
3022 self.instance_actions_result("rebuild", result, vapp_name)
3023 elif "pause" in action_dict:
3024 self.logger.info("action_vminstance: pause vApp: {}".format(vapp_name))
3025 pause_task = vapp.undeploy(action='suspend')
3026 result = self.client.get_task_monitor().wait_for_success(task=pause_task)
3027 self.instance_actions_result("pause", result, vapp_name)
3028 elif "resume" in action_dict:
3029 self.logger.info("action_vminstance: resume vApp: {}".format(vapp_name))
3030 poweron_task = self.power_on_vapp(vm__vim_uuid, vapp_name)
3031 result = self.client.get_task_monitor().wait_for_success(task=poweron_task)
3032 self.instance_actions_result("resume", result, vapp_name)
3033 elif "shutoff" in action_dict or "shutdown" in action_dict:
3034 action_name , value = list(action_dict.items())[0]
3035 self.logger.info("action_vminstance: {} vApp: {}".format(action_name, vapp_name))
3036 shutdown_task = vapp.shutdown()
3037 result = self.client.get_task_monitor().wait_for_success(task=shutdown_task)
3038 if action_name == "shutdown":
3039 self.instance_actions_result("shutdown", result, vapp_name)
3040 else:
3041 self.instance_actions_result("shutoff", result, vapp_name)
3042 elif "forceOff" in action_dict:
3043 result = vapp.undeploy(action='powerOff')
3044 self.instance_actions_result("forceOff", result, vapp_name)
3045 elif "reboot" in action_dict:
3046 self.logger.info("action_vminstance: reboot vApp: {}".format(vapp_name))
3047 reboot_task = vapp.reboot()
3048 self.client.get_task_monitor().wait_for_success(task=reboot_task)
3049 else:
3050 raise vimconn.vimconnException("action_vminstance: Invalid action {} or action is None.".format(action_dict))
3051 return vm__vim_uuid
3052 except Exception as exp :
3053 self.logger.debug("action_vminstance: Failed with Exception {}".format(exp))
3054 raise vimconn.vimconnException("action_vminstance: Failed with Exception {}".format(exp))
3055
3056 def instance_actions_result(self, action, result, vapp_name):
3057 if result.get('status') == 'success':
3058 self.logger.info("action_vminstance: Sucessfully {} the vApp: {}".format(action, vapp_name))
3059 else:
3060 self.logger.error("action_vminstance: Failed to {} vApp: {}".format(action, vapp_name))
3061
3062 def get_vminstance_console(self, vm_id, console_type="novnc"):
3063 """
3064 Get a console for the virtual machine
3065 Params:
3066 vm_id: uuid of the VM
3067 console_type, can be:
3068 "novnc" (by default), "xvpvnc" for VNC types,
3069 "rdp-html5" for RDP types, "spice-html5" for SPICE types
3070 Returns dict with the console parameters:
3071 protocol: ssh, ftp, http, https, ...
3072 server: usually ip address
3073 port: the http, ssh, ... port
3074 suffix: extra text, e.g. the http path and query string
3075 """
3076 console_dict = {}
3077
3078 if console_type==None or console_type=='novnc':
3079
3080 url_rest_call = "{}/api/vApp/vm-{}/screen/action/acquireMksTicket".format(self.url, vm_id)
3081
3082 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
3083 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
3084 response = self.perform_request(req_type='POST',
3085 url=url_rest_call,
3086 headers=headers)
3087
3088 if response.status_code == 403:
3089 response = self.retry_rest('GET', url_rest_call)
3090
3091 if response.status_code != 200:
3092 self.logger.error("REST call {} failed reason : {}"\
3093 "status code : {}".format(url_rest_call,
3094 response.text,
3095 response.status_code))
3096 raise vimconn.vimconnException("get_vminstance_console : Failed to get "\
3097 "VM Mks ticket details")
3098 s = re.search("<Host>(.*?)</Host>", response.text)
3099 console_dict['server'] = s.group(1) if s else None
3100 s1 = re.search("<Port>(\d+)</Port>", response.text)
3101 console_dict['port'] = s1.group(1) if s1 else None
3102
3103
3104 url_rest_call = "{}/api/vApp/vm-{}/screen/action/acquireTicket".format(self.url, vm_id)
3105
3106 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
3107 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
3108 response = self.perform_request(req_type='POST',
3109 url=url_rest_call,
3110 headers=headers)
3111
3112 if response.status_code == 403:
3113 response = self.retry_rest('GET', url_rest_call)
3114
3115 if response.status_code != 200:
3116 self.logger.error("REST call {} failed reason : {}"\
3117 "status code : {}".format(url_rest_call,
3118 response.text,
3119 response.status_code))
3120 raise vimconn.vimconnException("get_vminstance_console : Failed to get "\
3121 "VM console details")
3122 s = re.search(">.*?/(vm-\d+.*)</", response.text)
3123 console_dict['suffix'] = s.group(1) if s else None
3124 console_dict['protocol'] = "https"
3125
3126 return console_dict
3127
3128 # NOT USED METHODS in current version
3129
3130 def host_vim2gui(self, host, server_dict):
3131 """Transform host dictionary from VIM format to GUI format,
3132 and append to the server_dict
3133 """
3134 raise vimconn.vimconnNotImplemented("Should have implemented this")
3135
3136 def get_hosts_info(self):
3137 """Get the information of deployed hosts
3138 Returns the hosts content"""
3139 raise vimconn.vimconnNotImplemented("Should have implemented this")
3140
3141 def get_hosts(self, vim_tenant):
3142 """Get the hosts and deployed instances
3143 Returns the hosts content"""
3144 raise vimconn.vimconnNotImplemented("Should have implemented this")
3145
3146 def get_processor_rankings(self):
3147 """Get the processor rankings in the VIM database"""
3148 raise vimconn.vimconnNotImplemented("Should have implemented this")
3149
3150 def new_host(self, host_data):
3151 """Adds a new host to VIM"""
3152 '''Returns status code of the VIM response'''
3153 raise vimconn.vimconnNotImplemented("Should have implemented this")
3154
3155 def new_external_port(self, port_data):
3156 """Adds a external port to VIM"""
3157 '''Returns the port identifier'''
3158 raise vimconn.vimconnNotImplemented("Should have implemented this")
3159
3160 def new_external_network(self, net_name, net_type):
3161 """Adds a external network to VIM (shared)"""
3162 '''Returns the network identifier'''
3163 raise vimconn.vimconnNotImplemented("Should have implemented this")
3164
3165 def connect_port_network(self, port_id, network_id, admin=False):
3166 """Connects a external port to a network"""
3167 '''Returns status code of the VIM response'''
3168 raise vimconn.vimconnNotImplemented("Should have implemented this")
3169
3170 def new_vminstancefromJSON(self, vm_data):
3171 """Adds a VM instance to VIM"""
3172 '''Returns the instance identifier'''
3173 raise vimconn.vimconnNotImplemented("Should have implemented this")
3174
3175 def get_network_name_by_id(self, network_uuid=None):
3176 """Method gets vcloud director network named based on supplied uuid.
3177
3178 Args:
3179 network_uuid: network_id
3180
3181 Returns:
3182 The return network name.
3183 """
3184
3185 if not network_uuid:
3186 return None
3187
3188 try:
3189 org_dict = self.get_org(self.org_uuid)
3190 if 'networks' in org_dict:
3191 org_network_dict = org_dict['networks']
3192 for net_uuid in org_network_dict:
3193 if net_uuid == network_uuid:
3194 return org_network_dict[net_uuid]
3195 except:
3196 self.logger.debug("Exception in get_network_name_by_id")
3197 self.logger.debug(traceback.format_exc())
3198
3199 return None
3200
3201 def get_network_id_by_name(self, network_name=None):
3202 """Method gets vcloud director network uuid based on supplied name.
3203
3204 Args:
3205 network_name: network_name
3206 Returns:
3207 The return network uuid.
3208 network_uuid: network_id
3209 """
3210 if not network_name:
3211 self.logger.debug("get_network_id_by_name() : Network name is empty")
3212 return None
3213
3214 try:
3215 org_dict = self.get_org(self.org_uuid)
3216 if org_dict and 'networks' in org_dict:
3217 org_network_dict = org_dict['networks']
3218 for net_uuid, net_name in org_network_dict.items():
3219 if net_name == network_name:
3220 return net_uuid
3221
3222 except KeyError as exp:
3223 self.logger.debug("get_network_id_by_name() : KeyError- {} ".format(exp))
3224
3225 return None
3226
3227 def get_physical_network_by_name(self, physical_network_name):
3228 '''
3229 Methos returns uuid of physical network which passed
3230 Args:
3231 physical_network_name: physical network name
3232 Returns:
3233 UUID of physical_network_name
3234 '''
3235 try:
3236 client_as_admin = self.connect_as_admin()
3237 if not client_as_admin:
3238 raise vimconn.vimconnConnectionException("Failed to connect vCD.")
3239 url_list = [self.url, '/api/admin/vdc/', self.tenant_id]
3240 vm_list_rest_call = ''.join(url_list)
3241
3242 if client_as_admin._session:
3243 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
3244 'x-vcloud-authorization': client_as_admin._session.headers['x-vcloud-authorization']}
3245
3246 response = self.perform_request(req_type='GET',
3247 url=vm_list_rest_call,
3248 headers=headers)
3249
3250 provider_network = None
3251 available_network = None
3252 add_vdc_rest_url = None
3253
3254 if response.status_code != requests.codes.ok:
3255 self.logger.debug("REST API call {} failed. Return status code {}".format(vm_list_rest_call,
3256 response.status_code))
3257 return None
3258 else:
3259 try:
3260 vm_list_xmlroot = XmlElementTree.fromstring(response.text)
3261 for child in vm_list_xmlroot:
3262
3263 if child.tag.split("}")[1] == 'ProviderVdcReference':
3264 provider_network = child.attrib.get('href')
3265 # application/vnd.vmware.admin.providervdc+xml
3266 if child.tag.split("}")[1] == 'Link':
3267 if child.attrib.get('type') == 'application/vnd.vmware.vcloud.orgVdcNetwork+xml' \
3268 and child.attrib.get('rel') == 'add':
3269 add_vdc_rest_url = child.attrib.get('href')
3270 except:
3271 self.logger.debug("Failed parse respond for rest api call {}".format(vm_list_rest_call))
3272 self.logger.debug("Respond body {}".format(response.text))
3273 return None
3274
3275 # find pvdc provided available network
3276 response = self.perform_request(req_type='GET',
3277 url=provider_network,
3278 headers=headers)
3279
3280 if response.status_code != requests.codes.ok:
3281 self.logger.debug("REST API call {} failed. Return status code {}".format(vm_list_rest_call,
3282 response.status_code))
3283 return None
3284
3285 try:
3286 vm_list_xmlroot = XmlElementTree.fromstring(response.text)
3287 for child in vm_list_xmlroot.iter():
3288 if child.tag.split("}")[1] == 'AvailableNetworks':
3289 for networks in child.iter():
3290 if networks.attrib.get('href') is not None and networks.attrib.get('name') is not None:
3291 if networks.attrib.get('name') == physical_network_name:
3292 network_url = networks.attrib.get('href')
3293 available_network = network_url[network_url.rindex('/')+1:]
3294 break
3295 except Exception as e:
3296 return None
3297
3298 return available_network
3299 except Exception as e:
3300 self.logger.error("Error while getting physical network: {}".format(e))
3301
3302 def list_org_action(self):
3303 """
3304 Method leverages vCloud director and query for available organization for particular user
3305
3306 Args:
3307 vca - is active VCA connection.
3308 vdc_name - is a vdc name that will be used to query vms action
3309
3310 Returns:
3311 The return XML respond
3312 """
3313 url_list = [self.url, '/api/org']
3314 vm_list_rest_call = ''.join(url_list)
3315
3316 if self.client._session:
3317 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
3318 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
3319
3320 response = self.perform_request(req_type='GET',
3321 url=vm_list_rest_call,
3322 headers=headers)
3323
3324 if response.status_code == 403:
3325 response = self.retry_rest('GET', vm_list_rest_call)
3326
3327 if response.status_code == requests.codes.ok:
3328 return response.text
3329
3330 return None
3331
3332 def get_org_action(self, org_uuid=None):
3333 """
3334 Method leverages vCloud director and retrieve available object for organization.
3335
3336 Args:
3337 org_uuid - vCD organization uuid
3338 self.client - is active connection.
3339
3340 Returns:
3341 The return XML respond
3342 """
3343
3344 if org_uuid is None:
3345 return None
3346
3347 url_list = [self.url, '/api/org/', org_uuid]
3348 vm_list_rest_call = ''.join(url_list)
3349
3350 if self.client._session:
3351 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
3352 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
3353
3354 #response = requests.get(vm_list_rest_call, headers=headers, verify=False)
3355 response = self.perform_request(req_type='GET',
3356 url=vm_list_rest_call,
3357 headers=headers)
3358 if response.status_code == 403:
3359 response = self.retry_rest('GET', vm_list_rest_call)
3360
3361 if response.status_code == requests.codes.ok:
3362 return response.text
3363 return None
3364
3365 def get_org(self, org_uuid=None):
3366 """
3367 Method retrieves available organization in vCloud Director
3368
3369 Args:
3370 org_uuid - is a organization uuid.
3371
3372 Returns:
3373 The return dictionary with following key
3374 "network" - for network list under the org
3375 "catalogs" - for network list under the org
3376 "vdcs" - for vdc list under org
3377 """
3378
3379 org_dict = {}
3380
3381 if org_uuid is None:
3382 return org_dict
3383
3384 content = self.get_org_action(org_uuid=org_uuid)
3385 try:
3386 vdc_list = {}
3387 network_list = {}
3388 catalog_list = {}
3389 vm_list_xmlroot = XmlElementTree.fromstring(content)
3390 for child in vm_list_xmlroot:
3391 if child.attrib['type'] == 'application/vnd.vmware.vcloud.vdc+xml':
3392 vdc_list[child.attrib['href'].split("/")[-1:][0]] = child.attrib['name']
3393 org_dict['vdcs'] = vdc_list
3394 if child.attrib['type'] == 'application/vnd.vmware.vcloud.orgNetwork+xml':
3395 network_list[child.attrib['href'].split("/")[-1:][0]] = child.attrib['name']
3396 org_dict['networks'] = network_list
3397 if child.attrib['type'] == 'application/vnd.vmware.vcloud.catalog+xml':
3398 catalog_list[child.attrib['href'].split("/")[-1:][0]] = child.attrib['name']
3399 org_dict['catalogs'] = catalog_list
3400 except:
3401 pass
3402
3403 return org_dict
3404
3405 def get_org_list(self):
3406 """
3407 Method retrieves available organization in vCloud Director
3408
3409 Args:
3410 vca - is active VCA connection.
3411
3412 Returns:
3413 The return dictionary and key for each entry VDC UUID
3414 """
3415
3416 org_dict = {}
3417
3418 content = self.list_org_action()
3419 try:
3420 vm_list_xmlroot = XmlElementTree.fromstring(content)
3421 for vm_xml in vm_list_xmlroot:
3422 if vm_xml.tag.split("}")[1] == 'Org':
3423 org_uuid = vm_xml.attrib['href'].split('/')[-1:]
3424 org_dict[org_uuid[0]] = vm_xml.attrib['name']
3425 except:
3426 pass
3427
3428 return org_dict
3429
3430 def vms_view_action(self, vdc_name=None):
3431 """ Method leverages vCloud director vms query call
3432
3433 Args:
3434 vca - is active VCA connection.
3435 vdc_name - is a vdc name that will be used to query vms action
3436
3437 Returns:
3438 The return XML respond
3439 """
3440 vca = self.connect()
3441 if vdc_name is None:
3442 return None
3443
3444 url_list = [vca.host, '/api/vms/query']
3445 vm_list_rest_call = ''.join(url_list)
3446
3447 if not (not vca.vcloud_session or not vca.vcloud_session.organization):
3448 refs = [ref for ref in vca.vcloud_session.organization.Link if ref.name == vdc_name and
3449 ref.type_ == 'application/vnd.vmware.vcloud.vdc+xml']
3450 if len(refs) == 1:
3451 response = Http.get(url=vm_list_rest_call,
3452 headers=vca.vcloud_session.get_vcloud_headers(),
3453 verify=vca.verify,
3454 logger=vca.logger)
3455 if response.status_code == requests.codes.ok:
3456 return response.text
3457
3458 return None
3459
3460 def get_vapp_list(self, vdc_name=None):
3461 """
3462 Method retrieves vApp list deployed vCloud director and returns a dictionary
3463 contains a list of all vapp deployed for queried VDC.
3464 The key for a dictionary is vApp UUID
3465
3466
3467 Args:
3468 vca - is active VCA connection.
3469 vdc_name - is a vdc name that will be used to query vms action
3470
3471 Returns:
3472 The return dictionary and key for each entry vapp UUID
3473 """
3474
3475 vapp_dict = {}
3476 if vdc_name is None:
3477 return vapp_dict
3478
3479 content = self.vms_view_action(vdc_name=vdc_name)
3480 try:
3481 vm_list_xmlroot = XmlElementTree.fromstring(content)
3482 for vm_xml in vm_list_xmlroot:
3483 if vm_xml.tag.split("}")[1] == 'VMRecord':
3484 if vm_xml.attrib['isVAppTemplate'] == 'true':
3485 rawuuid = vm_xml.attrib['container'].split('/')[-1:]
3486 if 'vappTemplate-' in rawuuid[0]:
3487 # vm in format vappTemplate-e63d40e7-4ff5-4c6d-851f-96c1e4da86a5 we remove
3488 # vm and use raw UUID as key
3489 vapp_dict[rawuuid[0][13:]] = vm_xml.attrib
3490 except:
3491 pass
3492
3493 return vapp_dict
3494
3495 def get_vm_list(self, vdc_name=None):
3496 """
3497 Method retrieves VM's list deployed vCloud director. It returns a dictionary
3498 contains a list of all VM's deployed for queried VDC.
3499 The key for a dictionary is VM UUID
3500
3501
3502 Args:
3503 vca - is active VCA connection.
3504 vdc_name - is a vdc name that will be used to query vms action
3505
3506 Returns:
3507 The return dictionary and key for each entry vapp UUID
3508 """
3509 vm_dict = {}
3510
3511 if vdc_name is None:
3512 return vm_dict
3513
3514 content = self.vms_view_action(vdc_name=vdc_name)
3515 try:
3516 vm_list_xmlroot = XmlElementTree.fromstring(content)
3517 for vm_xml in vm_list_xmlroot:
3518 if vm_xml.tag.split("}")[1] == 'VMRecord':
3519 if vm_xml.attrib['isVAppTemplate'] == 'false':
3520 rawuuid = vm_xml.attrib['href'].split('/')[-1:]
3521 if 'vm-' in rawuuid[0]:
3522 # vm in format vm-e63d40e7-4ff5-4c6d-851f-96c1e4da86a5 we remove
3523 # vm and use raw UUID as key
3524 vm_dict[rawuuid[0][3:]] = vm_xml.attrib
3525 except:
3526 pass
3527
3528 return vm_dict
3529
3530 def get_vapp(self, vdc_name=None, vapp_name=None, isuuid=False):
3531 """
3532 Method retrieves VM deployed vCloud director. It returns VM attribute as dictionary
3533 contains a list of all VM's deployed for queried VDC.
3534 The key for a dictionary is VM UUID
3535
3536
3537 Args:
3538 vca - is active VCA connection.
3539 vdc_name - is a vdc name that will be used to query vms action
3540
3541 Returns:
3542 The return dictionary and key for each entry vapp UUID
3543 """
3544 vm_dict = {}
3545 vca = self.connect()
3546 if not vca:
3547 raise vimconn.vimconnConnectionException("self.connect() is failed")
3548
3549 if vdc_name is None:
3550 return vm_dict
3551
3552 content = self.vms_view_action(vdc_name=vdc_name)
3553 try:
3554 vm_list_xmlroot = XmlElementTree.fromstring(content)
3555 for vm_xml in vm_list_xmlroot:
3556 if vm_xml.tag.split("}")[1] == 'VMRecord' and vm_xml.attrib['isVAppTemplate'] == 'false':
3557 # lookup done by UUID
3558 if isuuid:
3559 if vapp_name in vm_xml.attrib['container']:
3560 rawuuid = vm_xml.attrib['href'].split('/')[-1:]
3561 if 'vm-' in rawuuid[0]:
3562 vm_dict[rawuuid[0][3:]] = vm_xml.attrib
3563 break
3564 # lookup done by Name
3565 else:
3566 if vapp_name in vm_xml.attrib['name']:
3567 rawuuid = vm_xml.attrib['href'].split('/')[-1:]
3568 if 'vm-' in rawuuid[0]:
3569 vm_dict[rawuuid[0][3:]] = vm_xml.attrib
3570 break
3571 except:
3572 pass
3573
3574 return vm_dict
3575
3576 def get_network_action(self, network_uuid=None):
3577 """
3578 Method leverages vCloud director and query network based on network uuid
3579
3580 Args:
3581 vca - is active VCA connection.
3582 network_uuid - is a network uuid
3583
3584 Returns:
3585 The return XML respond
3586 """
3587
3588 if network_uuid is None:
3589 return None
3590
3591 url_list = [self.url, '/api/network/', network_uuid]
3592 vm_list_rest_call = ''.join(url_list)
3593
3594 if self.client._session:
3595 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
3596 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
3597
3598 response = self.perform_request(req_type='GET',
3599 url=vm_list_rest_call,
3600 headers=headers)
3601 #Retry login if session expired & retry sending request
3602 if response.status_code == 403:
3603 response = self.retry_rest('GET', vm_list_rest_call)
3604
3605 if response.status_code == requests.codes.ok:
3606 return response.text
3607
3608 return None
3609
3610 def get_vcd_network(self, network_uuid=None):
3611 """
3612 Method retrieves available network from vCloud Director
3613
3614 Args:
3615 network_uuid - is VCD network UUID
3616
3617 Each element serialized as key : value pair
3618
3619 Following keys available for access. network_configuration['Gateway'}
3620 <Configuration>
3621 <IpScopes>
3622 <IpScope>
3623 <IsInherited>true</IsInherited>
3624 <Gateway>172.16.252.100</Gateway>
3625 <Netmask>255.255.255.0</Netmask>
3626 <Dns1>172.16.254.201</Dns1>
3627 <Dns2>172.16.254.202</Dns2>
3628 <DnsSuffix>vmwarelab.edu</DnsSuffix>
3629 <IsEnabled>true</IsEnabled>
3630 <IpRanges>
3631 <IpRange>
3632 <StartAddress>172.16.252.1</StartAddress>
3633 <EndAddress>172.16.252.99</EndAddress>
3634 </IpRange>
3635 </IpRanges>
3636 </IpScope>
3637 </IpScopes>
3638 <FenceMode>bridged</FenceMode>
3639
3640 Returns:
3641 The return dictionary and key for each entry vapp UUID
3642 """
3643
3644 network_configuration = {}
3645 if network_uuid is None:
3646 return network_uuid
3647
3648 try:
3649 content = self.get_network_action(network_uuid=network_uuid)
3650 if content is not None:
3651 vm_list_xmlroot = XmlElementTree.fromstring(content)
3652
3653 network_configuration['status'] = vm_list_xmlroot.get("status")
3654 network_configuration['name'] = vm_list_xmlroot.get("name")
3655 network_configuration['uuid'] = vm_list_xmlroot.get("id").split(":")[3]
3656
3657 for child in vm_list_xmlroot:
3658 if child.tag.split("}")[1] == 'IsShared':
3659 network_configuration['isShared'] = child.text.strip()
3660 if child.tag.split("}")[1] == 'Configuration':
3661 for configuration in child.iter():
3662 tagKey = configuration.tag.split("}")[1].strip()
3663 if tagKey != "":
3664 network_configuration[tagKey] = configuration.text.strip()
3665 except Exception as exp :
3666 self.logger.debug("get_vcd_network: Failed with Exception {}".format(exp))
3667 raise vimconn.vimconnException("get_vcd_network: Failed with Exception {}".format(exp))
3668
3669 return network_configuration
3670
3671 def delete_network_action(self, network_uuid=None):
3672 """
3673 Method delete given network from vCloud director
3674
3675 Args:
3676 network_uuid - is a network uuid that client wish to delete
3677
3678 Returns:
3679 The return None or XML respond or false
3680 """
3681 client = self.connect_as_admin()
3682 if not client:
3683 raise vimconn.vimconnConnectionException("Failed to connect vCD as admin")
3684 if network_uuid is None:
3685 return False
3686
3687 url_list = [self.url, '/api/admin/network/', network_uuid]
3688 vm_list_rest_call = ''.join(url_list)
3689
3690 if client._session:
3691 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
3692 'x-vcloud-authorization': client._session.headers['x-vcloud-authorization']}
3693 response = self.perform_request(req_type='DELETE',
3694 url=vm_list_rest_call,
3695 headers=headers)
3696 if response.status_code == 202:
3697 return True
3698
3699 return False
3700
3701 def create_network(self, network_name=None, net_type='bridge', parent_network_uuid=None,
3702 ip_profile=None, isshared='true'):
3703 """
3704 Method create network in vCloud director
3705
3706 Args:
3707 network_name - is network name to be created.
3708 net_type - can be 'bridge','data','ptp','mgmt'.
3709 ip_profile is a dict containing the IP parameters of the network
3710 isshared - is a boolean
3711 parent_network_uuid - is parent provider vdc network that will be used for mapping.
3712 It optional attribute. by default if no parent network indicate the first available will be used.
3713
3714 Returns:
3715 The return network uuid or return None
3716 """
3717
3718 new_network_name = [network_name, '-', str(uuid.uuid4())]
3719 content = self.create_network_rest(network_name=''.join(new_network_name),
3720 ip_profile=ip_profile,
3721 net_type=net_type,
3722 parent_network_uuid=parent_network_uuid,
3723 isshared=isshared)
3724 if content is None:
3725 self.logger.debug("Failed create network {}.".format(network_name))
3726 return None
3727
3728 try:
3729 vm_list_xmlroot = XmlElementTree.fromstring(content)
3730 vcd_uuid = vm_list_xmlroot.get('id').split(":")
3731 if len(vcd_uuid) == 4:
3732 self.logger.info("Created new network name: {} uuid: {}".format(network_name, vcd_uuid[3]))
3733 return vcd_uuid[3]
3734 except:
3735 self.logger.debug("Failed create network {}".format(network_name))
3736 return None
3737
3738 def create_network_rest(self, network_name=None, net_type='bridge', parent_network_uuid=None,
3739 ip_profile=None, isshared='true'):
3740 """
3741 Method create network in vCloud director
3742
3743 Args:
3744 network_name - is network name to be created.
3745 net_type - can be 'bridge','data','ptp','mgmt'.
3746 ip_profile is a dict containing the IP parameters of the network
3747 isshared - is a boolean
3748 parent_network_uuid - is parent provider vdc network that will be used for mapping.
3749 It optional attribute. by default if no parent network indicate the first available will be used.
3750
3751 Returns:
3752 The return network uuid or return None
3753 """
3754 client_as_admin = self.connect_as_admin()
3755 if not client_as_admin:
3756 raise vimconn.vimconnConnectionException("Failed to connect vCD.")
3757 if network_name is None:
3758 return None
3759
3760 url_list = [self.url, '/api/admin/vdc/', self.tenant_id]
3761 vm_list_rest_call = ''.join(url_list)
3762
3763 if client_as_admin._session:
3764 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
3765 'x-vcloud-authorization': client_as_admin._session.headers['x-vcloud-authorization']}
3766
3767 response = self.perform_request(req_type='GET',
3768 url=vm_list_rest_call,
3769 headers=headers)
3770
3771 provider_network = None
3772 available_networks = None
3773 add_vdc_rest_url = None
3774
3775 if response.status_code != requests.codes.ok:
3776 self.logger.debug("REST API call {} failed. Return status code {}".format(vm_list_rest_call,
3777 response.status_code))
3778 return None
3779 else:
3780 try:
3781 vm_list_xmlroot = XmlElementTree.fromstring(response.text)
3782 for child in vm_list_xmlroot:
3783
3784 if child.tag.split("}")[1] == 'ProviderVdcReference':
3785 provider_network = child.attrib.get('href')
3786 # application/vnd.vmware.admin.providervdc+xml
3787 if child.tag.split("}")[1] == 'Link':
3788 if child.attrib.get('type') == 'application/vnd.vmware.vcloud.orgVdcNetwork+xml' \
3789 and child.attrib.get('rel') == 'add':
3790 add_vdc_rest_url = child.attrib.get('href')
3791 except:
3792 self.logger.debug("Failed parse respond for rest api call {}".format(vm_list_rest_call))
3793 self.logger.debug("Respond body {}".format(response.text))
3794 return None
3795
3796 # find pvdc provided available network
3797 response = self.perform_request(req_type='GET',
3798 url=provider_network,
3799 headers=headers)
3800
3801 if response.status_code != requests.codes.ok:
3802 self.logger.debug("REST API call {} failed. Return status code {}".format(vm_list_rest_call,
3803 response.status_code))
3804 return None
3805
3806 if parent_network_uuid is None:
3807 try:
3808 vm_list_xmlroot = XmlElementTree.fromstring(response.text)
3809 for child in vm_list_xmlroot.iter():
3810 if child.tag.split("}")[1] == 'AvailableNetworks':
3811 for networks in child.iter():
3812 # application/vnd.vmware.admin.network+xml
3813 if networks.attrib.get('href') is not None:
3814 available_networks = networks.attrib.get('href')
3815 break
3816 except:
3817 return None
3818
3819 try:
3820 #Configure IP profile of the network
3821 ip_profile = ip_profile if ip_profile is not None else DEFAULT_IP_PROFILE
3822
3823 if 'subnet_address' not in ip_profile or ip_profile['subnet_address'] is None:
3824 subnet_rand = random.randint(0, 255)
3825 ip_base = "192.168.{}.".format(subnet_rand)
3826 ip_profile['subnet_address'] = ip_base + "0/24"
3827 else:
3828 ip_base = ip_profile['subnet_address'].rsplit('.',1)[0] + '.'
3829
3830 if 'gateway_address' not in ip_profile or ip_profile['gateway_address'] is None:
3831 ip_profile['gateway_address']=ip_base + "1"
3832 if 'dhcp_count' not in ip_profile or ip_profile['dhcp_count'] is None:
3833 ip_profile['dhcp_count']=DEFAULT_IP_PROFILE['dhcp_count']
3834 if 'dhcp_enabled' not in ip_profile or ip_profile['dhcp_enabled'] is None:
3835 ip_profile['dhcp_enabled']=DEFAULT_IP_PROFILE['dhcp_enabled']
3836 if 'dhcp_start_address' not in ip_profile or ip_profile['dhcp_start_address'] is None:
3837 ip_profile['dhcp_start_address']=ip_base + "3"
3838 if 'ip_version' not in ip_profile or ip_profile['ip_version'] is None:
3839 ip_profile['ip_version']=DEFAULT_IP_PROFILE['ip_version']
3840 if 'dns_address' not in ip_profile or ip_profile['dns_address'] is None:
3841 ip_profile['dns_address']=ip_base + "2"
3842
3843 gateway_address=ip_profile['gateway_address']
3844 dhcp_count=int(ip_profile['dhcp_count'])
3845 subnet_address=self.convert_cidr_to_netmask(ip_profile['subnet_address'])
3846
3847 if ip_profile['dhcp_enabled']==True:
3848 dhcp_enabled='true'
3849 else:
3850 dhcp_enabled='false'
3851 dhcp_start_address=ip_profile['dhcp_start_address']
3852
3853 #derive dhcp_end_address from dhcp_start_address & dhcp_count
3854 end_ip_int = int(netaddr.IPAddress(dhcp_start_address))
3855 end_ip_int += dhcp_count - 1
3856 dhcp_end_address = str(netaddr.IPAddress(end_ip_int))
3857
3858 ip_version=ip_profile['ip_version']
3859 dns_address=ip_profile['dns_address']
3860 except KeyError as exp:
3861 self.logger.debug("Create Network REST: Key error {}".format(exp))
3862 raise vimconn.vimconnException("Create Network REST: Key error{}".format(exp))
3863
3864 # either use client provided UUID or search for a first available
3865 # if both are not defined we return none
3866 if parent_network_uuid is not None:
3867 provider_network = None
3868 available_networks = None
3869 add_vdc_rest_url = None
3870
3871 url_list = [self.url, '/api/admin/vdc/', self.tenant_id, '/networks']
3872 add_vdc_rest_url = ''.join(url_list)
3873
3874 url_list = [self.url, '/api/admin/network/', parent_network_uuid]
3875 available_networks = ''.join(url_list)
3876
3877 #Creating all networks as Direct Org VDC type networks.
3878 #Unused in case of Underlay (data/ptp) network interface.
3879 fence_mode="isolated"
3880 is_inherited='false'
3881 dns_list = dns_address.split(";")
3882 dns1 = dns_list[0]
3883 dns2_text = ""
3884 if len(dns_list) >= 2:
3885 dns2_text = "\n <Dns2>{}</Dns2>\n".format(dns_list[1])
3886 if net_type == "isolated":
3887 fence_mode="isolated"
3888 data = """ <OrgVdcNetwork name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5">
3889 <Description>Openmano created</Description>
3890 <Configuration>
3891 <IpScopes>
3892 <IpScope>
3893 <IsInherited>{1:s}</IsInherited>
3894 <Gateway>{2:s}</Gateway>
3895 <Netmask>{3:s}</Netmask>
3896 <Dns1>{4:s}</Dns1>{5:s}
3897 <IsEnabled>{6:s}</IsEnabled>
3898 <IpRanges>
3899 <IpRange>
3900 <StartAddress>{7:s}</StartAddress>
3901 <EndAddress>{8:s}</EndAddress>
3902 </IpRange>
3903 </IpRanges>
3904 </IpScope>
3905 </IpScopes>
3906 <FenceMode>{9:s}</FenceMode>
3907 </Configuration>
3908 <IsShared>{10:s}</IsShared>
3909 </OrgVdcNetwork> """.format(escape(network_name), is_inherited, gateway_address,
3910 subnet_address, dns1, dns2_text, dhcp_enabled,
3911 dhcp_start_address, dhcp_end_address,
3912 fence_mode, isshared)
3913 else:
3914 fence_mode = "bridged"
3915 data = """ <OrgVdcNetwork name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5">
3916 <Description>Openmano created</Description>
3917 <Configuration>
3918 <IpScopes>
3919 <IpScope>
3920 <IsInherited>{1:s}</IsInherited>
3921 <Gateway>{2:s}</Gateway>
3922 <Netmask>{3:s}</Netmask>
3923 <Dns1>{4:s}</Dns1>{5:s}
3924 <IsEnabled>{6:s}</IsEnabled>
3925 <IpRanges>
3926 <IpRange>
3927 <StartAddress>{7:s}</StartAddress>
3928 <EndAddress>{8:s}</EndAddress>
3929 </IpRange>
3930 </IpRanges>
3931 </IpScope>
3932 </IpScopes>
3933 <ParentNetwork href="{9:s}"/>
3934 <FenceMode>{10:s}</FenceMode>
3935 </Configuration>
3936 <IsShared>{11:s}</IsShared>
3937 </OrgVdcNetwork> """.format(escape(network_name), is_inherited, gateway_address,
3938 subnet_address, dns1, dns2_text, dhcp_enabled,
3939 dhcp_start_address, dhcp_end_address, available_networks,
3940 fence_mode, isshared)
3941
3942 headers['Content-Type'] = 'application/vnd.vmware.vcloud.orgVdcNetwork+xml'
3943 try:
3944 response = self.perform_request(req_type='POST',
3945 url=add_vdc_rest_url,
3946 headers=headers,
3947 data=data)
3948
3949 if response.status_code != 201:
3950 self.logger.debug("Create Network POST REST API call failed. Return status code {}, response.text: {}"
3951 .format(response.status_code, response.text))
3952 else:
3953 network_task = self.get_task_from_response(response.text)
3954 self.logger.debug("Create Network REST : Waiting for Network creation complete")
3955 time.sleep(5)
3956 result = self.client.get_task_monitor().wait_for_success(task=network_task)
3957 if result.get('status') == 'success':
3958 return response.text
3959 else:
3960 self.logger.debug("create_network_rest task failed. Network Create response : {}"
3961 .format(response.text))
3962 except Exception as exp:
3963 self.logger.debug("create_network_rest : Exception : {} ".format(exp))
3964
3965 return None
3966
3967 def convert_cidr_to_netmask(self, cidr_ip=None):
3968 """
3969 Method sets convert CIDR netmask address to normal IP format
3970 Args:
3971 cidr_ip : CIDR IP address
3972 Returns:
3973 netmask : Converted netmask
3974 """
3975 if cidr_ip is not None:
3976 if '/' in cidr_ip:
3977 network, net_bits = cidr_ip.split('/')
3978 netmask = socket.inet_ntoa(struct.pack(">I", (0xffffffff << (32 - int(net_bits))) & 0xffffffff))
3979 else:
3980 netmask = cidr_ip
3981 return netmask
3982 return None
3983
3984 def get_provider_rest(self, vca=None):
3985 """
3986 Method gets provider vdc view from vcloud director
3987
3988 Args:
3989 network_name - is network name to be created.
3990 parent_network_uuid - is parent provider vdc network that will be used for mapping.
3991 It optional attribute. by default if no parent network indicate the first available will be used.
3992
3993 Returns:
3994 The return xml content of respond or None
3995 """
3996
3997 url_list = [self.url, '/api/admin']
3998 if vca:
3999 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
4000 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
4001 response = self.perform_request(req_type='GET',
4002 url=''.join(url_list),
4003 headers=headers)
4004
4005 if response.status_code == requests.codes.ok:
4006 return response.text
4007 return None
4008
4009 def create_vdc(self, vdc_name=None):
4010
4011 vdc_dict = {}
4012
4013 xml_content = self.create_vdc_from_tmpl_rest(vdc_name=vdc_name)
4014 if xml_content is not None:
4015 try:
4016 task_resp_xmlroot = XmlElementTree.fromstring(xml_content)
4017 for child in task_resp_xmlroot:
4018 if child.tag.split("}")[1] == 'Owner':
4019 vdc_id = child.attrib.get('href').split("/")[-1]
4020 vdc_dict[vdc_id] = task_resp_xmlroot.get('href')
4021 return vdc_dict
4022 except:
4023 self.logger.debug("Respond body {}".format(xml_content))
4024
4025 return None
4026
4027 def create_vdc_from_tmpl_rest(self, vdc_name=None):
4028 """
4029 Method create vdc in vCloud director based on VDC template.
4030 it uses pre-defined template.
4031
4032 Args:
4033 vdc_name - name of a new vdc.
4034
4035 Returns:
4036 The return xml content of respond or None
4037 """
4038 # pre-requesite atleast one vdc template should be available in vCD
4039 self.logger.info("Creating new vdc {}".format(vdc_name))
4040 vca = self.connect_as_admin()
4041 if not vca:
4042 raise vimconn.vimconnConnectionException("Failed to connect vCD")
4043 if vdc_name is None:
4044 return None
4045
4046 url_list = [self.url, '/api/vdcTemplates']
4047 vm_list_rest_call = ''.join(url_list)
4048
4049 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
4050 'x-vcloud-authorization': vca._session.headers['x-vcloud-authorization']}
4051 response = self.perform_request(req_type='GET',
4052 url=vm_list_rest_call,
4053 headers=headers)
4054
4055 # container url to a template
4056 vdc_template_ref = None
4057 try:
4058 vm_list_xmlroot = XmlElementTree.fromstring(response.text)
4059 for child in vm_list_xmlroot:
4060 # application/vnd.vmware.admin.providervdc+xml
4061 # we need find a template from witch we instantiate VDC
4062 if child.tag.split("}")[1] == 'VdcTemplate':
4063 if child.attrib.get('type') == 'application/vnd.vmware.admin.vdcTemplate+xml':
4064 vdc_template_ref = child.attrib.get('href')
4065 except:
4066 self.logger.debug("Failed parse respond for rest api call {}".format(vm_list_rest_call))
4067 self.logger.debug("Respond body {}".format(response.text))
4068 return None
4069
4070 # if we didn't found required pre defined template we return None
4071 if vdc_template_ref is None:
4072 return None
4073
4074 try:
4075 # instantiate vdc
4076 url_list = [self.url, '/api/org/', self.org_uuid, '/action/instantiate']
4077 vm_list_rest_call = ''.join(url_list)
4078 data = """<InstantiateVdcTemplateParams name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5">
4079 <Source href="{1:s}"></Source>
4080 <Description>opnemano</Description>
4081 </InstantiateVdcTemplateParams>""".format(vdc_name, vdc_template_ref)
4082
4083 headers['Content-Type'] = 'application/vnd.vmware.vcloud.instantiateVdcTemplateParams+xml'
4084
4085 response = self.perform_request(req_type='POST',
4086 url=vm_list_rest_call,
4087 headers=headers,
4088 data=data)
4089
4090 vdc_task = self.get_task_from_response(response.text)
4091 self.client.get_task_monitor().wait_for_success(task=vdc_task)
4092
4093 # if we all ok we respond with content otherwise by default None
4094 if response.status_code >= 200 and response.status_code < 300:
4095 return response.text
4096 return None
4097 except:
4098 self.logger.debug("Failed parse respond for rest api call {}".format(vm_list_rest_call))
4099 self.logger.debug("Respond body {}".format(response.text))
4100
4101 return None
4102
4103 def create_vdc_rest(self, vdc_name=None):
4104 """
4105 Method create network in vCloud director
4106
4107 Args:
4108 vdc_name - vdc name to be created
4109 Returns:
4110 The return response
4111 """
4112
4113 self.logger.info("Creating new vdc {}".format(vdc_name))
4114
4115 vca = self.connect_as_admin()
4116 if not vca:
4117 raise vimconn.vimconnConnectionException("Failed to connect vCD")
4118 if vdc_name is None:
4119 return None
4120
4121 url_list = [self.url, '/api/admin/org/', self.org_uuid]
4122 vm_list_rest_call = ''.join(url_list)
4123
4124 if vca._session:
4125 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
4126 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
4127 response = self.perform_request(req_type='GET',
4128 url=vm_list_rest_call,
4129 headers=headers)
4130
4131 provider_vdc_ref = None
4132 add_vdc_rest_url = None
4133 available_networks = None
4134
4135 if response.status_code != requests.codes.ok:
4136 self.logger.debug("REST API call {} failed. Return status code {}".format(vm_list_rest_call,
4137 response.status_code))
4138 return None
4139 else:
4140 try:
4141 vm_list_xmlroot = XmlElementTree.fromstring(response.text)
4142 for child in vm_list_xmlroot:
4143 # application/vnd.vmware.admin.providervdc+xml
4144 if child.tag.split("}")[1] == 'Link':
4145 if child.attrib.get('type') == 'application/vnd.vmware.admin.createVdcParams+xml' \
4146 and child.attrib.get('rel') == 'add':
4147 add_vdc_rest_url = child.attrib.get('href')
4148 except:
4149 self.logger.debug("Failed parse respond for rest api call {}".format(vm_list_rest_call))
4150 self.logger.debug("Respond body {}".format(response.text))
4151 return None
4152
4153 response = self.get_provider_rest(vca=vca)
4154 try:
4155 vm_list_xmlroot = XmlElementTree.fromstring(response)
4156 for child in vm_list_xmlroot:
4157 if child.tag.split("}")[1] == 'ProviderVdcReferences':
4158 for sub_child in child:
4159 provider_vdc_ref = sub_child.attrib.get('href')
4160 except:
4161 self.logger.debug("Failed parse respond for rest api call {}".format(vm_list_rest_call))
4162 self.logger.debug("Respond body {}".format(response))
4163 return None
4164
4165 if add_vdc_rest_url is not None and provider_vdc_ref is not None:
4166 data = """ <CreateVdcParams name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5"><Description>{1:s}</Description>
4167 <AllocationModel>ReservationPool</AllocationModel>
4168 <ComputeCapacity><Cpu><Units>MHz</Units><Allocated>2048</Allocated><Limit>2048</Limit></Cpu>
4169 <Memory><Units>MB</Units><Allocated>2048</Allocated><Limit>2048</Limit></Memory>
4170 </ComputeCapacity><NicQuota>0</NicQuota><NetworkQuota>100</NetworkQuota>
4171 <VdcStorageProfile><Enabled>true</Enabled><Units>MB</Units><Limit>20480</Limit><Default>true</Default></VdcStorageProfile>
4172 <ProviderVdcReference
4173 name="Main Provider"
4174 href="{2:s}" />
4175 <UsesFastProvisioning>true</UsesFastProvisioning></CreateVdcParams>""".format(escape(vdc_name),
4176 escape(vdc_name),
4177 provider_vdc_ref)
4178
4179 headers['Content-Type'] = 'application/vnd.vmware.admin.createVdcParams+xml'
4180
4181 response = self.perform_request(req_type='POST',
4182 url=add_vdc_rest_url,
4183 headers=headers,
4184 data=data)
4185
4186 # if we all ok we respond with content otherwise by default None
4187 if response.status_code == 201:
4188 return response.text
4189 return None
4190
4191 def get_vapp_details_rest(self, vapp_uuid=None, need_admin_access=False):
4192 """
4193 Method retrieve vapp detail from vCloud director
4194
4195 Args:
4196 vapp_uuid - is vapp identifier.
4197
4198 Returns:
4199 The return network uuid or return None
4200 """
4201
4202 parsed_respond = {}
4203 vca = None
4204
4205 if need_admin_access:
4206 vca = self.connect_as_admin()
4207 else:
4208 vca = self.client
4209
4210 if not vca:
4211 raise vimconn.vimconnConnectionException("Failed to connect vCD")
4212 if vapp_uuid is None:
4213 return None
4214
4215 url_list = [self.url, '/api/vApp/vapp-', vapp_uuid]
4216 get_vapp_restcall = ''.join(url_list)
4217
4218 if vca._session:
4219 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
4220 'x-vcloud-authorization': vca._session.headers['x-vcloud-authorization']}
4221 response = self.perform_request(req_type='GET',
4222 url=get_vapp_restcall,
4223 headers=headers)
4224
4225 if response.status_code == 403:
4226 if need_admin_access == False:
4227 response = self.retry_rest('GET', get_vapp_restcall)
4228
4229 if response.status_code != requests.codes.ok:
4230 self.logger.debug("REST API call {} failed. Return status code {}".format(get_vapp_restcall,
4231 response.status_code))
4232 return parsed_respond
4233
4234 try:
4235 xmlroot_respond = XmlElementTree.fromstring(response.text)
4236 parsed_respond['ovfDescriptorUploaded'] = xmlroot_respond.attrib['ovfDescriptorUploaded']
4237
4238 namespaces = {"vssd":"http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData" ,
4239 'ovf': 'http://schemas.dmtf.org/ovf/envelope/1',
4240 'vmw': 'http://www.vmware.com/schema/ovf',
4241 'vm': 'http://www.vmware.com/vcloud/v1.5',
4242 'rasd':"http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData",
4243 "vmext":"http://www.vmware.com/vcloud/extension/v1.5",
4244 "xmlns":"http://www.vmware.com/vcloud/v1.5"
4245 }
4246
4247 created_section = xmlroot_respond.find('vm:DateCreated', namespaces)
4248 if created_section is not None:
4249 parsed_respond['created'] = created_section.text
4250
4251 network_section = xmlroot_respond.find('vm:NetworkConfigSection/vm:NetworkConfig', namespaces)
4252 if network_section is not None and 'networkName' in network_section.attrib:
4253 parsed_respond['networkname'] = network_section.attrib['networkName']
4254
4255 ipscopes_section = \
4256 xmlroot_respond.find('vm:NetworkConfigSection/vm:NetworkConfig/vm:Configuration/vm:IpScopes',
4257 namespaces)
4258 if ipscopes_section is not None:
4259 for ipscope in ipscopes_section:
4260 for scope in ipscope:
4261 tag_key = scope.tag.split("}")[1]
4262 if tag_key == 'IpRanges':
4263 ip_ranges = scope.getchildren()
4264 for ipblock in ip_ranges:
4265 for block in ipblock:
4266 parsed_respond[block.tag.split("}")[1]] = block.text
4267 else:
4268 parsed_respond[tag_key] = scope.text
4269
4270 # parse children section for other attrib
4271 children_section = xmlroot_respond.find('vm:Children/', namespaces)
4272 if children_section is not None:
4273 parsed_respond['name'] = children_section.attrib['name']
4274 parsed_respond['nestedHypervisorEnabled'] = children_section.attrib['nestedHypervisorEnabled'] \
4275 if "nestedHypervisorEnabled" in children_section.attrib else None
4276 parsed_respond['deployed'] = children_section.attrib['deployed']
4277 parsed_respond['status'] = children_section.attrib['status']
4278 parsed_respond['vmuuid'] = children_section.attrib['id'].split(":")[-1]
4279 network_adapter = children_section.find('vm:NetworkConnectionSection', namespaces)
4280 nic_list = []
4281 for adapters in network_adapter:
4282 adapter_key = adapters.tag.split("}")[1]
4283 if adapter_key == 'PrimaryNetworkConnectionIndex':
4284 parsed_respond['primarynetwork'] = adapters.text
4285 if adapter_key == 'NetworkConnection':
4286 vnic = {}
4287 if 'network' in adapters.attrib:
4288 vnic['network'] = adapters.attrib['network']
4289 for adapter in adapters:
4290 setting_key = adapter.tag.split("}")[1]
4291 vnic[setting_key] = adapter.text
4292 nic_list.append(vnic)
4293
4294 for link in children_section:
4295 if link.tag.split("}")[1] == 'Link' and 'rel' in link.attrib:
4296 if link.attrib['rel'] == 'screen:acquireTicket':
4297 parsed_respond['acquireTicket'] = link.attrib
4298 if link.attrib['rel'] == 'screen:acquireMksTicket':
4299 parsed_respond['acquireMksTicket'] = link.attrib
4300
4301 parsed_respond['interfaces'] = nic_list
4302 vCloud_extension_section = children_section.find('xmlns:VCloudExtension', namespaces)
4303 if vCloud_extension_section is not None:
4304 vm_vcenter_info = {}
4305 vim_info = vCloud_extension_section.find('vmext:VmVimInfo', namespaces)
4306 vmext = vim_info.find('vmext:VmVimObjectRef', namespaces)
4307 if vmext is not None:
4308 vm_vcenter_info["vm_moref_id"] = vmext.find('vmext:MoRef', namespaces).text
4309 parsed_respond["vm_vcenter_info"]= vm_vcenter_info
4310
4311 virtual_hardware_section = children_section.find('ovf:VirtualHardwareSection', namespaces)
4312 vm_virtual_hardware_info = {}
4313 if virtual_hardware_section is not None:
4314 for item in virtual_hardware_section.iterfind('ovf:Item',namespaces):
4315 if item.find("rasd:Description",namespaces).text == "Hard disk":
4316 disk_size = item.find("rasd:HostResource" ,namespaces
4317 ).attrib["{"+namespaces['vm']+"}capacity"]
4318
4319 vm_virtual_hardware_info["disk_size"]= disk_size
4320 break
4321
4322 for link in virtual_hardware_section:
4323 if link.tag.split("}")[1] == 'Link' and 'rel' in link.attrib:
4324 if link.attrib['rel'] == 'edit' and link.attrib['href'].endswith("/disks"):
4325 vm_virtual_hardware_info["disk_edit_href"] = link.attrib['href']
4326 break
4327
4328 parsed_respond["vm_virtual_hardware"]= vm_virtual_hardware_info
4329 except Exception as exp :
4330 self.logger.info("Error occurred calling rest api for getting vApp details {}".format(exp))
4331 return parsed_respond
4332
4333 def acquire_console(self, vm_uuid=None):
4334
4335 if vm_uuid is None:
4336 return None
4337 if self.client._session:
4338 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
4339 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
4340 vm_dict = self.get_vapp_details_rest(vapp_uuid=vm_uuid)
4341 console_dict = vm_dict['acquireTicket']
4342 console_rest_call = console_dict['href']
4343
4344 response = self.perform_request(req_type='POST',
4345 url=console_rest_call,
4346 headers=headers)
4347
4348 if response.status_code == 403:
4349 response = self.retry_rest('POST', console_rest_call)
4350
4351 if response.status_code == requests.codes.ok:
4352 return response.text
4353
4354 return None
4355
4356 def modify_vm_disk(self, vapp_uuid, flavor_disk):
4357 """
4358 Method retrieve vm disk details
4359
4360 Args:
4361 vapp_uuid - is vapp identifier.
4362 flavor_disk - disk size as specified in VNFD (flavor)
4363
4364 Returns:
4365 The return network uuid or return None
4366 """
4367 status = None
4368 try:
4369 #Flavor disk is in GB convert it into MB
4370 flavor_disk = int(flavor_disk) * 1024
4371 vm_details = self.get_vapp_details_rest(vapp_uuid)
4372 if vm_details:
4373 vm_name = vm_details["name"]
4374 self.logger.info("VM: {} flavor_disk :{}".format(vm_name , flavor_disk))
4375
4376 if vm_details and "vm_virtual_hardware" in vm_details:
4377 vm_disk = int(vm_details["vm_virtual_hardware"]["disk_size"])
4378 disk_edit_href = vm_details["vm_virtual_hardware"]["disk_edit_href"]
4379
4380 self.logger.info("VM: {} VM_disk :{}".format(vm_name , vm_disk))
4381
4382 if flavor_disk > vm_disk:
4383 status = self.modify_vm_disk_rest(disk_edit_href ,flavor_disk)
4384 self.logger.info("Modify disk of VM {} from {} to {} MB".format(vm_name,
4385 vm_disk, flavor_disk ))
4386 else:
4387 status = True
4388 self.logger.info("No need to modify disk of VM {}".format(vm_name))
4389
4390 return status
4391 except Exception as exp:
4392 self.logger.info("Error occurred while modifing disk size {}".format(exp))
4393
4394
4395 def modify_vm_disk_rest(self, disk_href , disk_size):
4396 """
4397 Method retrieve modify vm disk size
4398
4399 Args:
4400 disk_href - vCD API URL to GET and PUT disk data
4401 disk_size - disk size as specified in VNFD (flavor)
4402
4403 Returns:
4404 The return network uuid or return None
4405 """
4406 if disk_href is None or disk_size is None:
4407 return None
4408
4409 if self.client._session:
4410 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
4411 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
4412 response = self.perform_request(req_type='GET',
4413 url=disk_href,
4414 headers=headers)
4415
4416 if response.status_code == 403:
4417 response = self.retry_rest('GET', disk_href)
4418
4419 if response.status_code != requests.codes.ok:
4420 self.logger.debug("GET REST API call {} failed. Return status code {}".format(disk_href,
4421 response.status_code))
4422 return None
4423 try:
4424 lxmlroot_respond = lxmlElementTree.fromstring(response.content)
4425 namespaces = {prefix: uri for prefix, uri in lxmlroot_respond.nsmap.items() if prefix}
4426 namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"
4427
4428 for item in lxmlroot_respond.iterfind('xmlns:Item',namespaces):
4429 if item.find("rasd:Description",namespaces).text == "Hard disk":
4430 disk_item = item.find("rasd:HostResource" ,namespaces )
4431 if disk_item is not None:
4432 disk_item.attrib["{"+namespaces['xmlns']+"}capacity"] = str(disk_size)
4433 break
4434
4435 data = lxmlElementTree.tostring(lxmlroot_respond, encoding='utf8', method='xml',
4436 xml_declaration=True)
4437
4438 #Send PUT request to modify disk size
4439 headers['Content-Type'] = 'application/vnd.vmware.vcloud.rasdItemsList+xml; charset=ISO-8859-1'
4440
4441 response = self.perform_request(req_type='PUT',
4442 url=disk_href,
4443 headers=headers,
4444 data=data)
4445 if response.status_code == 403:
4446 add_headers = {'Content-Type': headers['Content-Type']}
4447 response = self.retry_rest('PUT', disk_href, add_headers, data)
4448
4449 if response.status_code != 202:
4450 self.logger.debug("PUT REST API call {} failed. Return status code {}".format(disk_href,
4451 response.status_code))
4452 else:
4453 modify_disk_task = self.get_task_from_response(response.text)
4454 result = self.client.get_task_monitor().wait_for_success(task=modify_disk_task)
4455 if result.get('status') == 'success':
4456 return True
4457 else:
4458 return False
4459 return None
4460
4461 except Exception as exp:
4462 self.logger.info("Error occurred calling rest api for modifing disk size {}".format(exp))
4463 return None
4464
4465 def add_serial_device(self, vapp_uuid):
4466 """
4467 Method to attach a serial device to a VM
4468
4469 Args:
4470 vapp_uuid - uuid of vApp/VM
4471
4472 Returns:
4473 """
4474 self.logger.info("Add serial devices into vApp {}".format(vapp_uuid))
4475 _, content = self.get_vcenter_content()
4476 vm_moref_id = self.get_vm_moref_id(vapp_uuid)
4477 if vm_moref_id:
4478 try:
4479 host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
4480 self.logger.info("VM {} is currently on host {}".format(vm_obj, host_obj))
4481 if host_obj and vm_obj:
4482 spec = vim.vm.ConfigSpec()
4483 spec.deviceChange = []
4484 serial_spec = vim.vm.device.VirtualDeviceSpec()
4485 serial_spec.operation = 'add'
4486 serial_port = vim.vm.device.VirtualSerialPort()
4487 serial_port.yieldOnPoll = True
4488 backing = serial_port.URIBackingInfo()
4489 backing.serviceURI = 'tcp://:65500'
4490 backing.direction = 'server'
4491 serial_port.backing = backing
4492 serial_spec.device = serial_port
4493 spec.deviceChange.append(serial_spec)
4494 vm_obj.ReconfigVM_Task(spec=spec)
4495
4496 self.logger.info("Adding serial device to VM {}".format(vm_obj))
4497 except vmodl.MethodFault as error:
4498 self.logger.error("Error occurred while adding PCI devices {} ", error)
4499
4500 def add_pci_devices(self, vapp_uuid, pci_devices, vmname_andid):
4501 """
4502 Method to attach pci devices to VM
4503
4504 Args:
4505 vapp_uuid - uuid of vApp/VM
4506 pci_devices - pci devices infromation as specified in VNFD (flavor)
4507
4508 Returns:
4509 The status of add pci device task , vm object and
4510 vcenter_conect object
4511 """
4512 vm_obj = None
4513 self.logger.info("Add pci devices {} into vApp {}".format(pci_devices, vapp_uuid))
4514 vcenter_conect, content = self.get_vcenter_content()
4515 vm_moref_id = self.get_vm_moref_id(vapp_uuid)
4516
4517 if vm_moref_id:
4518 try:
4519 no_of_pci_devices = len(pci_devices)
4520 if no_of_pci_devices > 0:
4521 #Get VM and its host
4522 host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
4523 self.logger.info("VM {} is currently on host {}".format(vm_obj, host_obj))
4524 if host_obj and vm_obj:
4525 #get PCI devies from host on which vapp is currently installed
4526 avilable_pci_devices = self.get_pci_devices(host_obj, no_of_pci_devices)
4527
4528 if avilable_pci_devices is None:
4529 #find other hosts with active pci devices
4530 new_host_obj , avilable_pci_devices = self.get_host_and_PCIdevices(
4531 content,
4532 no_of_pci_devices
4533 )
4534
4535 if new_host_obj is not None and avilable_pci_devices is not None and len(avilable_pci_devices)> 0:
4536 #Migrate vm to the host where PCI devices are availble
4537 self.logger.info("Relocate VM {} on new host {}".format(vm_obj, new_host_obj))
4538 task = self.relocate_vm(new_host_obj, vm_obj)
4539 if task is not None:
4540 result = self.wait_for_vcenter_task(task, vcenter_conect)
4541 self.logger.info("Migrate VM status: {}".format(result))
4542 host_obj = new_host_obj
4543 else:
4544 self.logger.info("Fail to migrate VM : {}".format(result))
4545 raise vimconn.vimconnNotFoundException(
4546 "Fail to migrate VM : {} to host {}".format(
4547 vmname_andid,
4548 new_host_obj)
4549 )
4550
4551 if host_obj is not None and avilable_pci_devices is not None and len(avilable_pci_devices)> 0:
4552 #Add PCI devices one by one
4553 for pci_device in avilable_pci_devices:
4554 task = self.add_pci_to_vm(host_obj, vm_obj, pci_device)
4555 if task:
4556 status= self.wait_for_vcenter_task(task, vcenter_conect)
4557 if status:
4558 self.logger.info("Added PCI device {} to VM {}".format(pci_device,str(vm_obj)))
4559 else:
4560 self.logger.error("Fail to add PCI device {} to VM {}".format(pci_device,str(vm_obj)))
4561 return True, vm_obj, vcenter_conect
4562 else:
4563 self.logger.error("Currently there is no host with"\
4564 " {} number of avaialble PCI devices required for VM {}".format(
4565 no_of_pci_devices,
4566 vmname_andid)
4567 )
4568 raise vimconn.vimconnNotFoundException(
4569 "Currently there is no host with {} "\
4570 "number of avaialble PCI devices required for VM {}".format(
4571 no_of_pci_devices,
4572 vmname_andid))
4573 else:
4574 self.logger.debug("No infromation about PCI devices {} ",pci_devices)
4575
4576 except vmodl.MethodFault as error:
4577 self.logger.error("Error occurred while adding PCI devices {} ",error)
4578 return None, vm_obj, vcenter_conect
4579
4580 def get_vm_obj(self, content, mob_id):
4581 """
4582 Method to get the vsphere VM object associated with a given morf ID
4583 Args:
4584 vapp_uuid - uuid of vApp/VM
4585 content - vCenter content object
4586 mob_id - mob_id of VM
4587
4588 Returns:
4589 VM and host object
4590 """
4591 vm_obj = None
4592 host_obj = None
4593 try :
4594 container = content.viewManager.CreateContainerView(content.rootFolder,
4595 [vim.VirtualMachine], True
4596 )
4597 for vm in container.view:
4598 mobID = vm._GetMoId()
4599 if mobID == mob_id:
4600 vm_obj = vm
4601 host_obj = vm_obj.runtime.host
4602 break
4603 except Exception as exp:
4604 self.logger.error("Error occurred while finding VM object : {}".format(exp))
4605 return host_obj, vm_obj
4606
4607 def get_pci_devices(self, host, need_devices):
4608 """
4609 Method to get the details of pci devices on given host
4610 Args:
4611 host - vSphere host object
4612 need_devices - number of pci devices needed on host
4613
4614 Returns:
4615 array of pci devices
4616 """
4617 all_devices = []
4618 all_device_ids = []
4619 used_devices_ids = []
4620
4621 try:
4622 if host:
4623 pciPassthruInfo = host.config.pciPassthruInfo
4624 pciDevies = host.hardware.pciDevice
4625
4626 for pci_status in pciPassthruInfo:
4627 if pci_status.passthruActive:
4628 for device in pciDevies:
4629 if device.id == pci_status.id:
4630 all_device_ids.append(device.id)
4631 all_devices.append(device)
4632
4633 #check if devices are in use
4634 avalible_devices = all_devices
4635 for vm in host.vm:
4636 if vm.runtime.powerState == vim.VirtualMachinePowerState.poweredOn:
4637 vm_devices = vm.config.hardware.device
4638 for device in vm_devices:
4639 if type(device) is vim.vm.device.VirtualPCIPassthrough:
4640 if device.backing.id in all_device_ids:
4641 for use_device in avalible_devices:
4642 if use_device.id == device.backing.id:
4643 avalible_devices.remove(use_device)
4644 used_devices_ids.append(device.backing.id)
4645 self.logger.debug("Device {} from devices {}"\
4646 "is in use".format(device.backing.id,
4647 device)
4648 )
4649 if len(avalible_devices) < need_devices:
4650 self.logger.debug("Host {} don't have {} number of active devices".format(host,
4651 need_devices))
4652 self.logger.debug("found only {} devices {}".format(len(avalible_devices),
4653 avalible_devices))
4654 return None
4655 else:
4656 required_devices = avalible_devices[:need_devices]
4657 self.logger.info("Found {} PCI devices on host {} but required only {}".format(
4658 len(avalible_devices),
4659 host,
4660 need_devices))
4661 self.logger.info("Retruning {} devices as {}".format(need_devices,
4662 required_devices ))
4663 return required_devices
4664
4665 except Exception as exp:
4666 self.logger.error("Error {} occurred while finding pci devices on host: {}".format(exp, host))
4667
4668 return None
4669
4670 def get_host_and_PCIdevices(self, content, need_devices):
4671 """
4672 Method to get the details of pci devices infromation on all hosts
4673
4674 Args:
4675 content - vSphere host object
4676 need_devices - number of pci devices needed on host
4677
4678 Returns:
4679 array of pci devices and host object
4680 """
4681 host_obj = None
4682 pci_device_objs = None
4683 try:
4684 if content:
4685 container = content.viewManager.CreateContainerView(content.rootFolder,
4686 [vim.HostSystem], True)
4687 for host in container.view:
4688 devices = self.get_pci_devices(host, need_devices)
4689 if devices:
4690 host_obj = host
4691 pci_device_objs = devices
4692 break
4693 except Exception as exp:
4694 self.logger.error("Error {} occurred while finding pci devices on host: {}".format(exp, host_obj))
4695
4696 return host_obj,pci_device_objs
4697
4698 def relocate_vm(self, dest_host, vm) :
4699 """
4700 Method to get the relocate VM to new host
4701
4702 Args:
4703 dest_host - vSphere host object
4704 vm - vSphere VM object
4705
4706 Returns:
4707 task object
4708 """
4709 task = None
4710 try:
4711 relocate_spec = vim.vm.RelocateSpec(host=dest_host)
4712 task = vm.Relocate(relocate_spec)
4713 self.logger.info("Migrating {} to destination host {}".format(vm, dest_host))
4714 except Exception as exp:
4715 self.logger.error("Error occurred while relocate VM {} to new host {}: {}".format(
4716 dest_host, vm, exp))
4717 return task
4718
4719 def wait_for_vcenter_task(self, task, actionName='job', hideResult=False):
4720 """
4721 Waits and provides updates on a vSphere task
4722 """
4723 while task.info.state == vim.TaskInfo.State.running:
4724 time.sleep(2)
4725
4726 if task.info.state == vim.TaskInfo.State.success:
4727 if task.info.result is not None and not hideResult:
4728 self.logger.info('{} completed successfully, result: {}'.format(
4729 actionName,
4730 task.info.result))
4731 else:
4732 self.logger.info('Task {} completed successfully.'.format(actionName))
4733 else:
4734 self.logger.error('{} did not complete successfully: {} '.format(
4735 actionName,
4736 task.info.error)
4737 )
4738
4739 return task.info.result
4740
4741 def add_pci_to_vm(self,host_object, vm_object, host_pci_dev):
4742 """
4743 Method to add pci device in given VM
4744
4745 Args:
4746 host_object - vSphere host object
4747 vm_object - vSphere VM object
4748 host_pci_dev - host_pci_dev must be one of the devices from the
4749 host_object.hardware.pciDevice list
4750 which is configured as a PCI passthrough device
4751
4752 Returns:
4753 task object
4754 """
4755 task = None
4756 if vm_object and host_object and host_pci_dev:
4757 try :
4758 #Add PCI device to VM
4759 pci_passthroughs = vm_object.environmentBrowser.QueryConfigTarget(host=None).pciPassthrough
4760 systemid_by_pciid = {item.pciDevice.id: item.systemId for item in pci_passthroughs}
4761
4762 if host_pci_dev.id not in systemid_by_pciid:
4763 self.logger.error("Device {} is not a passthrough device ".format(host_pci_dev))
4764 return None
4765
4766 deviceId = hex(host_pci_dev.deviceId % 2**16).lstrip('0x')
4767 backing = vim.VirtualPCIPassthroughDeviceBackingInfo(deviceId=deviceId,
4768 id=host_pci_dev.id,
4769 systemId=systemid_by_pciid[host_pci_dev.id],
4770 vendorId=host_pci_dev.vendorId,
4771 deviceName=host_pci_dev.deviceName)
4772
4773 hba_object = vim.VirtualPCIPassthrough(key=-100, backing=backing)
4774
4775 new_device_config = vim.VirtualDeviceConfigSpec(device=hba_object)
4776 new_device_config.operation = "add"
4777 vmConfigSpec = vim.vm.ConfigSpec()
4778 vmConfigSpec.deviceChange = [new_device_config]
4779
4780 task = vm_object.ReconfigVM_Task(spec=vmConfigSpec)
4781 self.logger.info("Adding PCI device {} into VM {} from host {} ".format(
4782 host_pci_dev, vm_object, host_object)
4783 )
4784 except Exception as exp:
4785 self.logger.error("Error occurred while adding pci devive {} to VM {}: {}".format(
4786 host_pci_dev,
4787 vm_object,
4788 exp))
4789 return task
4790
4791 def get_vm_vcenter_info(self):
4792 """
4793 Method to get details of vCenter and vm
4794
4795 Args:
4796 vapp_uuid - uuid of vApp or VM
4797
4798 Returns:
4799 Moref Id of VM and deails of vCenter
4800 """
4801 vm_vcenter_info = {}
4802
4803 if self.vcenter_ip is not None:
4804 vm_vcenter_info["vm_vcenter_ip"] = self.vcenter_ip
4805 else:
4806 raise vimconn.vimconnException(message="vCenter IP is not provided."\
4807 " Please provide vCenter IP while attaching datacenter to tenant in --config")
4808 if self.vcenter_port is not None:
4809 vm_vcenter_info["vm_vcenter_port"] = self.vcenter_port
4810 else:
4811 raise vimconn.vimconnException(message="vCenter port is not provided."\
4812 " Please provide vCenter port while attaching datacenter to tenant in --config")
4813 if self.vcenter_user is not None:
4814 vm_vcenter_info["vm_vcenter_user"] = self.vcenter_user
4815 else:
4816 raise vimconn.vimconnException(message="vCenter user is not provided."\
4817 " Please provide vCenter user while attaching datacenter to tenant in --config")
4818
4819 if self.vcenter_password is not None:
4820 vm_vcenter_info["vm_vcenter_password"] = self.vcenter_password
4821 else:
4822 raise vimconn.vimconnException(message="vCenter user password is not provided."\
4823 " Please provide vCenter user password while attaching datacenter to tenant in --config")
4824
4825 return vm_vcenter_info
4826
4827
4828 def get_vm_pci_details(self, vmuuid):
4829 """
4830 Method to get VM PCI device details from vCenter
4831
4832 Args:
4833 vm_obj - vSphere VM object
4834
4835 Returns:
4836 dict of PCI devives attached to VM
4837
4838 """
4839 vm_pci_devices_info = {}
4840 try:
4841 vcenter_conect, content = self.get_vcenter_content()
4842 vm_moref_id = self.get_vm_moref_id(vmuuid)
4843 if vm_moref_id:
4844 #Get VM and its host
4845 if content:
4846 host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
4847 if host_obj and vm_obj:
4848 vm_pci_devices_info["host_name"]= host_obj.name
4849 vm_pci_devices_info["host_ip"]= host_obj.config.network.vnic[0].spec.ip.ipAddress
4850 for device in vm_obj.config.hardware.device:
4851 if type(device) == vim.vm.device.VirtualPCIPassthrough:
4852 device_details={'devide_id':device.backing.id,
4853 'pciSlotNumber':device.slotInfo.pciSlotNumber,
4854 }
4855 vm_pci_devices_info[device.deviceInfo.label] = device_details
4856 else:
4857 self.logger.error("Can not connect to vCenter while getting "\
4858 "PCI devices infromationn")
4859 return vm_pci_devices_info
4860 except Exception as exp:
4861 self.logger.error("Error occurred while getting VM infromationn"\
4862 " for VM : {}".format(exp))
4863 raise vimconn.vimconnException(message=exp)
4864
4865
4866 def reserve_memory_for_all_vms(self, vapp, memory_mb):
4867 """
4868 Method to reserve memory for all VMs
4869 Args :
4870 vapp - VApp
4871 memory_mb - Memory in MB
4872 Returns:
4873 None
4874 """
4875
4876 self.logger.info("Reserve memory for all VMs")
4877 for vms in vapp.get_all_vms():
4878 vm_id = vms.get('id').split(':')[-1]
4879
4880 url_rest_call = "{}/api/vApp/vm-{}/virtualHardwareSection/memory".format(self.url, vm_id)
4881
4882 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
4883 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
4884 headers['Content-Type'] = 'application/vnd.vmware.vcloud.rasdItem+xml'
4885 response = self.perform_request(req_type='GET',
4886 url=url_rest_call,
4887 headers=headers)
4888
4889 if response.status_code == 403:
4890 response = self.retry_rest('GET', url_rest_call)
4891
4892 if response.status_code != 200:
4893 self.logger.error("REST call {} failed reason : {}"\
4894 "status code : {}".format(url_rest_call,
4895 response.text,
4896 response.status_code))
4897 raise vimconn.vimconnException("reserve_memory_for_all_vms : Failed to get "\
4898 "memory")
4899
4900 bytexml = bytes(bytearray(response.text, encoding='utf-8'))
4901 contentelem = lxmlElementTree.XML(bytexml)
4902 namespaces = {prefix:uri for prefix,uri in contentelem.nsmap.items() if prefix}
4903 namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"
4904
4905 # Find the reservation element in the response
4906 memelem_list = contentelem.findall(".//rasd:Reservation", namespaces)
4907 for memelem in memelem_list:
4908 memelem.text = str(memory_mb)
4909
4910 newdata = lxmlElementTree.tostring(contentelem, pretty_print=True)
4911
4912 response = self.perform_request(req_type='PUT',
4913 url=url_rest_call,
4914 headers=headers,
4915 data=newdata)
4916
4917 if response.status_code == 403:
4918 add_headers = {'Content-Type': headers['Content-Type']}
4919 response = self.retry_rest('PUT', url_rest_call, add_headers, newdata)
4920
4921 if response.status_code != 202:
4922 self.logger.error("REST call {} failed reason : {}"\
4923 "status code : {} ".format(url_rest_call,
4924 response.text,
4925 response.status_code))
4926 raise vimconn.vimconnException("reserve_memory_for_all_vms : Failed to update "\
4927 "virtual hardware memory section")
4928 else:
4929 mem_task = self.get_task_from_response(response.text)
4930 result = self.client.get_task_monitor().wait_for_success(task=mem_task)
4931 if result.get('status') == 'success':
4932 self.logger.info("reserve_memory_for_all_vms(): VM {} succeeded "\
4933 .format(vm_id))
4934 else:
4935 self.logger.error("reserve_memory_for_all_vms(): VM {} failed "\
4936 .format(vm_id))
4937
4938 def connect_vapp_to_org_vdc_network(self, vapp_id, net_name):
4939 """
4940 Configure VApp network config with org vdc network
4941 Args :
4942 vapp - VApp
4943 Returns:
4944 None
4945 """
4946
4947 self.logger.info("Connecting vapp {} to org vdc network {}".
4948 format(vapp_id, net_name))
4949
4950 url_rest_call = "{}/api/vApp/vapp-{}/networkConfigSection/".format(self.url, vapp_id)
4951
4952 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
4953 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
4954 response = self.perform_request(req_type='GET',
4955 url=url_rest_call,
4956 headers=headers)
4957
4958 if response.status_code == 403:
4959 response = self.retry_rest('GET', url_rest_call)
4960
4961 if response.status_code != 200:
4962 self.logger.error("REST call {} failed reason : {}"\
4963 "status code : {}".format(url_rest_call,
4964 response.text,
4965 response.status_code))
4966 raise vimconn.vimconnException("connect_vapp_to_org_vdc_network : Failed to get "\
4967 "network config section")
4968
4969 data = response.text
4970 headers['Content-Type'] = 'application/vnd.vmware.vcloud.networkConfigSection+xml'
4971 net_id = self.get_network_id_by_name(net_name)
4972 if not net_id:
4973 raise vimconn.vimconnException("connect_vapp_to_org_vdc_network : Failed to find "\
4974 "existing network")
4975
4976 bytexml = bytes(bytearray(data, encoding='utf-8'))
4977 newelem = lxmlElementTree.XML(bytexml)
4978 namespaces = {prefix: uri for prefix, uri in newelem.nsmap.items() if prefix}
4979 namespaces["xmlns"] = "http://www.vmware.com/vcloud/v1.5"
4980 nwcfglist = newelem.findall(".//xmlns:NetworkConfig", namespaces)
4981
4982 # VCD 9.7 returns an incorrect parentnetwork element. Fix it before PUT operation
4983 parentnetworklist = newelem.findall(".//xmlns:ParentNetwork", namespaces)
4984 if parentnetworklist:
4985 for pn in parentnetworklist:
4986 if "href" not in pn.keys():
4987 id_val = pn.get("id")
4988 href_val = "{}/api/network/{}".format(self.url, id_val)
4989 pn.set("href", href_val)
4990
4991 newstr = """<NetworkConfig networkName="{}">
4992 <Configuration>
4993 <ParentNetwork href="{}/api/network/{}"/>
4994 <FenceMode>bridged</FenceMode>
4995 </Configuration>
4996 </NetworkConfig>
4997 """.format(net_name, self.url, net_id)
4998 newcfgelem = lxmlElementTree.fromstring(newstr)
4999 if nwcfglist:
5000 nwcfglist[0].addnext(newcfgelem)
5001
5002 newdata = lxmlElementTree.tostring(newelem, pretty_print=True)
5003
5004 response = self.perform_request(req_type='PUT',
5005 url=url_rest_call,
5006 headers=headers,
5007 data=newdata)
5008
5009 if response.status_code == 403:
5010 add_headers = {'Content-Type': headers['Content-Type']}
5011 response = self.retry_rest('PUT', url_rest_call, add_headers, newdata)
5012
5013 if response.status_code != 202:
5014 self.logger.error("REST call {} failed reason : {}"\
5015 "status code : {} ".format(url_rest_call,
5016 response.text,
5017 response.status_code))
5018 raise vimconn.vimconnException("connect_vapp_to_org_vdc_network : Failed to update "\
5019 "network config section")
5020 else:
5021 vapp_task = self.get_task_from_response(response.text)
5022 result = self.client.get_task_monitor().wait_for_success(task=vapp_task)
5023 if result.get('status') == 'success':
5024 self.logger.info("connect_vapp_to_org_vdc_network(): Vapp {} connected to "\
5025 "network {}".format(vapp_id, net_name))
5026 else:
5027 self.logger.error("connect_vapp_to_org_vdc_network(): Vapp {} failed to "\
5028 "connect to network {}".format(vapp_id, net_name))
5029
5030 def remove_primary_network_adapter_from_all_vms(self, vapp):
5031 """
5032 Method to remove network adapter type to vm
5033 Args :
5034 vapp - VApp
5035 Returns:
5036 None
5037 """
5038
5039 self.logger.info("Removing network adapter from all VMs")
5040 for vms in vapp.get_all_vms():
5041 vm_id = vms.get('id').split(':')[-1]
5042
5043 url_rest_call = "{}/api/vApp/vm-{}/networkConnectionSection/".format(self.url, vm_id)
5044
5045 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
5046 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
5047 response = self.perform_request(req_type='GET',
5048 url=url_rest_call,
5049 headers=headers)
5050
5051 if response.status_code == 403:
5052 response = self.retry_rest('GET', url_rest_call)
5053
5054 if response.status_code != 200:
5055 self.logger.error("REST call {} failed reason : {}"\
5056 "status code : {}".format(url_rest_call,
5057 response.text,
5058 response.status_code))
5059 raise vimconn.vimconnException("remove_primary_network_adapter : Failed to get "\
5060 "network connection section")
5061
5062 data = response.text
5063 data = data.split('<Link rel="edit"')[0]
5064
5065 headers['Content-Type'] = 'application/vnd.vmware.vcloud.networkConnectionSection+xml'
5066
5067 newdata = """<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
5068 <NetworkConnectionSection xmlns="http://www.vmware.com/vcloud/v1.5"
5069 xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1"
5070 xmlns:vssd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData"
5071 xmlns:common="http://schemas.dmtf.org/wbem/wscim/1/common"
5072 xmlns:rasd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData"
5073 xmlns:vmw="http://www.vmware.com/schema/ovf"
5074 xmlns:ovfenv="http://schemas.dmtf.org/ovf/environment/1"
5075 xmlns:vmext="http://www.vmware.com/vcloud/extension/v1.5"
5076 xmlns:ns9="http://www.vmware.com/vcloud/versions"
5077 href="{url}" type="application/vnd.vmware.vcloud.networkConnectionSection+xml" ovf:required="false">
5078 <ovf:Info>Specifies the available VM network connections</ovf:Info>
5079 <PrimaryNetworkConnectionIndex>0</PrimaryNetworkConnectionIndex>
5080 <Link rel="edit" href="{url}" type="application/vnd.vmware.vcloud.networkConnectionSection+xml"/>
5081 </NetworkConnectionSection>""".format(url=url_rest_call)
5082 response = self.perform_request(req_type='PUT',
5083 url=url_rest_call,
5084 headers=headers,
5085 data=newdata)
5086
5087 if response.status_code == 403:
5088 add_headers = {'Content-Type': headers['Content-Type']}
5089 response = self.retry_rest('PUT', url_rest_call, add_headers, newdata)
5090
5091 if response.status_code != 202:
5092 self.logger.error("REST call {} failed reason : {}"\
5093 "status code : {} ".format(url_rest_call,
5094 response.text,
5095 response.status_code))
5096 raise vimconn.vimconnException("remove_primary_network_adapter : Failed to update "\
5097 "network connection section")
5098 else:
5099 nic_task = self.get_task_from_response(response.text)
5100 result = self.client.get_task_monitor().wait_for_success(task=nic_task)
5101 if result.get('status') == 'success':
5102 self.logger.info("remove_primary_network_adapter(): VM {} conneced to "\
5103 "default NIC type".format(vm_id))
5104 else:
5105 self.logger.error("remove_primary_network_adapter(): VM {} failed to "\
5106 "connect NIC type".format(vm_id))
5107
5108 def add_network_adapter_to_vms(self, vapp, network_name, primary_nic_index, nicIndex, net, nic_type=None):
5109 """
5110 Method to add network adapter type to vm
5111 Args :
5112 network_name - name of network
5113 primary_nic_index - int value for primary nic index
5114 nicIndex - int value for nic index
5115 nic_type - specify model name to which add to vm
5116 Returns:
5117 None
5118 """
5119
5120 self.logger.info("Add network adapter to VM: network_name {} nicIndex {} nic_type {}".\
5121 format(network_name, nicIndex, nic_type))
5122 try:
5123 ip_address = None
5124 floating_ip = False
5125 mac_address = None
5126 if 'floating_ip' in net: floating_ip = net['floating_ip']
5127
5128 # Stub for ip_address feature
5129 if 'ip_address' in net: ip_address = net['ip_address']
5130
5131 if 'mac_address' in net: mac_address = net['mac_address']
5132
5133 if floating_ip:
5134 allocation_mode = "POOL"
5135 elif ip_address:
5136 allocation_mode = "MANUAL"
5137 else:
5138 allocation_mode = "DHCP"
5139
5140 if not nic_type:
5141 for vms in vapp.get_all_vms():
5142 vm_id = vms.get('id').split(':')[-1]
5143
5144 url_rest_call = "{}/api/vApp/vm-{}/networkConnectionSection/".format(self.url, vm_id)
5145
5146 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
5147 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
5148 response = self.perform_request(req_type='GET',
5149 url=url_rest_call,
5150 headers=headers)
5151
5152 if response.status_code == 403:
5153 response = self.retry_rest('GET', url_rest_call)
5154
5155 if response.status_code != 200:
5156 self.logger.error("REST call {} failed reason : {}"\
5157 "status code : {}".format(url_rest_call,
5158 response.text,
5159 response.status_code))
5160 raise vimconn.vimconnException("add_network_adapter_to_vms : Failed to get "\
5161 "network connection section")
5162
5163 data = response.text
5164 data = data.split('<Link rel="edit"')[0]
5165 if '<PrimaryNetworkConnectionIndex>' not in data:
5166 self.logger.debug("add_network_adapter PrimaryNIC not in data")
5167 item = """<PrimaryNetworkConnectionIndex>{}</PrimaryNetworkConnectionIndex>
5168 <NetworkConnection network="{}">
5169 <NetworkConnectionIndex>{}</NetworkConnectionIndex>
5170 <IsConnected>true</IsConnected>
5171 <IpAddressAllocationMode>{}</IpAddressAllocationMode>
5172 </NetworkConnection>""".format(primary_nic_index, network_name, nicIndex,
5173 allocation_mode)
5174 # Stub for ip_address feature
5175 if ip_address:
5176 ip_tag = '<IpAddress>{}</IpAddress>'.format(ip_address)
5177 item = item.replace('</NetworkConnectionIndex>\n','</NetworkConnectionIndex>\n{}\n'.format(ip_tag))
5178
5179 if mac_address:
5180 mac_tag = '<MACAddress>{}</MACAddress>'.format(mac_address)
5181 item = item.replace('</IsConnected>\n','</IsConnected>\n{}\n'.format(mac_tag))
5182
5183 data = data.replace('</ovf:Info>\n','</ovf:Info>\n{}\n</NetworkConnectionSection>'.format(item))
5184 else:
5185 self.logger.debug("add_network_adapter PrimaryNIC in data")
5186 new_item = """<NetworkConnection network="{}">
5187 <NetworkConnectionIndex>{}</NetworkConnectionIndex>
5188 <IsConnected>true</IsConnected>
5189 <IpAddressAllocationMode>{}</IpAddressAllocationMode>
5190 </NetworkConnection>""".format(network_name, nicIndex,
5191 allocation_mode)
5192 # Stub for ip_address feature
5193 if ip_address:
5194 ip_tag = '<IpAddress>{}</IpAddress>'.format(ip_address)
5195 new_item = new_item.replace('</NetworkConnectionIndex>\n','</NetworkConnectionIndex>\n{}\n'.format(ip_tag))
5196
5197 if mac_address:
5198 mac_tag = '<MACAddress>{}</MACAddress>'.format(mac_address)
5199 new_item = new_item.replace('</IsConnected>\n','</IsConnected>\n{}\n'.format(mac_tag))
5200
5201 data = data + new_item + '</NetworkConnectionSection>'
5202
5203 headers['Content-Type'] = 'application/vnd.vmware.vcloud.networkConnectionSection+xml'
5204
5205 response = self.perform_request(req_type='PUT',
5206 url=url_rest_call,
5207 headers=headers,
5208 data=data)
5209
5210 if response.status_code == 403:
5211 add_headers = {'Content-Type': headers['Content-Type']}
5212 response = self.retry_rest('PUT', url_rest_call, add_headers, data)
5213
5214 if response.status_code != 202:
5215 self.logger.error("REST call {} failed reason : {}"\
5216 "status code : {} ".format(url_rest_call,
5217 response.text,
5218 response.status_code))
5219 raise vimconn.vimconnException("add_network_adapter_to_vms : Failed to update "\
5220 "network connection section")
5221 else:
5222 nic_task = self.get_task_from_response(response.text)
5223 result = self.client.get_task_monitor().wait_for_success(task=nic_task)
5224 if result.get('status') == 'success':
5225 self.logger.info("add_network_adapter_to_vms(): VM {} conneced to "\
5226 "default NIC type".format(vm_id))
5227 else:
5228 self.logger.error("add_network_adapter_to_vms(): VM {} failed to "\
5229 "connect NIC type".format(vm_id))
5230 else:
5231 for vms in vapp.get_all_vms():
5232 vm_id = vms.get('id').split(':')[-1]
5233
5234 url_rest_call = "{}/api/vApp/vm-{}/networkConnectionSection/".format(self.url, vm_id)
5235
5236 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
5237 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
5238 response = self.perform_request(req_type='GET',
5239 url=url_rest_call,
5240 headers=headers)
5241
5242 if response.status_code == 403:
5243 response = self.retry_rest('GET', url_rest_call)
5244
5245 if response.status_code != 200:
5246 self.logger.error("REST call {} failed reason : {}"\
5247 "status code : {}".format(url_rest_call,
5248 response.text,
5249 response.status_code))
5250 raise vimconn.vimconnException("add_network_adapter_to_vms : Failed to get "\
5251 "network connection section")
5252 data = response.text
5253 data = data.split('<Link rel="edit"')[0]
5254 vcd_netadapter_type = nic_type
5255 if nic_type in ['SR-IOV', 'VF']:
5256 vcd_netadapter_type = "SRIOVETHERNETCARD"
5257
5258 if '<PrimaryNetworkConnectionIndex>' not in data:
5259 self.logger.debug("add_network_adapter PrimaryNIC not in data nic_type {}".format(nic_type))
5260 item = """<PrimaryNetworkConnectionIndex>{}</PrimaryNetworkConnectionIndex>
5261 <NetworkConnection network="{}">
5262 <NetworkConnectionIndex>{}</NetworkConnectionIndex>
5263 <IsConnected>true</IsConnected>
5264 <IpAddressAllocationMode>{}</IpAddressAllocationMode>
5265 <NetworkAdapterType>{}</NetworkAdapterType>
5266 </NetworkConnection>""".format(primary_nic_index, network_name, nicIndex,
5267 allocation_mode, vcd_netadapter_type)
5268 # Stub for ip_address feature
5269 if ip_address:
5270 ip_tag = '<IpAddress>{}</IpAddress>'.format(ip_address)
5271 item = item.replace('</NetworkConnectionIndex>\n','</NetworkConnectionIndex>\n{}\n'.format(ip_tag))
5272
5273 if mac_address:
5274 mac_tag = '<MACAddress>{}</MACAddress>'.format(mac_address)
5275 item = item.replace('</IsConnected>\n','</IsConnected>\n{}\n'.format(mac_tag))
5276
5277 data = data.replace('</ovf:Info>\n','</ovf:Info>\n{}\n</NetworkConnectionSection>'.format(item))
5278 else:
5279 self.logger.debug("add_network_adapter PrimaryNIC in data nic_type {}".format(nic_type))
5280 new_item = """<NetworkConnection network="{}">
5281 <NetworkConnectionIndex>{}</NetworkConnectionIndex>
5282 <IsConnected>true</IsConnected>
5283 <IpAddressAllocationMode>{}</IpAddressAllocationMode>
5284 <NetworkAdapterType>{}</NetworkAdapterType>
5285 </NetworkConnection>""".format(network_name, nicIndex,
5286 allocation_mode, vcd_netadapter_type)
5287 # Stub for ip_address feature
5288 if ip_address:
5289 ip_tag = '<IpAddress>{}</IpAddress>'.format(ip_address)
5290 new_item = new_item.replace('</NetworkConnectionIndex>\n','</NetworkConnectionIndex>\n{}\n'.format(ip_tag))
5291
5292 if mac_address:
5293 mac_tag = '<MACAddress>{}</MACAddress>'.format(mac_address)
5294 new_item = new_item.replace('</IsConnected>\n','</IsConnected>\n{}\n'.format(mac_tag))
5295
5296 data = data + new_item + '</NetworkConnectionSection>'
5297
5298 headers['Content-Type'] = 'application/vnd.vmware.vcloud.networkConnectionSection+xml'
5299
5300 response = self.perform_request(req_type='PUT',
5301 url=url_rest_call,
5302 headers=headers,
5303 data=data)
5304
5305 if response.status_code == 403:
5306 add_headers = {'Content-Type': headers['Content-Type']}
5307 response = self.retry_rest('PUT', url_rest_call, add_headers, data)
5308
5309 if response.status_code != 202:
5310 self.logger.error("REST call {} failed reason : {}"\
5311 "status code : {}".format(url_rest_call,
5312 response.text,
5313 response.status_code))
5314 raise vimconn.vimconnException("add_network_adapter_to_vms : Failed to update "\
5315 "network connection section")
5316 else:
5317 nic_task = self.get_task_from_response(response.text)
5318 result = self.client.get_task_monitor().wait_for_success(task=nic_task)
5319 if result.get('status') == 'success':
5320 self.logger.info("add_network_adapter_to_vms(): VM {} "\
5321 "conneced to NIC type {}".format(vm_id, nic_type))
5322 else:
5323 self.logger.error("add_network_adapter_to_vms(): VM {} "\
5324 "failed to connect NIC type {}".format(vm_id, nic_type))
5325 except Exception as exp:
5326 self.logger.error("add_network_adapter_to_vms() : exception occurred "\
5327 "while adding Network adapter")
5328 raise vimconn.vimconnException(message=exp)
5329
5330
5331 def set_numa_affinity(self, vmuuid, paired_threads_id):
5332 """
5333 Method to assign numa affinity in vm configuration parammeters
5334 Args :
5335 vmuuid - vm uuid
5336 paired_threads_id - one or more virtual processor
5337 numbers
5338 Returns:
5339 return if True
5340 """
5341 try:
5342 vcenter_conect, content = self.get_vcenter_content()
5343 vm_moref_id = self.get_vm_moref_id(vmuuid)
5344
5345 host_obj, vm_obj = self.get_vm_obj(content ,vm_moref_id)
5346 if vm_obj:
5347 config_spec = vim.vm.ConfigSpec()
5348 config_spec.extraConfig = []
5349 opt = vim.option.OptionValue()
5350 opt.key = 'numa.nodeAffinity'
5351 opt.value = str(paired_threads_id)
5352 config_spec.extraConfig.append(opt)
5353 task = vm_obj.ReconfigVM_Task(config_spec)
5354 if task:
5355 result = self.wait_for_vcenter_task(task, vcenter_conect)
5356 extra_config = vm_obj.config.extraConfig
5357 flag = False
5358 for opts in extra_config:
5359 if 'numa.nodeAffinity' in opts.key:
5360 flag = True
5361 self.logger.info("set_numa_affinity: Sucessfully assign numa affinity "\
5362 "value {} for vm {}".format(opt.value, vm_obj))
5363 if flag:
5364 return
5365 else:
5366 self.logger.error("set_numa_affinity: Failed to assign numa affinity")
5367 except Exception as exp:
5368 self.logger.error("set_numa_affinity : exception occurred while setting numa affinity "\
5369 "for VM {} : {}".format(vm_obj, vm_moref_id))
5370 raise vimconn.vimconnException("set_numa_affinity : Error {} failed to assign numa "\
5371 "affinity".format(exp))
5372
5373
5374 def cloud_init(self, vapp, cloud_config):
5375 """
5376 Method to inject ssh-key
5377 vapp - vapp object
5378 cloud_config a dictionary with:
5379 'key-pairs': (optional) list of strings with the public key to be inserted to the default user
5380 'users': (optional) list of users to be inserted, each item is a dict with:
5381 'name': (mandatory) user name,
5382 'key-pairs': (optional) list of strings with the public key to be inserted to the user
5383 'user-data': (optional) can be a string with the text script to be passed directly to cloud-init,
5384 or a list of strings, each one contains a script to be passed, usually with a MIMEmultipart file
5385 'config-files': (optional). List of files to be transferred. Each item is a dict with:
5386 'dest': (mandatory) string with the destination absolute path
5387 'encoding': (optional, by default text). Can be one of:
5388 'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
5389 'content' (mandatory): string with the content of the file
5390 'permissions': (optional) string with file permissions, typically octal notation '0644'
5391 'owner': (optional) file owner, string with the format 'owner:group'
5392 'boot-data-drive': boolean to indicate if user-data must be passed using a boot drive (hard disk
5393 """
5394 try:
5395 if not isinstance(cloud_config, dict):
5396 raise Exception("cloud_init : parameter cloud_config is not a dictionary")
5397 else:
5398 key_pairs = []
5399 userdata = []
5400 if "key-pairs" in cloud_config:
5401 key_pairs = cloud_config["key-pairs"]
5402
5403 if "users" in cloud_config:
5404 userdata = cloud_config["users"]
5405
5406 self.logger.debug("cloud_init : Guest os customization started..")
5407 customize_script = self.format_script(key_pairs=key_pairs, users_list=userdata)
5408 customize_script = customize_script.replace("&","&amp;")
5409 self.guest_customization(vapp, customize_script)
5410
5411 except Exception as exp:
5412 self.logger.error("cloud_init : exception occurred while injecting "\
5413 "ssh-key")
5414 raise vimconn.vimconnException("cloud_init : Error {} failed to inject "\
5415 "ssh-key".format(exp))
5416
5417 def format_script(self, key_pairs=[], users_list=[]):
5418 bash_script = """#!/bin/sh
5419 echo performing customization tasks with param $1 at `date "+DATE: %Y-%m-%d - TIME: %H:%M:%S"` >> /root/customization.log
5420 if [ "$1" = "precustomization" ];then
5421 echo performing precustomization tasks on `date "+DATE: %Y-%m-%d - TIME: %H:%M:%S"` >> /root/customization.log
5422 """
5423
5424 keys = "\n".join(key_pairs)
5425 if keys:
5426 keys_data = """
5427 if [ ! -d /root/.ssh ];then
5428 mkdir /root/.ssh
5429 chown root:root /root/.ssh
5430 chmod 700 /root/.ssh
5431 touch /root/.ssh/authorized_keys
5432 chown root:root /root/.ssh/authorized_keys
5433 chmod 600 /root/.ssh/authorized_keys
5434 # make centos with selinux happy
5435 which restorecon && restorecon -Rv /root/.ssh
5436 else
5437 touch /root/.ssh/authorized_keys
5438 chown root:root /root/.ssh/authorized_keys
5439 chmod 600 /root/.ssh/authorized_keys
5440 fi
5441 echo '{key}' >> /root/.ssh/authorized_keys
5442 """.format(key=keys)
5443
5444 bash_script+= keys_data
5445
5446 for user in users_list:
5447 if 'name' in user: user_name = user['name']
5448 if 'key-pairs' in user:
5449 user_keys = "\n".join(user['key-pairs'])
5450 else:
5451 user_keys = None
5452
5453 add_user_name = """
5454 useradd -d /home/{user_name} -m -g users -s /bin/bash {user_name}
5455 """.format(user_name=user_name)
5456
5457 bash_script+= add_user_name
5458
5459 if user_keys:
5460 user_keys_data = """
5461 mkdir /home/{user_name}/.ssh
5462 chown {user_name}:{user_name} /home/{user_name}/.ssh
5463 chmod 700 /home/{user_name}/.ssh
5464 touch /home/{user_name}/.ssh/authorized_keys
5465 chown {user_name}:{user_name} /home/{user_name}/.ssh/authorized_keys
5466 chmod 600 /home/{user_name}/.ssh/authorized_keys
5467 # make centos with selinux happy
5468 which restorecon && restorecon -Rv /home/{user_name}/.ssh
5469 echo '{user_key}' >> /home/{user_name}/.ssh/authorized_keys
5470 """.format(user_name=user_name,user_key=user_keys)
5471
5472 bash_script+= user_keys_data
5473
5474 return bash_script+"\n\tfi"
5475
5476 def guest_customization(self, vapp, customize_script):
5477 """
5478 Method to customize guest os
5479 vapp - Vapp object
5480 customize_script - Customize script to be run at first boot of VM.
5481 """
5482 for vm in vapp.get_all_vms():
5483 vm_id = vm.get('id').split(':')[-1]
5484 vm_name = vm.get('name')
5485 vm_name = vm_name.replace('_','-')
5486
5487 vm_customization_url = "{}/api/vApp/vm-{}/guestCustomizationSection/".format(self.url, vm_id)
5488 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
5489 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
5490
5491 headers['Content-Type'] = "application/vnd.vmware.vcloud.guestCustomizationSection+xml"
5492
5493 data = """<GuestCustomizationSection
5494 xmlns="http://www.vmware.com/vcloud/v1.5"
5495 xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1"
5496 ovf:required="false" href="{}" type="application/vnd.vmware.vcloud.guestCustomizationSection+xml">
5497 <ovf:Info>Specifies Guest OS Customization Settings</ovf:Info>
5498 <Enabled>true</Enabled>
5499 <ChangeSid>false</ChangeSid>
5500 <VirtualMachineId>{}</VirtualMachineId>
5501 <JoinDomainEnabled>false</JoinDomainEnabled>
5502 <UseOrgSettings>false</UseOrgSettings>
5503 <AdminPasswordEnabled>false</AdminPasswordEnabled>
5504 <AdminPasswordAuto>true</AdminPasswordAuto>
5505 <AdminAutoLogonEnabled>false</AdminAutoLogonEnabled>
5506 <AdminAutoLogonCount>0</AdminAutoLogonCount>
5507 <ResetPasswordRequired>false</ResetPasswordRequired>
5508 <CustomizationScript>{}</CustomizationScript>
5509 <ComputerName>{}</ComputerName>
5510 <Link href="{}" type="application/vnd.vmware.vcloud.guestCustomizationSection+xml" rel="edit"/>
5511 </GuestCustomizationSection>
5512 """.format(vm_customization_url,
5513 vm_id,
5514 customize_script,
5515 vm_name,
5516 vm_customization_url)
5517
5518 response = self.perform_request(req_type='PUT',
5519 url=vm_customization_url,
5520 headers=headers,
5521 data=data)
5522 if response.status_code == 202:
5523 guest_task = self.get_task_from_response(response.text)
5524 self.client.get_task_monitor().wait_for_success(task=guest_task)
5525 self.logger.info("guest_customization : customized guest os task "\
5526 "completed for VM {}".format(vm_name))
5527 else:
5528 self.logger.error("guest_customization : task for customized guest os"\
5529 "failed for VM {}".format(vm_name))
5530 raise vimconn.vimconnException("guest_customization : failed to perform"\
5531 "guest os customization on VM {}".format(vm_name))
5532
5533 def add_new_disk(self, vapp_uuid, disk_size):
5534 """
5535 Method to create an empty vm disk
5536
5537 Args:
5538 vapp_uuid - is vapp identifier.
5539 disk_size - size of disk to be created in GB
5540
5541 Returns:
5542 None
5543 """
5544 status = False
5545 vm_details = None
5546 try:
5547 #Disk size in GB, convert it into MB
5548 if disk_size is not None:
5549 disk_size_mb = int(disk_size) * 1024
5550 vm_details = self.get_vapp_details_rest(vapp_uuid)
5551
5552 if vm_details and "vm_virtual_hardware" in vm_details:
5553 self.logger.info("Adding disk to VM: {} disk size:{}GB".format(vm_details["name"], disk_size))
5554 disk_href = vm_details["vm_virtual_hardware"]["disk_edit_href"]
5555 status = self.add_new_disk_rest(disk_href, disk_size_mb)
5556
5557 except Exception as exp:
5558 msg = "Error occurred while creating new disk {}.".format(exp)
5559 self.rollback_newvm(vapp_uuid, msg)
5560
5561 if status:
5562 self.logger.info("Added new disk to VM: {} disk size:{}GB".format(vm_details["name"], disk_size))
5563 else:
5564 #If failed to add disk, delete VM
5565 msg = "add_new_disk: Failed to add new disk to {}".format(vm_details["name"])
5566 self.rollback_newvm(vapp_uuid, msg)
5567
5568
5569 def add_new_disk_rest(self, disk_href, disk_size_mb):
5570 """
5571 Retrives vApp Disks section & add new empty disk
5572
5573 Args:
5574 disk_href: Disk section href to addd disk
5575 disk_size_mb: Disk size in MB
5576
5577 Returns: Status of add new disk task
5578 """
5579 status = False
5580 if self.client._session:
5581 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
5582 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
5583 response = self.perform_request(req_type='GET',
5584 url=disk_href,
5585 headers=headers)
5586
5587 if response.status_code == 403:
5588 response = self.retry_rest('GET', disk_href)
5589
5590 if response.status_code != requests.codes.ok:
5591 self.logger.error("add_new_disk_rest: GET REST API call {} failed. Return status code {}"
5592 .format(disk_href, response.status_code))
5593 return status
5594 try:
5595 #Find but type & max of instance IDs assigned to disks
5596 lxmlroot_respond = lxmlElementTree.fromstring(response.content)
5597 namespaces = {prefix: uri for prefix, uri in lxmlroot_respond.nsmap.items() if prefix}
5598 namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"
5599 instance_id = 0
5600 for item in lxmlroot_respond.iterfind('xmlns:Item',namespaces):
5601 if item.find("rasd:Description",namespaces).text == "Hard disk":
5602 inst_id = int(item.find("rasd:InstanceID" ,namespaces).text)
5603 if inst_id > instance_id:
5604 instance_id = inst_id
5605 disk_item = item.find("rasd:HostResource" ,namespaces)
5606 bus_subtype = disk_item.attrib["{"+namespaces['xmlns']+"}busSubType"]
5607 bus_type = disk_item.attrib["{"+namespaces['xmlns']+"}busType"]
5608
5609 instance_id = instance_id + 1
5610 new_item = """<Item>
5611 <rasd:Description>Hard disk</rasd:Description>
5612 <rasd:ElementName>New disk</rasd:ElementName>
5613 <rasd:HostResource
5614 xmlns:vcloud="http://www.vmware.com/vcloud/v1.5"
5615 vcloud:capacity="{}"
5616 vcloud:busSubType="{}"
5617 vcloud:busType="{}"></rasd:HostResource>
5618 <rasd:InstanceID>{}</rasd:InstanceID>
5619 <rasd:ResourceType>17</rasd:ResourceType>
5620 </Item>""".format(disk_size_mb, bus_subtype, bus_type, instance_id)
5621
5622 new_data = response.text
5623 #Add new item at the bottom
5624 new_data = new_data.replace('</Item>\n</RasdItemsList>', '</Item>\n{}\n</RasdItemsList>'.format(new_item))
5625
5626 # Send PUT request to modify virtual hardware section with new disk
5627 headers['Content-Type'] = 'application/vnd.vmware.vcloud.rasdItemsList+xml; charset=ISO-8859-1'
5628
5629 response = self.perform_request(req_type='PUT',
5630 url=disk_href,
5631 data=new_data,
5632 headers=headers)
5633
5634 if response.status_code == 403:
5635 add_headers = {'Content-Type': headers['Content-Type']}
5636 response = self.retry_rest('PUT', disk_href, add_headers, new_data)
5637
5638 if response.status_code != 202:
5639 self.logger.error("PUT REST API call {} failed. Return status code {}. response.text:{}"
5640 .format(disk_href, response.status_code, response.text))
5641 else:
5642 add_disk_task = self.get_task_from_response(response.text)
5643 result = self.client.get_task_monitor().wait_for_success(task=add_disk_task)
5644 if result.get('status') == 'success':
5645 status = True
5646 else:
5647 self.logger.error("Add new disk REST task failed to add {} MB disk".format(disk_size_mb))
5648
5649 except Exception as exp:
5650 self.logger.error("Error occurred calling rest api for creating new disk {}".format(exp))
5651
5652 return status
5653
5654
5655 def add_existing_disk(self, catalogs=None, image_id=None, size=None, template_name=None, vapp_uuid=None):
5656 """
5657 Method to add existing disk to vm
5658 Args :
5659 catalogs - List of VDC catalogs
5660 image_id - Catalog ID
5661 template_name - Name of template in catalog
5662 vapp_uuid - UUID of vApp
5663 Returns:
5664 None
5665 """
5666 disk_info = None
5667 vcenter_conect, content = self.get_vcenter_content()
5668 #find moref-id of vm in image
5669 catalog_vm_info = self.get_vapp_template_details(catalogs=catalogs,
5670 image_id=image_id,
5671 )
5672
5673 if catalog_vm_info and "vm_vcenter_info" in catalog_vm_info:
5674 if "vm_moref_id" in catalog_vm_info["vm_vcenter_info"]:
5675 catalog_vm_moref_id = catalog_vm_info["vm_vcenter_info"].get("vm_moref_id", None)
5676 if catalog_vm_moref_id:
5677 self.logger.info("Moref_id of VM in catalog : {}" .format(catalog_vm_moref_id))
5678 host, catalog_vm_obj = self.get_vm_obj(content, catalog_vm_moref_id)
5679 if catalog_vm_obj:
5680 #find existing disk
5681 disk_info = self.find_disk(catalog_vm_obj)
5682 else:
5683 exp_msg = "No VM with image id {} found".format(image_id)
5684 self.rollback_newvm(vapp_uuid, exp_msg, exp_type="NotFound")
5685 else:
5686 exp_msg = "No Image found with image ID {} ".format(image_id)
5687 self.rollback_newvm(vapp_uuid, exp_msg, exp_type="NotFound")
5688
5689 if disk_info:
5690 self.logger.info("Existing disk_info : {}".format(disk_info))
5691 #get VM
5692 vm_moref_id = self.get_vm_moref_id(vapp_uuid)
5693 host, vm_obj = self.get_vm_obj(content, vm_moref_id)
5694 if vm_obj:
5695 status = self.add_disk(vcenter_conect=vcenter_conect,
5696 vm=vm_obj,
5697 disk_info=disk_info,
5698 size=size,
5699 vapp_uuid=vapp_uuid
5700 )
5701 if status:
5702 self.logger.info("Disk from image id {} added to {}".format(image_id,
5703 vm_obj.config.name)
5704 )
5705 else:
5706 msg = "No disk found with image id {} to add in VM {}".format(
5707 image_id,
5708 vm_obj.config.name)
5709 self.rollback_newvm(vapp_uuid, msg, exp_type="NotFound")
5710
5711
5712 def find_disk(self, vm_obj):
5713 """
5714 Method to find details of existing disk in VM
5715 Args :
5716 vm_obj - vCenter object of VM
5717 image_id - Catalog ID
5718 Returns:
5719 disk_info : dict of disk details
5720 """
5721 disk_info = {}
5722 if vm_obj:
5723 try:
5724 devices = vm_obj.config.hardware.device
5725 for device in devices:
5726 if type(device) is vim.vm.device.VirtualDisk:
5727 if isinstance(device.backing,vim.vm.device.VirtualDisk.FlatVer2BackingInfo) and hasattr(device.backing, 'fileName'):
5728 disk_info["full_path"] = device.backing.fileName
5729 disk_info["datastore"] = device.backing.datastore
5730 disk_info["capacityKB"] = device.capacityInKB
5731 break
5732 except Exception as exp:
5733 self.logger.error("find_disk() : exception occurred while "\
5734 "getting existing disk details :{}".format(exp))
5735 return disk_info
5736
5737
5738 def add_disk(self, vcenter_conect=None, vm=None, size=None, vapp_uuid=None, disk_info={}):
5739 """
5740 Method to add existing disk in VM
5741 Args :
5742 vcenter_conect - vCenter content object
5743 vm - vCenter vm object
5744 disk_info : dict of disk details
5745 Returns:
5746 status : status of add disk task
5747 """
5748 datastore = disk_info["datastore"] if "datastore" in disk_info else None
5749 fullpath = disk_info["full_path"] if "full_path" in disk_info else None
5750 capacityKB = disk_info["capacityKB"] if "capacityKB" in disk_info else None
5751 if size is not None:
5752 #Convert size from GB to KB
5753 sizeKB = int(size) * 1024 * 1024
5754 #compare size of existing disk and user given size.Assign whicherver is greater
5755 self.logger.info("Add Existing disk : sizeKB {} , capacityKB {}".format(
5756 sizeKB, capacityKB))
5757 if sizeKB > capacityKB:
5758 capacityKB = sizeKB
5759
5760 if datastore and fullpath and capacityKB:
5761 try:
5762 spec = vim.vm.ConfigSpec()
5763 # get all disks on a VM, set unit_number to the next available
5764 unit_number = 0
5765 for dev in vm.config.hardware.device:
5766 if hasattr(dev.backing, 'fileName'):
5767 unit_number = int(dev.unitNumber) + 1
5768 # unit_number 7 reserved for scsi controller
5769 if unit_number == 7:
5770 unit_number += 1
5771 if isinstance(dev, vim.vm.device.VirtualDisk):
5772 #vim.vm.device.VirtualSCSIController
5773 controller_key = dev.controllerKey
5774
5775 self.logger.info("Add Existing disk : unit number {} , controller key {}".format(
5776 unit_number, controller_key))
5777 # add disk here
5778 dev_changes = []
5779 disk_spec = vim.vm.device.VirtualDeviceSpec()
5780 disk_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
5781 disk_spec.device = vim.vm.device.VirtualDisk()
5782 disk_spec.device.backing = \
5783 vim.vm.device.VirtualDisk.FlatVer2BackingInfo()
5784 disk_spec.device.backing.thinProvisioned = True
5785 disk_spec.device.backing.diskMode = 'persistent'
5786 disk_spec.device.backing.datastore = datastore
5787 disk_spec.device.backing.fileName = fullpath
5788
5789 disk_spec.device.unitNumber = unit_number
5790 disk_spec.device.capacityInKB = capacityKB
5791 disk_spec.device.controllerKey = controller_key
5792 dev_changes.append(disk_spec)
5793 spec.deviceChange = dev_changes
5794 task = vm.ReconfigVM_Task(spec=spec)
5795 status = self.wait_for_vcenter_task(task, vcenter_conect)
5796 return status
5797 except Exception as exp:
5798 exp_msg = "add_disk() : exception {} occurred while adding disk "\
5799 "{} to vm {}".format(exp,
5800 fullpath,
5801 vm.config.name)
5802 self.rollback_newvm(vapp_uuid, exp_msg)
5803 else:
5804 msg = "add_disk() : Can not add disk to VM with disk info {} ".format(disk_info)
5805 self.rollback_newvm(vapp_uuid, msg)
5806
5807
5808 def get_vcenter_content(self):
5809 """
5810 Get the vsphere content object
5811 """
5812 try:
5813 vm_vcenter_info = self.get_vm_vcenter_info()
5814 except Exception as exp:
5815 self.logger.error("Error occurred while getting vCenter infromationn"\
5816 " for VM : {}".format(exp))
5817 raise vimconn.vimconnException(message=exp)
5818
5819 context = None
5820 if hasattr(ssl, '_create_unverified_context'):
5821 context = ssl._create_unverified_context()
5822
5823 vcenter_conect = SmartConnect(
5824 host=vm_vcenter_info["vm_vcenter_ip"],
5825 user=vm_vcenter_info["vm_vcenter_user"],
5826 pwd=vm_vcenter_info["vm_vcenter_password"],
5827 port=int(vm_vcenter_info["vm_vcenter_port"]),
5828 sslContext=context
5829 )
5830 atexit.register(Disconnect, vcenter_conect)
5831 content = vcenter_conect.RetrieveContent()
5832 return vcenter_conect, content
5833
5834
5835 def get_vm_moref_id(self, vapp_uuid):
5836 """
5837 Get the moref_id of given VM
5838 """
5839 try:
5840 if vapp_uuid:
5841 vm_details = self.get_vapp_details_rest(vapp_uuid, need_admin_access=True)
5842 if vm_details and "vm_vcenter_info" in vm_details:
5843 vm_moref_id = vm_details["vm_vcenter_info"].get("vm_moref_id", None)
5844 return vm_moref_id
5845
5846 except Exception as exp:
5847 self.logger.error("Error occurred while getting VM moref ID "\
5848 " for VM : {}".format(exp))
5849 return None
5850
5851
5852 def get_vapp_template_details(self, catalogs=None, image_id=None , template_name=None):
5853 """
5854 Method to get vApp template details
5855 Args :
5856 catalogs - list of VDC catalogs
5857 image_id - Catalog ID to find
5858 template_name : template name in catalog
5859 Returns:
5860 parsed_respond : dict of vApp tempalte details
5861 """
5862 parsed_response = {}
5863
5864 vca = self.connect_as_admin()
5865 if not vca:
5866 raise vimconn.vimconnConnectionException("Failed to connect vCD")
5867
5868 try:
5869 org, vdc = self.get_vdc_details()
5870 catalog = self.get_catalog_obj(image_id, catalogs)
5871 if catalog:
5872 items = org.get_catalog_item(catalog.get('name'), catalog.get('name'))
5873 catalog_items = [items.attrib]
5874
5875 if len(catalog_items) == 1:
5876 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
5877 'x-vcloud-authorization': vca._session.headers['x-vcloud-authorization']}
5878
5879 response = self.perform_request(req_type='GET',
5880 url=catalog_items[0].get('href'),
5881 headers=headers)
5882 catalogItem = XmlElementTree.fromstring(response.text)
5883 entity = [child for child in catalogItem if child.get("type") == "application/vnd.vmware.vcloud.vAppTemplate+xml"][0]
5884 vapp_tempalte_href = entity.get("href")
5885 #get vapp details and parse moref id
5886
5887 namespaces = {"vssd":"http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData" ,
5888 'ovf': 'http://schemas.dmtf.org/ovf/envelope/1',
5889 'vmw': 'http://www.vmware.com/schema/ovf',
5890 'vm': 'http://www.vmware.com/vcloud/v1.5',
5891 'rasd':"http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData",
5892 'vmext':"http://www.vmware.com/vcloud/extension/v1.5",
5893 'xmlns':"http://www.vmware.com/vcloud/v1.5"
5894 }
5895
5896 if vca._session:
5897 response = self.perform_request(req_type='GET',
5898 url=vapp_tempalte_href,
5899 headers=headers)
5900
5901 if response.status_code != requests.codes.ok:
5902 self.logger.debug("REST API call {} failed. Return status code {}".format(
5903 vapp_tempalte_href, response.status_code))
5904
5905 else:
5906 xmlroot_respond = XmlElementTree.fromstring(response.text)
5907 children_section = xmlroot_respond.find('vm:Children/', namespaces)
5908 if children_section is not None:
5909 vCloud_extension_section = children_section.find('xmlns:VCloudExtension', namespaces)
5910 if vCloud_extension_section is not None:
5911 vm_vcenter_info = {}
5912 vim_info = vCloud_extension_section.find('vmext:VmVimInfo', namespaces)
5913 vmext = vim_info.find('vmext:VmVimObjectRef', namespaces)
5914 if vmext is not None:
5915 vm_vcenter_info["vm_moref_id"] = vmext.find('vmext:MoRef', namespaces).text
5916 parsed_response["vm_vcenter_info"]= vm_vcenter_info
5917
5918 except Exception as exp :
5919 self.logger.info("Error occurred calling rest api for getting vApp details {}".format(exp))
5920
5921 return parsed_response
5922
5923
5924 def rollback_newvm(self, vapp_uuid, msg , exp_type="Genric"):
5925 """
5926 Method to delete vApp
5927 Args :
5928 vapp_uuid - vApp UUID
5929 msg - Error message to be logged
5930 exp_type : Exception type
5931 Returns:
5932 None
5933 """
5934 if vapp_uuid:
5935 status = self.delete_vminstance(vapp_uuid)
5936 else:
5937 msg = "No vApp ID"
5938 self.logger.error(msg)
5939 if exp_type == "Genric":
5940 raise vimconn.vimconnException(msg)
5941 elif exp_type == "NotFound":
5942 raise vimconn.vimconnNotFoundException(message=msg)
5943
5944 def add_sriov(self, vapp_uuid, sriov_nets, vmname_andid):
5945 """
5946 Method to attach SRIOV adapters to VM
5947
5948 Args:
5949 vapp_uuid - uuid of vApp/VM
5950 sriov_nets - SRIOV devices infromation as specified in VNFD (flavor)
5951 vmname_andid - vmname
5952
5953 Returns:
5954 The status of add SRIOV adapter task , vm object and
5955 vcenter_conect object
5956 """
5957 vm_obj = None
5958 vcenter_conect, content = self.get_vcenter_content()
5959 vm_moref_id = self.get_vm_moref_id(vapp_uuid)
5960
5961 if vm_moref_id:
5962 try:
5963 no_of_sriov_devices = len(sriov_nets)
5964 if no_of_sriov_devices > 0:
5965 #Get VM and its host
5966 host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
5967 self.logger.info("VM {} is currently on host {}".format(vm_obj, host_obj))
5968 if host_obj and vm_obj:
5969 #get SRIOV devies from host on which vapp is currently installed
5970 avilable_sriov_devices = self.get_sriov_devices(host_obj,
5971 no_of_sriov_devices,
5972 )
5973
5974 if len(avilable_sriov_devices) == 0:
5975 #find other hosts with active pci devices
5976 new_host_obj , avilable_sriov_devices = self.get_host_and_sriov_devices(
5977 content,
5978 no_of_sriov_devices,
5979 )
5980
5981 if new_host_obj is not None and len(avilable_sriov_devices)> 0:
5982 #Migrate vm to the host where SRIOV devices are available
5983 self.logger.info("Relocate VM {} on new host {}".format(vm_obj,
5984 new_host_obj))
5985 task = self.relocate_vm(new_host_obj, vm_obj)
5986 if task is not None:
5987 result = self.wait_for_vcenter_task(task, vcenter_conect)
5988 self.logger.info("Migrate VM status: {}".format(result))
5989 host_obj = new_host_obj
5990 else:
5991 self.logger.info("Fail to migrate VM : {}".format(result))
5992 raise vimconn.vimconnNotFoundException(
5993 "Fail to migrate VM : {} to host {}".format(
5994 vmname_andid,
5995 new_host_obj)
5996 )
5997
5998 if host_obj is not None and avilable_sriov_devices is not None and len(avilable_sriov_devices)> 0:
5999 #Add SRIOV devices one by one
6000 for sriov_net in sriov_nets:
6001 network_name = sriov_net.get('net_id')
6002 dvs_portgr_name = self.create_dvPort_group(network_name)
6003 if sriov_net.get('type') == "VF" or sriov_net.get('type') == "SR-IOV":
6004 #add vlan ID ,Modify portgroup for vlan ID
6005 self.configure_vlanID(content, vcenter_conect, network_name)
6006
6007 task = self.add_sriov_to_vm(content,
6008 vm_obj,
6009 host_obj,
6010 network_name,
6011 avilable_sriov_devices[0]
6012 )
6013 if task:
6014 status= self.wait_for_vcenter_task(task, vcenter_conect)
6015 if status:
6016 self.logger.info("Added SRIOV {} to VM {}".format(
6017 no_of_sriov_devices,
6018 str(vm_obj)))
6019 else:
6020 self.logger.error("Fail to add SRIOV {} to VM {}".format(
6021 no_of_sriov_devices,
6022 str(vm_obj)))
6023 raise vimconn.vimconnUnexpectedResponse(
6024 "Fail to add SRIOV adapter in VM ".format(str(vm_obj))
6025 )
6026 return True, vm_obj, vcenter_conect
6027 else:
6028 self.logger.error("Currently there is no host with"\
6029 " {} number of avaialble SRIOV "\
6030 "VFs required for VM {}".format(
6031 no_of_sriov_devices,
6032 vmname_andid)
6033 )
6034 raise vimconn.vimconnNotFoundException(
6035 "Currently there is no host with {} "\
6036 "number of avaialble SRIOV devices required for VM {}".format(
6037 no_of_sriov_devices,
6038 vmname_andid))
6039 else:
6040 self.logger.debug("No infromation about SRIOV devices {} ",sriov_nets)
6041
6042 except vmodl.MethodFault as error:
6043 self.logger.error("Error occurred while adding SRIOV {} ",error)
6044 return None, vm_obj, vcenter_conect
6045
6046
6047 def get_sriov_devices(self,host, no_of_vfs):
6048 """
6049 Method to get the details of SRIOV devices on given host
6050 Args:
6051 host - vSphere host object
6052 no_of_vfs - number of VFs needed on host
6053
6054 Returns:
6055 array of SRIOV devices
6056 """
6057 sriovInfo=[]
6058 if host:
6059 for device in host.config.pciPassthruInfo:
6060 if isinstance(device,vim.host.SriovInfo) and device.sriovActive:
6061 if device.numVirtualFunction >= no_of_vfs:
6062 sriovInfo.append(device)
6063 break
6064 return sriovInfo
6065
6066
6067 def get_host_and_sriov_devices(self, content, no_of_vfs):
6068 """
6069 Method to get the details of SRIOV devices infromation on all hosts
6070
6071 Args:
6072 content - vSphere host object
6073 no_of_vfs - number of pci VFs needed on host
6074
6075 Returns:
6076 array of SRIOV devices and host object
6077 """
6078 host_obj = None
6079 sriov_device_objs = None
6080 try:
6081 if content:
6082 container = content.viewManager.CreateContainerView(content.rootFolder,
6083 [vim.HostSystem], True)
6084 for host in container.view:
6085 devices = self.get_sriov_devices(host, no_of_vfs)
6086 if devices:
6087 host_obj = host
6088 sriov_device_objs = devices
6089 break
6090 except Exception as exp:
6091 self.logger.error("Error {} occurred while finding SRIOV devices on host: {}".format(exp, host_obj))
6092
6093 return host_obj,sriov_device_objs
6094
6095
6096 def add_sriov_to_vm(self,content, vm_obj, host_obj, network_name, sriov_device):
6097 """
6098 Method to add SRIOV adapter to vm
6099
6100 Args:
6101 host_obj - vSphere host object
6102 vm_obj - vSphere vm object
6103 content - vCenter content object
6104 network_name - name of distributed virtaul portgroup
6105 sriov_device - SRIOV device info
6106
6107 Returns:
6108 task object
6109 """
6110 devices = []
6111 vnic_label = "sriov nic"
6112 try:
6113 dvs_portgr = self.get_dvport_group(network_name)
6114 network_name = dvs_portgr.name
6115 nic = vim.vm.device.VirtualDeviceSpec()
6116 # VM device
6117 nic.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
6118 nic.device = vim.vm.device.VirtualSriovEthernetCard()
6119 nic.device.addressType = 'assigned'
6120 #nic.device.key = 13016
6121 nic.device.deviceInfo = vim.Description()
6122 nic.device.deviceInfo.label = vnic_label
6123 nic.device.deviceInfo.summary = network_name
6124 nic.device.backing = vim.vm.device.VirtualEthernetCard.NetworkBackingInfo()
6125
6126 nic.device.backing.network = self.get_obj(content, [vim.Network], network_name)
6127 nic.device.backing.deviceName = network_name
6128 nic.device.backing.useAutoDetect = False
6129 nic.device.connectable = vim.vm.device.VirtualDevice.ConnectInfo()
6130 nic.device.connectable.startConnected = True
6131 nic.device.connectable.allowGuestControl = True
6132
6133 nic.device.sriovBacking = vim.vm.device.VirtualSriovEthernetCard.SriovBackingInfo()
6134 nic.device.sriovBacking.physicalFunctionBacking = vim.vm.device.VirtualPCIPassthrough.DeviceBackingInfo()
6135 nic.device.sriovBacking.physicalFunctionBacking.id = sriov_device.id
6136
6137 devices.append(nic)
6138 vmconf = vim.vm.ConfigSpec(deviceChange=devices)
6139 task = vm_obj.ReconfigVM_Task(vmconf)
6140 return task
6141 except Exception as exp:
6142 self.logger.error("Error {} occurred while adding SRIOV adapter in VM: {}".format(exp, vm_obj))
6143 return None
6144
6145
6146 def create_dvPort_group(self, network_name):
6147 """
6148 Method to create disributed virtual portgroup
6149
6150 Args:
6151 network_name - name of network/portgroup
6152
6153 Returns:
6154 portgroup key
6155 """
6156 try:
6157 new_network_name = [network_name, '-', str(uuid.uuid4())]
6158 network_name=''.join(new_network_name)
6159 vcenter_conect, content = self.get_vcenter_content()
6160
6161 dv_switch = self.get_obj(content, [vim.DistributedVirtualSwitch], self.dvs_name)
6162 if dv_switch:
6163 dv_pg_spec = vim.dvs.DistributedVirtualPortgroup.ConfigSpec()
6164 dv_pg_spec.name = network_name
6165
6166 dv_pg_spec.type = vim.dvs.DistributedVirtualPortgroup.PortgroupType.earlyBinding
6167 dv_pg_spec.defaultPortConfig = vim.dvs.VmwareDistributedVirtualSwitch.VmwarePortConfigPolicy()
6168 dv_pg_spec.defaultPortConfig.securityPolicy = vim.dvs.VmwareDistributedVirtualSwitch.SecurityPolicy()
6169 dv_pg_spec.defaultPortConfig.securityPolicy.allowPromiscuous = vim.BoolPolicy(value=False)
6170 dv_pg_spec.defaultPortConfig.securityPolicy.forgedTransmits = vim.BoolPolicy(value=False)
6171 dv_pg_spec.defaultPortConfig.securityPolicy.macChanges = vim.BoolPolicy(value=False)
6172
6173 task = dv_switch.AddDVPortgroup_Task([dv_pg_spec])
6174 self.wait_for_vcenter_task(task, vcenter_conect)
6175
6176 dvPort_group = self.get_obj(content, [vim.dvs.DistributedVirtualPortgroup], network_name)
6177 if dvPort_group:
6178 self.logger.info("Created disributed virtaul port group: {}".format(dvPort_group))
6179 return dvPort_group.key
6180 else:
6181 self.logger.debug("No disributed virtual switch found with name {}".format(network_name))
6182
6183 except Exception as exp:
6184 self.logger.error("Error occurred while creating disributed virtaul port group {}"\
6185 " : {}".format(network_name, exp))
6186 return None
6187
6188 def reconfig_portgroup(self, content, dvPort_group_name , config_info={}):
6189 """
6190 Method to reconfigure disributed virtual portgroup
6191
6192 Args:
6193 dvPort_group_name - name of disributed virtual portgroup
6194 content - vCenter content object
6195 config_info - disributed virtual portgroup configuration
6196
6197 Returns:
6198 task object
6199 """
6200 try:
6201 dvPort_group = self.get_dvport_group(dvPort_group_name)
6202 if dvPort_group:
6203 dv_pg_spec = vim.dvs.DistributedVirtualPortgroup.ConfigSpec()
6204 dv_pg_spec.configVersion = dvPort_group.config.configVersion
6205 dv_pg_spec.defaultPortConfig = vim.dvs.VmwareDistributedVirtualSwitch.VmwarePortConfigPolicy()
6206 if "vlanID" in config_info:
6207 dv_pg_spec.defaultPortConfig.vlan = vim.dvs.VmwareDistributedVirtualSwitch.VlanIdSpec()
6208 dv_pg_spec.defaultPortConfig.vlan.vlanId = config_info.get('vlanID')
6209
6210 task = dvPort_group.ReconfigureDVPortgroup_Task(spec=dv_pg_spec)
6211 return task
6212 else:
6213 return None
6214 except Exception as exp:
6215 self.logger.error("Error occurred while reconfiguraing disributed virtaul port group {}"\
6216 " : {}".format(dvPort_group_name, exp))
6217 return None
6218
6219
6220 def destroy_dvport_group(self , dvPort_group_name):
6221 """
6222 Method to destroy disributed virtual portgroup
6223
6224 Args:
6225 network_name - name of network/portgroup
6226
6227 Returns:
6228 True if portgroup successfully got deleted else false
6229 """
6230 vcenter_conect, content = self.get_vcenter_content()
6231 try:
6232 status = None
6233 dvPort_group = self.get_dvport_group(dvPort_group_name)
6234 if dvPort_group:
6235 task = dvPort_group.Destroy_Task()
6236 status = self.wait_for_vcenter_task(task, vcenter_conect)
6237 return status
6238 except vmodl.MethodFault as exp:
6239 self.logger.error("Caught vmodl fault {} while deleting disributed virtaul port group {}".format(
6240 exp, dvPort_group_name))
6241 return None
6242
6243
6244 def get_dvport_group(self, dvPort_group_name):
6245 """
6246 Method to get disributed virtual portgroup
6247
6248 Args:
6249 network_name - name of network/portgroup
6250
6251 Returns:
6252 portgroup object
6253 """
6254 vcenter_conect, content = self.get_vcenter_content()
6255 dvPort_group = None
6256 try:
6257 container = content.viewManager.CreateContainerView(content.rootFolder, [vim.dvs.DistributedVirtualPortgroup], True)
6258 for item in container.view:
6259 if item.key == dvPort_group_name:
6260 dvPort_group = item
6261 break
6262 return dvPort_group
6263 except vmodl.MethodFault as exp:
6264 self.logger.error("Caught vmodl fault {} for disributed virtaul port group {}".format(
6265 exp, dvPort_group_name))
6266 return None
6267
6268 def get_vlanID_from_dvs_portgr(self, dvPort_group_name):
6269 """
6270 Method to get disributed virtual portgroup vlanID
6271
6272 Args:
6273 network_name - name of network/portgroup
6274
6275 Returns:
6276 vlan ID
6277 """
6278 vlanId = None
6279 try:
6280 dvPort_group = self.get_dvport_group(dvPort_group_name)
6281 if dvPort_group:
6282 vlanId = dvPort_group.config.defaultPortConfig.vlan.vlanId
6283 except vmodl.MethodFault as exp:
6284 self.logger.error("Caught vmodl fault {} for disributed virtaul port group {}".format(
6285 exp, dvPort_group_name))
6286 return vlanId
6287
6288
6289 def configure_vlanID(self, content, vcenter_conect, dvPort_group_name):
6290 """
6291 Method to configure vlanID in disributed virtual portgroup vlanID
6292
6293 Args:
6294 network_name - name of network/portgroup
6295
6296 Returns:
6297 None
6298 """
6299 vlanID = self.get_vlanID_from_dvs_portgr(dvPort_group_name)
6300 if vlanID == 0:
6301 #configure vlanID
6302 vlanID = self.genrate_vlanID(dvPort_group_name)
6303 config = {"vlanID":vlanID}
6304 task = self.reconfig_portgroup(content, dvPort_group_name,
6305 config_info=config)
6306 if task:
6307 status= self.wait_for_vcenter_task(task, vcenter_conect)
6308 if status:
6309 self.logger.info("Reconfigured Port group {} for vlan ID {}".format(
6310 dvPort_group_name,vlanID))
6311 else:
6312 self.logger.error("Fail reconfigure portgroup {} for vlanID{}".format(
6313 dvPort_group_name, vlanID))
6314
6315
6316 def genrate_vlanID(self, network_name):
6317 """
6318 Method to get unused vlanID
6319 Args:
6320 network_name - name of network/portgroup
6321 Returns:
6322 vlanID
6323 """
6324 vlan_id = None
6325 used_ids = []
6326 if self.config.get('vlanID_range') == None:
6327 raise vimconn.vimconnConflictException("You must provide a 'vlanID_range' "\
6328 "at config value before creating sriov network with vlan tag")
6329 if "used_vlanIDs" not in self.persistent_info:
6330 self.persistent_info["used_vlanIDs"] = {}
6331 else:
6332 used_ids = list(self.persistent_info["used_vlanIDs"].values())
6333
6334 for vlanID_range in self.config.get('vlanID_range'):
6335 start_vlanid, end_vlanid = vlanID_range.split("-")
6336 if start_vlanid > end_vlanid:
6337 raise vimconn.vimconnConflictException("Invalid vlan ID range {}".format(
6338 vlanID_range))
6339
6340 for id in range(int(start_vlanid), int(end_vlanid) + 1):
6341 if id not in used_ids:
6342 vlan_id = id
6343 self.persistent_info["used_vlanIDs"][network_name] = vlan_id
6344 return vlan_id
6345 if vlan_id is None:
6346 raise vimconn.vimconnConflictException("All Vlan IDs are in use")
6347
6348
6349 def get_obj(self, content, vimtype, name):
6350 """
6351 Get the vsphere object associated with a given text name
6352 """
6353 obj = None
6354 container = content.viewManager.CreateContainerView(content.rootFolder, vimtype, True)
6355 for item in container.view:
6356 if item.name == name:
6357 obj = item
6358 break
6359 return obj
6360
6361
6362 def insert_media_to_vm(self, vapp, image_id):
6363 """
6364 Method to insert media CD-ROM (ISO image) from catalog to vm.
6365 vapp - vapp object to get vm id
6366 Image_id - image id for cdrom to be inerted to vm
6367 """
6368 # create connection object
6369 vca = self.connect()
6370 try:
6371 # fetching catalog details
6372 rest_url = "{}/api/catalog/{}".format(self.url, image_id)
6373 if vca._session:
6374 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
6375 'x-vcloud-authorization': vca._session.headers['x-vcloud-authorization']}
6376 response = self.perform_request(req_type='GET',
6377 url=rest_url,
6378 headers=headers)
6379
6380 if response.status_code != 200:
6381 self.logger.error("REST call {} failed reason : {}"\
6382 "status code : {}".format(url_rest_call,
6383 response.text,
6384 response.status_code))
6385 raise vimconn.vimconnException("insert_media_to_vm(): Failed to get "\
6386 "catalog details")
6387 # searching iso name and id
6388 iso_name, media_id = self.get_media_details(vca, response.text)
6389
6390 if iso_name and media_id:
6391 data ="""<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
6392 <ns6:MediaInsertOrEjectParams
6393 xmlns="http://www.vmware.com/vcloud/versions" xmlns:ns2="http://schemas.dmtf.org/ovf/envelope/1"
6394 xmlns:ns3="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData"
6395 xmlns:ns4="http://schemas.dmtf.org/wbem/wscim/1/common"
6396 xmlns:ns5="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData"
6397 xmlns:ns6="http://www.vmware.com/vcloud/v1.5"
6398 xmlns:ns7="http://www.vmware.com/schema/ovf"
6399 xmlns:ns8="http://schemas.dmtf.org/ovf/environment/1"
6400 xmlns:ns9="http://www.vmware.com/vcloud/extension/v1.5">
6401 <ns6:Media
6402 type="application/vnd.vmware.vcloud.media+xml"
6403 name="{}"
6404 id="urn:vcloud:media:{}"
6405 href="https://{}/api/media/{}"/>
6406 </ns6:MediaInsertOrEjectParams>""".format(iso_name, media_id,
6407 self.url,media_id)
6408
6409 for vms in vapp.get_all_vms():
6410 vm_id = vms.get('id').split(':')[-1]
6411
6412 headers['Content-Type'] = 'application/vnd.vmware.vcloud.mediaInsertOrEjectParams+xml'
6413 rest_url = "{}/api/vApp/vm-{}/media/action/insertMedia".format(self.url,vm_id)
6414
6415 response = self.perform_request(req_type='POST',
6416 url=rest_url,
6417 data=data,
6418 headers=headers)
6419
6420 if response.status_code != 202:
6421 error_msg = "insert_media_to_vm() : Failed to insert CD-ROM to vm. Reason {}. " \
6422 "Status code {}".format(response.text, response.status_code)
6423 self.logger.error(error_msg)
6424 raise vimconn.vimconnException(error_msg)
6425 else:
6426 task = self.get_task_from_response(response.text)
6427 result = self.client.get_task_monitor().wait_for_success(task=task)
6428 if result.get('status') == 'success':
6429 self.logger.info("insert_media_to_vm(): Sucessfully inserted media ISO"\
6430 " image to vm {}".format(vm_id))
6431
6432 except Exception as exp:
6433 self.logger.error("insert_media_to_vm() : exception occurred "\
6434 "while inserting media CD-ROM")
6435 raise vimconn.vimconnException(message=exp)
6436
6437
6438 def get_media_details(self, vca, content):
6439 """
6440 Method to get catalog item details
6441 vca - connection object
6442 content - Catalog details
6443 Return - Media name, media id
6444 """
6445 cataloghref_list = []
6446 try:
6447 if content:
6448 vm_list_xmlroot = XmlElementTree.fromstring(content)
6449 for child in vm_list_xmlroot.iter():
6450 if 'CatalogItem' in child.tag:
6451 cataloghref_list.append(child.attrib.get('href'))
6452 if cataloghref_list is not None:
6453 for href in cataloghref_list:
6454 if href:
6455 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
6456 'x-vcloud-authorization': vca._session.headers['x-vcloud-authorization']}
6457 response = self.perform_request(req_type='GET',
6458 url=href,
6459 headers=headers)
6460 if response.status_code != 200:
6461 self.logger.error("REST call {} failed reason : {}"\
6462 "status code : {}".format(href,
6463 response.text,
6464 response.status_code))
6465 raise vimconn.vimconnException("get_media_details : Failed to get "\
6466 "catalogitem details")
6467 list_xmlroot = XmlElementTree.fromstring(response.text)
6468 for child in list_xmlroot.iter():
6469 if 'Entity' in child.tag:
6470 if 'media' in child.attrib.get('href'):
6471 name = child.attrib.get('name')
6472 media_id = child.attrib.get('href').split('/').pop()
6473 return name,media_id
6474 else:
6475 self.logger.debug("Media name and id not found")
6476 return False,False
6477 except Exception as exp:
6478 self.logger.error("get_media_details : exception occurred "\
6479 "getting media details")
6480 raise vimconn.vimconnException(message=exp)
6481
6482
6483 def retry_rest(self, method, url, add_headers=None, data=None):
6484 """ Method to get Token & retry respective REST request
6485 Args:
6486 api - REST API - Can be one of 'GET' or 'PUT' or 'POST'
6487 url - request url to be used
6488 add_headers - Additional headers (optional)
6489 data - Request payload data to be passed in request
6490 Returns:
6491 response - Response of request
6492 """
6493 response = None
6494
6495 #Get token
6496 self.get_token()
6497
6498 if self.client._session:
6499 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
6500 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
6501
6502 if add_headers:
6503 headers.update(add_headers)
6504
6505 if method == 'GET':
6506 response = self.perform_request(req_type='GET',
6507 url=url,
6508 headers=headers)
6509 elif method == 'PUT':
6510 response = self.perform_request(req_type='PUT',
6511 url=url,
6512 headers=headers,
6513 data=data)
6514 elif method == 'POST':
6515 response = self.perform_request(req_type='POST',
6516 url=url,
6517 headers=headers,
6518 data=data)
6519 elif method == 'DELETE':
6520 response = self.perform_request(req_type='DELETE',
6521 url=url,
6522 headers=headers)
6523 return response
6524
6525
6526 def get_token(self):
6527 """ Generate a new token if expired
6528
6529 Returns:
6530 The return client object that letter can be used to connect to vCloud director as admin for VDC
6531 """
6532 self.client = self.connect()
6533
6534 def get_vdc_details(self):
6535 """ Get VDC details using pyVcloud Lib
6536
6537 Returns org and vdc object
6538 """
6539 vdc = None
6540 try:
6541 org = Org(self.client, resource=self.client.get_org())
6542 vdc = org.get_vdc(self.tenant_name)
6543 except Exception as e:
6544 # pyvcloud not giving a specific exception, Refresh nevertheless
6545 self.logger.debug("Received exception {}, refreshing token ".format(str(e)))
6546
6547 #Retry once, if failed by refreshing token
6548 if vdc is None:
6549 self.get_token()
6550 org = Org(self.client, resource=self.client.get_org())
6551 vdc = org.get_vdc(self.tenant_name)
6552
6553 return org, vdc
6554
6555
6556 def perform_request(self, req_type, url, headers=None, data=None):
6557 """Perform the POST/PUT/GET/DELETE request."""
6558
6559 #Log REST request details
6560 self.log_request(req_type, url=url, headers=headers, data=data)
6561 # perform request and return its result
6562 if req_type == 'GET':
6563 response = requests.get(url=url,
6564 headers=headers,
6565 verify=False)
6566 elif req_type == 'PUT':
6567 response = requests.put(url=url,
6568 headers=headers,
6569 data=data,
6570 verify=False)
6571 elif req_type == 'POST':
6572 response = requests.post(url=url,
6573 headers=headers,
6574 data=data,
6575 verify=False)
6576 elif req_type == 'DELETE':
6577 response = requests.delete(url=url,
6578 headers=headers,
6579 verify=False)
6580 #Log the REST response
6581 self.log_response(response)
6582
6583 return response
6584
6585
6586 def log_request(self, req_type, url=None, headers=None, data=None):
6587 """Logs REST request details"""
6588
6589 if req_type is not None:
6590 self.logger.debug("Request type: {}".format(req_type))
6591
6592 if url is not None:
6593 self.logger.debug("Request url: {}".format(url))
6594
6595 if headers is not None:
6596 for header in headers:
6597 self.logger.debug("Request header: {}: {}".format(header, headers[header]))
6598
6599 if data is not None:
6600 self.logger.debug("Request data: {}".format(data))
6601
6602
6603 def log_response(self, response):
6604 """Logs REST response details"""
6605
6606 self.logger.debug("Response status code: {} ".format(response.status_code))
6607
6608
6609 def get_task_from_response(self, content):
6610 """
6611 content - API response.text(response.text)
6612 return task object
6613 """
6614 xmlroot = XmlElementTree.fromstring(content)
6615 if xmlroot.tag.split('}')[1] == "Task":
6616 return xmlroot
6617 else:
6618 for ele in xmlroot:
6619 if ele.tag.split("}")[1] == "Tasks":
6620 task = ele[0]
6621 break
6622 return task
6623
6624
6625 def power_on_vapp(self,vapp_id, vapp_name):
6626 """
6627 vapp_id - vApp uuid
6628 vapp_name - vAapp name
6629 return - Task object
6630 """
6631 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
6632 'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
6633
6634 poweron_href = "{}/api/vApp/vapp-{}/power/action/powerOn".format(self.url,
6635 vapp_id)
6636 response = self.perform_request(req_type='POST',
6637 url=poweron_href,
6638 headers=headers)
6639
6640 if response.status_code != 202:
6641 self.logger.error("REST call {} failed reason : {}"\
6642 "status code : {} ".format(poweron_href,
6643 response.text,
6644 response.status_code))
6645 raise vimconn.vimconnException("power_on_vapp() : Failed to power on "\
6646 "vApp {}".format(vapp_name))
6647 else:
6648 poweron_task = self.get_task_from_response(response.text)
6649 return poweron_task
6650
6651